Remove SSH handling methods

* Move fuelweb_test.models.EnvironmentModel.get_admin_remote() to the
  fuel-devops devops.models.Environment class
* Move fuelweb_test.models.EnvironmentModel.get_private_keys() to the
  fuel-devops devops.models.Environment class
* Move fuelweb_test.models.EnvironmentModel.get_ssh_to_remote() to the
  fuel-devops devops.models.Environment class
* Move fuelweb_test.models.EnvironmentModel.get_ssh_to_remote_by_key() to
  the fuel-devops devops.models.Environment class
* Remove fuelweb_test.models.EnvironmentModel.get_ssh_to_remote_by_name();
  use devops.models.Environment.get_ssh_to_remote() instead
* Remove fuelweb_test.models.EnvironmentModel._get_network();
  use devops.models.Environment.get_network() instead
* Remove fuelweb_test.models.EnvironmentModel.get_net_mask();
  use devops.models.Network.netmask instead
* Fix the handling of the admin_net and admin_net2 network names

Related to change: https://review.openstack.org/159375/
Change-Id: I30317f1a9520abf56866ad40a0325fa6010d7b65
Implements: blueprint system-tests-and-devops-refactoring
Ivan Kliuk 2015-02-26 11:47:23 +02:00
parent 179c7e5f64
commit 4a762767c0
24 changed files with 322 additions and 286 deletions
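
In practice the migration is a set of mechanical call-site substitutions.
A minimal before/after sketch (a sketch only, using the names that appear
throughout this diff; env is a fuelweb_test EnvironmentModel instance and
env.d_env its devops environment):

    # Before: SSH helpers lived on fuelweb_test's EnvironmentModel.
    remote = env.get_admin_remote()
    remote = env.get_ssh_to_remote(node['ip'])
    remote = env.get_ssh_to_remote_by_name('slave-01')
    netmask = env.get_net_mask(env.admin_net2)

    # After: the same helpers are reached via the devops environment;
    # name-based lookups resolve the node's IP through Nailgun first.
    remote = env.d_env.get_admin_remote()
    remote = env.d_env.get_ssh_to_remote(node['ip'])
    _ip = env.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
    remote = env.d_env.get_ssh_to_remote(_ip)
    netmask = env.d_env.get_network(name=env.d_env.admin_net2).ip.netmask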

View File

@ -77,7 +77,7 @@ def log_snapshot_on_error(func):
logger.error("Fetching of diagnostic snapshot failed: {0}".
format(traceback.format_exc()))
try:
admin_remote = args[0].env.get_admin_remote()
admin_remote = args[0].env.d_env.get_admin_remote()
pull_out_logs_via_ssh(admin_remote, name)
except:
logger.error("Fetching of raw logs failed: {0}".
@ -116,7 +116,7 @@ def upload_manifests(func):
logger.warning("Can't upload manifests: method of "
"unexpected class is decorated.")
return result
remote = environment.get_admin_remote()
remote = environment.d_env.get_admin_remote()
remote.execute('rm -rf /etc/puppet/modules/*')
remote.upload(settings.UPLOAD_MANIFESTS_PATH,
'/etc/puppet/modules/')
@ -160,7 +160,7 @@ def update_ostf(func):
raise ValueError('REFSPEC should be set for CI tests.')
logger.info("Uploading new patchset from {0}"
.format(settings.GERRIT_REFSPEC))
remote = args[0].environment.get_admin_remote()
remote = args[0].environment.d_env.get_admin_remote()
remote.upload(settings.PATCH_PATH.rstrip('/'),
'/var/www/nailgun/fuel-ostf')
remote.execute('dockerctl shell ostf '
@ -243,7 +243,7 @@ def check_fuel_statistics(func):
fuel_settings = args[0].env.get_fuel_settings()
nailgun_actions = args[0].env.nailgun_actions
postgres_actions = args[0].env.postgres_actions
remote_collector = args[0].env.get_ssh_to_remote_by_key(
remote_collector = args[0].env.d_env.get_ssh_to_remote_by_key(
settings.FUEL_STATS_HOST,
'{0}/.ssh/id_rsa'.format(expanduser("~")))
master_uuid = args[0].env.get_masternode_uuid()

View File

@ -29,13 +29,17 @@ from fuelweb_test import logwrap
@logwrap
def configure_second_admin_cobbler(self):
dhcp_template = '/etc/cobbler/dnsmasq.template'
remote = self.get_admin_remote()
main_admin_ip = str(self.d_env.nodes().admin.
get_ip_address_by_network_name(self.admin_net))
second_admin_ip = str(self.d_env.nodes().admin.
get_ip_address_by_network_name(self.admin_net2))
second_admin_network = self._get_network(self.admin_net2).split('/')[0]
second_admin_netmask = self.get_net_mask(self.admin_net2)
remote = self.d_env.get_admin_remote()
admin_net = self.d_env.admin_net
admin_net2 = self.d_env.admin_net2
main_admin_ip = str(
self.d_env.nodes().admin.get_ip_address_by_network_name(admin_net))
second_admin_ip = str(
self.d_env.nodes().admin.get_ip_address_by_network_name(admin_net2))
admin_net2_object = self.d_env.get_network(name=admin_net2)
second_admin_network = admin_net2_object.ip.ip
second_admin_netmask = admin_net2_object.ip.netmask
network = IPNetwork('{0}/{1}'.format(second_admin_network,
second_admin_netmask))
discovery_subnet = [net for net in network.iter_subnets(1)][-1]
@ -56,7 +60,7 @@ def configure_second_admin_cobbler(self):
@logwrap
def configure_second_admin_firewall(self, network, netmask):
remote = self.get_admin_remote()
remote = self.d_env.get_admin_remote()
# Allow forwarding and correct remote logging
# for nodes from the second admin network
rules = [
@ -71,10 +75,10 @@ def configure_second_admin_firewall(self, network, netmask):
format(network, netmask),
('-I FORWARD -i {0} -o docker0 -p tcp -m state --state NEW -m tcp'
' --dport 514 -m comment --comment "rsyslog-tcp-514-accept" -j '
'ACCEPT').format(settings.INTERFACES.get(self.admin_net2)),
'ACCEPT').format(settings.INTERFACES.get(self.d_env.admin_net2)),
('-I FORWARD -i {0} -o docker0 -p udp -m state --state NEW -m udp'
' --dport 514 -m comment --comment "rsyslog-udp-514-accept" -j '
'ACCEPT').format(settings.INTERFACES.get(self.admin_net2))
'ACCEPT').format(settings.INTERFACES.get(self.d_env.admin_net2))
]
for rule in rules:
@ -93,8 +97,8 @@ def configure_second_admin_firewall(self, network, netmask):
@logwrap
def configure_second_dhcrelay(self):
remote = self.get_admin_remote()
second_admin_if = settings.INTERFACES.get(self.admin_net2)
remote = self.d_env.get_admin_remote()
second_admin_if = settings.INTERFACES.get(self.d_env.admin_net2)
sed_cmd = "/ interface:/a \ interface: {0}".format(second_admin_if)
self.fuel_web.modify_python_file(remote, sed_cmd,
settings.FUEL_SETTINGS_YAML)

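The helpers above stop parsing CIDR strings and read the devops Network
model directly. A short usage sketch (attribute semantics as used in this
diff: .ip behaves as an ipaddr IPNetwork and .ip_network as its CIDR
string; the exact types are fuel-devops internals):

    net = self.d_env.get_network(name=self.d_env.admin_net2)
    network_address = net.ip.ip    # network address, e.g. 10.109.1.0
    netmask = net.ip.netmask       # e.g. 255.255.255.0
    cidr = net.ip_network          # e.g. '10.109.1.0/24'
    # Upper half of the network, as used for the discovery range above:
    discovery_subnet = list(IPNetwork('{0}/{1}'.format(
        network_address, netmask)).iter_subnets(1))[-1]
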
View File

@ -216,7 +216,7 @@ class CustomRepo(object):
total_pkgs = len(self.pkgs_list)
logger.info('Found {0} custom package(s)'.format(total_pkgs))
remote = self.env.get_admin_remote()
remote = self.env.d_env.get_admin_remote()
for npkg, pkg in enumerate(self.pkgs_list):
# TODO: Previous versions of the packages being updated must be removed
# to avoid unwanted package manager dependency resolution
@ -248,7 +248,7 @@ class CustomRepo(object):
def update_yaml(self, yaml_versions):
# Update the corresponding .yaml with the new package version.
for pkg in self.pkgs_list:
remote = self.env.get_admin_remote()
remote = self.env.d_env.get_admin_remote()
result = remote.execute('grep -e "^{0}: " {1}'
''.format(pkg["package:"], yaml_versions))
if result['exit_code'] == 0:
@ -276,7 +276,7 @@ class CustomRepo(object):
# Uploading scripts that prepare local repositories:
# 'regenerate_centos_repo' and 'regenerate_ubuntu_repo'
try:
remote = self.env.get_admin_remote()
remote = self.env.d_env.get_admin_remote()
remote.upload('{0}/{1}'.format(self.path_scripts,
regenerate_script),
self.remote_path_scripts)
@ -321,7 +321,7 @@ class CustomRepo(object):
""" Check puppet-agent.log files on all nodes for package
dependency errors during a cluster deployment (ubuntu)"""
remote = self.env.get_admin_remote()
remote = self.env.d_env.get_admin_remote()
err_start = 'The following packages have unmet dependencies:'
err_end = ('Unable to correct problems,'
@ -364,7 +364,7 @@ class CustomRepo(object):
""" Check puppet-agent.log files on all nodes for package
dependency errors during a cluster deployment (centos)"""
remote = self.env.get_admin_remote()
remote = self.env.d_env.get_admin_remote()
cmd = ('fgrep -h -e "Error: Package: " -e " Requires: " /var/log/'
'docker-logs/remote/node-*/puppet*.log')

View File

@ -31,7 +31,7 @@ class SecurityChecks(object):
@logwrap
def _listen_random_port(self, ip_address, protocol, tmp_file_path):
remote = self.environment.get_ssh_to_remote(ip_address)
remote = self.environment.d_env.get_ssh_to_remote(ip_address)
# Install socat
if OPENSTACK_RELEASE_UBUNTU in OPENSTACK_RELEASE:
cmd = '/usr/bin/apt-get install -y {pkg}'.format(pkg='socat')
@ -83,7 +83,7 @@ class SecurityChecks(object):
@logwrap
def verify_firewall(self, cluster_id):
admin_remote = self.environment.get_admin_remote()
admin_remote = self.environment.d_env.get_admin_remote()
# Install NetCat
if not self.environment.admin_install_pkg('nc') == 0:
raise Exception('Can not install package "nc".')
@ -106,7 +106,7 @@ class SecurityChecks(object):
format(opts=nc_opts, string=check_string, ip=node['ip'],
port=port)
admin_remote.execute(cmd)
remote = self.environment.get_ssh_to_remote(node['ip'])
remote = self.environment.d_env.get_ssh_to_remote(node['ip'])
cmd = 'cat {0}; mv {0}{{,.old}}'.format(tmp_file_path)
result = remote.execute(cmd)
if ''.join(result['stdout']).strip() == check_string:

View File

@ -96,7 +96,8 @@ def store_astute_yaml(env):
nailgun_node = env.fuel_web.get_nailgun_node_by_devops_node(node)
if node.driver.node_active(node) and nailgun_node['roles']:
try:
remote = env.get_ssh_to_remote_by_name(node.name)
_ip = env.fuel_web.get_nailgun_node_by_name(node.name)['ip']
remote = env.d_env.get_ssh_to_remote(_ip)
filename = '{0}/{1}-{2}.yaml'.format(settings.LOGS_DIR,
func_name, node.name)
logger.info("Storing {0}".format(filename))

View File

@ -17,13 +17,9 @@ import yaml
from devops.helpers.helpers import _tcp_ping
from devops.helpers.helpers import _wait
from devops.helpers.helpers import SSHClient
from devops.helpers.helpers import wait
from devops.models import Environment
from ipaddr import IPNetwork
from keystoneclient import exceptions
from paramiko import Agent
from paramiko import RSAKey
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_true
@ -44,16 +40,15 @@ from fuelweb_test import logger
class EnvironmentModel(object):
def __init__(self):
self._virtual_environment = None
self._keys = None
self.fuel_web = FuelWebClient(self.get_admin_node_ip(), self)
@property
def nailgun_actions(self):
return NailgunActions(self.get_admin_remote())
return NailgunActions(self.d_env.get_admin_remote())
@property
def postgres_actions(self):
return PostgresActions(self.get_admin_remote())
return PostgresActions(self.d_env.get_admin_remote())
@property
def admin_node_ip(self):
@ -79,21 +74,10 @@ class EnvironmentModel(object):
wait(lambda: all(self.nailgun_nodes(devops_nodes)), 15, timeout)
for node in self.nailgun_nodes(devops_nodes):
self.sync_node_time(self.get_ssh_to_remote(node["ip"]))
self.sync_node_time(self.d_env.get_ssh_to_remote(node["ip"]))
return self.nailgun_nodes(devops_nodes)
@logwrap
def get_admin_remote(self, login=settings.SSH_CREDENTIALS['login'],
password=settings.SSH_CREDENTIALS['password']):
"""SSH to admin node
:rtype : SSHClient
"""
return self.d_env.nodes().admin.remote(
self.d_env.admin_net,
login=login,
password=password)
@logwrap
def get_admin_node_ip(self):
return str(
@ -110,7 +94,8 @@ class EnvironmentModel(object):
params = {
'ip': node.get_ip_address_by_network_name(
self.d_env.admin_net),
'mask': self.get_net_mask(self.d_env.admin_net),
'mask': self.d_env.get_network(
name=self.d_env.admin_net).ip.netmask,
'gw': self.d_env.router(),
'hostname': '.'.join((self.d_env.hostname,
self.d_env.domain)),
@ -136,42 +121,6 @@ class EnvironmentModel(object):
) % params
return keys
@logwrap
def get_private_keys(self, force=False):
if force or self._keys is None:
self._keys = []
for key_string in ['/root/.ssh/id_rsa',
'/root/.ssh/bootstrap.rsa']:
with self.get_admin_remote().open(key_string) as f:
self._keys.append(RSAKey.from_private_key(f))
return self._keys
@logwrap
def get_ssh_to_remote(self, ip):
return SSHClient(ip,
username=settings.SSH_CREDENTIALS['login'],
password=settings.SSH_CREDENTIALS['password'],
private_keys=self.get_private_keys())
@logwrap
def get_ssh_to_remote_by_key(self, ip, keyfile):
try:
with open(keyfile) as f:
keys = [RSAKey.from_private_key(f)]
return SSHClient(ip, private_keys=keys)
except IOError:
logger.warning('Loading of SSH key from file failed. Trying to use'
' SSH agent ...')
keys = Agent().get_keys()
return SSHClient(ip, private_keys=keys)
@logwrap
def get_ssh_to_remote_by_name(self, node_name):
return self.get_ssh_to_remote(
self.fuel_web.get_nailgun_node_by_devops_node(
self.d_env.get_node(name=node_name))['ip']
)
def get_target_devs(self, devops_nodes):
return [
interface.target_dev for interface in [
@ -188,18 +137,6 @@ class EnvironmentModel(object):
self._virtual_environment.define()
return self._virtual_environment
def _get_network(self, net_name):
return str(
IPNetwork(
self.d_env.get_network(name=net_name).
ip_network))
def get_net_mask(self, net_name):
return str(
IPNetwork(
self.d_env.get_network(
name=net_name).ip_network).netmask)
def make_snapshot(self, snapshot_name, description="", is_make=False):
if settings.MAKE_SNAPSHOT or is_make:
self.d_env.suspend(verbose=False)
@ -268,20 +205,22 @@ class EnvironmentModel(object):
continue
try:
logger.info("Sync time on revert for node %s" % node.name)
_ip = self.fuel_web.get_nailgun_node_by_name(
node.name)['ip']
self.sync_node_time(
self.get_ssh_to_remote_by_name(node.name))
self.d_env.get_ssh_to_remote(_ip))
except Exception as e:
logger.warning(
'Exception caught while trying to sync time on {0}:'
' {1}'.format(node.name, e))
self.run_nailgun_agent(
self.get_ssh_to_remote_by_name(node.name))
_ip = self.fuel_web.get_nailgun_node_by_name(node.name)['ip']
self.run_nailgun_agent(self.d_env.get_ssh_to_remote(_ip))
return True
return False
def set_admin_ssh_password(self):
try:
remote = self.get_admin_remote(
remote = self.d_env.get_admin_remote(
login=settings.SSH_CREDENTIALS['login'],
password=settings.SSH_CREDENTIALS['password'])
self.execute_remote_cmd(remote, 'date')
@ -289,7 +228,8 @@ class EnvironmentModel(object):
except Exception:
logger.debug('Accessing admin node using SSH credentials:'
' FAIL, trying to change password from default')
remote = self.get_admin_remote(login='root', password='r00tme')
remote = self.d_env.get_admin_remote(
login='root', password='r00tme')
self.execute_remote_cmd(
remote, 'echo -e "{1}\\n{1}" | passwd {0}'
.format(settings.SSH_CREDENTIALS['login'],
@ -300,7 +240,7 @@ class EnvironmentModel(object):
settings.SSH_CREDENTIALS['password']))
def set_admin_keystone_password(self):
remote = self.get_admin_remote()
remote = self.d_env.get_admin_remote()
try:
self.fuel_web.client.get_releases()
except exceptions.Unauthorized:
@ -359,7 +299,7 @@ class EnvironmentModel(object):
def setup_customisation(self):
self.wait_for_provisioning()
try:
remote = self.get_admin_remote()
remote = self.d_env.get_admin_remote()
pid = remote.execute("pgrep 'fuelmenu'")['stdout'][0]
pid.rstrip('\n')
remote.execute("kill -sigusr1 {0}".format(pid))
@ -383,7 +323,7 @@ class EnvironmentModel(object):
@logwrap
def sync_time_admin_node(self):
logger.info("Sync time on revert for admin")
remote = self.get_admin_remote()
remote = self.d_env.get_admin_remote()
self.execute_remote_cmd(remote, 'hwclock -s')
# Sync time using ntpd
try:
@ -406,9 +346,10 @@ class EnvironmentModel(object):
logger.info("Master node time: {0}".format(remote_date))
def verify_network_configuration(self, node_name):
node = self.fuel_web.get_nailgun_node_by_name(node_name)
checkers.verify_network_configuration(
node=self.fuel_web.get_nailgun_node_by_name(node_name),
remote=self.get_ssh_to_remote_by_name(node_name)
node=node,
remote=self.d_env.get_ssh_to_remote(node['ip'])
)
def wait_bootstrap(self):
@ -418,19 +359,19 @@ class EnvironmentModel(object):
float(settings.PUPPET_TIMEOUT)))
wait(
lambda: not
self.get_admin_remote().execute(
self.d_env.get_admin_remote().execute(
"grep 'Fuel node deployment' '%s'" % log_path
)['exit_code'],
timeout=(float(settings.PUPPET_TIMEOUT))
)
result = self.get_admin_remote().execute("grep 'Fuel node deployment "
"complete' '%s'" % log_path
)['exit_code']
result = self.d_env.get_admin_remote().execute(
"grep 'Fuel node deployment "
"complete' '%s'" % log_path)['exit_code']
if result != 0:
raise Exception('Fuel node deployment failed.')
def dhcrelay_check(self):
admin_remote = self.get_admin_remote()
admin_remote = self.d_env.get_admin_remote()
out = admin_remote.execute("dhcpcheck discover "
"--ifaces eth0 "
"--repeat 3 "
@ -445,7 +386,7 @@ class EnvironmentModel(object):
def get_fuel_settings(self, remote=None):
if not remote:
remote = self.get_admin_remote()
remote = self.d_env.get_admin_remote()
cmd = 'cat {cfg_file}'.format(cfg_file=settings.FUEL_SETTINGS_YAML)
result = remote.execute(cmd)
if result['exit_code'] == 0:
@ -458,7 +399,7 @@ class EnvironmentModel(object):
def admin_install_pkg(self, pkg_name):
"""Install a package <pkg_name> on the admin node"""
admin_remote = self.get_admin_remote()
admin_remote = self.d_env.get_admin_remote()
remote_status = admin_remote.execute("rpm -q '{0}'".format(pkg_name))
if remote_status['exit_code'] == 0:
logger.info("Package '{0}' already installed.".format(pkg_name))
@ -473,7 +414,7 @@ class EnvironmentModel(object):
def admin_run_service(self, service_name):
"""Start a service <service_name> on the admin node"""
admin_remote = self.get_admin_remote()
admin_remote = self.d_env.get_admin_remote()
admin_remote.execute("service {0} start".format(service_name))
remote_status = admin_remote.execute("service {0} status"
.format(service_name))
@ -491,7 +432,7 @@ class EnvironmentModel(object):
# * adds 'nameservers' at start of resolv.conf if merge=True
# * replaces resolv.conf with 'nameservers' if merge=False
def modify_resolv_conf(self, nameservers=[], merge=True):
remote = self.get_admin_remote()
remote = self.d_env.get_admin_remote()
resolv_conf = remote.execute('cat /etc/resolv.conf')
assert_equal(0, resolv_conf['exit_code'], 'Executing "{0}" on the '
'admin node has failed with: {1}'
@ -520,10 +461,10 @@ class EnvironmentModel(object):
@logwrap
def describe_second_admin_interface(self):
remote = self.get_admin_remote()
second_admin_network = self._get_network(
self.d_env.admin_net2).split('/')[0]
second_admin_netmask = self.get_net_mask(self.d_env.admin_net2)
remote = self.d_env.get_admin_remote()
admin_net2_object = self.d_env.get_network(name=self.d_env.admin_net2)
second_admin_network = admin_net2_object.ip.ip
second_admin_netmask = admin_net2_object.ip.netmask
second_admin_if = settings.INTERFACES.get(self.d_env.admin_net2)
second_admin_ip = str(self.d_env.nodes(
).admin.get_ip_address_by_network_name(self.d_env.admin_net2))

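With get_ssh_to_remote_by_name() removed, every name-based SSH lookup in
the test code below follows the same two-step pattern. A hypothetical
helper capturing it (illustration only; no such helper is added by this
change):

    def get_remote_for_node(env, node_name):
        # Resolve the node's IP through Nailgun, then SSH via devops.
        _ip = env.fuel_web.get_nailgun_node_by_name(node_name)['ip']
        return env.d_env.get_ssh_to_remote(_ip)
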
View File

@ -561,7 +561,7 @@ class FuelWebClient(object):
def get_ssh_for_node(self, node_name):
ip = self.get_nailgun_node_by_devops_node(
self.environment.d_env.get_node(name=node_name))['ip']
return self.environment.get_ssh_to_remote(ip)
return self.environment.d_env.get_ssh_to_remote(ip)
@logwrap
def get_ssh_for_role(self, nodes_dict, role):
@ -888,7 +888,7 @@ class FuelWebClient(object):
def common_net_settings(self, network_configuration):
nc = network_configuration["networking_parameters"]
public = IPNetwork(self.environment._get_network("public"))
public = self.environment.d_env.get_network(name="public").ip
float_range = public if not BONDING else list(public.subnet(27))[0]
nc["floating_ranges"] = self.get_range(float_range, 1)
@ -902,8 +902,8 @@ class FuelWebClient(object):
elif net_name in nets_wo_floating:
self.net_settings(net_config, net_name)
else:
pub_subnets = list(IPNetwork(
self.environment._get_network("public")).subnet(27))
ip_obj = self.environment.d_env.get_network(name="public").ip
pub_subnets = list(ip_obj.subnet(27))
if "floating" == net_name:
self.net_settings(net_config, pub_subnets[0],
floating=True, jbond=True)
@ -927,8 +927,8 @@ class FuelWebClient(object):
elif net_name in 'fuelweb_admin':
self.net_settings(net_config, admin_net)
else:
pub_subnets = list(IPNetwork(
self.environment._get_network(public_net)).subnet(27))
ip_obj = self.environment.d_env.get_network(name=public_net).ip
pub_subnets = list(ip_obj.subnet(27))
if "floating" == net_name:
self.net_settings(net_config, pub_subnets[0],
@ -943,7 +943,8 @@ class FuelWebClient(object):
if jbond:
ip_network = net_name
else:
ip_network = IPNetwork(self.environment._get_network(net_name))
ip_network = self.environment.d_env.get_network(
name=net_name).ip
if 'admin' in net_name:
net_config['ip_ranges'] = self.get_range(ip_network, 2)
@ -973,7 +974,7 @@ class FuelWebClient(object):
def get_floating_ranges(self, network_set=''):
net_name = 'public{0}'.format(network_set)
net = list(IPNetwork(self.environment._get_network(net_name)))
net = list(self.environment.d_env.get_network(name=net_name).ip)
ip_ranges, expected_ips = [], []
for i in [0, -20, -40]:
@ -1022,7 +1023,8 @@ class FuelWebClient(object):
wait(
lambda: self.get_nailgun_node_by_devops_node(node)['online'],
timeout=60 * 10)
remote = self.environment.get_ssh_to_remote_by_name(node.name)
_ip = self.get_nailgun_node_by_name(node.name)['ip']
remote = self.environment.d_env.get_ssh_to_remote(_ip)
try:
self.environment.sync_node_time(remote)
except Exception as e:
@ -1100,7 +1102,8 @@ class FuelWebClient(object):
return ''.join(result['stderr']).strip()
for node_name in node_names:
remote = self.environment.get_ssh_to_remote_by_name(node_name)
_ip = self.get_nailgun_node_by_name(node_name)['ip']
remote = self.environment.d_env.get_ssh_to_remote(_ip)
try:
wait(lambda: _get_galera_status(remote) == 'ON',
timeout=30 * 4)
@ -1116,7 +1119,8 @@ class FuelWebClient(object):
def wait_cinder_is_up(self, node_names):
logger.info("Waiting for all Cinder services up.")
for node_name in node_names:
remote = self.environment.get_ssh_to_remote_by_name(node_name)
_ip = self.get_nailgun_node_by_name(node_name)['ip']
remote = self.environment.d_env.get_ssh_to_remote(_ip)
try:
wait(lambda: checkers.check_cinder_status(remote),
timeout=300)
@ -1172,7 +1176,7 @@ class FuelWebClient(object):
else:
cmd = 'service ceph restart'
for node in ceph_nodes:
remote = self.environment.get_ssh_to_remote(node['ip'])
remote = self.environment.d_env.get_ssh_to_remote(node['ip'])
self.environment.sync_node_time(remote)
result = remote.execute(cmd)
if not result['exit_code'] == 0:
@ -1190,7 +1194,7 @@ class FuelWebClient(object):
logger.info('Waiting until Ceph service become up...')
for node in ceph_nodes:
remote = self.environment.get_ssh_to_remote(node['ip'])
remote = self.environment.d_env.get_ssh_to_remote(node['ip'])
try:
wait(lambda: checkers.check_ceph_ready(remote) is True,
interval=20, timeout=600)
@ -1202,7 +1206,7 @@ class FuelWebClient(object):
logger.info('Ceph service is ready')
logger.info('Checking Ceph Health...')
for node in ceph_nodes:
remote = self.environment.get_ssh_to_remote(node['ip'])
remote = self.environment.d_env.get_ssh_to_remote(node['ip'])
health_status = checkers.get_ceph_health(remote)
if 'HEALTH_OK' in health_status:
continue
@ -1235,7 +1239,7 @@ class FuelWebClient(object):
logger.info('Checking Ceph OSD Tree...')
for node in ceph_nodes:
remote = self.environment.get_ssh_to_remote(node['ip'])
remote = self.environment.d_env.get_ssh_to_remote(node['ip'])
checkers.check_ceph_disks(remote, [n['id'] for n in ceph_nodes])
logger.info('Ceph cluster status is OK')

View File

@ -38,8 +38,8 @@ class PuppetEnvironment(EnvironmentModel):
def execute_cmd(self, command, debug=True):
"""Execute command on node."""
return self.get_admin_remote().execute(command,
verbose=debug)['exit_code']
return self.d_env.get_admin_remote().execute(
command, verbose=debug)['exit_code']
def await(self, timeout=1200):
wait(

View File

@ -53,13 +53,13 @@ class ExamplePlugin(TestBasic):
# copy plugin to the master node
checkers.upload_tarball(
self.env.get_admin_remote(),
self.env.d_env.get_admin_remote(),
EXAMPLE_PLUGIN_PATH, '/var')
# install plugin
checkers.install_plugin_check_code(
self.env.get_admin_remote(),
self.env.d_env.get_admin_remote(),
plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))
segment_type = 'vlan'
@ -95,15 +95,16 @@ class ExamplePlugin(TestBasic):
logger.debug("Start to check service on node {0}".format('slave-01'))
cmd_curl = 'curl localhost:8234'
cmd = 'pgrep -f fuel-simple-service'
res_pgrep = self.env.get_ssh_to_remote_by_name(
'slave-01').execute(cmd)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
res_pgrep = self.env.d_env.get_ssh_to_remote(_ip).execute(cmd)
assert_equal(0, res_pgrep['exit_code'],
'Failed with error {0}'.format(res_pgrep['stderr']))
assert_equal(1, len(res_pgrep['stdout']),
'Failed with error {0}'.format(res_pgrep['stderr']))
# curl to service
res_curl = self.env.get_ssh_to_remote_by_name(
'slave-01').execute(cmd_curl)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
res_curl = self.env.d_env.get_ssh_to_remote(_ip).execute(cmd_curl)
assert_equal(0, res_pgrep['exit_code'],
'Failed with error {0}'.format(res_curl['stderr']))
@ -139,12 +140,12 @@ class ExamplePlugin(TestBasic):
# copy plugin to the master node
checkers.upload_tarball(
self.env.get_admin_remote(), EXAMPLE_PLUGIN_PATH, '/var')
self.env.d_env.get_admin_remote(), EXAMPLE_PLUGIN_PATH, '/var')
# install plugin
checkers.install_plugin_check_code(
self.env.get_admin_remote(),
self.env.d_env.get_admin_remote(),
plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))
cluster_id = self.fuel_web.create_cluster(
@ -176,8 +177,8 @@ class ExamplePlugin(TestBasic):
logger.debug("Start to check service on node {0}".format(node))
cmd_curl = 'curl localhost:8234'
cmd = 'pgrep -f fuel-simple-service'
res_pgrep = self.env.get_ssh_to_remote_by_name(
node).execute(cmd)
_ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
res_pgrep = self.env.d_env.get_ssh_to_remote(_ip).execute(cmd)
assert_equal(0, res_pgrep['exit_code'],
'Failed with error {0} '
'on node {1}'.format(res_pgrep['stderr'], node))
@ -185,8 +186,8 @@ class ExamplePlugin(TestBasic):
'Failed with error {0} on the '
'node {1}'.format(res_pgrep['stderr'], node))
# curl to service
res_curl = self.env.get_ssh_to_remote_by_name(
node).execute(cmd_curl)
_ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
res_curl = self.env.d_env.get_ssh_to_remote(_ip).execute(cmd_curl)
assert_equal(0, res_pgrep['exit_code'],
'Failed with error {0} '
'on node {1}'.format(res_curl['stderr'], node))
@ -226,12 +227,12 @@ class ExamplePlugin(TestBasic):
# copy plugin to the master node
checkers.upload_tarball(
self.env.get_admin_remote(), EXAMPLE_PLUGIN_PATH, '/var')
self.env.d_env.get_admin_remote(), EXAMPLE_PLUGIN_PATH, '/var')
# install plugin
checkers.install_plugin_check_code(
self.env.get_admin_remote(),
self.env.d_env.get_admin_remote(),
plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))
cluster_id = self.fuel_web.create_cluster(
@ -265,15 +266,16 @@ class ExamplePlugin(TestBasic):
logger.debug("Start to check service on node {0}".format('slave-01'))
cmd_curl = 'curl localhost:8234'
cmd = 'pgrep -f fuel-simple-service'
res_pgrep = self.env.get_ssh_to_remote_by_name(
'slave-01').execute(cmd)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
res_pgrep = self.env.d_env.get_ssh_to_remote(_ip).execute(cmd)
assert_equal(0, res_pgrep['exit_code'],
'Failed with error {0}'.format(res_pgrep['stderr']))
assert_equal(1, len(res_pgrep['stdout']),
'Failed with error {0}'.format(res_pgrep['stderr']))
# curl to service
res_curl = self.env.get_ssh_to_remote_by_name(
'slave-01').execute(cmd_curl)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
res_curl = self.env.d_env.get_ssh_to_remote(_ip).execute(cmd_curl)
assert_equal(0, res_pgrep['exit_code'],
'Failed with error {0}'.format(res_curl['stderr']))
@ -291,8 +293,9 @@ class ExamplePlugin(TestBasic):
logger.debug("Start to check service on node {0}".format(node))
cmd_curl = 'curl localhost:8234'
cmd = 'pgrep -f fuel-simple-service'
res_pgrep = self.env.get_ssh_to_remote_by_name(
node).execute(cmd)
_ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
res_pgrep = self.env.d_env.get_ssh_to_remote(_ip).execute(cmd)
assert_equal(0, res_pgrep['exit_code'],
'Failed with error {0} '
'on node {1}'.format(res_pgrep['stderr'], node))
@ -300,8 +303,8 @@ class ExamplePlugin(TestBasic):
'Failed with error {0} on the '
'node {1}'.format(res_pgrep['stderr'], node))
# curl to service
res_curl = self.env.get_ssh_to_remote_by_name(
node).execute(cmd_curl)
_ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
res_curl = self.env.d_env.get_ssh_to_remote(_ip).execute(cmd_curl)
assert_equal(0, res_pgrep['exit_code'],
'Failed with error {0} '
'on node {1}'.format(res_curl['stderr'], node))

View File

@ -68,12 +68,12 @@ class GlusterfsPlugin(TestBasic):
# copy plugin to the master node
checkers.upload_tarball(
self.env.get_admin_remote(), GLUSTER_PLUGIN_PATH, '/var')
self.env.d_env.get_admin_remote(), GLUSTER_PLUGIN_PATH, '/var')
# install plugin
checkers.install_plugin_check_code(
self.env.get_admin_remote(),
self.env.d_env.get_admin_remote(),
plugin=os.path.basename(GLUSTER_PLUGIN_PATH))
settings = None
@ -110,8 +110,9 @@ class GlusterfsPlugin(TestBasic):
self.fuel_web.deploy_cluster_wait(cluster_id)
for node in ('slave-01', 'slave-03'):
_ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
self.check_glusterfs_conf(
remote=self.env.get_ssh_to_remote_by_name(node),
remote=self.env.d_env.get_ssh_to_remote(_ip),
path='/etc/cinder/glusterfs',
gfs_endpoint=GLUSTER_CLUSTER_ENDPOINT)
@ -153,12 +154,12 @@ class GlusterfsPlugin(TestBasic):
# copy plugin to the master node
checkers.upload_tarball(
self.env.get_admin_remote(), GLUSTER_PLUGIN_PATH, '/var')
self.env.d_env.get_admin_remote(), GLUSTER_PLUGIN_PATH, '/var')
# install plugin
checkers.install_plugin_check_code(
self.env.get_admin_remote(),
self.env.d_env.get_admin_remote(),
plugin=os.path.basename(GLUSTER_PLUGIN_PATH))
settings = None
@ -194,8 +195,9 @@ class GlusterfsPlugin(TestBasic):
)
self.fuel_web.deploy_cluster_wait(cluster_id)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-03")['ip']
self.check_glusterfs_conf(
remote=self.env.get_ssh_to_remote_by_name('slave-03'),
remote=self.env.d_env.get_ssh_to_remote(_ip),
path='/etc/cinder/glusterfs',
gfs_endpoint=GLUSTER_CLUSTER_ENDPOINT)
@ -215,8 +217,9 @@ class GlusterfsPlugin(TestBasic):
self.fuel_web.deploy_cluster_wait(cluster_id)
for node in ('slave-03', 'slave-04', 'slave-05'):
_ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
self.check_glusterfs_conf(
remote=self.env.get_ssh_to_remote_by_name(node),
remote=self.env.d_env.get_ssh_to_remote(_ip),
path='/etc/cinder/glusterfs',
gfs_endpoint=GLUSTER_CLUSTER_ENDPOINT)

View File

@ -114,12 +114,12 @@ class LbaasPlugin(TestBasic):
# copy plugin to the master node
checkers.upload_tarball(
self.env.get_admin_remote(), LBAAS_PLUGIN_PATH, '/var')
self.env.d_env.get_admin_remote(), LBAAS_PLUGIN_PATH, '/var')
# install plugin
checkers.install_plugin_check_code(
self.env.get_admin_remote(),
self.env.d_env.get_admin_remote(),
plugin=os.path.basename(LBAAS_PLUGIN_PATH))
cluster_id = self.fuel_web.create_cluster(
@ -198,12 +198,12 @@ class LbaasPlugin(TestBasic):
# copy plugin to the master node
checkers.upload_tarball(
self.env.get_admin_remote(), LBAAS_PLUGIN_PATH, '/var')
self.env.d_env.get_admin_remote(), LBAAS_PLUGIN_PATH, '/var')
# install plugin
checkers.install_plugin_check_code(
self.env.get_admin_remote(),
self.env.d_env.get_admin_remote(),
plugin=os.path.basename(LBAAS_PLUGIN_PATH))
cluster_id = self.fuel_web.create_cluster(

View File

@ -80,7 +80,8 @@ class TestAdminNode(TestBasic):
if OPENSTACK_RELEASE_CENTOS not in OPENSTACK_RELEASE:
raise SkipTest()
self.env.revert_snapshot("empty")
ps_output = self.env.get_admin_remote().execute('ps ax')['stdout']
ps_output = self.env.d_env.get_admin_remote().execute(
'ps ax')['stdout']
astute_master = filter(lambda x: 'astute master' in x, ps_output)
logger.info("Found astute processes: %s" % astute_master)
assert_equal(len(astute_master), 1)
@ -110,12 +111,13 @@ class TestAdminNodeBackupRestore(TestBasic):
"""
self.env.revert_snapshot("empty")
self.fuel_web.backup_master(self.env.get_admin_remote())
checkers.backup_check(self.env.get_admin_remote())
self.fuel_web.restore_master(self.env.get_admin_remote())
self.fuel_web.restore_check_nailgun_api(self.env.get_admin_remote())
checkers.restore_check_sum(self.env.get_admin_remote())
checkers.iptables_check(self.env.get_admin_remote())
self.fuel_web.backup_master(self.env.d_env.get_admin_remote())
checkers.backup_check(self.env.d_env.get_admin_remote())
self.fuel_web.restore_master(self.env.d_env.get_admin_remote())
self.fuel_web.restore_check_nailgun_api(
self.env.d_env.get_admin_remote())
checkers.restore_check_sum(self.env.d_env.get_admin_remote())
checkers.iptables_check(self.env.d_env.get_admin_remote())
@test(groups=["setup_master_custom"])

View File

@ -391,9 +391,10 @@ class VmBackedWithCephMigrationBasic(TestBasic):
time.sleep(100)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
md5before = os.get_md5sum(
"/home/test_file",
self.env.get_ssh_to_remote_by_name("slave-01"),
self.env.d_env.get_ssh_to_remote(_ip),
floating_ip.ip, creds)
logger.info("Get available computes")
@ -403,9 +404,10 @@ class VmBackedWithCephMigrationBasic(TestBasic):
new_srv = os.migrate_server(srv, avail_hosts[0], timeout=200)
logger.info("Check cluster and server state after migration")
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
md5after = os.get_md5sum(
"/home/test_file",
self.env.get_ssh_to_remote_by_name("slave-01"),
self.env.d_env.get_ssh_to_remote(_ip),
floating_ip.ip, creds)
assert_true(
@ -414,8 +416,9 @@ class VmBackedWithCephMigrationBasic(TestBasic):
"Before migration md5 was equal to: {bef}"
"Now it eqals: {aft}".format(bef=md5before, aft=md5after))
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
res = os.execute_through_host(
self.env.get_ssh_to_remote_by_name("slave-01"),
self.env.d_env.get_ssh_to_remote(_ip),
floating_ip.ip, "ping -q -c3 -w10 %s | grep 'received' |"
" grep -v '0 packets received'", creds)
logger.info("Ping 8.8.8.8 result on vm is: %s" % res)
@ -450,12 +453,14 @@ class VmBackedWithCephMigrationBasic(TestBasic):
time.sleep(100)
logger.info("Create filesystem and mount volume")
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
os.execute_through_host(
self.env.get_ssh_to_remote_by_name('slave-01'),
self.env.d_env.get_ssh_to_remote(_ip),
floating_ip.ip, 'sudo sh /home/mount_volume.sh', creds)
_ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
os.execute_through_host(
self.env.get_ssh_to_remote_by_name('slave-01'),
self.env.d_env.get_ssh_to_remote(_ip),
floating_ip.ip, 'sudo touch /mnt/file-on-volume', creds)
logger.info("Get available computes")
@ -466,14 +471,16 @@ class VmBackedWithCephMigrationBasic(TestBasic):
logger.info("Check cluster and server state after migration")
logger.info("Mount volume after migration")
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
out = os.execute_through_host(
self.env.get_ssh_to_remote_by_name('slave-01'),
self.env.d_env.get_ssh_to_remote(_ip),
floating_ip.ip, 'sudo mount /dev/vdb /mnt', creds)
logger.info("out of mounting volume is: %s" % out)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
assert_true("file-on-volume" in os.execute_through_host(
self.env.get_ssh_to_remote_by_name('slave-01'),
self.env.d_env.get_ssh_to_remote(_ip),
floating_ip.ip, "sudo ls /mnt", creds),
"File is abscent in /mnt")
@ -548,8 +555,9 @@ class CheckCephPartitionsAfterReboot(TestBasic):
self.fuel_web.deploy_cluster_wait(cluster_id)
for node in ["slave-02", "slave-03"]:
logger.info("Get partitions for {node}".format(node=node))
_ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
before_reboot_partitions = [checkers.get_ceph_partitions(
self.env.get_ssh_to_remote_by_name(node),
self.env.d_env.get_ssh_to_remote(_ip),
"/dev/vd{p}".format(p=part)) for part in ["b", "c"]]
logger.info("Warm-restart nodes")
@ -559,8 +567,9 @@ class CheckCephPartitionsAfterReboot(TestBasic):
logger.info("Get partitions for {node} once again".format(
node=node
))
_ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
after_reboot_partitions = [checkers.get_ceph_partitions(
self.env.get_ssh_to_remote_by_name(node),
self.env.d_env.get_ssh_to_remote(_ip),
"/dev/vd{p}".format(p=part)) for part in ["b", "c"]]
if before_reboot_partitions != after_reboot_partitions:
@ -576,8 +585,9 @@ class CheckCephPartitionsAfterReboot(TestBasic):
self.fuel_web.cold_restart_nodes(
[self.fuel_web.environment.d_env.get_node(name=node)])
_ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
after_reboot_partitions = [checkers.get_ceph_partitions(
self.env.get_ssh_to_remote_by_name(node),
self.env.d_env.get_ssh_to_remote(_ip),
"/dev/vd{p}".format(p=part)) for part in ["b", "c"]]
if before_reboot_partitions != after_reboot_partitions:

View File

@ -87,15 +87,18 @@ class TestHaVLAN(TestBasic):
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=16, networks_count=8, timeout=300)
_ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
self.fuel_web.check_fixed_nova_splited_cidr(
os_conn, self.fuel_web.get_nailgun_cidr_nova(cluster_id),
self.env.get_ssh_to_remote_by_name('slave-01'))
self.env.d_env.get_ssh_to_remote(_ip))
self.fuel_web.verify_network(cluster_id)
devops_node = self.fuel_web.get_nailgun_primary_controller(
self.env.d_env.nodes().slaves[0])
logger.debug("devops node name is {0}".format(devops_node.name))
remote = self.env.get_ssh_to_remote_by_name(devops_node.name)
_ip = self.fuel_web.get_nailgun_node_by_name(devops_node.name)['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
checkers.check_swift_ring(remote)
self.fuel_web.run_ostf(
@ -163,7 +166,9 @@ class TestHaFlat(TestBasic):
devops_node = self.fuel_web.get_nailgun_primary_controller(
self.env.d_env.nodes().slaves[0])
logger.debug("devops node name is {0}".format(devops_node.name))
remote = self.env.get_ssh_to_remote_by_name(devops_node.name)
_ip = self.fuel_web.get_nailgun_node_by_name(devops_node.name)['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
checkers.check_swift_ring(remote)
self.fuel_web.security.verify_firewall(cluster_id)
@ -279,7 +284,9 @@ class TestHaFlatScalability(TestBasic):
devops_node = self.fuel_web.get_nailgun_primary_controller(
self.env.d_env.nodes().slaves[0])
logger.debug("devops node name is {0}".format(devops_node.name))
remote = self.env.get_ssh_to_remote_by_name(devops_node.name)
_ip = self.fuel_web.get_nailgun_node_by_name(devops_node.name)['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
checkers.check_swift_ring(remote)
self.fuel_web.update_nodes(
@ -296,7 +303,9 @@ class TestHaFlatScalability(TestBasic):
devops_node = self.fuel_web.get_nailgun_primary_controller(
self.env.d_env.nodes().slaves[0])
logger.debug("devops node name is {0}".format(devops_node.name))
remote = self.env.get_ssh_to_remote_by_name(devops_node.name)
_ip = self.fuel_web.get_nailgun_node_by_name(devops_node.name)['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
checkers.check_swift_ring(remote)
self.fuel_web.update_nodes(
@ -320,8 +329,10 @@ class TestHaFlatScalability(TestBasic):
self.fuel_web.security.verify_firewall(cluster_id)
devops_node = self.fuel_web.get_nailgun_primary_controller(
self.env.d_env.nodes().slaves[0])
logger.debug("devops node name is {0}".format(devops_node.name))
remote = self.env.get_ssh_to_remote_by_name(devops_node.name)
logger.debug("devops node name is {0}".format(devops_node.name))\
_ip = self.fuel_web.get_nailgun_node_by_name(devops_node.name)['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
checkers.check_swift_ring(remote)
self.fuel_web.run_ostf(
@ -361,8 +372,8 @@ class BackupRestoreHa(TestBasic):
'novaHaFlat', 'novaHaFlat', 'novaHaFlat')
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=16, networks_count=1, timeout=300)
self.fuel_web.backup_master(self.env.get_admin_remote())
checkers.backup_check(self.env.get_admin_remote())
self.fuel_web.backup_master(self.env.d_env.get_admin_remote())
checkers.backup_check(self.env.d_env.get_admin_remote())
self.env.bootstrap_nodes(
self.env.d_env.nodes().slaves[5:6])
self.fuel_web.update_nodes(
@ -372,10 +383,11 @@ class BackupRestoreHa(TestBasic):
assert_equal(
6, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
self.fuel_web.restore_master(self.env.get_admin_remote())
checkers.restore_check_sum(self.env.get_admin_remote())
self.fuel_web.restore_check_nailgun_api(self.env.get_admin_remote())
checkers.iptables_check(self.env.get_admin_remote())
self.fuel_web.restore_master(self.env.d_env.get_admin_remote())
checkers.restore_check_sum(self.env.d_env.get_admin_remote())
self.fuel_web.restore_check_nailgun_api(
self.env.d_env.get_admin_remote())
checkers.iptables_check(self.env.d_env.get_admin_remote())
assert_equal(
5, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))

View File

@ -125,8 +125,10 @@ class HAOneControllerFlat(TestBasic):
data['user'], data['password'], data['tenant'])
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=6, networks_count=1, timeout=300)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
self.fuel_web.check_fixed_network_cidr(
cluster_id, self.env.get_ssh_to_remote_by_name('slave-01'))
cluster_id, self.env.d_env.get_ssh_to_remote(_ip))
self.fuel_web.verify_network(cluster_id)
@ -163,7 +165,8 @@ class HAOneControllerFlat(TestBasic):
self.fuel_web.get_public_vip(cluster_id),
data['user'], data['password'], data['tenant'])
remote = self.env.get_ssh_to_remote_by_name('slave-01')
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
remote.execute("echo 'Hello World' > /root/test.txt")
server_files = {"/root/test.txt": 'Hello World'}
instance = os.create_server_for_migration(file=server_files)
@ -416,7 +419,8 @@ class HAOneControllerVlan(TestBasic):
self.fuel_web.run_ostf(cluster_id=cluster_id)
remote = self.env.get_ssh_to_remote_by_name('slave-03')
_ip = self.fuel_web.get_nailgun_node_by_name("slave-03")['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
result = remote.execute('readlink /etc/astute.yaml')['stdout']
@ -622,8 +626,9 @@ class HAOneControllerCinder(TestBasic):
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=6, networks_count=1, timeout=300)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
self.fuel_web.check_fixed_network_cidr(
cluster_id, self.env.get_ssh_to_remote_by_name('slave-01'))
cluster_id, self.env.d_env.get_ssh_to_remote(_ip))
self.fuel_web.verify_network(cluster_id)
self.env.verify_network_configuration("slave-01")
@ -974,8 +979,8 @@ class BackupRestoreHAOneController(TestBasic):
'novaSimpleFlat', 'novaSimpleFlat', 'novaSimpleFlat')
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=6, networks_count=1, timeout=300)
self.fuel_web.backup_master(self.env.get_admin_remote())
checkers.backup_check(self.env.get_admin_remote())
self.fuel_web.backup_master(self.env.d_env.get_admin_remote())
checkers.backup_check(self.env.d_env.get_admin_remote())
self.fuel_web.update_nodes(
cluster_id, {'slave-03': ['compute']}, True, False)
@ -983,10 +988,11 @@ class BackupRestoreHAOneController(TestBasic):
assert_equal(
3, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))
self.fuel_web.restore_master(self.env.get_admin_remote())
checkers.restore_check_sum(self.env.get_admin_remote())
self.fuel_web.restore_check_nailgun_api(self.env.get_admin_remote())
checkers.iptables_check(self.env.get_admin_remote())
self.fuel_web.restore_master(self.env.d_env.get_admin_remote())
checkers.restore_check_sum(self.env.d_env.get_admin_remote())
self.fuel_web.restore_check_nailgun_api(
self.env.d_env.get_admin_remote())
checkers.iptables_check(self.env.d_env.get_admin_remote())
assert_equal(
2, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))

View File

@ -47,8 +47,13 @@ class TestMultipleClusterNets(TestBasic):
self.env.revert_snapshot("ready_with_5_slaves")
# Get network parts of IP addresses with /24 netmask
networks = ['.'.join(self.env._get_network(n).split('.')[0:-1]) for n
in [self.env.admin_net, self.env.admin_net2]]
admin_net = self.env.d_env.admin_net
admin_net2 = self.env.d_env.admin_net2
get_network = lambda x: self.env.d_env.get_network(name=x).ip_network
# This should be refactored
networks = ['.'.join(get_network(n).split('.')[0:-1])
for n in [admin_net, admin_net2]]
nodes_addresses = ['.'.join(node['ip'].split('.')[0:-1]) for node in
self.fuel_web.client.list_nodes()]
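
The prefix comparison above keeps only the first three octets, so a /24
network and a node address compare equal exactly when the node sits in
that network. A tiny sketch of the trick (assuming ip_network is a CIDR
string such as '10.109.0.0/24'; the slice discards the last element,
including the '/24' suffix):

    prefix = lambda addr: '.'.join(addr.split('.')[0:-1])
    prefix('10.109.0.0/24')   # -> '10.109.0'
    prefix('10.109.0.3')      # -> '10.109.0'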

View File

@ -73,8 +73,9 @@ class NeutronGre(TestBasic):
cluster = self.fuel_web.client.get_cluster(cluster_id)
assert_equal(str(cluster['net_provider']), 'neutron')
# assert_equal(str(cluster['net_segment_type']), segment_type)
_ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
self.fuel_web.check_fixed_network_cidr(
cluster_id, self.env.get_ssh_to_remote_by_name('slave-01'))
cluster_id, self.env.d_env.get_ssh_to_remote(_ip))
self.fuel_web.verify_network(cluster_id)
@ -195,7 +196,8 @@ class NeutronGreHa(TestBasic):
devops_node = self.fuel_web.get_nailgun_primary_controller(
self.env.d_env.nodes().slaves[0])
logger.debug("devops node name is {0}".format(devops_node.name))
remote = self.env.get_ssh_to_remote_by_name(devops_node.name)
_ip = self.fuel_web.get_nailgun_node_by_name(devops_node.name)['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
checkers.check_swift_ring(remote)
self.fuel_web.run_ostf(
@ -317,14 +319,16 @@ class NeutronVlanHa(TestBasic):
cluster = self.fuel_web.client.get_cluster(cluster_id)
assert_equal(str(cluster['net_provider']), 'neutron')
# assert_equal(str(cluster['net_segment_type']), segment_type)
_ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
self.fuel_web.check_fixed_network_cidr(
cluster_id, self.env.get_ssh_to_remote_by_name('slave-01'))
cluster_id, self.env.d_env.get_ssh_to_remote(_ip))
self.fuel_web.verify_network(cluster_id)
devops_node = self.fuel_web.get_nailgun_primary_controller(
self.env.d_env.nodes().slaves[0])
logger.debug("devops node name is {0}".format(devops_node.name))
remote = self.env.get_ssh_to_remote_by_name(devops_node.name)
_ip = self.fuel_web.get_nailgun_node_by_name(devops_node.name)['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
checkers.check_swift_ring(remote)
self.fuel_web.run_ostf(
@ -386,8 +390,9 @@ class NeutronVlanHaPublicNetwork(TestBasic):
cluster = self.fuel_web.client.get_cluster(cluster_id)
assert_equal(str(cluster['net_provider']), 'neutron')
# assert_equal(str(cluster['net_segment_type']), segment_type)
_ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
self.fuel_web.check_fixed_network_cidr(
cluster_id, self.env.get_ssh_to_remote_by_name('slave-01'))
cluster_id, self.env.d_env.get_ssh_to_remote(_ip))
self.fuel_web.verify_network(cluster_id)

View File

@ -96,8 +96,9 @@ class SaharaHAOneController(TestBasic):
os_conn, smiles_count=5, networks_count=2, timeout=300)
LOGGER.debug('Verify Sahara service on controller')
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
checkers.verify_service(
self.env.get_ssh_to_remote_by_name("slave-01"),
self.env.d_env.get_ssh_to_remote(_ip),
service_name='sahara-all')
LOGGER.debug('Run all sanity and smoke tests')
@ -201,8 +202,9 @@ class SaharaHA(TestBasic):
LOGGER.debug('Verify Sahara service on all controllers')
for slave in ["slave-01", "slave-02", "slave-03"]:
_ip = self.fuel_web.get_nailgun_node_by_name(slave)['ip']
checkers.verify_service(
self.env.get_ssh_to_remote_by_name(slave),
self.env.d_env.get_ssh_to_remote(_ip),
service_name='sahara-all')
LOGGER.debug('Run all sanity and smoke tests')
@ -301,8 +303,9 @@ class MuranoHAOneController(TestBasic):
data['user'], data['password'], data['tenant'])
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=5, networks_count=2, timeout=300)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
checkers.verify_service(
self.env.get_ssh_to_remote_by_name("slave-01"),
self.env.d_env.get_ssh_to_remote(_ip),
service_name='murano-api')
common_func = Common(self.fuel_web.get_public_vip(cluster_id),
@ -424,8 +427,9 @@ class MuranoHA(TestBasic):
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=13, networks_count=2, timeout=300)
for slave in ["slave-01", "slave-02", "slave-03"]:
_ip = self.fuel_web.get_nailgun_node_by_name(slave)['ip']
checkers.verify_service(
self.env.get_ssh_to_remote_by_name(slave),
self.env.d_env.get_ssh_to_remote(_ip),
service_name='murano-api')
common_func = Common(cluster_vip, data['user'], data['password'],
@ -588,12 +592,14 @@ class CeilometerHAOneControllerMongo(OSTFCeilometerHelper):
self.fuel_web.deploy_cluster_wait(cluster_id)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
checkers.verify_service(
self.env.get_ssh_to_remote_by_name("slave-01"),
self.env.d_env.get_ssh_to_remote(_ip),
service_name='ceilometer-api')
_ip = self.fuel_web.get_nailgun_node_by_name("slave-03")['ip']
partitions = checkers.get_mongo_partitions(
self.env.get_ssh_to_remote_by_name("slave-03"), "vda5")
self.env.d_env.get_ssh_to_remote("slave-03"), "vda5")
assert_equal(partitions[0].rstrip(), mongo_disk_gb,
'Mongo size {0} before deployment is not equal'
' to size after {1}'.format(mongo_disk_gb, partitions))
@ -639,8 +645,9 @@ class CeilometerHAOneControllerMongo(OSTFCeilometerHelper):
)
self.fuel_web.deploy_cluster_wait(cluster_id)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
checkers.verify_service(
self.env.get_ssh_to_remote_by_name("slave-01"),
self.env.d_env.get_ssh_to_remote(_ip),
service_name='ceilometer-api')
self.run_tests(cluster_id)
@ -693,8 +700,9 @@ class CeilometerHAMongo(OSTFCeilometerHelper):
)
self.fuel_web.deploy_cluster_wait(cluster_id)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
checkers.verify_service(
self.env.get_ssh_to_remote_by_name("slave-01"),
self.env.d_env.get_ssh_to_remote(_ip),
service_name='ceilometer-api')
self.run_tests(cluster_id)
@ -740,8 +748,9 @@ class CeilometerHAMongo(OSTFCeilometerHelper):
)
self.fuel_web.deploy_cluster_wait(cluster_id)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
checkers.verify_service(
self.env.get_ssh_to_remote_by_name("slave-01"),
self.env.d_env.get_ssh_to_remote(_ip),
service_name='ceilometer-api')
self.run_tests(cluster_id)
@ -803,12 +812,14 @@ class HeatHAOneController(TestBasic):
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=5, networks_count=2, timeout=300)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
checkers.verify_service(
self.env.get_ssh_to_remote_by_name("slave-01"),
self.env.d_env.get_ssh_to_remote(_ip),
service_name='heat-api', count=3)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
checkers.verify_service(
self.env.get_ssh_to_remote_by_name("slave-01"),
self.env.d_env.get_ssh_to_remote(_ip),
service_name='ceilometer-api')
LOGGER.debug('Run Heat OSTF platform tests')
@ -882,12 +893,14 @@ class HeatHAOneController(TestBasic):
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=6, networks_count=1, timeout=300)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
checkers.verify_service(
self.env.get_ssh_to_remote_by_name("slave-01"),
self.env.d_env.get_ssh_to_remote(_ip),
service_name='heat-api', count=3)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
checkers.verify_service(
self.env.get_ssh_to_remote_by_name("slave-01"),
self.env.d_env.get_ssh_to_remote(_ip),
service_name='ceilometer-api')
LOGGER.debug('Run Heat OSTF platform tests')
@ -973,12 +986,14 @@ class HeatHA(TestBasic):
os_conn, smiles_count=13, networks_count=2, timeout=300)
for slave in ["slave-01", "slave-02", "slave-03"]:
_ip = self.fuel_web.get_nailgun_node_by_name(slave)['ip']
checkers.verify_service(
self.env.get_ssh_to_remote_by_name(slave),
self.env.d_env.get_ssh_to_remote(_ip),
service_name='heat-api', count=3)
_ip = self.fuel_web.get_nailgun_node_by_name(slave)['ip']
checkers.verify_service(
self.env.get_ssh_to_remote_by_name(slave),
self.env.d_env.get_ssh_to_remote(_ip),
service_name='ceilometer-api')
LOGGER.debug('Run Heat OSTF platform tests')

View File

@ -78,24 +78,26 @@ class UpgradeFuelMaster(base_test_data.TestBasic):
self.env.revert_snapshot("ceph_multinode_compact")
cluster_id = self.fuel_web.get_last_created_cluster()
remote = self.env.get_ssh_to_remote_by_name('slave-01')
_ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
expected_kernel = self.get_slave_kernel(remote)
checkers.upload_tarball(self.env.get_admin_remote(),
checkers.upload_tarball(self.env.d_env.get_admin_remote(),
hlp_data.TARBALL_PATH, '/var')
checkers.check_tarball_exists(self.env.get_admin_remote(),
checkers.check_tarball_exists(self.env.d_env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH),
'/var')
checkers.untar(self.env.get_admin_remote(),
checkers.untar(self.env.d_env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH), '/var')
checkers.run_script(self.env.get_admin_remote(), '/var',
checkers.run_script(self.env.d_env.get_admin_remote(), '/var',
'upgrade.sh', password=
hlp_data.KEYSTONE_CREDS['password'])
checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 3000,
checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
phrase='*** UPGRADE DONE SUCCESSFULLY')
checkers.check_upgraded_containers(self.env.get_admin_remote(),
checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
hlp_data.UPGRADE_FUEL_FROM,
hlp_data.UPGRADE_FUEL_TO)
self.fuel_web.assert_nodes_in_ready_state(cluster_id)
@ -117,7 +119,8 @@ class UpgradeFuelMaster(base_test_data.TestBasic):
os_conn, smiles_count=10, networks_count=1, timeout=300)
self.fuel_web.run_ostf(cluster_id=cluster_id)
if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
remote = self.env.get_ssh_to_remote_by_name('slave-04')
_ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
kernel = self.get_slave_kernel(remote)
checkers.check_kernel(kernel, expected_kernel)
create_diagnostic_snapshot(
@ -148,21 +151,21 @@ class UpgradeFuelMaster(base_test_data.TestBasic):
self.env.revert_snapshot("ceph_multinode_compact")
cluster_id = self.fuel_web.get_last_created_cluster()
checkers.upload_tarball(self.env.get_admin_remote(),
checkers.upload_tarball(self.env.d_env.get_admin_remote(),
hlp_data.TARBALL_PATH, '/var')
checkers.check_tarball_exists(self.env.get_admin_remote(),
checkers.check_tarball_exists(self.env.d_env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH),
'/var')
checkers.untar(self.env.get_admin_remote(),
checkers.untar(self.env.d_env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH), '/var')
checkers.run_script(self.env.get_admin_remote(), '/var',
checkers.run_script(self.env.d_env.get_admin_remote(), '/var',
'upgrade.sh', password=
hlp_data.KEYSTONE_CREDS['password'])
checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 3000,
checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
phrase='*** UPGRADE DONE SUCCESSFULLY')
checkers.check_upgraded_containers(self.env.get_admin_remote(),
checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
hlp_data.UPGRADE_FUEL_FROM,
hlp_data.UPGRADE_FUEL_TO)
self.fuel_web.assert_nodes_in_ready_state(cluster_id)
@ -204,21 +207,21 @@ class UpgradeFuelMaster(base_test_data.TestBasic):
cluster_id = self.fuel_web.get_last_created_cluster()
available_releases_before = self.fuel_web.get_releases_list_for_os(
release_name=hlp_data.OPENSTACK_RELEASE)
checkers.upload_tarball(self.env.get_admin_remote(),
checkers.upload_tarball(self.env.d_env.get_admin_remote(),
hlp_data.TARBALL_PATH, '/var')
checkers.check_tarball_exists(self.env.get_admin_remote(),
checkers.check_tarball_exists(self.env.d_env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH),
'/var')
checkers.untar(self.env.get_admin_remote(),
checkers.untar(self.env.d_env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH), '/var')
checkers.run_script(self.env.get_admin_remote(), '/var',
checkers.run_script(self.env.d_env.get_admin_remote(), '/var',
'upgrade.sh', password=
hlp_data.KEYSTONE_CREDS['password'])
checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 3000,
checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
phrase='*** UPGRADE DONE SUCCESSFULLY')
checkers.check_upgraded_containers(self.env.get_admin_remote(),
checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
hlp_data.UPGRADE_FUEL_FROM,
hlp_data.UPGRADE_FUEL_TO)
self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
@ -264,8 +267,10 @@ class UpgradeFuelMaster(base_test_data.TestBasic):
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=6, networks_count=8, timeout=300)
if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
remote = self.env.get_ssh_to_remote_by_name('slave-06')
self.check_upgraded_kernel(self.env.get_admin_remote(), remote)
_ip = self.fuel_web.get_nailgun_node_by_name('slave-06')['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
self.check_upgraded_kernel(
self.env.d_env.get_admin_remote(), remote)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
@ -295,21 +300,21 @@ class UpgradeFuelMaster(base_test_data.TestBasic):
cluster_id = self.fuel_web.get_last_created_cluster()
available_releases_before = self.fuel_web.get_releases_list_for_os(
release_name=hlp_data.OPENSTACK_RELEASE)
checkers.upload_tarball(self.env.get_admin_remote(),
checkers.upload_tarball(self.env.d_env.get_admin_remote(),
hlp_data.TARBALL_PATH, '/var')
checkers.check_tarball_exists(self.env.get_admin_remote(),
checkers.check_tarball_exists(self.env.d_env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH),
'/var')
checkers.untar(self.env.get_admin_remote(),
checkers.untar(self.env.d_env.get_admin_remote(),
os.path.basename(hlp_data.TARBALL_PATH),
'/var')
checkers.run_script(self.env.get_admin_remote(), '/var',
checkers.run_script(self.env.d_env.get_admin_remote(), '/var',
'upgrade.sh', password=
hlp_data.KEYSTONE_CREDS['password'])
checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 3000,
checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
phrase='*** UPGRADE DONE SUCCESSFULLY')
checkers.check_upgraded_containers(self.env.get_admin_remote(),
checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
hlp_data.UPGRADE_FUEL_FROM,
hlp_data.UPGRADE_FUEL_TO)
self.fuel_web.assert_nodes_in_ready_state(cluster_id)
@@ -349,8 +354,10 @@ class UpgradeFuelMaster(base_test_data.TestBasic):
cluster = self.fuel_web.client.get_cluster(cluster_id)
assert_equal(str(cluster['net_provider']), 'neutron')
if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
remote = self.env.get_ssh_to_remote_by_name('slave-04')
self.check_upgraded_kernel(self.env.get_admin_remote(), remote)
_ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
self.check_upgraded_kernel(
self.env.d_env.get_admin_remote(), remote)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_ha_after_upgrade")
@@ -378,21 +385,23 @@ class RollbackFuelMaster(base_test_data.TestBasic):
self.env.revert_snapshot("deploy_neutron_gre_ha")
cluster_id = self.fuel_web.get_last_created_cluster()
checkers.upload_tarball(self.env.get_admin_remote(),
checkers.upload_tarball(self.env.d_env.get_admin_remote(),
hlp_data.TARBALL_PATH, '/var')
checkers.check_tarball_exists(self.env.get_admin_remote(),
checkers.check_tarball_exists(self.env.d_env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH),
'/var')
checkers.untar(self.env.get_admin_remote(),
checkers.untar(self.env.d_env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH), '/var')
checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh',
checkers.run_script(self.env.d_env.get_admin_remote(),
'/var',
'upgrade.sh',
password=
hlp_data.KEYSTONE_CREDS['password'],
rollback=True, exit_code=255)
checkers.wait_rollback_is_done(self.env.get_admin_remote(), 3000)
checkers.check_upgraded_containers(self.env.get_admin_remote(),
checkers.wait_rollback_is_done(self.env.d_env.get_admin_remote(), 3000)
checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
hlp_data.UPGRADE_FUEL_TO,
hlp_data.UPGRADE_FUEL_FROM)
logger.debug("all containers are ok")
@@ -435,26 +444,30 @@ class RollbackFuelMaster(base_test_data.TestBasic):
self.env.revert_snapshot("deploy_neutron_gre")
cluster_id = self.fuel_web.get_last_created_cluster()
remote = self.env.get_ssh_to_remote_by_name('slave-01')
_ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
expected_kernel = UpgradeFuelMaster.get_slave_kernel(remote)
checkers.upload_tarball(self.env.get_admin_remote(),
checkers.upload_tarball(self.env.d_env.get_admin_remote(),
hlp_data.TARBALL_PATH, '/var')
checkers.check_tarball_exists(self.env.get_admin_remote(),
checkers.check_tarball_exists(self.env.d_env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH),
'/var')
checkers.untar(self.env.get_admin_remote(),
checkers.untar(self.env.d_env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH), '/var')
# We expect exit code 255 here because the upgrade
# is expected to fail in this rollback scenario
checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh',
checkers.run_script(self.env.d_env.get_admin_remote(),
'/var',
'upgrade.sh',
password=
hlp_data.KEYSTONE_CREDS['password'],
rollback=True, exit_code=255)
checkers.wait_rollback_is_done(self.env.get_admin_remote(), 3000)
checkers.check_upgraded_containers(self.env.get_admin_remote(),
checkers.wait_rollback_is_done(self.env.d_env.get_admin_remote(), 3000)
checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
hlp_data.UPGRADE_FUEL_TO,
hlp_data.UPGRADE_FUEL_FROM)
logger.debug("all containers are ok")
@@ -474,7 +487,8 @@ class RollbackFuelMaster(base_test_data.TestBasic):
)
self.fuel_web.deploy_cluster_wait(cluster_id)
if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
remote = self.env.get_ssh_to_remote_by_name('slave-04')
_ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
kernel = UpgradeFuelMaster.get_slave_kernel(remote)
checkers.check_kernel(kernel, expected_kernel)
self.fuel_web.run_ostf(cluster_id=cluster_id)
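
Note: the rollback tests invert the upgrade check: upgrade.sh is launched with rollback=True and is expected to exit 255, after which check_upgraded_containers() receives UPGRADE_FUEL_TO/UPGRADE_FUEL_FROM in swapped order to confirm the containers returned to the original version. A minimal sketch, assuming the same imports as the earlier helper and a hypothetical name run_rollback_on_master:

    def run_rollback_on_master(env):
        """Hypothetical helper: trigger a failing upgrade, verify rollback."""
        remote = env.d_env.get_admin_remote()
        # upgrade.sh is expected to fail and roll back, hence exit_code=255
        checkers.run_script(remote, '/var', 'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'],
                            rollback=True, exit_code=255)
        checkers.wait_rollback_is_done(remote, 3000)
        # Swapped arguments: containers must be back on the old version
        checkers.check_upgraded_containers(remote,
                                           hlp_data.UPGRADE_FUEL_TO,
                                           hlp_data.UPGRADE_FUEL_FROM)
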


@@ -164,7 +164,8 @@ class VcenterDeploy(TestBasic):
ips_for_check.append(net_prefs['addr'])
# Wait until the VM is booted
ssh = self.env.get_ssh_to_remote_by_name("slave-01")
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
ssh = self.env.d_env.get_ssh_to_remote(_ip)
wait(
lambda: not ssh.execute('curl -s -m1 http://' + ips_for_check[0] +
':22 |grep -iq "[a-z]"')['exit_code'],
@@ -320,8 +321,9 @@ class VcenterDeploy(TestBasic):
# Verify ceilometer API
for slave in ["slave-01", "slave-02", "slave-03"]:
_ip = self.fuel_web.get_nailgun_node_by_name(slave)['ip']
checkers.verify_service(
self.env.get_ssh_to_remote_by_name(slave),
self.env.d_env.get_ssh_to_remote(_ip),
service_name='ceilometer-api')
# Create list with ceilometer tests: test_classes


@@ -52,7 +52,8 @@ class HAOneControllerZabbix(TestBasic):
"""
self.env.revert_snapshot("ready_with_3_slaves")
node_ssh = self.env.get_ssh_to_remote(self.fuel_web.admin_node_ip)
node_ssh = self.env.d_env.get_ssh_to_remote(
self.fuel_web.admin_node_ip)
# Turn on experimental mode
checkers.check_enable_experimental_mode(


@@ -68,7 +68,8 @@ class TestPatch(TestBasic):
self.env.revert_snapshot(self.snapshot)
logger.info("Start upload upgrade archive")
node_ssh = self.env.get_ssh_to_remote(self.fuel_web.admin_node_ip)
node_ssh = self.env.d_env.get_ssh_to_remote(
self.fuel_web.admin_node_ip)
# 2. Upload tarball
checkers.upload_tarball(


@@ -154,8 +154,9 @@ class DeployHAOneControllerMasterNodeFail(base_test_case.TestBasic):
self.env.revert_snapshot("deploy_ha_flat_dns_ntp")
remote = self.env.get_admin_remote()
remote_slave = self.env.get_ssh_to_remote_by_name('slave-01')
remote = self.env.d_env.get_admin_remote()
_ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
remote_slave = self.env.d_env.get_ssh_to_remote(_ip)
remote.execute("dockerctl shell cobbler killall dnsmasq")
checkers.external_dns_check(remote_slave)
@@ -175,8 +176,9 @@ class DeployHAOneControllerMasterNodeFail(base_test_case.TestBasic):
self.env.revert_snapshot("deploy_ha_flat_dns_ntp")
cluster_id = self.fuel_web.get_last_created_cluster()
remote = self.env.get_admin_remote()
remote_slave = self.env.get_ssh_to_remote_by_name('slave-01')
remote = self.env.d_env.get_admin_remote()
_ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
remote_slave = self.env.d_env.get_ssh_to_remote(_ip)
vip = self.fuel_web.get_public_vip(cluster_id)
remote.execute("pkill -9 ntpd")
checkers.external_ntp_check(remote_slave, vip)


@@ -153,7 +153,8 @@ class TestNeutronFailover(base_test_case.TestBasic):
net_id = os_conn.get_network('net04')['id']
devops_node = self.get_node_with_dhcp(self, os_conn, net_id)
remote = self.env.get_ssh_to_remote_by_name(devops_node.name)
_ip = self.fuel_web.get_nailgun_node_by_name(devops_node.name)['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
dhcp_namespace = ''.join(remote.execute('ip netns | grep {0}'.format(
net_id))['stdout']).rstrip()
@@ -169,8 +170,9 @@ class TestNeutronFailover(base_test_case.TestBasic):
self.check_instance_connectivity(remote, dhcp_namespace, instance_ip)
node_with_l3 = os_conn.get_l3_agent_hosts(router_id)[0]
new_devops = self.get_node_with_l3(self, node_with_l3)
new_remote = self.env.get_ssh_to_remote_by_name(new_devops.name)
        new_devops = self.get_node_with_l3(self, node_with_l3)
        _ip = self.fuel_web.get_nailgun_node_by_name(new_devops.name)['ip']
new_remote = self.env.d_env.get_ssh_to_remote(_ip)
new_remote.execute("pcs resource ban p_neutron-l3-agent {0}".format(
node_with_l3))
@@ -218,7 +220,8 @@ class TestNeutronFailover(base_test_case.TestBasic):
net_id = os_conn.get_network('net04')['id']
devops_node = self.get_node_with_dhcp(self, os_conn, net_id)
remote = self.env.get_ssh_to_remote_by_name(devops_node.name)
_ip = self.fuel_web.get_nailgun_node_by_name(devops_node.name)['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
dhcp_namespace = ''.join(remote.execute('ip netns | grep {0}'.format(
net_id))['stdout']).rstrip()
@@ -277,7 +280,8 @@ class TestNeutronFailover(base_test_case.TestBasic):
net_id = os_conn.get_network('net04')['id']
devops_node = self.get_node_with_dhcp(self, os_conn, net_id)
remote = self.env.get_ssh_to_remote_by_name(devops_node.name)
_ip = self.fuel_web.get_nailgun_node_by_name(devops_node.name)['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
dhcp_namespace = ''.join(remote.execute('ip netns | grep {0}'.format(
net_id))['stdout']).rstrip()
@@ -352,7 +356,8 @@ class TestNeutronFailover(base_test_case.TestBasic):
instance = os_conn.create_server_for_migration(neutron=True)
floating_ip = os_conn.assign_floating_ip(instance)
logger.debug("instance floating ip is {0}".format(floating_ip.ip))
remote = self.env.get_ssh_to_remote_by_name('slave-01')
_ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
mtu_cmd = r"cat /sys/class/net/$(ip r g {0} |" \
r" sed -rn" \
r" 's/.*dev\s+(\S+)\s.*/\1/p')/mtu".format(floating_ip.ip)