Code style changes
Mass fix: 1. rename local variables that shadow builtins (e.g. id, type); 2. fix line wrapping so binary operators (and, or, |, +) sit at the end of the wrapped line rather than starting a new one; 3. stop importing classes under lowercase aliases (a lowercase alias hides that the name in the namespace is a class) — use PascalCase aliases instead. API not changed. Change-Id: Ia171e466b502d95aa96ff4801eb37d528d659b64
This commit is contained in:
parent
7f99a7ebd2
commit
81f1c0d739
|
@ -54,8 +54,8 @@ def check_cinder_status(remote):
|
|||
if result['exit_code'] == 0:
|
||||
return all(' up ' in x.split('enabled')[1]
|
||||
for x in cinder_services.split('\n')
|
||||
if 'cinder' in x and 'enabled' in x
|
||||
and len(x.split('enabled')))
|
||||
if 'cinder' in x and 'enabled' in x and
|
||||
len(x.split('enabled')))
|
||||
return False
|
||||
|
||||
|
||||
|
|
|
@ -21,10 +21,10 @@ from fuelweb_test.settings import PATH_TO_CERT
|
|||
|
||||
|
||||
from cinderclient import client as cinderclient
|
||||
from glanceclient.v1 import Client as glanceclient
|
||||
from keystoneclient.v2_0 import Client as keystoneclient
|
||||
from glanceclient.v1 import Client as GlanceClient
|
||||
from keystoneclient.v2_0 import Client as KeystoneClient
|
||||
from keystoneclient.exceptions import ClientException
|
||||
from novaclient.v1_1 import Client as novaclient
|
||||
from novaclient.v1_1 import Client as NovaClient
|
||||
import neutronclient.v2_0.client as neutronclient
|
||||
from proboscis.asserts import assert_equal
|
||||
|
||||
|
@ -43,7 +43,7 @@ class Common(object):
|
|||
path_to_cert = PATH_TO_CERT
|
||||
|
||||
LOGGER.debug('Auth URL is {0}'.format(auth_url))
|
||||
self.nova = novaclient(username=user,
|
||||
self.nova = NovaClient(username=user,
|
||||
api_key=password,
|
||||
project_id=tenant,
|
||||
auth_url=auth_url,
|
||||
|
@ -69,9 +69,9 @@ class Common(object):
|
|||
LOGGER.debug('Token is {0}'.format(token))
|
||||
glance_endpoint = self.keystone.service_catalog.url_for(
|
||||
service_type='image', endpoint_type='publicURL')
|
||||
LOGGER.debug('Glance endpoind is {0}'.format(glance_endpoint))
|
||||
LOGGER.debug('Glance endpoint is {0}'.format(glance_endpoint))
|
||||
|
||||
self.glance = glanceclient(endpoint=glance_endpoint,
|
||||
self.glance = GlanceClient(endpoint=glance_endpoint,
|
||||
token=token,
|
||||
cacert=path_to_cert)
|
||||
|
||||
|
@ -171,14 +171,14 @@ class Common(object):
|
|||
for i in range(retries):
|
||||
try:
|
||||
if ca_cert:
|
||||
keystone = keystoneclient(username=username,
|
||||
keystone = KeystoneClient(username=username,
|
||||
password=password,
|
||||
tenant_name=tenant_name,
|
||||
auth_url=auth_url,
|
||||
cacert=ca_cert)
|
||||
|
||||
else:
|
||||
keystone = keystoneclient(username=username,
|
||||
keystone = KeystoneClient(username=username,
|
||||
password=password,
|
||||
tenant_name=tenant_name,
|
||||
auth_url=auth_url)
|
||||
|
|
|
@ -86,8 +86,7 @@ def log_snapshot_after_test(func):
|
|||
description = "Failed in method '%s'." % func.__name__
|
||||
if args[0].env is not None:
|
||||
try:
|
||||
create_diagnostic_snapshot(args[0].env,
|
||||
"fail", name)
|
||||
create_diagnostic_snapshot(args[0].env, "fail", name)
|
||||
except:
|
||||
logger.error("Fetching of diagnostic snapshot failed: {0}".
|
||||
format(traceback.format_exc()))
|
||||
|
@ -342,9 +341,7 @@ def update_ostf(func):
|
|||
|
||||
def create_diagnostic_snapshot(env, status, name=""):
|
||||
task = env.fuel_web.task_wait(env.fuel_web.client.generate_logs(), 60 * 10)
|
||||
url = "http://{}:8000{}".format(
|
||||
env.get_admin_node_ip(), task['message']
|
||||
)
|
||||
url = "http://{}:8000{}".format(env.get_admin_node_ip(), task['message'])
|
||||
log_file_name = '{status}_{name}-{basename}'.format(
|
||||
status=status,
|
||||
name=name,
|
||||
|
|
|
@ -16,7 +16,7 @@ import json
|
|||
import traceback
|
||||
import urllib2
|
||||
|
||||
from keystoneclient.v2_0 import Client as keystoneclient
|
||||
from keystoneclient.v2_0 import Client as KeystoneClient
|
||||
from keystoneclient import exceptions
|
||||
from fuelweb_test import logger
|
||||
|
||||
|
@ -36,7 +36,7 @@ class HTTPClient(object):
|
|||
try:
|
||||
logger.info('Initialize keystoneclient with url %s',
|
||||
self.keystone_url)
|
||||
self.keystone = keystoneclient(
|
||||
self.keystone = KeystoneClient(
|
||||
auth_url=self.keystone_url, **self.creds)
|
||||
# it depends on keystone version, some versions doing auth
|
||||
# explicitly some don't, but we are making it explicitly always
|
||||
|
|
|
@ -129,8 +129,9 @@ class OpenStackActions(common.Common):
|
|||
# Find external net id for tenant
|
||||
nets = self.neutron.list_networks()['networks']
|
||||
err_msg = "Active external network not found in nets:{}"
|
||||
ext_net_ids = [net['id'] for net in nets if net['router:external']
|
||||
and net['status'] == "ACTIVE"]
|
||||
ext_net_ids = [
|
||||
net['id'] for net in nets
|
||||
if net['router:external'] and net['status'] == "ACTIVE"]
|
||||
asserts.assert_true(ext_net_ids, err_msg.format(nets))
|
||||
net_id = ext_net_ids[0]
|
||||
# Find instance port
|
||||
|
@ -262,14 +263,13 @@ class OpenStackActions(common.Common):
|
|||
|
||||
def execute_through_host(self, ssh, vm_host, cmd, creds=()):
|
||||
logger.debug("Making intermediate transport")
|
||||
interm_transp = ssh._ssh.get_transport()
|
||||
intermediate_transport = ssh._ssh.get_transport()
|
||||
|
||||
logger.debug("Opening channel to VM")
|
||||
interm_chan = interm_transp.open_channel('direct-tcpip',
|
||||
(vm_host, 22),
|
||||
(ssh.host, 0))
|
||||
intermediate_channel = intermediate_transport.open_channel(
|
||||
'direct-tcpip', (vm_host, 22), (ssh.host, 0))
|
||||
logger.debug("Opening paramiko transport")
|
||||
transport = paramiko.Transport(interm_chan)
|
||||
transport = paramiko.Transport(intermediate_channel)
|
||||
logger.debug("Starting client")
|
||||
transport.start_client()
|
||||
logger.info("Passing authentication to VM: {}".format(creds))
|
||||
|
@ -453,8 +453,8 @@ class OpenStackActions(common.Common):
|
|||
def get_neutron_dhcp_ports(self, net_id):
|
||||
ports = self.neutron.list_ports()['ports']
|
||||
network_ports = [x for x in ports
|
||||
if x['device_owner'] == 'network:dhcp'
|
||||
and x['network_id'] == net_id]
|
||||
if x['device_owner'] == 'network:dhcp' and
|
||||
x['network_id'] == net_id]
|
||||
return network_ports
|
||||
|
||||
def create_pool(self, pool_name):
|
||||
|
|
|
@ -126,8 +126,8 @@ def get_node_packages(remote, func_name, node_role,
|
|||
logger.debug("node packages are {0}".format(node_packages))
|
||||
packages_dict[func_name][node_role] = node_packages\
|
||||
if node_role not in packages_dict[func_name].keys()\
|
||||
else list(set(packages_dict[func_name][node_role])
|
||||
| set(node_packages))
|
||||
else list(set(packages_dict[func_name][node_role]) |
|
||||
set(node_packages))
|
||||
return packages_dict
|
||||
|
||||
|
||||
|
@ -237,7 +237,7 @@ class TimeStat(object):
|
|||
self.begin_time = time.time()
|
||||
return self
|
||||
|
||||
def __exit__(self, exp_type, exp_value, trcback):
|
||||
def __exit__(self, exc_type, exc_value, exc_tb):
|
||||
self.end_time = time.time()
|
||||
self.total_time = self.end_time - self.begin_time
|
||||
|
||||
|
@ -395,7 +395,7 @@ def run_on_remote_get_results(remote, cmd, clear=False, err_msg=None,
|
|||
|
||||
def json_deserialize(json_string):
|
||||
"""
|
||||
Deserealize json_string and return object
|
||||
Deserialize json_string and return object
|
||||
|
||||
:param json_string: string or list with json
|
||||
:return: obj
|
||||
|
@ -574,7 +574,7 @@ class RunLimit(object):
|
|||
signal.signal(signal.SIGALRM, self.handle_timeout)
|
||||
signal.alarm(self.seconds)
|
||||
|
||||
def __exit__(self, type, value, traceback):
|
||||
def __exit__(self, exc_type, value, traceback):
|
||||
signal.alarm(0)
|
||||
|
||||
|
||||
|
|
|
@ -277,8 +277,8 @@ class FuelWebClient(object):
|
|||
def assert_release_role_present(self, release_name, role_name):
|
||||
logger.info('Assert role %s is available in release %s',
|
||||
role_name, release_name)
|
||||
id = self.assert_release_state(release_name)
|
||||
release_data = self.client.get_releases_details(release_id=id)
|
||||
release_id = self.assert_release_state(release_name)
|
||||
release_data = self.client.get_releases_details(release_id=release_id)
|
||||
assert_equal(
|
||||
True, role_name in release_data['roles'],
|
||||
message='There is no {0} role in release id {1}'.format(
|
||||
|
@ -377,10 +377,9 @@ class FuelWebClient(object):
|
|||
@logwrap
|
||||
def assert_pacemaker(self, ctrl_node, online_nodes, offline_nodes):
|
||||
logger.info('Assert pacemaker status at devops node %s', ctrl_node)
|
||||
fqdn_names = lambda nodes: sorted([self.fqdn(n) for n in nodes])
|
||||
|
||||
online = fqdn_names(online_nodes)
|
||||
offline = fqdn_names(offline_nodes)
|
||||
online = sorted([self.fqdn(n) for n in online_nodes])
|
||||
offline = sorted([self.fqdn(n) for n in offline_nodes])
|
||||
try:
|
||||
wait(lambda: self.get_pcm_nodes(ctrl_node)['Online'] == online and
|
||||
self.get_pcm_nodes(ctrl_node)['Offline'] == offline,
|
||||
|
@ -975,8 +974,9 @@ class FuelWebClient(object):
|
|||
@logwrap
|
||||
def is_node_discovered(self, nailgun_node):
|
||||
return any(
|
||||
map(lambda node: node['mac'] == nailgun_node['mac']
|
||||
and node['status'] == 'discover', self.client.list_nodes()))
|
||||
map(lambda node:
|
||||
node['mac'] == nailgun_node['mac'] and
|
||||
node['status'] == 'discover', self.client.list_nodes()))
|
||||
|
||||
@logwrap
|
||||
def run_network_verify(self, cluster_id):
|
||||
|
@ -1811,11 +1811,11 @@ class FuelWebClient(object):
|
|||
passed_count = []
|
||||
failed_count = []
|
||||
test_name_to_run = test_name or OSTF_TEST_NAME
|
||||
retr = test_retries or OSTF_TEST_RETRIES_COUNT
|
||||
retries = test_retries or OSTF_TEST_RETRIES_COUNT
|
||||
test_path = map_ostf.OSTF_TEST_MAPPING.get(test_name_to_run)
|
||||
logger.info('Test path is {0}'.format(test_path))
|
||||
|
||||
for i in range(0, retr):
|
||||
for i in range(0, retries):
|
||||
result = self.run_single_ostf_test(
|
||||
cluster_id=cluster_id, test_sets=['smoke', 'sanity'],
|
||||
test_name=test_path,
|
||||
|
@ -2273,18 +2273,18 @@ class FuelWebClient(object):
|
|||
|
||||
logger.debug("ids are {}".format(ids))
|
||||
assert_true(ids, "osd ids for {} weren't found".format(hostname))
|
||||
for id in ids:
|
||||
remote_ceph.execute("ceph osd out {}".format(id))
|
||||
for osd_id in ids:
|
||||
remote_ceph.execute("ceph osd out {}".format(osd_id))
|
||||
wait(lambda: ceph.is_health_ok(remote_ceph),
|
||||
interval=30, timeout=10 * 60)
|
||||
for id in ids:
|
||||
for osd_id in ids:
|
||||
if OPENSTACK_RELEASE_UBUNTU in OPENSTACK_RELEASE:
|
||||
remote_ceph.execute("stop ceph-osd id={}".format(id))
|
||||
remote_ceph.execute("stop ceph-osd id={}".format(osd_id))
|
||||
else:
|
||||
remote_ceph.execute("service ceph stop osd.{}".format(id))
|
||||
remote_ceph.execute("ceph osd crush remove osd.{}".format(id))
|
||||
remote_ceph.execute("ceph auth del osd.{}".format(id))
|
||||
remote_ceph.execute("ceph osd rm osd.{}".format(id))
|
||||
remote_ceph.execute("service ceph stop osd.{}".format(osd_id))
|
||||
remote_ceph.execute("ceph osd crush remove osd.{}".format(osd_id))
|
||||
remote_ceph.execute("ceph auth del osd.{}".format(osd_id))
|
||||
remote_ceph.execute("ceph osd rm osd.{}".format(osd_id))
|
||||
# remove ceph node from crush map
|
||||
remote_ceph.execute("ceph osd crush remove {}".format(hostname))
|
||||
|
||||
|
|
|
@ -252,9 +252,9 @@ def get_tests_results(systest_build, os):
|
|||
test_class['duration'] += float(one['duration'])
|
||||
if one['status'].lower() in ('failed', 'error'):
|
||||
test_class["failCount"] += 1
|
||||
if one['status'].lower() in ('passed'):
|
||||
if one['status'].lower() == 'passed':
|
||||
test_class["passCount"] += 1
|
||||
if one['status'].lower() in ('skipped'):
|
||||
if one['status'].lower() == 'skipped':
|
||||
test_class["skipCount"] += 1
|
||||
|
||||
for klass in test_classes:
|
||||
|
@ -273,9 +273,10 @@ def get_tests_results(systest_build, os):
|
|||
duration='{0}s'.format(int(test['duration']) + 1),
|
||||
url='{0}testReport/(root)/{1}/'.format(test_build.url,
|
||||
test['name']),
|
||||
version='_'.join([test_build.build_data["id"]] +
|
||||
(test_build.build_data["description"]
|
||||
or test['name']).split()),
|
||||
version='_'.join(
|
||||
[test_build.build_data["id"]] + (
|
||||
test_build.build_data["description"] or
|
||||
test['name']).split()),
|
||||
description=test_build.build_data["description"] or
|
||||
test['name'],
|
||||
comments=test['skippedMessage']
|
||||
|
@ -318,9 +319,10 @@ def get_tests_results(systest_build, os):
|
|||
duration='{0}s'.format(int(test_duration) + 1),
|
||||
url='{0}testReport/(root)/{1}/'.format(test_build.url,
|
||||
test_name),
|
||||
version='_'.join([test_build.build_data["id"]] +
|
||||
(test_build.build_data["description"]
|
||||
or test_name).split()),
|
||||
version='_'.join(
|
||||
[test_build.build_data["id"]] + (
|
||||
test_build.build_data["description"] or
|
||||
test_name).split()),
|
||||
description=test_build.build_data["description"] or
|
||||
test_name,
|
||||
comments=test_comments,
|
||||
|
@ -333,8 +335,8 @@ def get_tests_results(systest_build, os):
|
|||
def publish_results(project, milestone_id, test_plan,
|
||||
suite_id, config_id, results):
|
||||
test_run_ids = [run['id'] for entry in test_plan['entries']
|
||||
for run in entry['runs'] if suite_id == run['suite_id']
|
||||
and config_id in run['config_ids']]
|
||||
for run in entry['runs'] if suite_id == run['suite_id'] and
|
||||
config_id in run['config_ids']]
|
||||
logger.debug('Looking for previous tests runs on "{0}" using tests suite '
|
||||
'"{1}"...'.format(project.get_config(config_id)['name'],
|
||||
project.get_suite(suite_id)['name']))
|
||||
|
|
|
@ -309,8 +309,8 @@ class NeutronVlanCephMongo(TestBasic):
|
|||
with self.fuel_web.get_ssh_for_node(node) as remote:
|
||||
gd.run_check_from_task(
|
||||
remote=remote,
|
||||
path=self.get_post_test(tasks,
|
||||
'tope-role-mongo')[0]['cmd'])
|
||||
path=self.get_post_test(
|
||||
tasks, 'top-role-mongo')[0]['cmd'])
|
||||
|
||||
self.env.make_snapshot('step_3_run_top_role_mongo_single')
|
||||
|
||||
|
@ -1439,13 +1439,13 @@ class NeutronVlanCephMongo(TestBasic):
|
|||
cluster_id=cluster_id,
|
||||
node_ids=nodes_ids)
|
||||
|
||||
contr_ids = [n['id'] for n in
|
||||
self.fuel_web.client.list_cluster_nodes(cluster_id)
|
||||
if 'controller' in n['roles']]
|
||||
controller_ids = [n['id'] for n in
|
||||
self.fuel_web.client.list_cluster_nodes(cluster_id)
|
||||
if 'controller' in n['roles']]
|
||||
|
||||
res = self.fuel_web.client.put_deployment_tasks_for_cluster(
|
||||
cluster_id, data=data,
|
||||
node_id=str(contr_ids).strip('[]'))
|
||||
node_id=str(controller_ids).strip('[]'))
|
||||
logger.debug('res info is {0}'.format(res))
|
||||
|
||||
self.fuel_web.assert_task_success(task=res)
|
||||
|
|
|
@ -304,7 +304,7 @@ class BackupRestoreHA(NeutronTunHaBase):
|
|||
with RunLimit(
|
||||
seconds=60 * 10,
|
||||
error_message="'dockerctl restore' "
|
||||
"runned longer then 600 sec"):
|
||||
"ran longer then 600 sec"):
|
||||
self.fuel_web.restore_master(remote)
|
||||
checkers.restore_check_sum(remote)
|
||||
|
||||
|
|
|
@ -491,12 +491,12 @@ class BondingHA(BondingTest):
|
|||
admin_bond_ifaces_latest = ifaces_data_latest[-1]['slaves']
|
||||
assert_equal(len(admin_bond_ifaces), len(admin_bond_ifaces_latest),
|
||||
"Admin interface bond config is inconsistent; "
|
||||
"interface(s) have dissapeared from the bond")
|
||||
"interface(s) have disappeared from the bond")
|
||||
others_bond_ifaces = ifaces_data[-2]['slaves']
|
||||
others_bond_ifaces_latest = ifaces_data_latest[-2]['slaves']
|
||||
assert_equal(len(others_bond_ifaces), len(others_bond_ifaces_latest),
|
||||
"Other network interfaces bond config is inconsistent; "
|
||||
"interface(s) have dissapeared from the bond")
|
||||
"interface(s) have disappeared from the bond")
|
||||
|
||||
self.show_step(9)
|
||||
with self.env.d_env.get_admin_remote() as admin_node:
|
||||
|
|
|
@ -21,14 +21,15 @@ from proboscis import SkipTest
|
|||
|
||||
from fuelweb_test.helpers.decorators import log_snapshot_after_test
|
||||
from fuelweb_test.tests import base_test_case as base_test_data
|
||||
from fuelweb_test.tests.test_os_upgrade import TestOSupgrade as upgrade
|
||||
from fuelweb_test.tests.test_os_upgrade import TestOSupgrade
|
||||
|
||||
|
||||
@test(groups=["clone_env_for_os_upgrade", "os_upgrade"])
|
||||
class TestCloneEnv(base_test_data.TestBasic):
|
||||
|
||||
@test(depends_on=[upgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["test_clone_environment"])
|
||||
@test(
|
||||
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["test_clone_environment"])
|
||||
@log_snapshot_after_test
|
||||
def test_clone_environment(self):
|
||||
"""Test clone environment
|
||||
|
@ -113,8 +114,9 @@ class TestCloneEnv(base_test_data.TestBasic):
|
|||
assert_equal(old_network["vlan_start"],
|
||||
network["vlan_start"])
|
||||
|
||||
@test(depends_on=[upgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["test_clone_nonexistent_cluster"])
|
||||
@test(
|
||||
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["test_clone_nonexistent_cluster"])
|
||||
@log_snapshot_after_test
|
||||
def test_clone_nonexistent_cluster(self):
|
||||
"""Test clone environment with nonexistent cluster id as argument
|
||||
|
@ -141,8 +143,9 @@ class TestCloneEnv(base_test_data.TestBasic):
|
|||
else:
|
||||
fail("Doesn't raise needed error")
|
||||
|
||||
@test(depends_on=[upgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["test_clone_wo_name_in_body"])
|
||||
@test(
|
||||
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["test_clone_wo_name_in_body"])
|
||||
@log_snapshot_after_test
|
||||
def test_clone_wo_name_in_body(self):
|
||||
"""Test clone without name in POST body
|
||||
|
@ -174,8 +177,9 @@ class TestCloneEnv(base_test_data.TestBasic):
|
|||
else:
|
||||
fail("Doesn't raise needed error")
|
||||
|
||||
@test(depends_on=[upgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["test_clone_wo_release_id_in_body"])
|
||||
@test(
|
||||
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["test_clone_wo_release_id_in_body"])
|
||||
@log_snapshot_after_test
|
||||
def test_clone_wo_release_id_in_body(self):
|
||||
"""Test clone without release id in POST body
|
||||
|
@ -204,8 +208,9 @@ class TestCloneEnv(base_test_data.TestBasic):
|
|||
else:
|
||||
fail("Doesn't raise needed error")
|
||||
|
||||
@test(depends_on=[upgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["test_clone_with_empty_body"])
|
||||
@test(
|
||||
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["test_clone_with_empty_body"])
|
||||
@log_snapshot_after_test
|
||||
def test_clone_with_empty_body(self):
|
||||
"""Test clone with empty body
|
||||
|
@ -230,8 +235,9 @@ class TestCloneEnv(base_test_data.TestBasic):
|
|||
else:
|
||||
fail("Doesn't raise needed error")
|
||||
|
||||
@test(depends_on=[upgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["test_clone_with_nonexistent_release_id"])
|
||||
@test(
|
||||
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["test_clone_with_nonexistent_release_id"])
|
||||
@log_snapshot_after_test
|
||||
def test_clone_with_nonexistent_release_id(self):
|
||||
"""Test clone with nonexistent release id in POST body
|
||||
|
@ -262,8 +268,9 @@ class TestCloneEnv(base_test_data.TestBasic):
|
|||
else:
|
||||
fail("Doesn't raise needed error")
|
||||
|
||||
@test(depends_on=[upgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["test_clone_with_incorrect_release_id"])
|
||||
@test(
|
||||
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["test_clone_with_incorrect_release_id"])
|
||||
@log_snapshot_after_test
|
||||
def test_clone_with_incorrect_release_id(self):
|
||||
"""Test clone with incorrect release id in POST body
|
||||
|
@ -294,8 +301,9 @@ class TestCloneEnv(base_test_data.TestBasic):
|
|||
else:
|
||||
fail("Doesn't raise needed error")
|
||||
|
||||
@test(depends_on=[upgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["test_double_clone_environment"])
|
||||
@test(
|
||||
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["test_double_clone_environment"])
|
||||
@log_snapshot_after_test
|
||||
def test_double_clone_environment(self):
|
||||
"""Test double clone environment
|
||||
|
|
|
@ -91,11 +91,11 @@ class TestNetworkTemplatesBase(TestBasic):
|
|||
network_types = tmpl['templates_for_node_role'][role]
|
||||
for network_type in network_types:
|
||||
endpoints.update(tmpl['network_scheme'][network_type]['endpoints'])
|
||||
for type in tmpl['network_scheme']:
|
||||
for net_role in tmpl['network_scheme'][type]['roles']:
|
||||
for scheme_type in tmpl['network_scheme']:
|
||||
for net_role in tmpl['network_scheme'][scheme_type]['roles']:
|
||||
if net_role in skip_net_roles:
|
||||
endpoints.discard(
|
||||
tmpl['network_scheme'][type]['roles'][net_role])
|
||||
tmpl['network_scheme'][scheme_type]['roles'][net_role])
|
||||
for net in tmpl['network_assignments']:
|
||||
if tmpl['network_assignments'][net]['ep'] in endpoints:
|
||||
networks.add(net)
|
||||
|
|
|
@ -278,7 +278,7 @@ class TestHaNeutronScalability(TestBasic):
|
|||
else:
|
||||
checkers.check_swift_ring(remote)
|
||||
|
||||
def _check_pacemarker(devops_nodes):
|
||||
def _check_pacemaker(devops_nodes):
|
||||
for devops_node in devops_nodes:
|
||||
with QuietLogger():
|
||||
self.fuel_web.assert_pacemaker(
|
||||
|
@ -333,7 +333,7 @@ class TestHaNeutronScalability(TestBasic):
|
|||
|
||||
logger.info("STEP6: Deploy 3 ctrl node cluster has finished")
|
||||
controllers = ['slave-01', 'slave-02', 'slave-03']
|
||||
_check_pacemarker(self.env.d_env.get_nodes(name__in=controllers))
|
||||
_check_pacemaker(self.env.d_env.get_nodes(name__in=controllers))
|
||||
|
||||
primary_node_s3 = self.fuel_web.get_nailgun_primary_node(
|
||||
self.env.d_env.nodes().slaves[0])
|
||||
|
@ -364,7 +364,7 @@ class TestHaNeutronScalability(TestBasic):
|
|||
logger.info("STEP10: Deploy 5 ctrl node cluster has finished")
|
||||
controllers = ['slave-01', 'slave-02', 'slave-03', 'slave-04',
|
||||
'slave-05']
|
||||
_check_pacemarker(self.env.d_env.get_nodes(name__in=controllers))
|
||||
_check_pacemaker(self.env.d_env.get_nodes(name__in=controllers))
|
||||
|
||||
primary_node_s9 = self.fuel_web.get_nailgun_primary_node(
|
||||
self.env.d_env.nodes().slaves[0])
|
||||
|
@ -401,7 +401,7 @@ class TestHaNeutronScalability(TestBasic):
|
|||
cluster_id, ['controller'])
|
||||
devops_nodes = [self.fuel_web.get_devops_node_by_nailgun_node(node)
|
||||
for node in nodes]
|
||||
_check_pacemarker(devops_nodes)
|
||||
_check_pacemaker(devops_nodes)
|
||||
|
||||
logger.info("STEP13-14: Scale down happened. "
|
||||
"3 controller should be now")
|
||||
|
|
|
@ -21,14 +21,15 @@ from proboscis import SkipTest
|
|||
from fuelweb_test.helpers.decorators import log_snapshot_after_test
|
||||
|
||||
from fuelweb_test.tests import base_test_case as base_test_data
|
||||
from fuelweb_test.tests.test_os_upgrade import TestOSupgrade as upgrade
|
||||
from fuelweb_test.tests.test_os_upgrade import TestOSupgrade
|
||||
|
||||
|
||||
@test(groups=["reassign_node_for_os_upgrade", "os_upgrade"])
|
||||
class TestReassignNode(base_test_data.TestBasic):
|
||||
|
||||
@test(depends_on=[upgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["reassign_node_to_cloned_environment"])
|
||||
@test(
|
||||
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["reassign_node_to_cloned_environment"])
|
||||
@log_snapshot_after_test
|
||||
def reassign_node_to_cloned_environment(self):
|
||||
"""Test reassign node
|
||||
|
@ -108,8 +109,9 @@ class TestReassignNode(base_test_data.TestBasic):
|
|||
)
|
||||
self.fuel_web.assert_task_success(task)
|
||||
|
||||
@test(depends_on=[upgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["reassign_node_to_nonexistent_cluster"])
|
||||
@test(
|
||||
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["reassign_node_to_nonexistent_cluster"])
|
||||
@log_snapshot_after_test
|
||||
def reassign_node_to_nonexistent_cluster(self):
|
||||
"""Test reassign node to nonexistent cluster
|
||||
|
@ -144,8 +146,9 @@ class TestReassignNode(base_test_data.TestBasic):
|
|||
"to non-existing"
|
||||
"cluster 123456".format(controller_node["id"]))
|
||||
|
||||
@test(depends_on=[upgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["reassign_node_with_empty_body"])
|
||||
@test(
|
||||
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["reassign_node_with_empty_body"])
|
||||
@log_snapshot_after_test
|
||||
def reassign_node_with_empty_body(self):
|
||||
"""Test reassign node with empty body
|
||||
|
@ -183,8 +186,9 @@ class TestReassignNode(base_test_data.TestBasic):
|
|||
fail("Doesn't raise HTTP 400 error on request"
|
||||
"to reassigning node with empty body")
|
||||
|
||||
@test(depends_on=[upgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["reassign_node_with_incorrect_node"])
|
||||
@test(
|
||||
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["reassign_node_with_incorrect_node"])
|
||||
@log_snapshot_after_test
|
||||
def reassign_node_with_incorrect_node(self):
|
||||
"""Test reassign node with incorrect node in POST body
|
||||
|
@ -226,8 +230,9 @@ class TestReassignNode(base_test_data.TestBasic):
|
|||
fail("Doesn't raise HTTP 400 error on request"
|
||||
"to reassigning node with incorrect node_id")
|
||||
|
||||
@test(depends_on=[upgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["reassign_nonexistent_node_to_cloned_environment"])
|
||||
@test(
|
||||
depends_on=[TestOSupgrade.upgrade_ha_ceph_for_all_ubuntu_neutron_vlan],
|
||||
groups=["reassign_nonexistent_node_to_cloned_environment"])
|
||||
@log_snapshot_after_test
|
||||
def reassign_nonexistent_node_to_cloned_environment(self):
|
||||
"""Test reassign node with nonexistent node in POST body
|
||||
|
|
|
@ -313,8 +313,9 @@ class PatchingMasterTests(TestBasic):
|
|||
|
||||
# Step #2
|
||||
if settings.LATE_ARTIFACTS_JOB_URL:
|
||||
data = urllib2.urlopen(settings.LATE_ARTIFACTS_JOB_URL
|
||||
+ "/artifact/artifacts/artifacts.txt")
|
||||
data = urllib2.urlopen(
|
||||
settings.LATE_ARTIFACTS_JOB_URL +
|
||||
"/artifact/artifacts/artifacts.txt")
|
||||
for package in data:
|
||||
os.system("wget --directory-prefix"
|
||||
" {0} {1}".format(settings.UPDATE_FUEL_PATH,
|
||||
|
|
|
@ -317,7 +317,7 @@ class TestHaFailoverBase(TestBasic):
|
|||
resource)
|
||||
assert_true(len(new_nodes) == 1,
|
||||
"After ip deletion resource should run on a single"
|
||||
" node, but runned on {0}. On {1} attempt".format(
|
||||
" node, but ran on {0}. On {1} attempt".format(
|
||||
[n.name for n in new_nodes],
|
||||
check_counter))
|
||||
logger.info(
|
||||
|
@ -434,8 +434,7 @@ class TestHaFailoverBase(TestBasic):
|
|||
devops_node.name, config))
|
||||
assert_not_equal(
|
||||
re.search("vip__public\s+\(ocf::fuel:ns_IPaddr2\):\s+Started",
|
||||
config)
|
||||
and
|
||||
config) and
|
||||
re.search("Clone Set:\s+clone_ping_vip__public\s+"
|
||||
"\[ping_vip__public\]\s+Started:\s+\[ {0} \]"
|
||||
.format(pcm_nodes), config),
|
||||
|
@ -545,10 +544,12 @@ class TestHaFailoverBase(TestBasic):
|
|||
with self.fuel_web.get_ssh_for_node(devops_node.name) as remote:
|
||||
remote.execute("kill -9 `pgrep nova-compute`")
|
||||
wait(
|
||||
lambda: len(remote.execute('pgrep nova-compute')['stdout'])
|
||||
== 1, timeout=120)
|
||||
assert_true(len(remote.execute('pgrep nova-compute')['stdout'])
|
||||
== 1, 'Nova service was not restarted')
|
||||
lambda:
|
||||
len(remote.execute('pgrep nova-compute')['stdout']) == 1,
|
||||
timeout=120)
|
||||
assert_true(
|
||||
len(remote.execute('pgrep nova-compute')['stdout']) == 1,
|
||||
'Nova service was not restarted')
|
||||
assert_true(len(remote.execute(
|
||||
"grep \"nova-compute.*trying to restart\" "
|
||||
"/var/log/monit.log")['stdout']) > 0,
|
||||
|
|
|
@ -191,8 +191,8 @@ class UpgradeFuelMaster(base_test_data.TestBasic):
|
|||
|
||||
available_releases_after = self.fuel_web.get_releases_list_for_os(
|
||||
release_name=hlp_data.OPENSTACK_RELEASE)
|
||||
added_release = [id for id in available_releases_after
|
||||
if id not in available_releases_before]
|
||||
added_release = [release_id for release_id in available_releases_after
|
||||
if release_id not in available_releases_before]
|
||||
self.env.bootstrap_nodes(
|
||||
self.env.d_env.nodes().slaves[5:7])
|
||||
cluster_id = self.fuel_web.create_cluster(
|
||||
|
@ -297,8 +297,8 @@ class UpgradeFuelMaster(base_test_data.TestBasic):
|
|||
# Deploy new cluster
|
||||
available_releases_after = self.fuel_web.get_releases_list_for_os(
|
||||
release_name=hlp_data.OPENSTACK_RELEASE)
|
||||
added_release = [id for id in available_releases_after
|
||||
if id not in available_releases_before]
|
||||
added_release = [release_id for release_id in available_releases_after
|
||||
if release_id not in available_releases_before]
|
||||
|
||||
self.env.bootstrap_nodes(
|
||||
self.env.d_env.nodes().slaves[5:7])
|
||||
|
@ -361,8 +361,8 @@ class UpgradeFuelMaster(base_test_data.TestBasic):
|
|||
self.fuel_web.assert_nailgun_upgrade_migration()
|
||||
available_releases_after = self.fuel_web.get_releases_list_for_os(
|
||||
release_name=hlp_data.OPENSTACK_RELEASE)
|
||||
added_release = [id for id in available_releases_after
|
||||
if id not in available_releases_before]
|
||||
added_release = [release_id for release_id in available_releases_after
|
||||
if release_id not in available_releases_before]
|
||||
self.fuel_web.verify_network(cluster_id)
|
||||
self.fuel_web.run_ostf(cluster_id=cluster_id,
|
||||
test_sets=['ha', 'smoke', 'sanity'])
|
||||
|
@ -454,8 +454,8 @@ class UpgradeFuelMaster(base_test_data.TestBasic):
|
|||
# Deploy new cluster
|
||||
available_releases_after = self.fuel_web.get_releases_list_for_os(
|
||||
release_name=hlp_data.OPENSTACK_RELEASE)
|
||||
added_release = [id for id in available_releases_after
|
||||
if id not in available_releases_before]
|
||||
added_release = [release_id for release_id in available_releases_after
|
||||
if release_id not in available_releases_before]
|
||||
|
||||
self.env.bootstrap_nodes(
|
||||
self.env.d_env.nodes().slaves[3:6])
|
||||
|
|
|
@ -110,8 +110,8 @@ class UpgradeFuelChains(base_test_data.TestBasic):
|
|||
|
||||
available_releases_after = self.fuel_web.get_releases_list_for_os(
|
||||
release_name=hlp_data.OPENSTACK_RELEASE)
|
||||
added_release = [id for id in available_releases_after
|
||||
if id not in available_releases_before]
|
||||
added_release = [release_id for release_id in available_releases_after
|
||||
if release_id not in available_releases_before]
|
||||
self.env.bootstrap_nodes(
|
||||
self.env.d_env.nodes().slaves[3:6])
|
||||
data = {
|
||||
|
|
Loading…
Reference in New Issue