Python3 code compatibility: filter, map, zip

Do not use filter(), map() or zip() for non-iterator tasks; use list, set
or dict comprehensions instead
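
In Python 3 these builtins return lazy, single-pass iterators instead of
lists, so code that calls len() on the result, indexes it, or iterates it
twice silently breaks. A minimal sketch of the failure mode (illustrative
values, not taken from this change set):

    ps_output = ['1 ?  Ss  nailgun', '2 ?  Ss  astute master']
    api = filter(lambda x: 'astute' in x, ps_output)
    # Python 2: api is a list, so len(api) == 1
    # Python 3: api is a filter object and len(api) raises TypeError
    api = [line for line in ps_output if 'astute' in line]
    # A real list on both versions: len(), indexing and repeated
    # iteration are all safe

filter(), map() and zip() are kept below only where the result is
consumed exactly once as an iterator.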

blueprint fuel-qa-python3-compatibility
Related-bug: #1556791

Change-Id: I6e031ed2ca18080a15eecd5d0c1db275f9ec9aa1
Alexey Stepanov 2016-03-17 14:15:50 +03:00
parent 235959b861
commit 4c6ef9699b
24 changed files with 90 additions and 84 deletions

View File

@@ -94,7 +94,7 @@ def verify_service(ip, service_name, count=1,
         ip=ip,
         cmd='ps ax'
     )['stdout']
-    api = filter(lambda x: service_name in x, ps_output)
+    api = [ps for ps in ps_output if service_name in ps]
     logger.debug("{} \\n: {}".format(service_name, str(api)))
     if not ignore_count_of_proccesses:
         assert_equal(len(api), count,

View File

@@ -308,10 +308,10 @@ class OpenStackActions(common.Common):
     def get_hosts_for_migr(self, srv_host_name):
         # Determine which host is available for live migration
-        host_list = filter(lambda host: host.host_name != srv_host_name,
-                           self.nova.hosts.list())
-        return filter(lambda host: host._info['service'] == 'compute',
-                      host_list)
+        return [
+            host for host in self.nova.hosts.list()
+            if host.host_name != srv_host_name and
+            host._info['service'] == 'compute']

     def get_md5sum(self, file_path, controller_ssh, vm_ip, creds=()):
         logger.info("Get file md5sum and compare it with previous one")

View File

@@ -650,8 +650,7 @@ def get_node_hiera_roles(remote):
     cmd = 'hiera roles'
     roles = ''.join(run_on_remote(remote, cmd)).strip()
     # Content string with roles like a ["ceph-osd", "controller"] to list
-    roles = map(lambda s: s.strip('" '), roles.strip("[]").split(','))
-    return roles
+    return [role.strip('" ') for role in roles.strip("[]").split(',')]


 class RunLimit(object):

View File

@@ -274,10 +274,8 @@ class EnvironmentModel(object):
             self.resume_environment()

     def nailgun_nodes(self, devops_nodes):
-        return map(
-            lambda node: self.fuel_web.get_nailgun_node_by_devops_node(node),
-            devops_nodes
-        )
+        return [self.fuel_web.get_nailgun_node_by_devops_node(node)
+                for node in devops_nodes]

     def check_slaves_are_ready(self):
         devops_nodes = [node for node in self.d_env.nodes().slaves

View File

@@ -235,13 +235,8 @@ class FuelWebClient(object):
             for set_result in set_result_list:
                 if set_result['testset'] not in test_sets:
                     continue
-                failed += len(
-                    filter(
-                        lambda test: test['status'] == 'failure' or
-                        test['status'] == 'error',
-                        set_result['tests']
-                    )
-                )
+                failed += len([test for test in set_result['tests']
+                               if test['status'] in {'failure', 'error'}])
                 for test in set_result['tests']:
                     test_result.update({test['name']: test['status']})
@@ -777,8 +772,7 @@ class FuelWebClient(object):
         with self.environment.d_env.get_admin_remote() as admin_remote:
             copy_cert_from_master(admin_remote, cluster_id)
         n_nodes = self.client.list_cluster_nodes(cluster_id)
-        n_nodes = filter(lambda n: 'ready' in n['status'], n_nodes)
-        for n in n_nodes:
+        for n in filter(lambda n: 'ready' in n['status'], n_nodes):
             node = self.get_devops_node_by_nailgun_node(n)
             if node:
                 node_name = node.name
@@ -1224,7 +1218,7 @@ class FuelWebClient(object):
         self.client.update_nodes(nodes_data)

         nailgun_nodes = self.client.list_cluster_nodes(cluster_id)
-        cluster_node_ids = map(lambda _node: str(_node['id']), nailgun_nodes)
+        cluster_node_ids = [str(_node['id']) for _node in nailgun_nodes]
         assert_true(
             all([node_id in cluster_node_ids for node_id in node_ids]))
@@ -1267,7 +1261,7 @@ class FuelWebClient(object):
                 interfaces_dict[iface].append('fuelweb_admin')

         def get_iface_by_name(ifaces, name):
-            iface = filter(lambda iface: iface['name'] == name, ifaces)
+            iface = [_iface for _iface in ifaces if _iface['name'] == name]
             assert_true(len(iface) > 0,
                         "Interface with name {} is not present on "
                         "node. Please check override params.".format(name))

View File

@@ -376,7 +376,9 @@ class NailgunClient(object):
     def do_cluster_action(self, cluster_id, node_ids=None, action="provision"):
         if not node_ids:
             nailgun_nodes = self.list_cluster_nodes(cluster_id)
+            # pylint: disable=map-builtin-not-iterating
             node_ids = map(lambda _node: str(_node['id']), nailgun_nodes)
+            # pylint: enable=map-builtin-not-iterating
         return self.client.put(
             "/api/clusters/{0}/{1}?nodes={2}".format(
                 cluster_id,
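
Note: this is the one call site that keeps a lazy map(), with pragmas
silencing pylint's map-builtin-not-iterating check. A sketch of why that
is safe, assuming (as the URL formatting in this hunk suggests; the exact
join is outside the visible context) that node_ids is serialized once
into the query string:

    # pylint: disable=map-builtin-not-iterating
    node_ids = map(str, [1, 2, 3])  # lazy in Python 3, a list in Python 2
    # pylint: enable=map-builtin-not-iterating
    url = "/api/clusters/{0}/provision?nodes={1}".format(
        42, ','.join(node_ids))  # join() drains the iterator exactly once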

View File

@@ -463,14 +463,14 @@ BUILD_IMAGES = get_var_as_bool('BUILD_IMAGES', False)
 STORE_ASTUTE_YAML = get_var_as_bool('STORE_ASTUTE_YAML', False)

-EXTERNAL_DNS = map(
-    str.strip,
+EXTERNAL_DNS = [
+    string.strip() for string in
     os.environ.get('EXTERNAL_DNS', '208.67.220.220').split(',')
-)
-EXTERNAL_NTP = map(
-    str.strip,
+]
+EXTERNAL_NTP = [
+    string.strip() for string in
     os.environ.get('EXTERNAL_NTP', 'ua.pool.ntp.org').split(',')
-)
+]
 DNS_SUFFIX = os.environ.get('DNS_SUFFIX', '.test.domain.local')
 FUEL_MASTER_HOSTNAME = os.environ.get('FUEL_MASTER_HOSTNAME', 'nailgun')
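
Module-level settings like these are read repeatedly across test runs, so
they need to be real lists: a map object would be empty after its first
use. A minimal sketch of the one-shot hazard (hypothetical values):

    ntp = map(str.strip, ' ua.pool.ntp.org, pool.ntp.org '.split(','))
    list(ntp)  # ['ua.pool.ntp.org', 'pool.ntp.org'] on the first pass
    list(ntp)  # [] in Python 3: the iterator is already exhausted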

View File

@@ -489,7 +489,8 @@ def main():
         logger.error('There is no TestPlan to process, exiting...')
         return 1

-    run_ids = () if not args.run_ids else map(int, args.run_ids.split(','))
+    run_ids = () if not args.run_ids else tuple(
+        int(arg) for arg in args.run_ids.split(','))

     generator = StatisticsGenerator(testrail_project,
                                     args.plan_id,

View File

@@ -433,8 +433,7 @@ class TestRailProject(object):
         custom_step_results = []
         steps = case.get('custom_test_case_steps', None)
         if steps and len(steps) == len(results.steps):
-            steps = zip(steps, results.steps)
-            for s in steps:
+            for s in zip(steps, results.steps):
                 custom_step_results.append({
                     "content": s[0]["content"],
                     "expected": s[0]["expected"],

View File

@@ -82,10 +82,12 @@ class TestAdminNode(TestBasic):
         self.env.revert_snapshot("empty")
         with self.env.d_env.get_admin_remote() as remote:
             ps_output = remote.execute('ps ax')['stdout']
-            astute_master = filter(lambda x: 'astute master' in x, ps_output)
+            astute_master = [
+                master for master in ps_output if 'astute master' in master]
             logger.info("Found astute processes: {:s}".format(astute_master))
             assert_equal(len(astute_master), 1)
-            astute_workers = filter(lambda x: 'astute worker' in x, ps_output)
+            astute_workers = [
+                worker for worker in ps_output if 'astute worker' in worker]
             logger.info(
                 "Found {len:d} astute worker processes: {workers!s}"
                 "".format(len=len(astute_workers), workers=astute_workers))

View File

@@ -314,12 +314,11 @@ class CephHA(TestBasic):
         ceph_version = versions[0]['ceph_version']

-        def check_ver(v):
-            # Check version. True if version is not equal @ceph_version
-            return (parse_version(v['ceph_version']) !=
-                    parse_version(ceph_version))
-        bad_nodes = filter(check_ver, versions)
+        bad_nodes = [
+            ver for ver in versions
+            if parse_version(ver['ceph_version']) != parse_version(
+                ceph_version)]
         assert_true(len(bad_nodes) == 0,
                     message="Nodes should same Ceph version on all nodes. "
                             "Expecting version {0}, the following nodes "

View File

@@ -128,7 +128,8 @@ class HAOneControllerNeutron(HAOneControllerNeutronBase):
             cluster_id, {'slave-02': ['compute']}, False, True)
         task = self.fuel_web.deploy_cluster(cluster_id)
         self.fuel_web.assert_task_success(task)
-        nodes = filter(lambda x: x["pending_deletion"] is True, nailgun_nodes)
+        nodes = [
+            node for node in nailgun_nodes if node["pending_deletion"] is True]
         assert_true(
             len(nodes) == 1, "Verify 1 node has pending deletion status"
         )
@@ -313,7 +314,8 @@ class HAOneControllerNeutron(HAOneControllerNeutronBase):
         cluster_id = self.fuel_web.get_last_created_cluster()
         self.fuel_web.client.delete_cluster(cluster_id)
         nailgun_nodes = self.fuel_web.client.list_nodes()
-        nodes = filter(lambda x: x["pending_deletion"] is True, nailgun_nodes)
+        nodes = [
+            node for node in nailgun_nodes if node["pending_deletion"] is True]
         assert_true(
             len(nodes) == 2, "Verify 2 node has pending deletion status"
         )

View File

@@ -84,7 +84,7 @@ class TestJumboFrames(base_test_case.TestBasic):
         ifaces = ifaces.splitlines()[1:]
         bridge_iface = ifaces[0].split()[-1]

-        ifaces = map(lambda iface: iface.strip(), ifaces[1:])
+        ifaces = [iface.strip() for iface in ifaces[1:]]
         ifaces.append(bridge_iface)

         return ifaces

View File

@@ -532,10 +532,9 @@ class TestNetworkTemplates(TestNetworkTemplatesBase):
             cluster_id=cluster_id, network_template=network_template)

         self.show_step(6)
-        mgmt_net = filter(
-            lambda grp: grp['name'] == 'management',
-            self.fuel_web.client.get_network_groups()
-        ).pop()
+        mgmt_net = [
+            grp for grp in self.fuel_web.client.get_network_groups()
+            if grp['name'] == 'management'].pop()
         assert_true(
             self.fuel_web.client.del_network_group(mgmt_net['id']).code

View File

@@ -132,7 +132,7 @@ class ServicesReconfiguration(TestBasic):
         :return: a dictionary of ip nodes and process uptime
         """
         nodes = [x['ip'] for x in nodes]
-        uptimes = dict(zip(nodes, range(len(nodes))))
+        uptimes = {}
         for node in nodes:
             with self.env.d_env.get_ssh_to_remote(node) as remote:
                 uptimes[node] = \
@@ -1201,8 +1201,9 @@ class ServicesReconfiguration(TestBasic):
         self.show_step(2)
         cluster_id = self.fuel_web.get_last_created_cluster()

-        bs_node = filter(lambda x: x.name == 'slave-05',
-                         self.env.d_env.get_nodes())
+        bs_node = [
+            node for node in self.env.d_env.get_nodes()
+            if node.name == 'slave-05']
         self.env.bootstrap_nodes(bs_node)
         self.fuel_web.update_nodes(
             cluster_id,
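
Two different cleanups in the hunks above. dict(zip(...)) itself still
works in Python 3, because dict() consumes the zip iterator eagerly; the
uptimes change drops it because the loop reassigns every key anyway, so
pre-seeding the dict with placeholder values was dead code:

    nodes = ['10.109.0.3', '10.109.0.4']  # illustrative addresses
    dict(zip(nodes, range(len(nodes))))   # fine in Python 2 and 3, but the
                                          # 0, 1 placeholders are never read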

View File

@@ -41,8 +41,8 @@ class TestUseMirror(TestBasic):
     def _get_cluster_repos(self, cluster_id):
         all_repos = self.fuel_web.get_cluster_repos(cluster_id)['value']
         return {
-            'ubuntu': filter(lambda x: 'ubuntu' in x['name'], all_repos),
-            'mos': filter(lambda x: 'mos' in x['name'], all_repos),
+            'ubuntu': [repo for repo in all_repos if 'ubuntu' in repo['name']],
+            'mos': [repo for repo in all_repos if 'mos' in repo['name']],
             'all': all_repos}

     def _fix_fuel_mirror_config(self, admin_ip):
@@ -130,12 +130,14 @@ class TestUseMirror(TestBasic):
         self.show_step(8)
         cluster_repos = self._get_cluster_repos(cluster_id)
-        remote_ubuntu_repos = filter(
-            lambda x: admin_ip not in x['uri'] and
-            '{settings.MASTER_IP}' not in x['uri'], cluster_repos['ubuntu'])
-        local_mos_repos = filter(
-            lambda x: admin_ip in x['uri'] or
-            '{settings.MASTER_IP}' in x['uri'], cluster_repos['mos'])
+        remote_ubuntu_repos = [
+            repo for repo in cluster_repos['ubuntu']
+            if admin_ip not in repo['uri'] and
+            '{settings.MASTER_IP}' not in repo['uri']]
+        local_mos_repos = [
+            repo for repo in cluster_repos['mos']
+            if admin_ip in repo['uri'] or
+            '{settings.MASTER_IP}' in repo['uri']]
         repos_log = pretty_log(
             {'All': cluster_repos['all'],
              'Remote Ubuntu': remote_ubuntu_repos,
@@ -163,9 +165,10 @@ class TestUseMirror(TestBasic):
         self.show_step(11)
         cluster_repos = self._get_cluster_repos(cluster_id)['all']
-        remote_repos = filter(
-            lambda x: admin_ip not in x['uri'] and
-            '{settings.MASTER_IP}' not in x['uri'], cluster_repos)
+        remote_repos = [
+            repo for repo in cluster_repos
+            if admin_ip not in repo['uri'] and
+            '{settings.MASTER_IP}' not in repo['uri']]
         message = pretty_log(cluster_repos)
         logger.info(message)
         assert_false(remote_repos,

View File

@@ -226,8 +226,8 @@ class PatchingTests(TestBasic):
             nailgun_node = self.fuel_web.update_nodes(
                 cluster_id, node, False, True)
-            nodes = filter(
-                lambda x: x["pending_deletion"] is True, nailgun_node)
+            nodes = [_node for _node in nailgun_node
+                     if _node["pending_deletion"] is True]
             self.fuel_web.deploy_cluster(cluster_id)
             wait(
                 lambda: self.fuel_web.is_node_discovered(nodes[0]),
@@ -385,8 +385,8 @@ class PatchingMasterTests(TestBasic):
                 self.fuel_web.prepare_ceph_to_delete(remote)
             nailgun_node = self.fuel_web.update_nodes(
                 cluster_id, node, False, True)
-            nodes = filter(
-                lambda x: x["pending_deletion"] is True, nailgun_node)
+            nodes = [_node for _node in nailgun_node
+                     if _node["pending_deletion"] is True]
             self.fuel_web.deploy_cluster(cluster_id)
             wait(
                 lambda: self.fuel_web.is_node_discovered(nodes[0]),

View File

@@ -127,9 +127,9 @@ class TestNessus(NeutronTunHaBase):
         scan_name = "Scan CPA {0}".format(scan_start_date)

         policies_list = nessus_client.list_policy_templates()
-        cpa_policy_template = filter(
-            lambda template: template['title'] == 'Credentialed Patch Audit',
-            policies_list)[0]
+        cpa_policy_template = [
+            template for template in policies_list
+            if template['title'] == 'Credentialed Patch Audit'][0]
         policy_id = nessus_client.add_cpa_policy(
             scan_name, settings.ENV_NAME, cpa_policy_template['uuid'])
@@ -183,9 +183,9 @@ class TestNessus(NeutronTunHaBase):
         scan_name = "Scan WAT {0}".format(scan_start_date)

         policies_list = nessus_client.list_policy_templates()
-        wat_policy_template = filter(
-            lambda template: template['title'] == 'Web Application Tests',
-            policies_list)[0]
+        wat_policy_template = [
+            template for template in policies_list
+            if template['title'] == 'Web Application Tests'][0]
         policy_id = nessus_client.add_wat_policy(
             scan_name, settings.ENV_NAME, wat_policy_template['uuid'])
@@ -242,9 +242,9 @@ class TestNessus(NeutronTunHaBase):
         scan_name = "Scan CPA {0}".format(scan_start_date)

         policies_list = nessus_client.list_policy_templates()
-        cpa_policy_template = filter(
-            lambda template: template['title'] == 'Credentialed Patch Audit',
-            policies_list)[0]
+        cpa_policy_template = [
+            template for template in policies_list
+            if template['title'] == 'Credentialed Patch Audit'][0]
         policy_id = nessus_client.add_cpa_policy(
             scan_name, settings.ENV_NAME, cpa_policy_template['uuid'])

View File

@@ -259,7 +259,8 @@ class SeparateDbFailover(TestBasic):
         nailgun_node = self.fuel_web.update_nodes(cluster_id, node,
                                                   False, True)
-        nodes = filter(lambda x: x["pending_deletion"] is True, nailgun_node)
+        nodes = [_node for _node in nailgun_node
+                 if _node["pending_deletion"] is True]
         self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)
         wait(lambda: self.fuel_web.is_node_discovered(nodes[0]),
              timeout=6 * 60)

View File

@@ -244,7 +244,8 @@ class SeparateHorizonFailover(TestBasic):
         nailgun_node = self.fuel_web.update_nodes(cluster_id, node,
                                                   False, True)
-        nodes = filter(lambda x: x["pending_deletion"] is True, nailgun_node)
+        nodes = [_node for _node in nailgun_node
+                 if _node["pending_deletion"] is True]
         self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)
         wait(lambda: self.fuel_web.is_node_discovered(nodes[0]),
              timeout=6 * 60)

View File

@@ -284,7 +284,8 @@ class SeparateKeystoneFailover(TestBasic):
         nailgun_node = self.fuel_web.update_nodes(cluster_id, node,
                                                   False, True)
-        nodes = filter(lambda x: x["pending_deletion"] is True, nailgun_node)
+        nodes = [_node for _node in nailgun_node
+                 if _node["pending_deletion"] is True]
         self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)
         wait(lambda: self.fuel_web.is_node_discovered(nodes[0]),
              timeout=6 * 60)

View File

@@ -262,7 +262,8 @@ class SeparateRabbitFailover(TestBasic):
         nailgun_node = self.fuel_web.update_nodes(cluster_id, node,
                                                   False, True)
-        nodes = filter(lambda x: x["pending_deletion"] is True, nailgun_node)
+        nodes = [_node for _node in nailgun_node
+                 if _node["pending_deletion"] is True]
         self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)
         wait(lambda: self.fuel_web.is_node_discovered(nodes[0]),
              timeout=6 * 60)

View File

@@ -55,9 +55,10 @@ class StrengthActions(object):
                     "No destroyed nodes in Environment")

         def wait_offline_nodes():
-            n_nodes = map(self.fuel_web.get_nailgun_node_by_devops_node,
-                          self.destroyed_devops_nodes)
-            n_nodes = map(lambda x: x['online'], n_nodes)
+            n_nodes = [
+                self.fuel_web.get_nailgun_node_by_devops_node(node) for node in
+                self.destroyed_devops_nodes]
+            n_nodes = [node['online'] for node in n_nodes]
             return n_nodes.count(False) == 0

         wait(wait_offline_nodes, timeout=60 * 5)
@@ -83,9 +84,10 @@ class StrengthActions(object):
         n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
             self.cluster_id,
             ['controller'])
-        d_ctrls = map(self.fuel_web.get_devops_node_by_nailgun_node, n_ctrls)
+        d_ctrls = {self.fuel_web.get_devops_node_by_nailgun_node(node)
+                   for node in n_ctrls}
         self.fuel_web.wait_mysql_galera_is_up(
-            [n.name for n in set(d_ctrls) - set(self.destroyed_devops_nodes)],
+            [n.name for n in d_ctrls - set(self.destroyed_devops_nodes)],
             timeout=300)

     @deferred_decorator([make_snapshot_if_step_fail])
@@ -95,8 +97,9 @@ class StrengthActions(object):
         n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
             self.cluster_id,
             ['controller'])
-        d_ctrls = map(self.fuel_web.get_devops_node_by_nailgun_node, n_ctrls)
-        online_d_ctrls = set(d_ctrls) - set(self.destroyed_devops_nodes)
+        d_ctrls = {self.fuel_web.get_devops_node_by_nailgun_node(node)
+                   for node in n_ctrls}
+        online_d_ctrls = d_ctrls - set(self.destroyed_devops_nodes)
         for node in online_d_ctrls:
             logger.info("Check pacemaker status on {}".format(node.name))
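
The set comprehension here does double duty: it avoids Python 3's
single-pass map object and drops the set() conversion that each use site
needed. A minimal sketch of the trap the old code would hit under
Python 3 if the result were consumed twice (illustrative values):

    d_ctrls = map(str.upper, ['slave-01', 'slave-02'])
    set(d_ctrls)  # {'SLAVE-01', 'SLAVE-02'}
    set(d_ctrls)  # set() — the iterator was drained by the first call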

View File

@@ -24,7 +24,7 @@ def get_basepath():

 def get_list_confignames(filelist):
     """Get list of config name from file list"""
-    return map(get_configname, filelist)
+    return [get_configname(filename) for filename in filelist]


 def get_configname(path):