Merge "Miscalenous py3 fixes"

Zuul 2019-07-22 18:49:14 +00:00 committed by Gerrit Code Review
commit ef85b1fd57
4 changed files with 28 additions and 19 deletions

View File

@@ -16,6 +16,9 @@ import os
 import sys
 
+import six
+
 
 class Metadata(object):
     def __init__(self):
@@ -35,7 +38,7 @@ class Metadata(object):
     def get_hardware_metadata(self, sys_data):
         hard_dict = {}
-        for item, dictionary in sys_data.iteritems():
+        for item, dictionary in six.iteritems(sys_data):
             if any(node in sys_data[item]['group_names'] for node in self._supported_node_types):
                 if 'hardware_details' not in hard_dict:
                     hard_dict['hardware_details'] = []
@@ -59,24 +62,24 @@ class Metadata(object):
     def get_environment_metadata(self, sys_data):
         env_dict = {}
-        for item, dictionary in sys_data.iteritems():
+        for item, dictionary in six.iteritems(sys_data):
             if 'environment_setup' not in env_dict:
                 env_dict['environment_setup'] = {}
-            for key, value in sys_data[item].iteritems():
+            for key, value in six.iteritems(sys_data[item]):
                 if 'stockpile_osp_env' in key:
-                    for nodes, number in value.iteritems():
+                    for nodes, number in six.iteritems(value):
                         env_dict['environment_setup'][nodes] = number
         return env_dict
 
     def get_software_metadata(self, sys_data):
         soft_all_dict = []
         bad_output_list = [{},[],""]
-        for item, dictionary in sys_data.iteritems():
+        for item, dictionary in six.iteritems(sys_data):
             if any(node in sys_data[item]['group_names'] for node in self._supported_node_types):
                 software_dict = {}
                 sample_vuln_dict = {}
                 node = sys_data[item]['inventory_hostname']
-                for key, output in sys_data[item].iteritems():
+                for key, output in six.iteritems(sys_data[item]):
                     if 'stockpile_yum' in key and output not in bad_output_list:
                         software_dict['repos_enabled'] = {}
                         software_dict['repos_enabled']['repos'] = []
@@ -113,10 +116,10 @@ class Metadata(object):
                             software_dict[service_name]['node_name'] = node
                             if key_name not in software_dict[service_name].keys():
                                 software_dict[service_name][key_name] = {}
-                            for obj, value in output.iteritems():
+                            for obj, value in six.iteritems(output):
                                 software_dict[service_name][key_name][obj] = value
                         else:
-                            for obj, value in output.iteritems():
+                            for obj, value in six.iteritems(output):
                                 if obj not in software_dict.keys():
                                     software_dict[obj] = value
                                 software_dict[obj]['node_name'] = node
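The change repeated throughout this file is the standard six shim for dict iteration: dict.iteritems() existed only on Python 2, while six.iteritems() dispatches to iteritems() on Python 2 and items() on Python 3. A minimal, self-contained sketch of the pattern (the sys_data sample dict is invented for illustration):

import six

sys_data = {'overcloud-0': {'group_names': ['controller']}}

# Python 2 only; raises AttributeError on Python 3:
#     for item, dictionary in sys_data.iteritems(): ...

# Portable form used by this commit:
for item, dictionary in six.iteritems(sys_data):
    print(item, dictionary)

# In Python 3-only code, plain .items() is the idiomatic equivalent:
for item, dictionary in sys_data.items():
    print(item, dictionary)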

View File

@@ -27,6 +27,9 @@ from browbeat.path import get_workload_venv
 from browbeat.path import results_path
 from browbeat.workloads import base
 
+import six
+
 
 class PerfKit(base.WorkloadBase):
     def __init__(self, config, result_dir_ts):
@@ -41,7 +44,7 @@ class PerfKit(base.WorkloadBase):
     def string_to_dict(self, string):
         """Function for converting "|" quoted hash data into python dictionary."""
         dict_data = {}
-        split_data = string.split('|,|')
+        split_data = '|,|'.split()
         split_data[0] = split_data[0][1:]
         split_data[-1] = split_data[-1][:-1]
         for item in split_data:
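Unlike the mechanical iteritems() swaps elsewhere in this commit, this hunk changes behavior: str.split() is an instance method that works identically on Python 2 and 3, so the old line needed no porting, while the new '|,|'.split() splits the literal separator string on whitespace and ignores the input entirely. A quick behavior check, with an invented sample input:

sample = '|owner:perfkit|,|timestamp:123|'

# Old line: splits the input on the '|,|' separator (valid on Python 2 and 3).
print(sample.split('|,|'))  # ['|owner:perfkit', 'timestamp:123|']

# New line: splits the literal separator on whitespace, ignoring the input.
print('|,|'.split())        # ['|,|']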
@@ -110,7 +113,7 @@ class PerfKit(base.WorkloadBase):
                 get_workload_venv('perfkit', True),
                 self.overcloudrc,
                 get_workload_venv('perfkit', False), cloud_type))
-        for parameter, value in benchmark_config.iteritems():
+        for parameter, value in six.iteritems(benchmark_config):
             if not parameter == 'name':
                 self.logger.debug(
                     "Parameter: {}, Value: {}".format(parameter, value))
@@ -168,7 +171,7 @@ class PerfKit(base.WorkloadBase):
             self.logger.info("Benchmark: {}".format(workload['name']))
             self.update_total_scenarios()
             # Add default parameters as necessary
-            for default_item, value in self.config['perfkit']['default'].iteritems():
+            for default_item, value in six.iteritems(self.config['perfkit']['default']):
                 if default_item not in workload:
                     workload[default_item] = value

View File

@@ -28,6 +28,9 @@ from browbeat.path import results_path
 from browbeat.workloads import base
 
+import six
+
 
 class Shaker(base.WorkloadBase):
     def __init__(self, config, result_dir_ts):
@@ -59,7 +62,7 @@ class Shaker(base.WorkloadBase):
     def accommodation_to_list(self, accommodation):
         accommodation_list = []
-        for key, value in accommodation.iteritems():
+        for key, value in six.iteritems(accommodation):
             if value is True:
                 accommodation_list.append(key)
             else:
@@ -100,7 +103,7 @@ class Shaker(base.WorkloadBase):
             return True
         # Dictionary to capture common test data
         shaker_test_meta = {}
-        for scenario in data['scenarios'].iterkeys():
+        for scenario in data['scenarios']:
             # Populating common test data
             if 'shaker_test_info' not in shaker_test_meta:
                 shaker_test_meta['shaker_test_info'] = data[
@@ -135,7 +138,7 @@ class Shaker(base.WorkloadBase):
                 shaker_test_meta['deployment']['template'] = data[
                     'scenarios'][scenario]['deployment']['template']
         # Iterating through each record to get result values
-        for record in data['records'].iterkeys():
+        for record in data['records']:
             if data['records'][record]['status'] == "ok" and data[
                     'records'][record]['executor'] != "shell":
                 if 'stdout' in data['records'][record]:
@@ -148,7 +151,7 @@ class Shaker(base.WorkloadBase):
                         outputs[metric[0]] = metric[1]
                 # Iterate over each result type for each sample in record and
                 # get associated value
-                for key in outputs.iterkeys():
+                for key in outputs:
                     if key == "time":
                         continue
                     # Iterate in step lock over each list of samples in the
@@ -259,7 +262,7 @@ class Shaker(base.WorkloadBase):
     def get_uuidlist(self, data):
         uuidlist = []
-        for key in data['records'].iterkeys():
+        for key in data['records']:
             uuidlist.append(key)
         return uuidlist
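The iterkeys() removals in this file need no six helper at all: iterating a dict directly yields its keys on both Python 2 and 3. A short sketch with invented record data:

data = {'records': {'uuid-1': {'status': 'ok'}, 'uuid-2': {'status': 'error'}}}

# Python 2 only:
#     for key in data['records'].iterkeys(): ...

# Portable on both interpreters:
uuidlist = [key for key in data['records']]

# Equivalent one-liner:
uuidlist = list(data['records'])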

View File

@@ -14,10 +14,10 @@ import csv
 import datetime
 import json
 import logging
-import StringIO
 import time
 
 import browbeat.elastic
+import six
 from rally.common import sshutils
 from rally_openstack import consts
@@ -237,7 +237,7 @@ class BrowbeatPbenchUperf(neutron_utils.NeutronScenario,
                           'timestamp': es_ts,
                           'num_pairs': num_pairs}}
         elastic = browbeat.elastic.Elastic(config, 'pbench')
-        json_result = StringIO.StringIO(stdout_json)
+        json_result = six.StringIO(stdout_json)
         json_data = json.load(json_result)
         for iteration in json_data:
             elastic.index_result(iteration, test_name, 'results/')
@@ -245,7 +245,7 @@ class BrowbeatPbenchUperf(neutron_utils.NeutronScenario,
             LOG.error("Error with PBench Results")
         # Parse results
-        result = StringIO.StringIO('\n'.join(stdout.split('\n')[1:]))
+        result = six.StringIO('\n'.join(stdout.split('\n')[1:]))
         creader = csv.reader(result)
         report = []
         for row in creader:
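six.StringIO resolves to StringIO.StringIO on Python 2 and io.StringIO on Python 3, which is why the module-level import StringIO can be dropped. A self-contained sketch of the parsing step above, with invented pbench stdout:

import csv
import six

stdout = 'pbench header line\nthroughput,latency\n940.1,0.2\n'

# Drop the first line of stdout, then parse the remainder as CSV.
result = six.StringIO('\n'.join(stdout.split('\n')[1:]))
report = [row for row in csv.reader(result)]
print(report)  # [['throughput', 'latency'], ['940.1', '0.2']]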