Replace six.iteritems() with .items()

Update all places where `iteritems` is used:

1. As mentioned in [1], we should avoid using six.iteritems() to obtain
   iterators. We can use dict.items() instead, since in Python 3 it already
   returns an iterable view, and dict.items()/dict.keys() is more readable
   (see the sketch below).
2. In Python 2, where dict.items() builds an intermediate list, the
   performance cost should be negligible; see [2].

[1] https://wiki.openstack.org/wiki/Python3
[2] http://lists.openstack.org/pipermail/openstack-dev/2015-June/066391.html

Change-Id: Ic82c26e0f37bd3ecea98bc85bd98bd62c8d762a2
Author: loooosy
Date:   2017-04-02 23:38:23 +08:00
Parent: 13078eff1b
Commit: 978157fe32

20 changed files with 50 additions and 50 deletions
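To make the commit message's rationale concrete, here is a minimal sketch of the pattern being applied; the dict contents and names below are illustrative stand-ins, not code from monasca-agent:

```python
# Illustrative only: a stand-in dict, not data from the agent.
collection_metrics = {'collection_time': 1.2, 'emit_time': 0.3}

# Old pattern: six.iteritems(collection_metrics) -- a compatibility shim
# whose only job was to avoid the list that Py2's items() builds.
# New pattern: plain dict.items() runs unchanged on both interpreters.
for name, value in collection_metrics.items():
    print(name, value)

# On Python 3, items() returns a dynamic view rather than a list copy,
# so it reflects later modifications of the dict:
view = collection_metrics.items()
collection_metrics['flush_time'] = 0.1
assert ('flush_time', 0.1) in view

# On Python 2 the same call would materialize a small list; for dicts of
# this size the extra copy is negligible, which is the point of [2] above.
```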

@@ -127,7 +127,7 @@ class Collector(util.Dimensions):
         collect_stats = []
         dimensions = {'component': 'monasca-agent', 'service': 'monitoring'}
         # Add in metrics on the collector run
-        for name, value in self.collection_metrics.iteritems():
+        for name, value in self.collection_metrics.items():
             metric = metrics.Metric(name,
                                     self._set_dimensions(dimensions),
                                     tenant=None)

@@ -260,7 +260,7 @@ class DynamicCheckHelper(object):
         :return: dictionary mapping source labels to applicable DimMapping objects
         """
         result = {}
-        for dim, spec in config.get('dimensions', {}).iteritems():
+        for dim, spec in config.get('dimensions', {}).items():
             if isinstance(spec, dict):
                 label = spec.get('source_key', dim)
                 sepa = spec.get('separator', '-')
@@ -310,7 +310,7 @@ class DynamicCheckHelper(object):
             self._grp_metric_map[iname] = {}
             self._grp_metric_cache[iname] = {}
             self._grp_dimension_map[iname] = {}
-            for grp, gspec in groups.iteritems():
+            for grp, gspec in groups.items():
                 self._grp_metric_map[iname][grp] = gspec
                 self._grp_metric_cache[iname][grp] = {}
                 self._grp_dimension_map[iname][grp] = DynamicCheckHelper._build_dimension_map(gspec)
@@ -498,7 +498,7 @@ class DynamicCheckHelper(object):
         else:
             ext_labels = labels.copy()
-        for element, child in metric_dict.iteritems():
+        for element, child in metric_dict.items():
             # if child is a dictionary, then recurse
             if isinstance(child, dict) and curr_depth < max_depth:
                 self.push_metric_dict(instance, child, ext_labels, group, timestamp, fixed_dimensions,
@@ -542,7 +542,7 @@ class DynamicCheckHelper(object):
         """
         ext_labels = None
         # collect additional dimensions first from non-metrics
-        for element, child in metric_dict.iteritems():
+        for element, child in metric_dict.items():
             if isinstance(child, str) and len(self._get_mappings(instance_name, group, element)) > 0:
                 if not ext_labels:
                     ext_labels = labels.copy()
@@ -628,7 +628,7 @@ class DynamicCheckHelper(object):
         metric_list.extend(metric_map.get(DynamicCheckHelper.COUNTERS_KEY, []))
         # collect group specific metrics
         grp_metric_map = self._grp_metric_map.get(iname, {})
-        for gname, gmmap in grp_metric_map.iteritems():
+        for gname, gmmap in grp_metric_map.items():
             metric_list.extend(gmmap.get(DynamicCheckHelper.GAUGES_KEY, []))
             metric_list.extend(gmmap.get(DynamicCheckHelper.RATES_KEY, []))
             metric_list.extend(gmmap.get(DynamicCheckHelper.COUNTERS_KEY, []))
@@ -645,7 +645,7 @@ class DynamicCheckHelper(object):
         """
         dims = default_dimensions.copy()
         # map all specified dimension all keys
-        for labelname, labelvalue in labels.iteritems():
+        for labelname, labelvalue in labels.items():
             mapping_arr = self._get_mappings(instance_name, group, labelname)
             target_dim = None

@@ -93,7 +93,7 @@ class CadvisorHost(AgentCheck):
     def _parse_memory(self, memory_data, dimensions):
         memory_metrics = METRICS['memory_metrics']
-        for cadvisor_key, (metric_name, metric_types, metric_units) in six.iteritems(memory_metrics):
+        for cadvisor_key, (metric_name, metric_types, metric_units) in memory_metrics.items():
             if cadvisor_key in memory_data:
                 self._send_metrics("mem." + metric_name, memory_data[cadvisor_key], dimensions,
                                    metric_types, metric_units)
@@ -103,7 +103,7 @@ class CadvisorHost(AgentCheck):
         for filesystem in filesystem_data:
             file_dimensions = dimensions.copy()
             file_dimensions['device'] = filesystem['device']
-            for cadvisor_key, (metric_name, metric_types, metric_units) in six.iteritems(filesystem_metrics):
+            for cadvisor_key, (metric_name, metric_types, metric_units) in filesystem_metrics.items():
                 if cadvisor_key in filesystem:
                     self._send_metrics("fs." + metric_name, filesystem[cadvisor_key], file_dimensions,
                                        metric_types, metric_units)
@@ -114,7 +114,7 @@ class CadvisorHost(AgentCheck):
         for interface in network_interfaces:
             network_dimensions = dimensions.copy()
             network_dimensions['interface'] = interface['name']
-            for cadvisor_key, (metric_name, metric_types, metric_units) in six.iteritems(network_metrics):
+            for cadvisor_key, (metric_name, metric_types, metric_units) in network_metrics.items():
                 if cadvisor_key in interface:
                     self._send_metrics("net." + metric_name, interface[cadvisor_key], network_dimensions,
                                        metric_types, metric_units)
@@ -122,14 +122,14 @@ class CadvisorHost(AgentCheck):
     def _parse_cpu(self, cpu_data, dimensions):
         cpu_metrics = METRICS['cpu_metrics']
         cpu_usage = cpu_data['usage']
-        for cadvisor_key, (metric_name, metric_types, metric_units) in six.iteritems(cpu_metrics):
+        for cadvisor_key, (metric_name, metric_types, metric_units) in cpu_metrics.items():
             if cadvisor_key in cpu_usage:
                 # Convert nanoseconds to seconds
                 cpu_usage_sec = cpu_usage[cadvisor_key] / 1000000000.0
                 self._send_metrics("cpu." + metric_name, cpu_usage_sec, dimensions, metric_types, metric_units)

     def _parse_send_metrics(self, metrics, dimensions):
-        for host, cadvisor_metrics in six.iteritems(metrics):
+        for host, cadvisor_metrics in metrics.items():
             host_dimensions = dimensions.copy()
             # Grab first set of metrics from return data
             cadvisor_metrics = cadvisor_metrics[0]

@@ -51,7 +51,7 @@ class Cpu(checks.AgentCheck):
         # Call lscpu command to get cpu frequency
         self._add_cpu_freq(data)

-        for key, value in data.iteritems():
+        for key, value in data.items():
             if data[key] is None or instance.get('cpu_idle_only') and 'idle_perc' not in key:
                 continue
             self.gauge(key, value, dimensions)

@@ -153,7 +153,7 @@ class HAProxy(AgentCheck):
     def _process_status_metric(self, hosts_statuses):
         agg_statuses = defaultdict(lambda: {'available': 0, 'unavailable': 0})
         status_dimensions = self.dimensions.copy()
-        for (service, status), count in hosts_statuses.iteritems():
+        for (service, status), count in hosts_statuses.items():
             status = status.lower()

             status_dimensions.update({'status': status, 'service': service})
@@ -165,7 +165,7 @@ class HAProxy(AgentCheck):
                 agg_statuses[service]['unavailable'] += count

         for service in agg_statuses:
-            for status, count in agg_statuses[service].iteritems():
+            for status, count in agg_statuses[service].items():
                 status_dimensions.update({'status': status, 'service': service})
                 self.gauge("haproxy.count_per_status", count, dimensions=status_dimensions)

@@ -51,7 +51,7 @@ class KafkaCheck(checks.AgentCheck):
         consumer_groups = dict()
         try:
-            for group, topics in raw_val.iteritems():
+            for group, topics in raw_val.items():
                 assert isinstance(group, basestring)
                 if isinstance(topics, dict):
                     self.log.info("Found old config format, discarding partition list")
@@ -68,7 +68,7 @@ class KafkaCheck(checks.AgentCheck):
         # Query Kafka for consumer offsets
         consumer_offsets = {}
         topic_partitions = collections.defaultdict(set)
-        for consumer_group, topics in consumer_groups.iteritems():
+        for consumer_group, topics in consumer_groups.items():
             for topic in topics:
                 kafka_consumer = None
                 try:
@@ -99,7 +99,7 @@ class KafkaCheck(checks.AgentCheck):
         # Query Kafka for the broker offsets, done in a separate loop so only one query is done
         # per topic/partition even if multiple consumer groups watch the same topic
         broker_offsets = {}
-        for topic, partitions in topic_partitions.iteritems():
+        for topic, partitions in topic_partitions.items():
             offset_responses = []
             for p in partitions:
                 try:
@@ -131,7 +131,7 @@ class KafkaCheck(checks.AgentCheck):
         # Report the broker data if full output
         if full_output:
             broker_dimensions = dimensions.copy()
-            for (topic, partition), broker_offset in broker_offsets.iteritems():
+            for (topic, partition), broker_offset in broker_offsets.items():
                 broker_dimensions.update({'topic': topic, 'partition': str(partition)})
                 broker_offset = broker_offsets.get((topic, partition))
                 self.gauge('kafka.broker_offset', broker_offset,
@@ -139,9 +139,9 @@ class KafkaCheck(checks.AgentCheck):
         # Report the consumer data
         consumer_dimensions = dimensions.copy()
-        for (consumer_group, topic), offsets in consumer_offsets.iteritems():
+        for (consumer_group, topic), offsets in consumer_offsets.items():
             if per_partition:
-                for partition, consumer_offset in offsets.iteritems():
+                for partition, consumer_offset in offsets.items():
                     # Get the broker offset
                     broker_offset = broker_offsets.get((topic, partition))
                     # Report the consumer offset and lag
@@ -155,7 +155,7 @@ class KafkaCheck(checks.AgentCheck):
             else:
                 consumer_dimensions.update({'topic': topic, 'consumer_group': consumer_group})
                 total_lag = 0
-                for partition, consumer_offset in offsets.iteritems():
+                for partition, consumer_offset in offsets.items():
                     broker_offset = broker_offsets.get((topic, partition))
                     total_lag += broker_offset - consumer_offset

@@ -317,7 +317,7 @@ class Kubernetes(checks.AgentCheck):
     def _parse_memory(self, memory_data, container_dimensions, pod_key, pod_map):
         memory_metrics = CADVISOR_METRICS['memory_metrics']
-        for cadvisor_key, metric_name in six.iteritems(memory_metrics):
+        for cadvisor_key, metric_name in memory_metrics.items():
             if cadvisor_key in memory_data:
                 metric_value = memory_data[cadvisor_key]
                 if self.report_container_metrics:
@@ -334,7 +334,7 @@ class Kubernetes(checks.AgentCheck):
         for filesystem in filesystem_data:
             file_dimensions = container_dimensions.copy()
             file_dimensions['device'] = filesystem['device']
-            for cadvisor_key, metric_name in six.iteritems(filesystem_metrics):
+            for cadvisor_key, metric_name in filesystem_metrics.items():
                 if cadvisor_key in filesystem:
                     self._send_metrics("container." + metric_name, filesystem[cadvisor_key], file_dimensions,
                                        METRIC_TYPES_UNITS[metric_name][0],
@@ -347,7 +347,7 @@ class Kubernetes(checks.AgentCheck):
             network_dimensions = container_dimensions.copy()
             network_interface = interface['name']
             network_dimensions['interface'] = network_interface
-            for cadvisor_key, metric_name in six.iteritems(network_metrics):
+            for cadvisor_key, metric_name in network_metrics.items():
                 if cadvisor_key in interface:
                     metric_value = interface[cadvisor_key]
                     if self.report_container_metrics:
@@ -368,7 +368,7 @@ class Kubernetes(checks.AgentCheck):
     def _parse_cpu(self, cpu_data, container_dimensions, pod_key, pod_metrics):
         cpu_metrics = CADVISOR_METRICS['cpu_metrics']
         cpu_usage = cpu_data['usage']
-        for cadvisor_key, metric_name in six.iteritems(cpu_metrics):
+        for cadvisor_key, metric_name in cpu_metrics.items():
             if cadvisor_key in cpu_usage:
                 # convert nanoseconds to seconds
                 cpu_usage_sec = cpu_usage[cadvisor_key] / 1000000000
@@ -434,7 +434,7 @@ class Kubernetes(checks.AgentCheck):
         pod_metrics = {}
         # network pod metrics
         pod_network_metrics = {}
-        for container, cadvisor_metrics in six.iteritems(containers_metrics):
+        for container, cadvisor_metrics in containers_metrics.items():
             pod_key, container_dimensions = self._get_container_dimensions(container,
                                                                            dimensions,
                                                                            containers_spec[container],
@@ -455,20 +455,20 @@ class Kubernetes(checks.AgentCheck):
         self.send_network_pod_metrics(pod_network_metrics, pod_dimension_map)

     def send_pod_metrics(self, pod_metrics_map, pod_dimension_map):
-        for pod_key, pod_metrics in six.iteritems(pod_metrics_map):
+        for pod_key, pod_metrics in pod_metrics_map.items():
             pod_dimensions = pod_dimension_map[pod_key]
-            for metric_name, metric_value in six.iteritems(pod_metrics):
+            for metric_name, metric_value in pod_metrics.items():
                 self._send_metrics("pod." + metric_name, metric_value, pod_dimensions,
                                    METRIC_TYPES_UNITS[metric_name][0],
                                    METRIC_TYPES_UNITS[metric_name][1])

     def send_network_pod_metrics(self, pod_network_metrics, pod_dimension_map):
-        for pod_key, network_interfaces in six.iteritems(pod_network_metrics):
+        for pod_key, network_interfaces in pod_network_metrics.items():
             pod_dimensions = pod_dimension_map[pod_key]
-            for network_interface, metrics in six.iteritems(network_interfaces):
+            for network_interface, metrics in network_interfaces.items():
                 pod_network_dimensions = pod_dimensions.copy()
                 pod_network_dimensions['interface'] = network_interface
-                for metric_name, metric_value in six.iteritems(metrics):
+                for metric_name, metric_value in metrics.items():
                     self._send_metrics("pod." + metric_name, metric_value, pod_network_dimensions,
                                        METRIC_TYPES_UNITS[metric_name][0],
                                        METRIC_TYPES_UNITS[metric_name][1])

@@ -598,7 +598,7 @@ class LibvirtCheck(AgentCheck):
     def prepare_run(self):
         """Check if it is time for measurements to be collected"""
-        for name, collection in self._collect_intervals.iteritems():
+        for name, collection in self._collect_intervals.items():
             if collection['period'] <= 0:
                 continue

@@ -183,7 +183,7 @@ class MySql(checks.AgentCheck):
                    dimensions=dimensions)

     def _rate_or_gauge_statuses(self, statuses, dbResults, dimensions):
-        for status, metric in statuses.iteritems():
+        for status, metric in statuses.items():
             metric_name, metric_type = metric
             value = self._collect_scalar(status, dbResults)
             if value is not None:

@@ -84,7 +84,7 @@ class OvsCheck(AgentCheck):
             if ifx not in ctr_cache:
                 ctr_cache[ifx] = {}

-            for metric_name, idx in self._get_metrics_map(measure).iteritems():
+            for metric_name, idx in self._get_metrics_map(measure).items():
                 interface_stats_key = self._get_interface_stats_key(idx, metric_name, measure, ifx)
                 statistics_dict = interface_data[ifx]['statistics']
                 value = statistics_dict[interface_stats_key] if interface_stats_key in statistics_dict else 0
@@ -136,7 +136,7 @@ class OvsCheck(AgentCheck):
         tried_one_update = False
         host_router_max_bw = 0
         active_routers = 0
-        for ifx, value in ifx_deltas.iteritems():
+        for ifx, value in ifx_deltas.items():
             port_uuid = value['port_uuid']

             if port_uuid not in port_cache and not tried_one_update:
@@ -187,7 +187,7 @@ class OvsCheck(AgentCheck):
             if tenant_name:
                 ops_dimensions.update({'tenant_name': tenant_name})

-            for metric_name, idx in self._get_metrics_map(measure).iteritems():
+            for metric_name, idx in self._get_metrics_map(measure).items():
                 # POST to customer project
                 interface_stats_key = self._get_interface_stats_key(idx, metric_name, measure, ifx)
                 if interface_stats_key not in value:

@@ -180,6 +180,6 @@ class ProcessCheck(checks.AgentCheck):
         if instance.get('detailed', False):
             metrics = self.get_process_metrics(pids, name)
-            for metric_name, metric_value in metrics.iteritems():
+            for metric_name, metric_value in metrics.items():
                 if metric_value is not None:
                     self.gauge(metric_name, metric_value, dimensions=dimensions)

@@ -142,8 +142,8 @@ class RabbitMQ(checks.AgentCheck):
             },
         }

-        for object_type, filters in specified.iteritems():
-            for filter_type, filter_objects in filters.iteritems():
+        for object_type, filters in specified.items():
+            for filter_type, filter_objects in filters.items():
                 if type(filter_objects) != list:
                     raise TypeError(
                         "{0} / {0}_regexes parameter must be a list".format(object_type))

@@ -43,7 +43,7 @@ class SolidFire(checks.AgentCheck):
         data.update(self._get_cluster_capacity())

         # Dump data upstream.
-        for key, value in data.iteritems():
+        for key, value in data.items():
             if data[key] is None:
                 continue
             self.gauge(key, value, dimensions)

@@ -145,7 +145,7 @@ class Vertica(checks.AgentCheck):
         results = self._results_to_dict(results)
         resource_metric_name = 'vertica.resource.'
         resource_metrics = results[0]
-        for metric_name, metric_value in resource_metrics.iteritems():
+        for metric_name, metric_value in resource_metrics.items():
             if metric_name in ['resource_rejections', 'disk_space_rejections']:
                 self.rate(resource_metric_name + metric_name, int(metric_value), dimensions=dimensions)
             else:

@@ -272,7 +272,7 @@ def main():
        print("#" * 80)
        print("\n")
        print("You have to specify one of the following commands:")
-        for command, desc in jmxfetch.JMX_LIST_COMMANDS.iteritems():
+        for command, desc in jmxfetch.JMX_LIST_COMMANDS.items():
            print(" - %s [OPTIONAL: LIST OF CHECKS]: %s" % (command, desc))
        print("Example: sudo /etc/init.d/monasca-agent jmx list_matching_attributes tomcat jmx solr")
        print("\n")

@@ -99,7 +99,7 @@ class MetricsAggregator(object):
             msg = "Too many valueMeta entries {0}, limit is {1}: {2} -> {3} valueMeta {4}"
             log.error(msg.format(len(value_meta), VALUE_META_MAX_NUMBER, name, dimensions, value_meta))
             return False
-        for key, value in value_meta.iteritems():
+        for key, value in value_meta.items():
             if not key:
                 log.error("valueMeta name cannot be empty: {0} -> {1}".format(name, dimensions))
                 return False
@@ -123,7 +123,7 @@ class MetricsAggregator(object):
                      delegated_tenant=None, hostname=None, device_name=None,
                      value_meta=None, timestamp=None, sample_rate=1):
         if dimensions:
-            for k, v in dimensions.iteritems():
+            for k, v in dimensions.items():
                 if not isinstance(k, (str, unicode)):
                     log.error("invalid dimension key {0} must be a string: {1} -> {2}".format(k, name, dimensions))
                     raise InvalidDimensionKey

@@ -42,7 +42,7 @@ def deep_merge(adict, other):
     """A recursive merge of two dictionaries including combining of any lists within the data structure.
     """
-    for key, value in other.iteritems():
+    for key, value in other.items():
         if key in adict:
             if isinstance(adict[key], dict) and isinstance(value, dict):
                 deep_merge(adict[key], value)

@@ -238,7 +238,7 @@ class Kafka(Plugin):
         # If it did, delete it after use so it doesn't become a consumer group
         if 'service_name' in self.args:
             service_name += '_' + str(self.args.pop('service_name'))
-        for key, value in self.args.iteritems():
+        for key, value in self.args.items():
             value_dict = {topic: [] for topic in value.split('/')}
             consumers[key] = value_dict
         self.config['kafka_consumer'] = {'init_config': None,

@@ -120,7 +120,7 @@ def base_configuration(args):
     if args.dimensions:
         dimensions.update(dict(item.strip().split(":") for item in args.dimensions.split(",")))
-    args.dimensions = dict((name, value) for (name, value) in dimensions.iteritems())
+    args.dimensions = dict((name, value) for (name, value) in dimensions.items())
     write_template(os.path.join(args.template_dir, 'agent.yaml.template'),
                    os.path.join(args.config_dir, 'agent.yaml'),
                    {'args': args, 'hostname': socket.getfqdn()},
@@ -142,7 +142,7 @@ def modify_config(args, detected_config):
     """
     modified_config = False
-    for detection_plugin_name, new_config in detected_config.iteritems():
+    for detection_plugin_name, new_config in detected_config.items():
         if args.overwrite:
             modified_config = True
             if args.dry_run:

@@ -79,7 +79,7 @@ class TestPostfix(unittest.TestCase):
         # output what went in... per queue
         print()
-        for queue, count in self.in_count.iteritems():
+        for queue, count in self.in_count.items():
             print('Test messages put into', queue, '= ', self.in_count[queue][0])

         # output postfix.py dd-agent plugin counts... per queue