Fixing flake8 errors now that tox is working again

Change-Id: Id34991711c57f20922f003928008143032723af3
This commit is contained in:
Joe Keen 2015-07-26 22:10:37 -06:00
parent d108a0f4ab
commit 1ded0b7b1c
79 changed files with 368 additions and 398 deletions

View File

@ -143,7 +143,7 @@ class Check(util.Dimensions):
[(timestamp, value, hostname, device_name)]
else:
raise exceptions.CheckException("%s must be either gauge or counter, skipping sample at %s" %
(metric, time.ctime(timestamp)))
(metric, time.ctime(timestamp)))
if self.is_gauge(metric):
# store[metric][dimensions] = (ts, val) - only 1 value allowed
@ -636,32 +636,3 @@ class AgentCheck(util.Dimensions):
return val
else:
return cast(val)
def run_check(name, path=None):
import tests.common
# Read the config file
config = Config()
confd_path = path or os.path.join(config.get_confd_path(),
'{0}.yaml'.format(name))
try:
f = open(confd_path)
except IOError:
raise Exception('Unable to open configuration at %s' % confd_path)
config_str = f.read()
f.close()
# Run the check
check, instances = tests.common.get_check(name, config_str)
if not instances:
raise Exception('YAML configuration returned no instances.')
for instance in instances:
check.check(instance)
if check.has_events():
print("Events:\n")
pprint.pprint(check.get_events(), indent=4)
print("Metrics:\n")
pprint.pprint(check.get_metrics(), indent=4)

View File

@ -1,13 +1,13 @@
# Core modules
import logging
import socket
import system.win32 as w32
import threading
import time
import monasca_agent.common.check_status as check_status
import monasca_agent.common.metrics as metrics
import monasca_agent.common.util as util
import system.win32 as w32
log = logging.getLogger(__name__)

View File

@ -74,8 +74,7 @@ class PoolWorker(threading.Thread):
class Pool(object):
"""
The Pool class represents a pool of worker threads.
"""The Pool class represents a pool of worker threads.
It has methods which allows tasks to be offloaded to the
worker processes in a few different ways.

View File

@ -170,6 +170,7 @@ class ServicesCheck(monasca_agent.collector.checks.AgentCheck):
def _check(self, instance):
"""This function should be implemented by inherited classes.
"""
raise NotImplementedError

View File

@ -5,16 +5,20 @@ import urllib2
import urlparse
# project
import monasca_agent.common.util as util
import monasca_agent.collector.checks as checks
import monasca_agent.collector.checks.services_checks as services_checks
import monasca_agent.collector.checks.utils as utils
import monasca_agent.common.util as util
log = logging.getLogger(__name__)
class Apache(checks.AgentCheck):
"""Tracks basic connection/requests/workers metrics
See http://httpd.apache.org/docs/2.2/mod/mod_status.html for more details
"""
GAUGES = {'IdleWorkers': 'apache.performance.idle_worker_count',
'BusyWorkers': 'apache.performance.busy_worker_count',
'CPULoad': 'apache.performance.cpu_load_perc',
@ -54,9 +58,9 @@ class Apache(checks.AgentCheck):
apache_host = socket.gethostname()
dimensions = self._set_dimensions({'apache_host': apache_host,
'apache_port': apache_port,
'service': 'apache',
'component': 'apache'},
'apache_port': apache_port,
'service': 'apache',
'component': 'apache'},
instance)
try:
@ -77,7 +81,7 @@ class Apache(checks.AgentCheck):
# Loop through and extract the numerical values
for line in response.split('\n'):
values = line.split(': ')
if len(values) == 2: # match
if len(values) == 2: # match
metric, value = values
try:
value = float(value)

View File

@ -32,7 +32,6 @@ class CheckMK(AgentCheck):
def __init__(self, name, init_config, agent_config, instances=None):
AgentCheck.__init__(self, name, init_config, agent_config, instances)
def check(self, instance):
"""Run check_mk_agent and process the '<<<local>>>' results.
"""

View File

@ -1,7 +1,7 @@
import json
import urllib2
import re
import sys
import urllib2
from monasca_agent.collector.checks import AgentCheck
from monasca_agent.collector.checks.utils import add_basic_auth

View File

@ -1,5 +1,5 @@
import psutil
import logging
import psutil
import monasca_agent.collector.checks as checks
@ -18,9 +18,9 @@ class Cpu(checks.AgentCheck):
dimensions = self._set_dimensions(None, instance)
if instance is not None:
send_rollup_stats = instance.get("send_rollup_stats", False)
send_rollup_stats = instance.get("send_rollup_stats", False)
else:
send_rollup_stats = False
send_rollup_stats = False
cpu_stats = psutil.cpu_times_percent(percpu=False)
num_of_metrics = self._format_results(cpu_stats.user + cpu_stats.nice,
@ -34,7 +34,6 @@ class Cpu(checks.AgentCheck):
num_of_metrics += 1
log.debug('Collected {0} cpu metrics'.format(num_of_metrics))
def _format_results(self, us, sy, wa, idle, st, dimensions):
data = {'cpu.user_perc': us,
'cpu.system_perc': sy,

View File

@ -16,8 +16,7 @@ class Crash(checks.AgentCheck):
log.debug('crash dir: %s', self.crash_dir)
def check(self, instance):
"""
Capture crash dump statistics
"""Capture crash dump statistics
"""
dimensions = self._set_dimensions(None, instance)
dump_count = 0

View File

@ -1,9 +1,9 @@
from fnmatch import fnmatch
from os import stat
from os import walk
from os.path import abspath
from os.path import exists
from os.path import join
from os import stat
from os import walk
import time
from monasca_agent.collector.checks import AgentCheck

View File

@ -1,6 +1,7 @@
import psutil
import logging
import os
import psutil
import re
log = logging.getLogger(__name__)
@ -22,14 +23,14 @@ class Disk(checks.AgentCheck):
if instance is not None:
use_mount = instance.get("use_mount", True)
send_io_stats = instance.get("send_io_stats", True)
send_rollup_stats = instance.get("send_rollup_stats", False)
send_rollup_stats = instance.get("send_rollup_stats", False)
# If we filter devices, get the list.
device_blacklist_re = self._get_re_exclusions(instance)
fs_types_to_ignore = self._get_fs_exclusions(instance)
else:
use_mount = True
send_io_stats = True
send_rollup_stats = False
send_rollup_stats = False
device_blacklist_re = None
fs_types_to_ignore = []
@ -41,8 +42,8 @@ class Disk(checks.AgentCheck):
total_used = 0
for partition in partitions:
if partition.fstype not in fs_types_to_ignore \
or (device_blacklist_re \
and not device_blacklist_re.match(partition.device)):
or (device_blacklist_re
and not device_blacklist_re.match(partition.device)):
device_name = self._get_device_name(partition.device)
disk_usage = psutil.disk_usage(partition.mountpoint)
total_capacity += disk_usage.total
@ -80,14 +81,13 @@ class Disk(checks.AgentCheck):
if send_rollup_stats:
self.gauge("disk.total_space_mb",
total_capacity/1048576,
dimensions=rollup_dimensions)
total_capacity / 1048576,
dimensions=rollup_dimensions)
self.gauge("disk.total_used_space_mb",
total_used/1048576,
dimensions=rollup_dimensions)
total_used / 1048576,
dimensions=rollup_dimensions)
log.debug('Collected 2 rolled-up disk usage metrics')
def _get_re_exclusions(self, instance):
"""Parse device blacklist regular expression"""
filter = None
@ -95,7 +95,7 @@ class Disk(checks.AgentCheck):
filter_device_re = instance.get('device_blacklist_re', None)
if filter_device_re:
filter = re.compile(filter_device_re)
except re.error as err:
except re.error:
log.error('Error processing regular expression {0}'.format(filter_device_re))
return filter

View File

@ -3,8 +3,8 @@ import json
import os
import re
import socket
import urllib2
import urllib
import urllib2
from urlparse import urlsplit
from monasca_agent.collector.checks import AgentCheck

View File

@ -206,7 +206,6 @@ class ElasticSearch(AgentCheck):
self.STATS_URL = "/_cluster/nodes/stats?all=true"
self.NODES_URL = "/_cluster/nodes?network=true"
additional_metrics = {
"elasticsearch.cache.field.evictions": ("gauge", "indices.cache.field_evictions"),
"elasticsearch.cache.field.size": ("gauge", "indices.cache.field_size_in_bytes"),

View File

@ -232,8 +232,7 @@ class HAProxy(AgentCheck):
# Store this host status so we can check against it later
self.host_status[url][key] = data['status']
@staticmethod
def _create_event(status, hostname, lastchg, service_name):
def _create_event(self, status, hostname, lastchg, service_name):
if status == "DOWN":
alert_type = "error"
title = "HAProxy %s front-end reported %s %s" % (service_name, hostname, status)

View File

@ -12,8 +12,9 @@ from httplib2 import httplib
from httplib2 import HttpLib2Error
import monasca_agent.collector.checks.services_checks as services_checks
import monasca_agent.common.keystone as keystone
import monasca_agent.common.config as cfg
import monasca_agent.common.keystone as keystone
class HTTPCheck(services_checks.ServicesCheck):

View File

@ -84,7 +84,7 @@ class IIS(AgentCheck):
if iis_site.Name == '_Total':
dimensions.pop('site', None)
else:
dimensions.update({'site': iis_site.Name})
dimensions.update({'site': iis_site.Name})
for metric, mtype, wmi_val in self.METRICS:
if not hasattr(iis_site, wmi_val):

View File

@ -14,7 +14,7 @@ log = logging.getLogger(__name__)
class KafkaConnection(object):
""" A simple context manager for kafka connections """
"""A simple context manager for kafka connections """
def __init__(self, connect_str):
self.connect_str = connect_str
@ -31,13 +31,13 @@ class KafkaConnection(object):
class KafkaCheck(checks.AgentCheck):
""" Checks the configured kafka instance reporting the consumption lag
for each partition per topic in each consumer group. If full_output
is set also reports broker offsets and the current consumer offset.
Works on Kafka version >= 0.8.1.1
"""Checks the configured kafka instance reporting the consumption lag
for each partition per topic in each consumer group. If full_output
is set also reports broker offsets and the current consumer offset.
Works on Kafka version >= 0.8.1.1
"""
def _validate_consumer_groups(self, val):
""" Private config validation/marshalling functions
"""Private config validation/marshalling functions
"""
try:
consumer_group, topic_partitions = val.items()[0]

View File

@ -24,8 +24,8 @@ import yaml
from calendar import timegm
from datetime import datetime
from distutils.version import LooseVersion
from monasca_agent.collector.virt import inspector
from monasca_agent.collector.checks import AgentCheck
from monasca_agent.collector.virt import inspector
class LibvirtCheck(AgentCheck):
@ -93,8 +93,8 @@ class LibvirtCheck(AgentCheck):
try:
with open(self.instance_cache_file, 'w') as cache_yaml:
yaml.safe_dump(id_cache, cache_yaml)
if stat.S_IMODE(os.stat(self.instance_cache_file).st_mode) != 0600:
os.chmod(self.instance_cache_file, 0600)
if stat.S_IMODE(os.stat(self.instance_cache_file).st_mode) != 0o600:
os.chmod(self.instance_cache_file, 0o600)
except IOError as e:
self.log.error("Cannot write to {0}: {1}".format(self.instance_cache_file, e))
@ -139,8 +139,8 @@ class LibvirtCheck(AgentCheck):
try:
with open(self.metric_cache_file, 'w') as cache_yaml:
yaml.safe_dump(metric_cache, cache_yaml)
if stat.S_IMODE(os.stat(self.metric_cache_file).st_mode) != 0600:
os.chmod(self.metric_cache_file, 0600)
if stat.S_IMODE(os.stat(self.metric_cache_file).st_mode) != 0o600:
os.chmod(self.metric_cache_file, 0o600)
except IOError as e:
self.log.error("Cannot write to {0}: {1}".format(self.metric_cache_file, e))

View File

@ -1,7 +1,8 @@
import psutil
import logging
import psutil
import re
import subprocess
import sys
import monasca_agent.collector.checks as checks
import monasca_agent.common.util as util
@ -36,7 +37,7 @@ class Load(checks.AgentCheck):
# Get output from uptime
try:
uptime = subprocess.Popen(['uptime'],
stdout=sp.PIPE,
stdout=subprocess.PIPE,
close_fds=True).communicate()[0]
except Exception:
log.exception('Cannot extract load using uptime')

View File

@ -1,5 +1,5 @@
import psutil
import logging
import psutil
log = logging.getLogger(__name__)
@ -21,25 +21,25 @@ class Memory(checks.AgentCheck):
swap_info = psutil.swap_memory()
self.gauge('mem.total_mb',
int(mem_info.total/1048576),
int(mem_info.total / 1048576),
dimensions=dimensions)
self.gauge('mem.free_mb',
int(mem_info.free/1048576),
int(mem_info.free / 1048576),
dimensions=dimensions)
self.gauge('mem.usable_mb',
int(mem_info.available/1048576),
int(mem_info.available / 1048576),
dimensions=dimensions)
self.gauge('mem.usable_perc',
float(100 - mem_info.percent),
dimensions=dimensions)
self.gauge('mem.swap_total_mb',
int(swap_info.total/1048576),
int(swap_info.total / 1048576),
dimensions=dimensions)
self.gauge('mem.swap_used_mb',
int(swap_info.used/1048576),
int(swap_info.used / 1048576),
dimensions=dimensions)
self.gauge('mem.swap_free_mb',
int(swap_info.free/1048576),
int(swap_info.free / 1048576),
dimensions=dimensions)
self.gauge('mem.swap_free_perc',
float(100 - swap_info.percent),
@ -48,20 +48,20 @@ class Memory(checks.AgentCheck):
count = 8
if 'buffers' in mem_info:
self.gauge('mem.used_buffers',
int(mem_info.buffers/1048576),
int(mem_info.buffers / 1048576),
dimensions=dimensions)
count +=1
count += 1
if 'cached' in mem_info:
self.gauge('mem.used_cache',
int(mem_info.cached/1048576),
int(mem_info.cached / 1048576),
dimensions=dimensions)
count +=1
count += 1
if 'shared' in mem_info:
self.gauge('mem.used_shared',
int(mem_info.shared/1048576),
int(mem_info.shared / 1048576),
dimensions=dimensions)
count +=1
count += 1
log.debug('Collected {0} memory metrics'.format(count))

View File

@ -11,7 +11,8 @@ import socket
import subprocess
import time
from monasca_agent.collector.checks.services_checks import ServicesCheck, Status
from monasca_agent.collector.checks.services_checks import ServicesCheck
from monasca_agent.collector.checks.services_checks import Status
class WrapNagios(ServicesCheck):

View File

@ -1,7 +1,7 @@
# stdlib
import logging
import re
import psutil
import re
# project
import monasca_agent.collector.checks as checks
@ -32,7 +32,6 @@ class Network(checks.AgentCheck):
exclude_iface_re = None
nics = psutil.net_io_counters(pernic=True)
count = 0
for nic_name in nics.keys():
if self._is_nic_monitored(nic_name, excluded_ifaces, exclude_iface_re):
nic = nics[nic_name]

View File

@ -9,14 +9,14 @@ DEFAULT_PORT = "ntp"
class NtpCheck(AgentCheck):
""" Uses ntplib to grab a metric for the ntp offset
"""Uses ntplib to grab a metric for the ntp offset
"""
def check(self, instance):
dimensions = self._set_dimensions(None, instance)
req_args = {
'host': instance.get('host', DEFAULT_HOST),
'port': instance.get('port', DEFAULT_PORT),
'host': instance.get('host', DEFAULT_HOST),
'port': instance.get('port', DEFAULT_PORT),
'version': int(instance.get('version', DEFAULT_NTP_VERSION)),
'timeout': float(instance.get('timeout', DEFAULT_TIMEOUT)),
}
@ -32,4 +32,3 @@ class NtpCheck(AgentCheck):
# case the agent host's clock is messed up.
ntp_ts = ntp_stats.recv_time
self.gauge('ntp.offset', ntp_offset, timestamp=ntp_ts, dimensions=dimensions)

View File

@ -140,8 +140,8 @@ class ProcessCheck(checks.AgentCheck):
except psutil.AccessDenied:
got_denied = True
rss += float(mem.rss/1048576)
vms += float(mem.vms/1048576)
rss += float(mem.rss / 1048576)
vms += float(mem.vms / 1048576)
thr += p.get_num_threads()
cpu += p.get_cpu_percent(cpu_check_interval)
@ -151,8 +151,8 @@ class ProcessCheck(checks.AgentCheck):
io_counters = p.get_io_counters()
read_count += io_counters.read_count
write_count += io_counters.write_count
read_kbytes += float(io_counters.read_bytes/1024)
write_kbytes += float(io_counters.write_bytes/1024)
read_kbytes += float(io_counters.read_bytes / 1024)
write_kbytes += float(io_counters.write_bytes / 1024)
except psutil.AccessDenied:
self.log.debug('monasca-agent user does not have ' +
'access to I/O counters for process' +

View File

@ -194,10 +194,10 @@ class RabbitMQ(checks.AgentCheck):
# No queues/node are specified. We will process every queue/node if it's under the limit
else:
# Monasca does not support events at this time.
# if len(data) > ALERT_THRESHOLD * max_detailed:
# # Post a message on the dogweb stream to warn
# self.alert(base_url, max_detailed, len(data), object_type)
# Monasca does not support events at this time.
# if len(data) > ALERT_THRESHOLD * max_detailed:
# # Post a message on the dogweb stream to warn
# self.alert(base_url, max_detailed, len(data), object_type)
if len(data) > max_detailed:
# Display a warning in the info page
@ -226,7 +226,7 @@ class RabbitMQ(checks.AgentCheck):
root = root.get(path, {})
value = root.get(keys[-1], None)
if value == None:
if value is None:
value = 0.0
try:
self.log.debug("Collected data for %s: metric name: %s: value: %f dimensions: %s" % (object_type, metric_name, float(value), str(dimensions)))

View File

@ -29,19 +29,18 @@ def run_command(command, input=None):
stdout,
stderr))
return errcode, stdout, stderr
except Exception as e:
except Exception:
log.error("Failure while executing command - {0}".format(command))
def process_command(command):
"""
Runs the command and returns json output
"""Runs the command and returns json output
"""
try:
errcode, stdout, stderr = run_command(command)
json_output = json.loads(stdout)
return json_output
except Exception as e:
except Exception:
log.error('Failure while processing output - {0}'.format(stdout))
@ -50,11 +49,9 @@ class SwiftDiags(checks.AgentCheck):
super(SwiftDiags, self).__init__(name, init_config, agent_config)
def check(self, instance):
"""
Get swift checks and propagate.
The checks are part of HP swift-diags package and checks are
are run only if the package exists.
"""Get swift checks and propagate.
The checks are part of HP swift-diags package and checks are
are run only if the package exists.
"""
if not (os.path.exists(DIAG_COMMAND) and
os.path.exists(CHECKER_COMMAND)):

View File

@ -116,9 +116,9 @@ class Zookeeper(AgentCheck):
# Latency min/avg/max: -10/0/20007
_, value = buf.readline().split(':')
l_min, l_avg, l_max = [int(v) for v in value.strip().split('/')]
metrics.append(('zookeeper.min_latency_sec', float(l_min)/1000))
metrics.append(('zookeeper.avg_latency_sec', float(l_avg)/1000))
metrics.append(('zookeeper.max_latency_sec', float(l_max)/1000))
metrics.append(('zookeeper.min_latency_sec', float(l_min) / 1000))
metrics.append(('zookeeper.avg_latency_sec', float(l_avg) / 1000))
metrics.append(('zookeeper.max_latency_sec', float(l_max) / 1000))
# Received: 101032173
_, value = buf.readline().split(':')

View File

@ -20,7 +20,7 @@ import monasca_agent.common.util as util
# set up logging before importing any other components
util.initialize_logging('collector')
os.umask(022)
os.umask(0o22)
# Check we're not using an old version of Python. We need 2.4 above because
# some modules (like subprocess) were only introduced in 2.4.
@ -264,7 +264,6 @@ def main():
run_check(check)
elif 'configcheck' == command or 'configtest' == command:
osname = util.get_os()
all_valid = True
paths = util.Paths()
for conf_path in glob.glob(os.path.join(paths.get_confd_path(), "*.yaml")):
@ -317,6 +316,7 @@ def main():
return 0
def run_check(check):
is_multi_threaded = False

View File

@ -20,7 +20,7 @@ from monasca_agent.collector.virt.hyperv import utilsv2
from monasca_agent.collector.virt import inspector as virt_inspector
def instance_name(instance):
def get_instance_name(instance):
"""Shortcut to get instance name."""
return getattr(instance, 'OS-EXT-SRV-ATTR:instance_name', None)

View File

@ -279,5 +279,5 @@ def get_hypervisor_inspector():
cfg.CONF.hypervisor_inspector,
invoke_on_load=True)
return mgr.driver
except ImportError as e:
except ImportError:
return Inspector()

View File

@ -90,7 +90,7 @@ class VsphereInspector(virt_inspector.Inspector):
vm_moid = self._ops.get_vm_moid(instance.id)
if vm_moid is None:
raise virt_inspector.InstanceNotFoundException(
_('VM %s not found in VMware Vsphere') % instance.id)
'VM %s not found in VMware Vsphere' % instance.id)
cpu_util_counter_id = self._ops.get_perf_counter_id(
VC_AVERAGE_CPU_CONSUMED_CNTR)
cpu_util = self._ops.query_vm_aggregate_stats(
@ -107,7 +107,7 @@ class VsphereInspector(virt_inspector.Inspector):
vm_moid = self._ops.get_vm_moid(instance.id)
if not vm_moid:
raise virt_inspector.InstanceNotFoundException(
_('VM %s not found in VMware Vsphere') % instance.id)
'VM %s not found in VMware Vsphere' % instance.id)
vnic_stats = {}
vnic_ids = set()
@ -139,7 +139,7 @@ class VsphereInspector(virt_inspector.Inspector):
vm_moid = self._ops.get_vm_moid(instance.id)
if vm_moid is None:
raise virt_inspector.InstanceNotFoundException(
_('VM %s not found in VMware Vsphere') % instance.id)
'VM %s not found in VMware Vsphere' % instance.id)
mem_counter_id = self._ops.get_perf_counter_id(
VC_AVERAGE_MEMORY_CONSUMED_CNTR)
memory = self._ops.query_vm_aggregate_stats(
@ -152,7 +152,7 @@ class VsphereInspector(virt_inspector.Inspector):
vm_moid = self._ops.get_vm_moid(instance.id)
if not vm_moid:
raise virt_inspector.InstanceNotFoundException(
_('VM %s not found in VMware Vsphere') % instance.id)
'VM %s not found in VMware Vsphere' % instance.id)
disk_stats = {}
disk_ids = set()

View File

@ -57,23 +57,23 @@ class XenapiException(virt_inspector.InspectorException):
def get_api_session():
if not api:
raise ImportError(_('XenAPI not installed'))
raise ImportError('XenAPI not installed')
url = CONF.xenapi.connection_url
username = CONF.xenapi.connection_username
password = CONF.xenapi.connection_password
if not url or password is None:
raise XenapiException(_('Must specify connection_url, and '
'connection_password to use'))
raise XenapiException('Must specify connection_url, and '
'connection_password to use')
exception = api.Failure(_("Unable to log in to XenAPI "
"(is the Dom0 disk full?)"))
exception = api.Failure("Unable to log in to XenAPI "
"(is the Dom0 disk full?)")
try:
session = api.Session(url)
with timeout.Timeout(CONF.xenapi.login_timeout, exception):
session.login_with_password(username, password)
except api.Failure as e:
msg = _("Could not connect to XenAPI: %s") % e.details[0]
msg = "Could not connect to XenAPI: %s" % e.details[0]
raise XenapiException(msg)
return session
@ -96,10 +96,10 @@ class XenapiInspector(virt_inspector.Inspector):
n = len(vm_refs)
if n == 0:
raise virt_inspector.InstanceNotFoundException(
_('VM %s not found in XenServer') % instance_name)
'VM %s not found in XenServer' % instance_name)
elif n > 1:
raise XenapiException(
_('Multiple VM %s found in XenServer') % instance_name)
'Multiple VM %s found in XenServer' % instance_name)
else:
return vm_refs[0]
@ -112,7 +112,7 @@ class XenapiInspector(virt_inspector.Inspector):
vcpus_number = metrics_rec['VCPUs_number']
vcpus_utils = metrics_rec['VCPUs_utilisation']
if len(vcpus_utils) == 0:
msg = _("Could not get VM %s CPU Utilization") % instance_name
msg = "Could not get VM %s CPU Utilization" % instance_name
raise XenapiException(msg)
utils = 0.0

View File

@ -34,10 +34,7 @@ class InvalidDimensionValue(Exception):
class MetricsAggregator(object):
"""
A metric aggregator class.
"""
"""A metric aggregator class."""
def __init__(self, hostname, recent_point_threshold=None):
self.events = []
@ -63,10 +60,8 @@ class MetricsAggregator(object):
priority=None,
dimensions=None,
hostname=None):
event = {
'msg_title': title,
'msg_text': text,
}
event = {'msg_title': title,
'msg_text': text}
if date_happened is not None:
event['timestamp'] = date_happened
else:
@ -97,7 +92,7 @@ class MetricsAggregator(object):
for context, metric in self.metrics.items():
try:
metrics.extend(metric.flush(timestamp))
except Exception as e:
except Exception:
log.exception('Error flushing {0} metrics.'.format(metric.name))
# Log a warning regarding metrics with old timestamps being submitted
@ -126,9 +121,9 @@ class MetricsAggregator(object):
@staticmethod
def formatter(metric, value, timestamp, dimensions, hostname, delegated_tenant=None,
device_name=None, metric_type=None, value_meta=None):
""" Formats metrics, put them into a Measurement class
(metric, timestamp, value, {"dimensions": {"name1": "value1", "name2": "value2"}, ...})
dimensions should be a dictionary
"""Formats metrics, put them into a Measurement class
(metric, timestamp, value, {"dimensions": {"name1": "value1", "name2": "value2"}, ...})
dimensions should be a dictionary
"""
if 'hostname' not in dimensions and hostname:
dimensions.update({'hostname': hostname})

View File

@ -31,6 +31,7 @@ NTP_OFFSET_THRESHOLD = 600
log = logging.getLogger(__name__)
class Stylizer(object):
STYLES = {
@ -58,7 +59,7 @@ class Stylizer(object):
@classmethod
def stylize(cls, text, *styles):
""" stylize the text. """
"""stylize the text. """
if not cls.ENABLED:
return text
# don't bother about escaping, not that complicated.
@ -102,9 +103,7 @@ def get_ntp_info():
class AgentStatus(object):
"""
A small class used to load and save status messages to the filesystem.
"""A small class used to load and save status messages to the filesystem.
"""
NAME = None
@ -497,8 +496,6 @@ class CollectorStatus(AgentStatus):
check_status['error'] = es.error
status_info['emitter'].append(check_status)
osname = config.get_os()
paths = util.Paths()
try:
status_info['confd_path'] = paths.get_confd_path()

View File

@ -9,6 +9,7 @@ try:
except ImportError:
from yaml import Loader
from monasca_agent.common.exceptions import PathNotFound
import monasca_agent.common.singleton as singleton
DEFAULT_CONFIG_FILE = '/etc/monasca/agent/agent.yaml'
@ -17,10 +18,11 @@ LOGGING_MAX_BYTES = 5 * 1024 * 1024
log = logging.getLogger(__name__)
class Config(object):
# Make this a singleton class so we don't get the config every time
# the class is created
__metaclass__ = singleton.Singleton
six.add_metaclass(singleton.Singleton)
def __init__(self, configFile=None):
if configFile is not None:
@ -146,10 +148,10 @@ def main():
api_config = configuration.get_config('Api')
statsd_config = configuration.get_config('Statsd')
logging_config = configuration.get_config('Logging')
print "Main Configuration: \n {0}".format(config)
print "\nApi Configuration: \n {0}".format(api_config)
print "\nStatsd Configuration: \n {0}".format(statsd_config)
print "\nLogging Configuration: \n {0}".format(logging_config)
print("Main Configuration: \n {0}".format(config))
print("\nApi Configuration: \n {0}".format(api_config))
print("\nStatsd Configuration: \n {0}".format(statsd_config))
print("\nLogging Configuration: \n {0}".format(logging_config))
if __name__ == "__main__":

View File

@ -12,29 +12,28 @@
# Core modules
import atexit
import errno
import logging
import os
import signal
import sys
import time
import logging
import errno
import signal
log = logging.getLogger(__name__)
class AgentSupervisor(object):
''' A simple supervisor to keep a restart a child on expected auto-restarts
'''
"""A simple supervisor to keep a restart a child on expected auto-restarts
"""
RESTART_EXIT_STATUS = 5
@classmethod
def start(cls, parent_func, child_func=None):
''' `parent_func` is a function that's called every time the child
process dies.
`child_func` is a function that should be run by the forked child
that will auto-restart with the RESTART_EXIT_STATUS.
'''
"""`parent_func` is a function that's called every time the child
process dies.
`child_func` is a function that should be run by the forked child
that will auto-restart with the RESTART_EXIT_STATUS.
"""
# Allow the child process to die on SIGTERM
signal.signal(signal.SIGTERM, cls._handle_sigterm)
@ -70,9 +69,7 @@ class AgentSupervisor(object):
class Daemon(object):
"""
A generic daemon class.
"""A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
@ -86,8 +83,7 @@ class Daemon(object):
self.pidfile = pidfile
def daemonize(self):
"""
Do the UNIX double-fork magic, see Stevens' "Advanced
"""Do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
@ -208,22 +204,19 @@ class Daemon(object):
self.start()
def run(self):
"""
You should override this method when you subclass Daemon. It will be called after the process has been
daemonized by start() or restart().
"""You should override this method when you subclass Daemon.
It will be called after the process has been daemonized by start() or restart().
"""
raise NotImplementedError
def info(self):
"""
You should override this method when you subclass Daemon. It will be
called to provide information about the status of the process
"""You should override this method when you subclass Daemon.
It will be called to provide information about the status of the process
"""
raise NotImplementedError
def status(self):
"""
Get the status of the daemon. Exits with 0 if running, 1 if not.
"""Get the status of the daemon. Exits with 0 if running, 1 if not.
"""
pid = self.pid()

View File

@ -12,3 +12,7 @@ class CheckException(Exception):
class NaN(CheckException):
pass
class PathNotFound(Exception):
pass

View File

@ -1,4 +1,5 @@
import logging
import six
from monascaclient import ksclient
@ -10,7 +11,7 @@ log = logging.getLogger(__name__)
class Keystone(object):
# Make this a singleton class so we don't get the token every time
# the class is created
__metaclass__ = singleton.Singleton
six.add_metaclass(singleton.Singleton)
def __init__(self, config):
self.config = config

View File

@ -4,7 +4,8 @@ from collections import namedtuple
import logging
from time import time
from monasca_agent.common.exceptions import Infinity, UnknownValue
from monasca_agent.common.exceptions import Infinity
from monasca_agent.common.exceptions import UnknownValue
log = logging.getLogger(__name__)
@ -30,24 +31,21 @@ class MetricTypes(object):
class Metric(object):
"""
A base metric class that accepts points, slices them into time intervals
and performs roll-ups within those intervals.
"""A base metric class that accepts points, slices them into time intervals
and performs roll-ups within those intervals.
"""
def sample(self, value, sample_rate, timestamp=None):
""" Add a point to the given metric. """
"""Add a point to the given metric. """
raise NotImplementedError()
def flush(self, timestamp):
""" Flush all metrics up to the given timestamp. """
"""Flush all metrics up to the given timestamp. """
raise NotImplementedError()
class Gauge(Metric):
""" A metric that tracks a value at particular points in time. """
"""A metric that tracks a value at particular points in time. """
def __init__(self, formatter, name, dimensions,
hostname, device_name, delegated_tenant=None,
@ -87,8 +85,7 @@ class Gauge(Metric):
class Counter(Metric):
""" A metric that tracks a counter value. """
"""A metric that tracks a counter value. """
def __init__(self, formatter, name, dimensions,
hostname, device_name, delegated_tenant=None,
@ -107,9 +104,9 @@ class Counter(Metric):
def sample(self, value, sample_rate, timestamp=None):
try:
self.value += value * int(1 / sample_rate)
self.value += value * int(1 / sample_rate)
except TypeError:
log.error("metric {} value {} sample_rate {}".format(self.name, value, sample_rate))
log.error("metric {} value {} sample_rate {}".format(self.name, value, sample_rate))
def flush(self, timestamp):
if self.value is not None:
@ -129,8 +126,7 @@ class Counter(Metric):
class Histogram(Metric):
""" A metric to track the distribution of a set of values. """
"""A metric to track the distribution of a set of values. """
def __init__(self, formatter, name, dimensions,
hostname, device_name, delegated_tenant=None,
@ -173,15 +169,15 @@ class Histogram(Metric):
]
metrics.extend(self.formatter(hostname=self.hostname,
device_name=self.device_name,
dimensions=self.dimensions,
delegated_tenant=self.delegated_tenant,
metric='%s.%s' % (self.name, suffix),
value=value,
timestamp=timestamp,
metric_type=metric_type,
value_meta=self.value_meta
) for suffix, value, metric_type in metric_aggrs)
device_name=self.device_name,
dimensions=self.dimensions,
delegated_tenant=self.delegated_tenant,
metric='%s.%s' % (self.name, suffix),
value=value,
timestamp=timestamp,
metric_type=metric_type,
value_meta=self.value_meta
) for suffix, value, metric_type in metric_aggrs)
for p in self.percentiles:
val = self.samples[int(round(p * length - 1))]
@ -202,8 +198,7 @@ class Histogram(Metric):
class Set(Metric):
""" A metric to track the number of unique elements in a set. """
"""A metric to track the number of unique elements in a set. """
def __init__(self, formatter, name, dimensions,
hostname, device_name, delegated_tenant=None,
@ -224,7 +219,6 @@ class Set(Metric):
self.values.add(value)
def flush(self, timestamp):
metrics = []
if not self.values:
return []
else:
@ -243,8 +237,7 @@ class Set(Metric):
class Rate(Metric):
""" Track the rate of metrics over each flush interval """
"""Track the rate of metrics over each flush interval """
def __init__(self, formatter, name, dimensions,
hostname, device_name, delegated_tenant=None,

View File

@ -3,7 +3,7 @@ class Singleton(type):
super(Singleton, cls).__init__(name, bases, dict)
cls.instance = None
def __call__(cls,*args,**kw):
def __call__(cls, *args, **kw):
if cls.instance is None:
cls.instance = super(Singleton, cls).__call__(*args, **kw)
return cls.instance

View File

@ -1,9 +1,9 @@
import inspect
import imp
import itertools
import glob
import math
import hashlib
import imp
import inspect
import itertools
import math
import optparse
import os
import platform
@ -23,7 +23,8 @@ log = logging.getLogger(__name__)
# Tornado
try:
from tornado import ioloop, version_info as tornado_version
from tornado import ioloop
from tornado import version_info as tornado_version
except ImportError:
# We are likely running the agent without the forwarder and tornado is not installed
# Generate a warning
@ -37,10 +38,7 @@ LOGGING_MAX_BYTES = 5 * 1024 * 1024
NumericTypes = (float, int, long)
import monasca_agent.common.config as configuration
class PathNotFound(Exception):
pass
from monasca_agent.common.exceptions import PathNotFound
class Watchdog(object):
@ -88,8 +86,7 @@ class Watchdog(object):
class PidFile(object):
""" A small helper class for pidfiles. """
"""A small helper class for pidfiles. """
PID_DIR = '/var/run/monasca-agent'
@ -147,9 +144,7 @@ class PidFile(object):
class LaconicFilter(logging.Filter):
"""
Filters messages, only print them once while keeping memory under control
"""Filters messages, only print them once while keeping memory under control
"""
LACONIC_MEM_LIMIT = 1024
@ -177,8 +172,7 @@ class LaconicFilter(logging.Filter):
class Timer(object):
""" Helper class """
"""Helper class """
def __init__(self):
self.start()
@ -203,9 +197,7 @@ class Timer(object):
class Platform(object):
"""
Return information about the given platform.
"""Return information about the given platform.
"""
@staticmethod
def is_darwin(name=None):
@ -224,7 +216,7 @@ class Platform(object):
@staticmethod
def is_bsd(name=None):
""" Return true if this is a BSD like operating system. """
"""Return true if this is a BSD like operating system. """
name = name or sys.platform
return Platform.is_darwin(name) or Platform.is_freebsd(name)
@ -235,7 +227,7 @@ class Platform(object):
@staticmethod
def is_unix(name=None):
""" Return true if the platform is a unix, False otherwise. """
"""Return true if the platform is a unix, False otherwise. """
name = name or sys.platform
return (Platform.is_darwin()
or Platform.is_linux()
@ -249,11 +241,9 @@ class Platform(object):
class Dimensions(object):
"""Class to update the default dimensions.
"""
"""
Class to update the default dimensions.
"""
def __init__(self, agent_config):
self.agent_config = agent_config
@ -276,9 +266,7 @@ class Dimensions(object):
class Paths(object):
"""
Return information about system paths.
"""Return information about system paths.
"""
def __init__(self):
self.osname = get_os()
@ -316,27 +304,27 @@ class Paths(object):
raise PathNotFound(path)
def _windows_confd_path(self):
common_data = _windows_commondata_path()
common_data = self._windows_commondata_path()
path = os.path.join(common_data, 'Datadog', 'conf.d')
if os.path.exists(path):
return path
raise PathNotFound(path)
def get_checksd_path(self):
if self.osname == 'windows':
return self._windows_checksd_path()
else:
return self._unix_checksd_path()
if self.osname == 'windows':
return self._windows_checksd_path()
else:
return self._unix_checksd_path()
def _unix_checksd_path(self):
# Unix only will look up based on the current directory
# because checks_d will hang with the other python modules
cur_path = os.path.dirname(os.path.realpath(__file__))
checksd_path = os.path.join(cur_path, '../collector/checks_d')
# Unix only will look up based on the current directory
# because checks_d will hang with the other python modules
cur_path = os.path.dirname(os.path.realpath(__file__))
checksd_path = os.path.join(cur_path, '../collector/checks_d')
if os.path.exists(checksd_path):
return checksd_path
raise PathNotFound(checksd_path)
if os.path.exists(checksd_path):
return checksd_path
raise PathNotFound(checksd_path)
def _windows_checksd_path(self):
if hasattr(sys, 'frozen'):
@ -357,7 +345,8 @@ class Paths(object):
how-do-i-find-the-windows-common-application-data-folder-using-python
"""
import ctypes
from ctypes import wintypes, windll
from ctypes import windll
from ctypes import wintypes
_SHGetFolderPath = windll.shell32.SHGetFolderPathW
_SHGetFolderPath.argtypes = [wintypes.HWND,
@ -385,6 +374,7 @@ class Paths(object):
log.info("Windows certificate path: %s" % crt_path)
tornado.simple_httpclient._DEFAULT_CA_CERTS = crt_path
def plural(count):
if count == 1:
return ""
@ -488,9 +478,8 @@ def is_valid_hostname(hostname):
def get_hostname():
"""
Get the canonical host name this agent should identify as. This is
the authoritative source of the host name for the agent.
"""Get the canonical host name this agent should identify as. This is
the authoritative source of the host name for the agent.
Tries, in order:
@ -558,9 +547,11 @@ def get_parsed_args():
def load_check_directory():
''' Return the initialized checks from checks_d, and a mapping of checks that failed to
"""Return the initialized checks from checks_d, and a mapping of checks that failed to
initialize. Only checks that have a configuration
file in conf.d will be returned. '''
file in conf.d will be returned.
"""
from monasca_agent.collector.checks import AgentCheck
config = configuration.Config()

View File

@ -1,11 +1,11 @@
import collections
import copy
import logging
import time
import random
import time
import monascaclient.client
import monasca_agent.common.keystone as keystone
import monascaclient.client
log = logging.getLogger(__name__)
@ -22,9 +22,7 @@ class MonascaAPI(object):
MAX_BACKOFF = 60 # seconds
def __init__(self, config):
"""
Initialize Mon api client connection.
"""
"""Initialize Mon api client connection."""
self.config = config
self.url = config['url']
self.api_version = '2_0'

View File

@ -6,11 +6,11 @@
"""
# Standard imports
import socket
import datetime
import logging
import signal
import socket
import sys
import datetime
# set up logging before importing any other components
import monasca_agent.common.util as util
@ -18,19 +18,19 @@ import monasca_agent.common.util as util
util.initialize_logging('forwarder')
import os
os.umask(022)
os.umask(0o22)
# Tornado
import tornado.escape
import tornado.httpclient
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.escape
import tornado.options
import tornado.web
# agent import
import monasca_agent.common.config as cfg
import monasca_agent.common.check_status as check_status
import monasca_agent.common.config as cfg
import monasca_agent.common.metrics as metrics
import monasca_agent.common.util as util
import monasca_agent.forwarder.api.monasca_api as mon
@ -102,7 +102,7 @@ class Forwarder(tornado.web.Application):
use_simple_http_client=False):
self._port = int(port)
self._agent_config = agent_config
self.flush_interval = (int(agent_config.get('check_freq'))/2) * 1000
self.flush_interval = (int(agent_config.get('check_freq')) / 2) * 1000
self._metrics = {}
transaction.MetricTransaction.set_application(self)
transaction.MetricTransaction.set_endpoints(mon.MonascaAPI(agent_config))
@ -132,9 +132,10 @@ class Forwarder(tornado.web.Application):
# todo why is the tornado logging method overridden? Perhaps ditch this.
def log_request(self, handler):
""" Override the tornado logging method.
"""Override the tornado logging method.
If everything goes well, log level is DEBUG.
Otherwise it's WARNING or ERROR depending on the response code. """
Otherwise it's WARNING or ERROR depending on the response code.
"""
if handler.get_status() < 400:
log_method = log.debug
elif handler.get_status() < 500:

View File

@ -1,9 +1,10 @@
# stdlib
import sys
import time
from datetime import datetime, timedelta
from datetime import datetime
from datetime import timedelta
import logging
from operator import attrgetter
import sys
import time
# project
import monasca_agent.common.check_status as check_status
@ -116,9 +117,9 @@ class MetricTransaction(Transaction):
class TransactionManager(util.Dimensions):
"""Holds any transaction derived object list and make sure they
are all commited, without exceeding parameters (throttling, memory consumption) """
are all commited, without exceeding parameters (throttling, memory consumption)
"""
def __init__(self, max_wait_for_replay, max_queue_size, throttling_delay, agent_config):
super(TransactionManager, self).__init__(agent_config)

View File

@ -27,7 +27,7 @@ log = logging.getLogger('statsd')
class MonascaStatsd(object):
""" This class is the monasca_statsd daemon. """
"""This class is the monasca_statsd daemon. """
def __init__(self, config_path):
config = cfg.Config()
@ -86,7 +86,7 @@ class MonascaStatsd(object):
def main():
""" The main entry point for the unix version of monasca_statsd. """
"""The main entry point for the unix version of monasca_statsd. """
parser = argparse.ArgumentParser(description='Monasca statsd - statsd server supporting metric dimensions')
parser.add_argument('--config', '-c',
help="Location for an alternate config rather than using the default config location.")

View File

@ -1,6 +1,7 @@
import json
import logging
import threading
import monasca_agent.common.check_status as check_status
import monasca_agent.common.emitter as emitter
import monasca_agent.common.util as util
@ -18,8 +19,7 @@ EVENT_CHUNK_SIZE = 50
class Reporter(threading.Thread):
"""
The reporter periodically sends the aggregated metrics to the
"""The reporter periodically sends the aggregated metrics to the
server.
"""

View File

@ -21,10 +21,7 @@ metric_class = {
class Server(object):
"""
A statsd udp server.
"""
"""A statsd udp server."""
def __init__(self, aggregator, host, port, forward_to_host=None, forward_to_port=None):
self.host = host
@ -162,7 +159,7 @@ class Server(object):
name, value, metric_class[mtype], dimensions=dimensions, sample_rate=sample_rate)
def start(self):
""" Run the server. """
"""Run the server."""
# Bind to the UDP socket.
# IPv4 only
open_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

View File

@ -1,27 +1,30 @@
# set up logging before importing any other components
from collector import modules
from config import initialize_logging
from monasca_agent.pup import pup
from collector import modules
from monasca_agent.statsd import daemon
initialize_logging('collector')
import win32serviceutil
import win32service
import win32event
import sys
import logging
import time
import multiprocessing
import sys
import time
import win32event
import win32service
import win32serviceutil
from optparse import Values
from collector.checks.collector import Collector
from emitter import http_emitter
from ddagent import Application
from win32.common import handle_exe_click
from collector.jmxfetch import JMXFetch
from ddagent import Application
from emitter import http_emitter
from optparse import Values
from win32.common import handle_exe_click
from monasca_agent.common.config import get_config, load_check_directory, set_win32_cert_path
from monasca_agent.common.config import get_config
from monasca_agent.common.config import load_check_directory
from monasca_agent.common.config import set_win32_cert_path
from monasca_agent.statsd.daemon import MonascaStatsd
log = logging.getLogger(__name__)
RESTART_INTERVAL = 24 * 60 * 60 # Defaults to 1 day

View File

@ -2,9 +2,10 @@ import ctypes
def handle_exe_click(name):
''' When the executables are clicked directly in the UI, we must let the
"""When the executables are clicked directly in the UI, we must let the
user know that they have to install the program as a service instead of
running it directly. '''
running it directly.
"""
message = """To use %(name)s, you must install it as a service.
To install %(name)s as a service, you must run the following in the console:

View File

@ -3,34 +3,54 @@
# Licensed under the terms of the CECILL License
# Modified for Datadog
import yaml
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
import sys
import os
import os.path as osp
import sys
import threading as thread
import webbrowser
import thread # To manage the windows process asynchronously
import yaml
import win32serviceutil
import win32service
import win32serviceutil
# GUI Imports
from guidata.qt.QtCore import SIGNAL, Qt, QSize, QPoint, QTimer
from guidata.qt.QtGui import QInputDialog, QWidget, QFont, QLabel, QGroupBox, QHBoxLayout, QSystemTrayIcon
from guidata.qt.QtGui import QVBoxLayout, QPushButton, QSplitter, QListWidget, QMenu, QMessageBox
from guidata.qt.QtCore import QPoint
from guidata.qt.QtCore import QSize
from guidata.qt.QtCore import Qt
from guidata.qt.QtCore import QTimer
from guidata.qt.QtCore import SIGNAL
from guidata.configtools import get_icon, get_family, MONOSPACE
from guidata.qt.QtGui import QFont
from guidata.qt.QtGui import QGroupBox
from guidata.qt.QtGui import QHBoxLayout
from guidata.qt.QtGui import QInputDialog
from guidata.qt.QtGui import QLabel
from guidata.qt.QtGui import QListWidget
from guidata.qt.QtGui import QMenu
from guidata.qt.QtGui import QMessageBox
from guidata.qt.QtGui import QPushButton
from guidata.qt.QtGui import QSplitter
from guidata.qt.QtGui import QSystemTrayIcon
from guidata.qt.QtGui import QVBoxLayout
from guidata.qt.QtGui import QWidget
from guidata.configtools import get_family
from guidata.configtools import get_icon
from guidata.configtools import MONOSPACE
from guidata.qthelpers import get_std_icon
from spyderlib.widgets.sourcecode.codeeditor import CodeEditor
# Datadog
from common.util import get_os
from config import (get_confd_path, get_config_path, get_config,
_windows_commondata_path)
from config import _windows_commondata_path
from config import get_confd_path
from config import get_config
from config import get_config_path
EXCLUDED_WINDOWS_CHECKS = [
'cacti', 'directory', 'gearmand',

View File

@ -58,7 +58,7 @@ def merge_by_name(first, second):
def read_plugin_config_from_disk(config_dir, plugin_name):
""" Reads from the Agent on disk configuration the config for a specific plugin
"""Reads from the Agent on disk configuration the config for a specific plugin
:param config_dir: Monasca Agent configuration directory
:param plugin_name: The name of the check plugin
:return: Dictionary of parsed yaml content
@ -72,7 +72,7 @@ def read_plugin_config_from_disk(config_dir, plugin_name):
def save_plugin_config(config_dir, plugin_name, user, conf):
""" Writes configuration for plugin_name to disk in the config_dir
"""Writes configuration for plugin_name to disk in the config_dir
:param config_dir: Monasca Agent configuration directory
:param plugin_name: The name of the check plugin
:param user: The username Monasca-agent will run as
@ -88,5 +88,5 @@ def save_plugin_config(config_dir, plugin_name, user, conf):
allow_unicode=True,
default_flow_style=False))
gid = pwd.getpwnam(user).pw_gid
os.chmod(config_path, 0640)
os.chmod(config_path, 0o640)
os.chown(config_path, 0, gid)

View File

@ -1,7 +1,7 @@
from args_plugin import ArgsPlugin
from plugin import Plugin
from service_plugin import ServicePlugin
from utils import find_process_cmdline
from utils import find_process_name
from utils import watch_process
from utils import service_api_check
from args_plugin import ArgsPlugin
from service_plugin import ServicePlugin
from utils import watch_process

View File

@ -7,11 +7,10 @@ log = logging.getLogger(__name__)
class ArgsPlugin(Plugin):
""" Base plugin for detection plugins that take arguments for configuration rather than do detection."""
"""Base plugin for detection plugins that take arguments for configuration rather than do detection."""
def _build_instance(self, arg_list):
""" If a value for each arg in the arg_list was specified build it into an instance dictionary. Also check
for dimensions and add if they were specified.
"""If a value for each arg in the arg_list was specified build it into an instance dictionary. Also check for dimensions and add if they were specified.
:param arg_list: Arguments to include
:return: instance dictionary
"""
@ -24,7 +23,7 @@ class ArgsPlugin(Plugin):
return instance
def _check_required_args(self, arg_list):
""" Check that the required args were specified
"""Check that the required args were specified
:param arg_list: A list of arguments to verify were specified
:return: True if the required args exist false otherwise
"""

View File

@ -2,8 +2,8 @@
Detection classes should be platform independent
"""
import sys
import logging
import sys
log = logging.getLogger(__name__)
@ -39,8 +39,8 @@ class Plugin(object):
raise NotImplementedError
def build_config_with_name(self):
""" Builds the config and then adds a field 'built_by' to each instance in the config.
built_by is set to the plugin name
"""Builds the config and then adds a field 'built_by' to each instance in the config.
built_by is set to the plugin name
:return: An agent_config.Plugins object
"""
conf = self.build_config()

View File

@ -102,11 +102,11 @@ class Apache(monasca_setup.detection.Plugin):
log.info("\tSuccessfully setup Apache plugin.")
else:
log.warn('Unable to access the Apache server-status URL;' + error_msg)
except urllib2.URLError, e:
except urllib2.URLError as e:
log.error('\tError {0} received when accessing url {1}.'.format(e.reason, apache_url) +
'\n\tPlease ensure the Apache web server is running and your configuration ' +
'information in /root/.apache.cnf is correct.' + error_msg)
except urllib2.HTTPError, e:
except urllib2.HTTPError as e:
log.error('\tError code {0} received when accessing {1}'.format(e.code, apache_url) + error_msg)
else:
log.error('\tThe dependencies for Apache Web Server are not installed or unavailable.' + error_msg)

View File

@ -12,7 +12,7 @@ class Cinder(monasca_setup.detection.ServicePlugin):
'overwrite': overwrite,
'service_name': 'block-storage',
'process_names': ['cinder-volume', 'cinder-scheduler',
'cinder-api','cinder-backup'],
'cinder-api', 'cinder-backup'],
'service_api_url': 'http://localhost:8776/v2.0',
'search_pattern': '.*version=1.*'
}

View File

@ -8,14 +8,12 @@ log = logging.getLogger(__name__)
class Crash(Plugin):
"""
Detect if kdump is installed and enabled and setup configuration to
monitor for crash dumps.
"""Detect if kdump is installed and enabled and setup configuration to
monitor for crash dumps.
"""
def _detect(self):
"""
Run detection, set self.available True if a crash kernel is loaded.
"""Run detection, set self.available True if a crash kernel is loaded.
"""
loaded = '/sys/kernel/kexec_crash_loaded'
if os.path.isfile(loaded):
@ -24,8 +22,7 @@ class Crash(Plugin):
self.available = True
def build_config(self):
"""
Build the config as a Plugin object and return it.
"""Build the config as a Plugin object and return it.
"""
log.info('\tEnabling the Monasca crash dump healthcheck')
config = agent_config.Plugins()

View File

@ -64,7 +64,7 @@ class HAProxy(monasca_setup.detection.Plugin):
instance_config['username'] = user
if password is not None:
instance_config['password'] = password
config['haproxy'] = {'init_config': None, 'instances': [ instance_config ]}
config['haproxy'] = {'init_config': None, 'instances': [instance_config]}
return config

View File

@ -14,7 +14,7 @@ class Heat(monasca_setup.detection.ServicePlugin):
'process_names': ['heat-api', 'heat-api-cfn',
'heat-api-cloudwatch', 'heat-engine'],
'service_api_url': 'http://localhost:8004',
'search_pattern' : '.*200 OK.*',
'search_pattern': '.*200 OK.*',
}
super(Heat, self).__init__(service_params)

View File

@ -7,16 +7,16 @@ log = logging.getLogger(__name__)
class HostAlive(monasca_setup.detection.ArgsPlugin):
""" Setup an host_alive check according to the passed in args.
Despite being a detection plugin, this plugin does no detection and
will be a NOOP without arguments. Expects two space-separated
arguments, 'hostname' and 'type,' where the former is a comma-separated
list of hosts, and the latter can be either 'ssh' or 'ping'.
Examples:
"""Setup an host_alive check according to the passed in args.
Despite being a detection plugin, this plugin does no detection and
will be a NOOP without arguments. Expects two space-separated
arguments, 'hostname' and 'type,' where the former is a comma-separated
list of hosts, and the latter can be either 'ssh' or 'ping'.
Examples:
monasca-setup -d hostalive -a "hostname=remotebox type=ping"
monasca-setup -d hostalive -a "hostname=remotebox type=ping"
monasca-setup -d hostalive -a "hostname=remotebox,remotebox2 type=ssh"
monasca-setup -d hostalive -a "hostname=remotebox,remotebox2 type=ssh"
"""
def _detect(self):

View File

@ -8,10 +8,10 @@ log = logging.getLogger(__name__)
class HttpCheck(monasca_setup.detection.ArgsPlugin):
""" Setup an http_check according to the passed in args.
Despite being a detection plugin this plugin does no detection and will be a noop without arguments.
Expects space seperated arguments, the required argument is url. Optional parameters include:
disable_ssl_validation and match_pattern.
"""Setup an http_check according to the passed in args.
Despite being a detection plugin this plugin does no detection and will be a noop without arguments.
Expects space seperated arguments, the required argument is url. Optional parameters include:
disable_ssl_validation and match_pattern.
"""
def _detect(self):

View File

@ -1,12 +1,17 @@
import logging
import re
from subprocess import CalledProcessError, STDOUT
from subprocess import CalledProcessError
from subprocess import STDOUT
from monasca_setup.detection import Plugin, find_process_cmdline, watch_process
from monasca_setup.detection.utils import find_addr_listening_on_port
from monasca_setup.detection.utils import check_output
from monasca_setup import agent_config
from monasca_setup.detection import find_process_cmdline
from monasca_setup.detection import Plugin
from monasca_setup.detection import watch_process
from monasca_setup.detection.utils import check_output
from monasca_setup.detection.utils import find_addr_listening_on_port
log = logging.getLogger(__name__)
@ -67,8 +72,8 @@ class Kafka(Plugin):
@staticmethod
def _find_zookeeper_url():
""" Pull the zookeeper url the kafka config.
:return: Zookeeper url
"""Pull the zookeeper url the kafka config.
:return: Zookeeper url
"""
zk_connect = re.compile('zookeeper.connect=(.*)')
try:
@ -84,9 +89,9 @@ class Kafka(Plugin):
return match.group(1).split(',')[0] # Only use the first zk url
def _ls_zookeeper(self, path):
""" Do a ls on the given zookeeper path.
I am using the local command line kafka rather than kazoo because it doesn't make sense to
have kazoo as a dependency only for detection.
"""Do a ls on the given zookeeper path.
I am using the local command line kafka rather than kazoo because it doesn't make sense to
have kazoo as a dependency only for detection.
"""
zk_shell = ['/opt/kafka/bin/zookeeper-shell.sh', self.zk_url, 'ls', path]
try:

View File

@ -1,9 +1,10 @@
import ConfigParser
import logging
import os
import subprocess
import ConfigParser
import monasca_setup.detection
import monasca_setup.agent_config
import monasca_setup.detection
from distutils.version import LooseVersion
@ -101,9 +102,9 @@ class Libvirt(monasca_setup.detection.Plugin):
def dependencies_installed(self):
try:
import novaclient
import time
import yaml
import novaclient
# novaclient module versions were renamed in version 2.22
if novaclient.__version__ < LooseVersion("2.22"):
import novaclient.v1_1.client

View File

@ -37,7 +37,6 @@ class MKLivestatus(monasca_setup.detection.Plugin):
"""Search common Nagios/Icinga config file locations for mk_livestatus
broker module socket path
"""
socket_path = None
# Search likely Nagios/Icinga config file locations
for cfg_file in nagios_cfg_files:
if os.path.isfile(cfg_file):

View File

@ -9,7 +9,8 @@ import yaml
import monasca_setup.agent_config
import monasca_setup.detection
from monasca_setup.detection import find_process_cmdline, watch_process
from monasca_setup.detection import find_process_cmdline
from monasca_setup.detection import watch_process
log = logging.getLogger(__name__)
@ -38,7 +39,6 @@ class MonAPI(monasca_setup.detection.Plugin):
def build_config(self):
"""Build the config as a Plugins object and return."""
log.info("\tEnabling the Monasca api healthcheck")
admin_port = self.api_config['server']['adminConnectors'][0]['port']
config = monasca_setup.agent_config.Plugins()
config.merge(dropwizard_health_check('monitoring', 'api', 'http://localhost:8081/healthcheck'))

View File

@ -38,8 +38,8 @@ class MySQL(monasca_setup.detection.Plugin):
# Attempt login, requires either an empty root password from localhost
# or relying on a configured /root/.my.cnf
if self.dependencies_installed(): # ensures MySQLdb is available
import MySQLdb
import _mysql_exceptions
import MySQLdb
try:
MySQLdb.connect(read_default_file=mysql_conf)
except _mysql_exceptions.MySQLError:

View File

@ -13,7 +13,7 @@ class Ovsvapp(monasca_setup.detection.ServicePlugin):
'template_dir': template_dir,
'overwrite': overwrite,
'service_name': 'OVSvApp-ServiceVM',
'process_names': ['neutron-ovsvapp-agent', 'ovsdb-server','ovs-vswitchd'],
'process_names': ['neutron-ovsvapp-agent', 'ovsdb-server', 'ovs-vswitchd'],
'service_api_url': '',
'search_pattern': ''
}

View File

@ -17,7 +17,7 @@ class Postfix(monasca_setup.detection.Plugin):
"""
if monasca_setup.detection.find_process_cmdline('postfix') is not None:
# Test for sudo access
# TODO: don't hardcode the user. Need to get it from the arguments to monasca_setup
# TODO(craig): don't hardcode the user. Need to get it from the arguments to monasca_setup
test_sudo = os.system('sudo -l -U mon-agent find /var/spool/postfix/incoming -type f > /dev/null')
if test_sudo != 0:
log.info("Postfix found but the required sudo access is not configured.\n\t" +

View File

@ -41,7 +41,7 @@ class RabbitMQ(monasca_setup.detection.Plugin):
self.available = True
def _get_config(self):
""" Set the configuration to be used for connecting to rabbitmq
"""Set the configuration to be used for connecting to rabbitmq
:return:
"""
# Set defaults and read config or use arguments
@ -63,7 +63,7 @@ class RabbitMQ(monasca_setup.detection.Plugin):
self.exchanges = self.args.get('exchanges')
def _login_test(self):
""" Attempt to log into the rabbitmq admin api to verify credentials.
"""Attempt to log into the rabbitmq admin api to verify credentials.
:return: bool status of the test
"""
url = self.api_url + '/aliveness-test/%2F'
@ -84,7 +84,7 @@ class RabbitMQ(monasca_setup.detection.Plugin):
return False
def _read_config(self, config_file):
""" Read the configuration setting member variables as appropriate.
"""Read the configuration setting member variables as appropriate.
:param config_file: The filename of the configuration to read and parse
"""
# Read the rabbitmq config file to extract the needed variables.

View File

@ -2,8 +2,8 @@ import logging
import os
import yaml
from monasca_setup.detection import Plugin
from monasca_setup import agent_config
from monasca_setup.detection import Plugin
log = logging.getLogger(__name__)

View File

@ -62,8 +62,8 @@ class ServicePlugin(Plugin):
host, port = parsed.netloc.split(':')
listening = []
for connection in psutil.net_connections():
if connection.status == psutil.CONN_LISTEN and connection.laddr[1] == int(port):
listening.append(connection.laddr[0])
if connection.status == psutil.CONN_LISTEN and connection.laddr[1] == int(port):
listening.append(connection.laddr[0])
if len(listening) > 0:
# If not listening on localhost or ips then use another local ip
@ -75,7 +75,7 @@ class ServicePlugin(Plugin):
# Setup an active http_status check on the API
log.info("\tConfiguring an http_check for the {0} API.".format(self.service_name))
config.merge(service_api_check(self.service_name + '-api', api_url,
self.search_pattern, self.service_name))
self.search_pattern, self.service_name))
else:
log.info("\tNo process found listening on {0} ".format(port) +
"skipping setup of http_check for the {0} API." .format(self.service_name))

View File

@ -2,7 +2,9 @@
"""
import logging
import subprocess
from subprocess import Popen, PIPE, CalledProcessError
from subprocess import CalledProcessError
from subprocess import PIPE
from subprocess import Popen
import psutil

View File

@ -89,7 +89,7 @@ def main(argv=None):
def base_configuration(args):
""" Write out the primary Agent configuration and setup the service.
"""Write out the primary Agent configuration and setup the service.
:param args: Arguments from the command line
:return: None
"""
@ -203,7 +203,7 @@ def parse_arguments(parser):
def plugin_detection(plugins, template_dir, detection_args, skip_failed=True):
""" Runs the detection step for each plugin in the list and returns the complete detected agent config.
"""Runs the detection step for each plugin in the list and returns the complete detected agent config.
:param plugins: A list of detection plugin classes
:param template_dir: Location of plugin configuration templates
:param detection_args: Arguments passed to each detection plugin
@ -228,8 +228,8 @@ def plugin_detection(plugins, template_dir, detection_args, skip_failed=True):
def remove_config(args, plugin_names):
""" Parse all configuration removing any configuration built by plugins in plugin_names
Note there is no concept of overwrite for removal.
"""Parse all configuration removing any configuration built by plugins in plugin_names
Note there is no concept of overwrite for removal.
:param args: specified arguments
:param plugin_names: A list of the plugin names to remove from the config
:return: True if changes, false otherwise

View File

@ -9,8 +9,8 @@ log = logging.getLogger(__name__)
def detect_init(*args, **kwargs):
""" Detect the service manager running on this box
args/kwargs match those of service.Service
"""Detect the service manager running on this box
args/kwargs match those of service.Service
:return: The apropriate Service object for this system
"""
detected_os = platform.system()
@ -29,7 +29,7 @@ def detect_init(*args, **kwargs):
def detect_linux_init(*args, **kwargs):
""" Detect which of the linux inits is running
"""Detect which of the linux inits is running
:return: Return a valid Linux service manager object
"""
with open('/proc/1/comm', 'r') as init_proc:

View File

@ -13,10 +13,10 @@ log = logging.getLogger(__name__)
class LinuxInit(service.Service):
""" Parent class for all Linux based init systems.
"""Parent class for all Linux based init systems.
"""
def enable(self):
""" Does user/group directory creation.
"""Does user/group directory creation.
"""
# Create user/group if needed
try:
@ -29,7 +29,7 @@ class LinuxInit(service.Service):
# todo log dir is hardcoded
for path in (self.log_dir, self.config_dir, '%s/conf.d' % self.config_dir):
if not os.path.exists(path):
os.makedirs(path, 0755)
os.makedirs(path, 0o755)
os.chown(path, 0, user.pw_gid)
# the log dir needs to be writable by the user
os.chown(self.log_dir, user.pw_uid, user.pw_gid)
@ -66,7 +66,7 @@ class Systemd(LinuxInit):
service_script.write(template.read().format(prefix=self.prefix_dir, monasca_user=self.username,
config_dir=self.config_dir))
os.chown(init_path, 0, 0)
os.chmod(init_path, 0644)
os.chmod(init_path, 0o644)
# Enable the service
subprocess.check_call(['systemctl', 'daemon-reload'])
@ -128,7 +128,7 @@ class SysV(LinuxInit):
conf.write(template.read().format(prefix=self.prefix_dir, monasca_user=self.username,
config_dir=self.config_dir))
os.chown(self.init_script, 0, 0)
os.chmod(self.init_script, 0755)
os.chmod(self.init_script, 0o755)
for runlevel in ['2', '3', '4', '5']:
link_path = '/etc/rc%s.d/S10monasca-agent' % runlevel

View File

@ -12,7 +12,7 @@ log = logging.getLogger(__name__)
def discover_plugins(custom_path):
""" Find and import all detection plugins. It will look in detection/plugins dir of the code as well as custom_path
"""Find and import all detection plugins. It will look in detection/plugins dir of the code as well as custom_path
:param custom_path: An additional path to search for detection plugins
:return: A list of imported detection plugin classes.
@ -44,10 +44,9 @@ def discover_plugins(custom_path):
def select_plugins(plugin_names, plugin_list):
"""
:param plugin_names: A list of names
:param plugin_list: A list of detection plugins classes
:return: Returns a list of plugins from plugin_list that match plugin_names
""":param plugin_names: A list of names
:param plugin_list: A list of detection plugins classes
:return: Returns a list of plugins from plugin_list that match plugin_names
"""
lower_plugins = [p.lower() for p in plugin_names]
plugins = []
@ -64,8 +63,8 @@ def select_plugins(plugin_names, plugin_list):
def write_template(template_path, out_path, variables, group, is_yaml=False):
""" Write a file using a simple python string template.
Assumes 640 for the permissions and root:group for ownership.
"""Write a file using a simple python string template.
Assumes 640 for the permissions and root:group for ownership.
:param template_path: Location of the Template to use
:param out_path: Location of the file to write
:param variables: dictionary with key/value pairs to use in writing the template
@ -85,4 +84,4 @@ def write_template(template_path, out_path, variables, group, is_yaml=False):
else:
conf.write(contents)
os.chown(out_path, 0, group)
os.chmod(out_path, 0640)
os.chmod(out_path, 0o640)

View File

@ -1,2 +1,3 @@
hacking>=0.9.2,<0.10
hacking>=0.10.2
flake8>=2.2.4
nose==1.3.0

View File

@ -21,11 +21,14 @@ commands = {posargs}
[flake8]
max-line-length = 120
max-complexity = 30
# TODO: ignored checks should be enabled in the future
# E501 Line length > 80 characters
# F401 module imported but unused
# H302 import only modules
# H904 Wrap long lines in parentheses instead of a backslash (DEPRECATED)
ignore = E501, F401, H302, H904,
# H405 Multiline docstring separated by empty line
# H105 Don't use author tags
ignore = E501,F401,H302,H904,H803,H405,H105
show-source = True
exclude=.venv,.git,.tox,dist,*egg,build
exclude=.venv,.git,.tox,dist,*egg,build,tests