Initial tox setup and misc pep8 fixes.

Change-Id: I433900b95b5e223c359e9cb55565655810139177
Author: Jonathan Halterman, 2014-07-17 17:08:36 -07:00
Parent: f9b319e9ad
Commit: 6b10dcd350
43 changed files with 440 additions and 233 deletions

.gitignore (vendored): 1 line changed

@ -11,3 +11,4 @@ mon-agent_4.2.0-0_all.deb
artifacts
root
mon_agent.egg-info
.tox/


@ -282,8 +282,11 @@ class AgentCheck(object):
self.hostname = get_hostname(agent_config)
self.log = logging.getLogger('%s.%s' % (__name__, name))
self.aggregator = MetricsAggregator(self.hostname,
recent_point_threshold=agent_config.get('recent_point_threshold', None))
self.aggregator = MetricsAggregator(
self.hostname,
recent_point_threshold=agent_config.get(
'recent_point_threshold',
None))
self.events = []
self.instances = instances or []


@ -163,14 +163,20 @@ class Dogstream(object):
# Build our tail -f
if self._gen is None:
self._gen = TailFile(self.logger, self.log_path, self._line_parser).tail(line_by_line=False,
move_end=move_end)
self._gen = TailFile(
self.logger,
self.log_path,
self._line_parser).tail(
line_by_line=False,
move_end=move_end)
# read until the end of file
try:
self._gen.next()
self.logger.debug("Done dogstream check for file %s, found %s metric points" % (self.log_path,
len(self._values)))
self.logger.debug(
"Done dogstream check for file %s, found %s metric points" %
(self.log_path, len(
self._values)))
except StopIteration as e:
self.logger.exception(e)
self.logger.warn("Can't tail %s file" % self.log_path)


@ -58,8 +58,12 @@ class Disk(Check):
# parse into a list of Measurements
stats.update(inodes)
timestamp = time.time()
measurements = [Measurement(key.split('.', 1)[1], timestamp, value, {'device': key.split('.', 1)[0]})
for key, value in stats.iteritems()]
measurements = [
Measurement(
key.split(
'.', 1)[1], timestamp, value, {
'device': key.split(
'.', 1)[0]}) for key, value in stats.iteritems()]
return measurements
except Exception:
@ -301,7 +305,8 @@ class IO(Check):
# sdb 0.00 0.00 0.00 2.97 0.00 11.88 8.00 0.00 0.00 0.00 0.00
# sdg 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
# sdf 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
# md0 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
# md0 0.00 0.00 0.00 0.00 0.00 0.00 0.00
# 0.00 0.00 0.00 0.00
io.update(self._parse_linux2(stdout))
elif sys.platform == "sunos5":
@ -708,7 +713,8 @@ class Memory(Check):
# memory_cap:360:53aa9b7e-48ba-4152-a52b-a6368c:snaptime 16787393.9439095
# memory_cap:360:53aa9b7e-48ba-4152-a52b-a6368c:swap 91828224 <--
# memory_cap:360:53aa9b7e-48ba-4152-a52b-a6368c:swapcap 1073741824 <--
# memory_cap:360:53aa9b7e-48ba-4152-a52b-a6368c:zonename 53aa9b7e-48ba-4152-a52b-a6368c3d9e7c
# memory_cap:360:53aa9b7e-48ba-4152-a52b-a6368c:zonename
# 53aa9b7e-48ba-4152-a52b-a6368c3d9e7c
# turn memory_cap:360:zone_name:key value
# into { "key": value, ...}
@ -822,8 +828,14 @@ class Cpu(Check):
elif sys.platform == 'darwin':
# generate 3 seconds of data
# [' disk0 disk1 cpu load average', ' KB/t tps MB/s KB/t tps MB/s us sy id 1m 5m 15m', ' 21.23 13 0.27 17.85 7 0.13 14 7 79 1.04 1.27 1.31', ' 4.00 3 0.01 5.00 8 0.04 12 10 78 1.04 1.27 1.31', '']
iostats = sp.Popen(
['iostat', '-C', '-w', '3', '-c', '2'], stdout=sp.PIPE, close_fds=True).communicate()[0]
iostats = sp.Popen(['iostat',
'-C',
'-w',
'3',
'-c',
'2'],
stdout=sp.PIPE,
close_fds=True).communicate()[0]
lines = [l for l in iostats.split("\n") if len(l) > 0]
legend = [l for l in lines if "us" in l]
if len(legend) == 1:
@ -836,8 +848,13 @@ class Cpu(Check):
cpu_st = 0
return format_results(cpu_user, cpu_sys, cpu_wait, cpu_idle, cpu_st)
else:
self.logger.warn("Expected to get at least 4 lines of data from iostat instead of just " +
str(iostats[: max(80, len(iostats))]))
self.logger.warn(
"Expected to get at least 4 lines of data from iostat instead of just " +
str(
iostats[
: max(
80,
len(iostats))]))
return {}
elif sys.platform.startswith("freebsd"):
@ -864,8 +881,13 @@ class Cpu(Check):
cpu_user + cpu_nice, cpu_sys + cpu_intr, cpu_wait, cpu_idle, cpu_stol)
else:
self.logger.warn("Expected to get at least 4 lines of data from iostat instead of just " +
str(iostats[:max(80, len(iostats))]))
self.logger.warn(
"Expected to get at least 4 lines of data from iostat instead of just " +
str(
iostats[
:max(
80,
len(iostats))]))
return {}
elif sys.platform == 'sunos5':


@ -218,8 +218,9 @@ class IO(Check):
try:
disk = w.Win32_PerfFormattedData_PerfDisk_LogicalDisk()
except AttributeError:
self.logger.info('Missing Win32_PerfFormattedData_PerfDisk_LogicalDiskUnable WMI class.' +
' No I/O metrics will be returned.')
self.logger.info(
'Missing Win32_PerfFormattedData_PerfDisk_LogicalDiskUnable WMI class.' +
' No I/O metrics will be returned.')
return
for device in disk:


@ -76,8 +76,9 @@ class Apache(AgentCheck):
self.warning("Assuming url was not correct. Trying to add ?auto suffix to the url")
self.check(instance)
else:
raise Exception("No metrics were fetched for this instance. Make sure that %s is the proper url."
% instance['apache_status_url'])
raise Exception(
"No metrics were fetched for this instance. Make sure that %s is the proper url." %
instance['apache_status_url'])
@staticmethod
def parse_agent_config(agentConfig):


@ -54,11 +54,19 @@ class DirectoryCheck(AgentCheck):
directory_bytes += file_stat.st_size
# file specific metrics
self.histogram(
"system.disk.directory.file.bytes", file_stat.st_size, dimensions=dimensions)
self.histogram("system.disk.directory.file.modified_sec_ago", time.time() - file_stat.st_mtime,
dimensions=dimensions)
self.histogram("system.disk.directory.file.created_sec_ago", time.time() - file_stat.st_ctime,
dimensions=dimensions)
"system.disk.directory.file.bytes",
file_stat.st_size,
dimensions=dimensions)
self.histogram(
"system.disk.directory.file.modified_sec_ago",
time.time() -
file_stat.st_mtime,
dimensions=dimensions)
self.histogram(
"system.disk.directory.file.created_sec_ago",
time.time() -
file_stat.st_ctime,
dimensions=dimensions)
# os.walk gives us all sub-directories and their files
# if we do not want to do this recursively and just want


@ -127,7 +127,8 @@ class Docker(AgentCheck):
if not instance.get("exclude") or not instance.get("include"):
if len(containers) > max_containers:
self.warning(
"Too many containers to collect. Please refine the containers to collect by editing the configuration file. Truncating to %s containers" % max_containers)
"Too many containers to collect. Please refine the containers to collect by editing the configuration file. Truncating to %s containers" %
max_containers)
containers = containers[:max_containers]
collected_containers = 0
@ -228,7 +229,8 @@ class Docker(AgentCheck):
fp = open(file_)
except IOError:
raise IOError(
"Can't open %s. If you are using Docker 0.9.0 or higher, the Datadog agent is not yet compatible with these versions. Please get in touch with Datadog Support for more information" % file_)
"Can't open %s. If you are using Docker 0.9.0 or higher, the Datadog agent is not yet compatible with these versions. Please get in touch with Datadog Support for more information" %
file_)
return dict(map(lambda x: x.split(), fp.read().splitlines()))
finally:


@ -182,13 +182,13 @@ class ElasticSearch(AgentCheck):
self.NODES_URL = "/_nodes?network=true"
additional_metrics = {
"elasticsearch.search.fetch.open_contexts": ("gauge", "indices.search.open_contexts"),
"elasticsearch.cache.filter.evictions": ("gauge", "indices.filter_cache.evictions"),
"elasticsearch.cache.filter.size": ("gauge", "indices.filter_cache.memory_size_in_bytes"),
"elasticsearch.id_cache.size": ("gauge", "indices.id_cache.memory_size_in_bytes"),
"elasticsearch.fielddata.size": ("gauge", "indices.fielddata.memory_size_in_bytes"),
"elasticsearch.fielddata.evictions": ("gauge", "indices.fielddata.evictions")
}
"elasticsearch.search.fetch.open_contexts": (
"gauge", "indices.search.open_contexts"), "elasticsearch.cache.filter.evictions": (
"gauge", "indices.filter_cache.evictions"), "elasticsearch.cache.filter.size": (
"gauge", "indices.filter_cache.memory_size_in_bytes"), "elasticsearch.id_cache.size": (
"gauge", "indices.id_cache.memory_size_in_bytes"), "elasticsearch.fielddata.size": (
"gauge", "indices.fielddata.memory_size_in_bytes"), "elasticsearch.fielddata.evictions": (
"gauge", "indices.fielddata.evictions")}
else:
# ES version 0.90.9 and below
@ -197,19 +197,19 @@ class ElasticSearch(AgentCheck):
self.NODES_URL = "/_cluster/nodes?network=true"
additional_metrics = {
"elasticsearch.cache.field.evictions": ("gauge", "indices.cache.field_evictions"),
"elasticsearch.cache.field.size": ("gauge", "indices.cache.field_size_in_bytes"),
"elasticsearch.cache.filter.count": ("gauge", "indices.cache.filter_count"),
"elasticsearch.cache.filter.evictions": ("gauge", "indices.cache.filter_evictions"),
"elasticsearch.cache.filter.size": ("gauge", "indices.cache.filter_size_in_bytes"),
"elasticsearch.thread_pool.cache.active": ("gauge", "thread_pool.cache.active"),
"elasticsearch.thread_pool.cache.threads": ("gauge", "thread_pool.cache.threads"),
"elasticsearch.thread_pool.cache.queue": ("gauge", "thread_pool.cache.queue"),
"jvm.gc.collection_count": ("gauge", "jvm.gc.collection_count"),
"jvm.gc.collection_time": ("gauge", "jvm.gc.collection_time_in_millis", lambda v: float(v) / 1000),
"jvm.gc.copy.count": ("gauge", "jvm.gc.collectors.Copy.collection_count"),
"jvm.gc.copy.collection_time": ("gauge", "jvm.gc.collectors.Copy.collection_time_in_millis", lambda v: float(v) / 1000)
}
"elasticsearch.cache.field.evictions": (
"gauge", "indices.cache.field_evictions"), "elasticsearch.cache.field.size": (
"gauge", "indices.cache.field_size_in_bytes"), "elasticsearch.cache.filter.count": (
"gauge", "indices.cache.filter_count"), "elasticsearch.cache.filter.evictions": (
"gauge", "indices.cache.filter_evictions"), "elasticsearch.cache.filter.size": (
"gauge", "indices.cache.filter_size_in_bytes"), "elasticsearch.thread_pool.cache.active": (
"gauge", "thread_pool.cache.active"), "elasticsearch.thread_pool.cache.threads": (
"gauge", "thread_pool.cache.threads"), "elasticsearch.thread_pool.cache.queue": (
"gauge", "thread_pool.cache.queue"), "jvm.gc.collection_count": (
"gauge", "jvm.gc.collection_count"), "jvm.gc.collection_time": (
"gauge", "jvm.gc.collection_time_in_millis", lambda v: float(v) / 1000), "jvm.gc.copy.count": (
"gauge", "jvm.gc.collectors.Copy.collection_count"), "jvm.gc.copy.collection_time": (
"gauge", "jvm.gc.collectors.Copy.collection_time_in_millis", lambda v: float(v) / 1000)}
self.METRICS.update(additional_metrics)


@ -19,8 +19,9 @@ class Gearman(AgentCheck):
try:
import gearman
except ImportError:
raise Exception("Cannot import Gearman module. Check the instructions to install" +
"this module at https://app.datadoghq.com/account/settings#integrations/gearman")
raise Exception(
"Cannot import Gearman module. Check the instructions to install" +
"this module at https://app.datadoghq.com/account/settings#integrations/gearman")
self.log.debug("Connecting to gearman at address %s:%s" % (host, port))
return gearman.GearmanAdminClient(["%s:%s" % (host, port)])


@ -89,8 +89,9 @@ class IIS(AgentCheck):
for metric, mtype, wmi_val in self.METRICS:
if not hasattr(iis_site, wmi_val):
self.warning('Unable to fetch metric %s. Missing %s in Win32_PerfFormattedData_W3SVC_WebService'
% (metric, wmi_val))
self.warning(
'Unable to fetch metric %s. Missing %s in Win32_PerfFormattedData_W3SVC_WebService' %
(metric, wmi_val))
continue
# Submit the metric value with the correct type


@ -133,4 +133,5 @@ consumer_groups:
except Exception as e:
self.log.exception(e)
raise Exception(
'Could not parse %s. Must be in the form of `host0:port0,host1:port1,host2:port2`' % val)
'Could not parse %s. Must be in the form of `host0:port0,host1:port1,host2:port2`' %
val)


@ -113,15 +113,16 @@ class Lighttpd(AgentCheck):
if metric_count == 0:
url_suffix = self.URL_SUFFIX_PER_VERSION[server_version]
if self.assumed_url.get(
instance['lighttpd_status_url'], None) is None and url[-len(url_suffix):] != url_suffix:
if self.assumed_url.get(instance['lighttpd_status_url'],
None) is None and url[-len(url_suffix):] != url_suffix:
self.assumed_url[instance['lighttpd_status_url']] = '%s%s' % (url, url_suffix)
self.warning(
"Assuming url was not correct. Trying to add %s suffix to the url" % url_suffix)
self.check(instance)
else:
raise Exception("No metrics were fetched for this instance. Make sure that %s is the proper url." % instance[
'lighttpd_status_url'])
raise Exception(
"No metrics were fetched for this instance. Make sure that %s is the proper url." %
instance['lighttpd_status_url'])
def _get_server_version(self, headers):
for h in headers:


@ -215,7 +215,8 @@ class MongoDb(AgentCheck):
status['stats'] = db.command('dbstats')
# Handle replica data, if any
# See http://www.mongodb.org/display/DOCS/Replica+Set+Commands#ReplicaSetCommands-replSetGetStatus
# See
# http://www.mongodb.org/display/DOCS/Replica+Set+Commands#ReplicaSetCommands-replSetGetStatus
try:
data = {}
@ -238,8 +239,8 @@ class MongoDb(AgentCheck):
if hasattr(lag, 'total_seconds'):
data['replicationLag'] = lag.total_seconds()
else:
data['replicationLag'] = (lag.microseconds +
(lag.seconds + lag.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6
data['replicationLag'] = (
lag.microseconds + (lag.seconds + lag.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6
if current is not None:
data['health'] = current['health']


@ -93,8 +93,9 @@ class MySql(AgentCheck):
try:
import MySQLdb
except ImportError:
raise Exception("Cannot import MySQLdb module. Check the instructions "
"to install this module at https://app.datadoghq.com/account/settings#integrations/mysql")
raise Exception(
"Cannot import MySQLdb module. Check the instructions "
"to install this module at https://app.datadoghq.com/account/settings#integrations/mysql")
if defaults_file != '':
db = MySQLdb.connect(read_default_file=defaults_file)
@ -156,8 +157,11 @@ class MySql(AgentCheck):
else:
slave_running = 0
self.gauge("mysql.replication.slave_running", slave_running, dimensions=dimensions)
self._collect_dict(GAUGE, {"Seconds_behind_master": "mysql.replication.seconds_behind_master"},
"SHOW SLAVE STATUS", db, dimensions=dimensions)
self._collect_dict(GAUGE,
{"Seconds_behind_master": "mysql.replication.seconds_behind_master"},
"SHOW SLAVE STATUS",
db,
dimensions=dimensions)
def _rate_or_gauge_statuses(self, statuses, dbResults, dimensions):
for status, metric in statuses.iteritems():
@ -326,7 +330,12 @@ class MySql(AgentCheck):
if pid is None:
try:
if sys.platform.startswith("linux"):
ps = subprocess.Popen(['ps', '-C', 'mysqld', '-o', 'pid'], stdout=subprocess.PIPE,
ps = subprocess.Popen(['ps',
'-C',
'mysqld',
'-o',
'pid'],
stdout=subprocess.PIPE,
close_fds=True).communicate()[0]
pslines = ps.strip().split('\n')
# First line is header, second line is mysql pid


@ -57,7 +57,11 @@ class WrapNagios(ServicesCheck):
if last_run_path.endswith('/') is False:
last_run_path += '/'
last_run_file = (
last_run_path + 'nagios_wrapper_' + hashlib.md5(instance['service_name']).hexdigest() + '.pck')
last_run_path +
'nagios_wrapper_' +
hashlib.md5(
instance['service_name']).hexdigest() +
'.pck')
# Load last-run data from shared memory file
last_run_data = {}


@ -158,7 +158,8 @@ class Network(AgentCheck):
# face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
# lo:45890956 112797 0 0 0 0 0 0 45890956 112797 0 0 0 0 0 0
# eth0:631947052 1042233 0 19 0 184 0 1206 1208625538 1320529 0 0 0 0 0 0
# eth1: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
# eth1: 0 0 0 0 0 0 0 0
# 0 0 0 0 0 0 0 0
for l in lines[2:]:
cols = l.split(':', 1)
x = cols[1].split()
@ -196,7 +197,8 @@ class Network(AgentCheck):
# ham0 1404 <Link#6> 7a:79:05:4d:bf:f5 30100 0 6815204 18742 0 8494811 0
# ham0 1404 5 5.77.191.245 30100 - 6815204 18742 - 8494811 -
# ham0 1404 seneca.loca fe80:6::7879:5ff: 30100 - 6815204 18742 - 8494811 -
# ham0 1404 2620:9b::54 2620:9b::54d:bff5 30100 - 6815204 18742 - 8494811 -
# ham0 1404 2620:9b::54 2620:9b::54d:bff5 30100 - 6815204
# 18742 - 8494811 -
lines = netstat.split("\n")
headers = lines[0].split()


@ -63,7 +63,7 @@ class ProcessCheck(AgentCheck):
except psutil.NoSuchProcess:
self.warning('Process disappeared while scanning')
pass
except psutil.AccessDenied, e:
except psutil.AccessDenied as e:
self.log.error('Access denied to %s process'
% string)
self.log.error('Error: %s' % e)


@ -157,7 +157,8 @@ class RabbitMQ(AgentCheck):
if len(data) > max_detailed:
# Display a warning in the info page
self.warning(
"Too many queues to fetch. You must choose the %s you are interested in by editing the rabbitmq.yaml configuration file or get in touch with Datadog Support" % object_type)
"Too many queues to fetch. You must choose the %s you are interested in by editing the rabbitmq.yaml configuration file or get in touch with Datadog Support" %
object_type)
for data_line in data[:max_detailed]:
# We truncate the list of nodes/queues if it's above the limit


@ -149,10 +149,12 @@ class Redis(AgentCheck):
# See https://github.com/DataDog/dd-agent/issues/374 for details
import redis
raise Exception("""Unable to run the info command. This is probably an issue with your version of the python-redis library.
raise Exception(
"""Unable to run the info command. This is probably an issue with your version of the python-redis library.
Minimum required version: 2.4.11
Your current version: %s
Please upgrade to a newer version by running sudo easy_install redis""" % redis.__version__)
Please upgrade to a newer version by running sudo easy_install redis""" %
redis.__version__)
latency_ms = round((time.time() - start) * 1000, 2)
self.gauge('redis.info.latency_ms', latency_ms, dimensions=dimensions)


@ -131,14 +131,21 @@ class TCPCheck(ServicesCheck):
alert_type = "error"
msg = """%s %s %s reported that %s (%s:%s) failed %s time(s) within %s last attempt(s).
Last error: %s""" % (notify_message,
custom_message, self.hostname, name, host, port, nb_failures, nb_tries, msg)
custom_message,
self.hostname,
name,
host,
port,
nb_failures,
nb_tries,
msg)
event_type = EventType.DOWN
else: # Status is UP
title = "[Recovered] %s reported that %s is up" % (self.hostname, name)
alert_type = "success"
msg = "%s %s %s reported that %s (%s:%s) recovered." % (notify_message,
custom_message, self.hostname, name, host, port)
msg = "%s %s %s reported that %s (%s:%s) recovered." % (
notify_message, custom_message, self.hostname, name, host, port)
event_type = EventType.UP
return {


@ -64,7 +64,8 @@ class WMICheck(AgentCheck):
def _extract_metrics(self, results, metrics, tag_by):
if len(results) > 1 and tag_by is None:
raise Exception(
'WMI query returned multiple rows but no `tag_by` value was given. metrics=%s' % metrics)
'WMI query returned multiple rows but no `tag_by` value was given. metrics=%s' %
metrics)
for wmi_property, name, mtype in metrics:
for res in results:


@ -61,7 +61,8 @@ class Zookeeper(AgentCheck):
if num_reads > max_reads:
# Safeguard against an infinite loop
raise Exception(
"Read %s bytes before exceeding max reads of %s. " % (buf.tell(), max_reads))
"Read %s bytes before exceeding max reads of %s. " %
(buf.tell(), max_reads))
chunk = sock.recv(chunk_size)
buf.write(chunk)
num_reads += 1


@ -322,7 +322,13 @@ def main():
checks_list = args[2:]
confd_directory = get_confd_path(get_os())
should_run = JMXFetch.init(
confd_directory, agentConfig, get_logging_config(), 15, jmx_command, checks_list, reporter="console")
confd_directory,
agentConfig,
get_logging_config(),
15,
jmx_command,
checks_list,
reporter="console")
if not should_run:
print "Couldn't find any valid JMX configuration in your conf.d directory: %s" % confd_directory
print "Have you enabled any JMX check ?"


@ -43,8 +43,7 @@ JMX_LIST_COMMANDS = {
'list_matching_attributes': 'List attributes that match at least one of your instances configuration',
'list_not_matching_attributes': "List attributes that don't match any of your instances configuration",
'list_limited_attributes': "List attributes that do match one of your instances configuration but that are not being collected because it would exceed the number of metrics that can be collected",
JMX_COLLECT_COMMAND: "Start the collection of metrics based on your current configuration and display them in the console"
}
JMX_COLLECT_COMMAND: "Start the collection of metrics based on your current configuration and display them in the console"}
PYTHON_JMX_STATUS_FILE = 'jmx_status_python.yaml'
@ -189,8 +188,8 @@ class JMXFetch(object):
if conf is None:
log.warning(
"%s doesn't have a 'conf' section. Only basic JVM metrics will be collected. %s" % (
inst, LINK_TO_DOC))
"%s doesn't have a 'conf' section. Only basic JVM metrics will be collected. %s" %
(inst, LINK_TO_DOC))
else:
if not isinstance(conf, list) or len(conf) == 0:
raise InvalidJMXConfiguration(
@ -200,7 +199,8 @@ class JMXFetch(object):
include = config.get('include', None)
if include is None:
raise InvalidJMXConfiguration(
"Each configuration must have an 'include' section. %s" % LINK_TO_DOC)
"Each configuration must have an 'include' section. %s" %
LINK_TO_DOC)
if not isinstance(include, dict):
raise InvalidJMXConfiguration(


@ -61,13 +61,30 @@ class Aggregator(object):
return 0
return round(float(self.count) / interval, 2)
def submit_metric(self, name, value, mtype, dimensions=None, hostname=None, device_name=None, timestamp=None,
sample_rate=1):
def submit_metric(
self,
name,
value,
mtype,
dimensions=None,
hostname=None,
device_name=None,
timestamp=None,
sample_rate=1):
""" Add a metric to be aggregated """
raise NotImplementedError()
def event(self, title, text, date_happened=None, alert_type=None, aggregation_key=None, source_type_name=None,
priority=None, dimensions=None, hostname=None):
def event(
self,
title,
text,
date_happened=None,
alert_type=None,
aggregation_key=None,
source_type_name=None,
priority=None,
dimensions=None,
hostname=None):
event = {
'msg_title': title,
'msg_text': text,
@ -214,8 +231,11 @@ class MetricsBucketAggregator(Aggregator):
not_sampled_in_this_bucket.pop(context, None)
# We need to account for Metrics that have not expired and were not
# flushed for this bucket
self.create_empty_metrics(not_sampled_in_this_bucket, expiry_timestamp, bucket_start_timestamp,
metrics)
self.create_empty_metrics(
not_sampled_in_this_bucket,
expiry_timestamp,
bucket_start_timestamp,
metrics)
del self.metric_by_bucket[bucket_start_timestamp]
else:


@ -649,9 +649,11 @@ def get_jmx_status():
jmx_checks = java_jmx_stats.get('checks', {})
if status_age > 60:
check_statuses.append(CheckStatus("jmx", [InstanceStatus(0, STATUS_ERROR,
error="JMXfetch didn't return any metrics during the last minute")],
0, 0))
check_statuses.append(
CheckStatus(
"jmx", [
InstanceStatus(
0, STATUS_ERROR, error="JMXfetch didn't return any metrics during the last minute")], 0, 0))
else:
for check_name, instances in jmx_checks.get('failed_checks', {}).iteritems():
@ -660,8 +662,12 @@ def get_jmx_status():
metric_count = info.get('metric_count', 0)
status = info.get('status')
instance_name = info.get('instance_name', None)
check_data[check_name]['statuses'].append(get_jmx_instance_status(instance_name, status,
message, metric_count))
check_data[check_name]['statuses'].append(
get_jmx_instance_status(
instance_name,
status,
message,
metric_count))
check_data[check_name]['metric_count'].append(metric_count)
for check_name, instances in jmx_checks.get('initialized_checks', {}).iteritems():
@ -670,8 +676,12 @@ def get_jmx_status():
metric_count = info.get('metric_count', 0)
status = info.get('status')
instance_name = info.get('instance_name', None)
check_data[check_name]['statuses'].append(get_jmx_instance_status(instance_name, status,
message, metric_count))
check_data[check_name]['statuses'].append(
get_jmx_instance_status(
instance_name,
status,
message,
metric_count))
check_data[check_name]['metric_count'].append(metric_count)
for check_name, data in check_data.iteritems():


@ -179,7 +179,8 @@ def get_config_path(cfg_path=None, os_name=None):
# If all searches fail, exit the agent with an error
sys.stderr.write(
"Please supply a configuration file at %s or in the directory where the Agent is currently deployed.\n" % bad_path)
"Please supply a configuration file at %s or in the directory where the Agent is currently deployed.\n" %
bad_path)
sys.exit(3)
@ -421,7 +422,8 @@ def get_proxy(agent_config, use_system_settings=False):
except Exception as e:
log.debug(
"Error while trying to fetch proxy settings using urllib %s. Proxy is probably not set" % str(e))
"Error while trying to fetch proxy settings using urllib %s. Proxy is probably not set" %
str(e))
log.debug("No proxy configured")
@ -530,7 +532,8 @@ def load_check_directory(agent_config):
confd_path = get_confd_path(osname)
except PathNotFound as e:
log.error(
"No conf.d folder found at '%s' or in the directory where the Agent is currently deployed.\n" % e.args[0])
"No conf.d folder found at '%s' or in the directory where the Agent is currently deployed.\n" %
e.args[0])
sys.exit(3)
# Start JMXFetch if needed
@ -821,7 +824,11 @@ def initialize_logging(logger_name):
try:
from logging.handlers import NTEventLogHandler
nt_event_handler = NTEventLogHandler(
logger_name, get_win32service_file('windows', 'win32service.pyd'), 'Application')
logger_name,
get_win32service_file(
'windows',
'win32service.pyd'),
'Application')
nt_event_handler.setFormatter(
logging.Formatter(get_syslog_format(logger_name), get_log_date_format()))
nt_event_handler.setLevel(logging.ERROR)


@ -214,8 +214,13 @@ class TransactionManager(object):
def tr_error(self, tr):
tr.inc_error_count()
tr.compute_next_flush(self._MAX_WAIT_FOR_REPLAY)
log.warn("Transaction %d in error (%s error%s), it will be replayed after %s" %
(tr.get_id(), tr.get_error_count(), plural(tr.get_error_count()), tr.get_next_flush()))
log.warn(
"Transaction %d in error (%s error%s), it will be replayed after %s" %
(tr.get_id(),
tr.get_error_count(),
plural(
tr.get_error_count()),
tr.get_next_flush()))
def tr_success(self, tr):
log.debug("Transaction %d completed" % tr.get_id())


@ -95,8 +95,12 @@ def init_monstatsd(config_path=None, use_watchdog=False):
# server and reporting threads.
assert 0 < interval
aggregator = MetricsBucketAggregator(hostname, aggregator_interval,
recent_point_threshold=c.get('recent_point_threshold', None))
aggregator = MetricsBucketAggregator(
hostname,
aggregator_interval,
recent_point_threshold=c.get(
'recent_point_threshold',
None))
# Start the reporting thread.
reporter = Reporter(interval, aggregator, target, use_watchdog, event_chunk_size)


@ -91,11 +91,17 @@ class Reporter(threading.Thread):
log_func = log.info
if not should_log:
log_func = log.debug
log_func("Flush #%s: flushed %s metric%s and %s event%s" % (self.flush_count, count, plural(count),
event_count, plural(event_count)))
log_func(
"Flush #%s: flushed %s metric%s and %s event%s" %
(self.flush_count,
count,
plural(count),
event_count,
plural(event_count)))
if self.flush_count == FLUSH_LOGGING_INITIAL:
log.info("First flushes done, %s flushes will be logged every %s flushes." % (FLUSH_LOGGING_COUNT,
FLUSH_LOGGING_PERIOD))
log.info(
"First flushes done, %s flushes will be logged every %s flushes." %
(FLUSH_LOGGING_COUNT, FLUSH_LOGGING_PERIOD))
# Persist a status message.
packet_count = self.aggregator.total_count


@ -32,8 +32,9 @@ class Server(object):
if forward_to_port is None:
forward_to_port = 8125
log.info("External statsd forwarding enabled. All packets received will be forwarded to %s:%s" %
(forward_to_host, forward_to_port))
log.info(
"External statsd forwarding enabled. All packets received will be forwarded to %s:%s" %
(forward_to_host, forward_to_port))
try:
self.forward_udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.forward_udp_sock.connect((forward_to_host, forward_to_port))
@ -55,9 +56,16 @@ class Server(object):
text_length = int(text_length[:-1])
event = {
'title': metadata[:title_length],
'text': (metadata[title_length + 1:title_length + text_length + 1]).replace('\\n', '\n')
}
'title': metadata[
:title_length],
'text': (
metadata[
title_length +
1:title_length +
text_length +
1]).replace(
'\\n',
'\n')}
meta = metadata[title_length + text_length + 1:]
for m in meta.split('|')[1:]:
if m[0] == u't':


@ -129,8 +129,8 @@ class DatadogConf(EditorFile):
def check_api_key(self, editor):
if self.api_key is None:
api_key, ok = QInputDialog.getText(None, "Add your API KEY",
"You must first set your api key in this file. You can find it here: https://app.datadoghq.com/account/settings#api")
api_key, ok = QInputDialog.getText(
None, "Add your API KEY", "You must first set your api key in this file. You can find it here: https://app.datadoghq.com/account/settings#api")
if ok and api_key:
new_content = []
for line in self.content.splitlines():
@ -334,8 +334,14 @@ class MainWindow(QSplitter):
lambda: self.properties.set_log_file(self.log_file))
self.manager_menu = Menu(self)
self.connect(self.properties.menu_button, SIGNAL("clicked()"),
lambda: self.manager_menu.popup(self.properties.menu_button.mapToGlobal(QPoint(0, 0))))
self.connect(
self.properties.menu_button,
SIGNAL("clicked()"),
lambda: self.manager_menu.popup(
self.properties.menu_button.mapToGlobal(
QPoint(
0,
0))))
listwidget.setCurrentRow(0)

requirements.txt (new executable file, 14 lines)

@ -0,0 +1,14 @@
requests
gearman
httplib2
ntplib
pymongo
pylint
psutil
python-memcached
PyYAML
redis
simplejson
supervisor
tornado
python-monclient

test-requirements.txt (new executable file, 2 lines)

@ -0,0 +1,2 @@
nose==1.3.0
pep8==1.5.6


@ -14,19 +14,22 @@ class TestCassandraDogstream(unittest.TestCase):
@attr('cassandra')
def testStart(self):
events = parse_cassandra(
logger, " INFO [main] 2012-12-11 21:46:26,995 StorageService.java (line 687) Bootstrap/Replace/Move completed! Now serving reads.")
logger,
" INFO [main] 2012-12-11 21:46:26,995 StorageService.java (line 687) Bootstrap/Replace/Move completed! Now serving reads.")
self.assertTrue(events is None)
@attr('cassandra')
def testInfo(self):
events = parse_cassandra(
logger, " INFO [CompactionExecutor:35] 2012-12-02 21:15:03,738 AutoSavingCache.java (line 268) Saved KeyCache (5 items) in 3 ms")
logger,
" INFO [CompactionExecutor:35] 2012-12-02 21:15:03,738 AutoSavingCache.java (line 268) Saved KeyCache (5 items) in 3 ms")
self.assertTrue(events is None)
@attr('cassandra')
def testWarn(self):
events = parse_cassandra(
logger, " WARN [MemoryMeter:1] 2012-12-03 20:07:47,158 Memtable.java (line 197) setting live ratio to minimum of 1.0 instead of 0.9416553595658074")
logger,
" WARN [MemoryMeter:1] 2012-12-03 20:07:47,158 Memtable.java (line 197) setting live ratio to minimum of 1.0 instead of 0.9416553595658074")
self.assertTrue(events is None)
@attr('cassandra')
@ -61,16 +64,28 @@ java.util.concurrent.RejectedExecutionException
@attr('cassandra')
def testCompactionStart(self):
events = parse_cassandra(
logger, " INFO [CompactionExecutor:2] 2012-12-11 21:46:27,012 CompactionTask.java (line 109) Compacting [SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-11-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-9-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-12-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-10-Data.db')]")
self.assertEquals(events, [{'alert_type': 'info', 'event_type': 'cassandra.compaction', 'timestamp': 1355262387, 'msg_title': "Compacting [SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-1", 'msg_text':
"Compacting [SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-11-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-9-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-12-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-10-Data.db')]", 'auto_priority': 0}])
logger,
" INFO [CompactionExecutor:2] 2012-12-11 21:46:27,012 CompactionTask.java (line 109) Compacting [SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-11-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-9-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-12-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-10-Data.db')]")
self.assertEquals(events,
[{'alert_type': 'info',
'event_type': 'cassandra.compaction',
'timestamp': 1355262387,
'msg_title': "Compacting [SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-1",
'msg_text': "Compacting [SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-11-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-9-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-12-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-10-Data.db')]",
'auto_priority': 0}])
@attr('cassandra')
def testCompactionEnd(self):
events = parse_cassandra(
logger, "INFO [CompactionExecutor:2] 2012-12-11 21:46:27,095 CompactionTask.java (line 221) Compacted to [/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-13-Data.db,]. 880 to 583 (~66% of original) bytes for 4 keys at 0.007831MB/s. Time: 71ms.")
self.assertEquals(events, [{'alert_type': 'info', 'event_type': 'cassandra.compaction', 'timestamp': 1355262387, 'msg_title': 'Compacted to [/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-13-Data.db,]. 880 ',
'msg_text': 'Compacted to [/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-13-Data.db,]. 880 to 583 (~66% of original) bytes for 4 keys at 0.007831MB/s. Time: 71ms.', 'auto_priority': 0}])
logger,
"INFO [CompactionExecutor:2] 2012-12-11 21:46:27,095 CompactionTask.java (line 221) Compacted to [/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-13-Data.db,]. 880 to 583 (~66% of original) bytes for 4 keys at 0.007831MB/s. Time: 71ms.")
self.assertEquals(events,
[{'alert_type': 'info',
'event_type': 'cassandra.compaction',
'timestamp': 1355262387,
'msg_title': 'Compacted to [/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-13-Data.db,]. 880 ',
'msg_text': 'Compacted to [/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-13-Data.db,]. 880 to 583 (~66% of original) bytes for 4 keys at 0.007831MB/s. Time: 71ms.',
'auto_priority': 0}])
if __name__ == '__main__':
unittest.main()


@ -101,8 +101,11 @@ class TestCore(unittest.TestCase):
self.assertEquals(self.c.normalize("__metric__", "prefix"), "prefix.metric")
self.assertEquals(
self.c.normalize("abc.metric(a+b+c{}/5)", "prefix"), "prefix.abc.metric_a_b_c_5")
self.assertEquals(self.c.normalize(
"VBE.default(127.0.0.1,,8080).happy", "varnish"), "varnish.VBE.default_127.0.0.1_8080.happy")
self.assertEquals(
self.c.normalize(
"VBE.default(127.0.0.1,,8080).happy",
"varnish"),
"varnish.VBE.default_127.0.0.1_8080.happy")
class TestAggregator(unittest.TestCase):


@ -38,8 +38,13 @@ class CouchbaseTestCase(unittest.TestCase):
for test_input, expected_output in test_pairs.items():
test_output = self.check.camel_case_to_joined_lower(test_input)
self.assertEqual(test_output, expected_output,
'Input was %s, expected output was %s, actual output was %s' % (test_input, expected_output, test_output))
self.assertEqual(
test_output,
expected_output,
'Input was %s, expected output was %s, actual output was %s' %
(test_input,
expected_output,
test_output))
@attr('couchbase')
def test_metrics_casing(self):


@ -208,7 +208,8 @@ class TestDogstream(TailTestCase):
self._write_log(log_data)
plugdog = Dogstreams.init(
self.logger, {
'dogstreams': '%s:tests.test_datadog:parse_ancient_function_plugin' % self.log_file.name})
'dogstreams': '%s:tests.test_datadog:parse_ancient_function_plugin' %
self.log_file.name})
actual_output = plugdog.check(self.config, move_end=False)
def test_dogstream_function_plugin(self):
@ -256,54 +257,43 @@ class TestDogstream(TailTestCase):
'2012-05-14 12:52:03 [RECOVERY] - host0 is up (collarbone healed)',
'2012-05-14 12:59:09 [RECOVERY] - host1 is up (nose stopped bleeding)',
]
expected_output = {
"dogstreamEvents": [
{
"timestamp": 1336999561,
"alert_type": "error",
"host": "host0",
"msg_title": "host0 is down (broke its collarbone)",
"msg_text": "2012-05-14 12:46:01 [ERROR] - host0 is down (broke its collarbone)",
"event_type": EventDefaults.EVENT_TYPE,
"aggregation_key": EventDefaults.EVENT_OBJECT,
"event_object": EventDefaults.EVENT_OBJECT,
},
{
"timestamp": 1336999687,
"alert_type": "error",
"host": "host1",
"msg_title": "host1 is down (got a bloody nose)",
"msg_text": "2012-05-14 12:48:07 [ERROR] - host1 is down (got a bloody nose)",
"event_type": EventDefaults.EVENT_TYPE,
"aggregation_key": EventDefaults.EVENT_OBJECT,
"event_object": EventDefaults.EVENT_OBJECT,
},
{
"timestamp": 1336999923,
"alert_type": "success",
"host": "host0",
"msg_title": "host0 is up (collarbone healed)",
"msg_text": "2012-05-14 12:52:03 [RECOVERY] - host0 is up (collarbone healed)",
"event_type": EventDefaults.EVENT_TYPE,
"aggregation_key": EventDefaults.EVENT_OBJECT,
"event_object": EventDefaults.EVENT_OBJECT,
},
{
"timestamp": 1337000349,
"alert_type": "success",
"host": "host1",
"msg_title": "host1 is up (nose stopped bleeding)",
"msg_text": "2012-05-14 12:59:09 [RECOVERY] - host1 is up (nose stopped bleeding)",
"event_type": EventDefaults.EVENT_TYPE,
"aggregation_key": EventDefaults.EVENT_OBJECT,
"event_object": EventDefaults.EVENT_OBJECT,
},
]
}
expected_output = {"dogstreamEvents": [{"timestamp": 1336999561,
"alert_type": "error",
"host": "host0",
"msg_title": "host0 is down (broke its collarbone)",
"msg_text": "2012-05-14 12:46:01 [ERROR] - host0 is down (broke its collarbone)",
"event_type": EventDefaults.EVENT_TYPE,
"aggregation_key": EventDefaults.EVENT_OBJECT,
"event_object": EventDefaults.EVENT_OBJECT,
},
{"timestamp": 1336999687,
"alert_type": "error",
"host": "host1",
"msg_title": "host1 is down (got a bloody nose)",
"msg_text": "2012-05-14 12:48:07 [ERROR] - host1 is down (got a bloody nose)",
"event_type": EventDefaults.EVENT_TYPE,
"aggregation_key": EventDefaults.EVENT_OBJECT,
"event_object": EventDefaults.EVENT_OBJECT,
},
{"timestamp": 1336999923,
"alert_type": "success",
"host": "host0",
"msg_title": "host0 is up (collarbone healed)",
"msg_text": "2012-05-14 12:52:03 [RECOVERY] - host0 is up (collarbone healed)",
"event_type": EventDefaults.EVENT_TYPE,
"aggregation_key": EventDefaults.EVENT_OBJECT,
"event_object": EventDefaults.EVENT_OBJECT,
},
{"timestamp": 1337000349,
"alert_type": "success",
"host": "host1",
"msg_title": "host1 is up (nose stopped bleeding)",
"msg_text": "2012-05-14 12:59:09 [RECOVERY] - host1 is up (nose stopped bleeding)",
"event_type": EventDefaults.EVENT_TYPE,
"aggregation_key": EventDefaults.EVENT_OBJECT,
"event_object": EventDefaults.EVENT_OBJECT,
},
]}
self._write_log(log_data)
@ -367,55 +357,49 @@ class TestDogstream(TailTestCase):
event_type = cassandra.EVENT_TYPE
event_object = EventDefaults.EVENT_OBJECT
expected_output = {
"dogstreamEvents": [
{
"timestamp": cassandra.parse_date("2012-05-12 21:10:48,058"),
"msg_title": "Compacting [SSTableReader(path='/var/cassandra/data/test_data/series-hc-6528-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6531-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6529-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6530-Data.db')]"[
0:common.MAX_TITLE_LEN],
"msg_text": "Compacting [SSTableReader(path='/var/cassandra/data/test_data/series-hc-6528-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6531-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6529-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6530-Data.db')]",
"alert_type": alert_type,
"auto_priority": 0,
"event_type": event_type,
"aggregation_key": event_object,
"event_object": event_object,
}, {
"timestamp": cassandra.parse_date("2012-05-12 21:10:54,851"),
"msg_title": "Compacted to [/var/cassandra/a-hc-65-Data.db,]. 102,079,134 to 101,546,397",
"alert_type": alert_type,
"auto_priority": 0,
"event_type": event_type,
"aggregation_key": event_object,
"event_object": event_object,
}, {
"timestamp": cassandra.parse_date("2012-05-13 13:15:01,927"),
"msg_title": "Compacting [SSTableReader(path='/var/cassandra/data/test_data/series-hc-6527-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6522-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6532-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6517-Data.db')]"[
0:common.MAX_TITLE_LEN],
"msg_text": "Compacting [SSTableReader(path='/var/cassandra/data/test_data/series-hc-6527-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6522-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6532-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6517-Data.db')]",
"alert_type": alert_type,
"event_type": event_type,
"auto_priority": 0,
"aggregation_key": event_object,
"event_object": event_object,
}, {
"timestamp": cassandra.parse_date("2012-05-13 13:27:17,685"),
"msg_title": "Compacting large row test_data/series:6c6f677c32 (782001077 bytes) incrementally",
"alert_type": alert_type,
"event_type": event_type,
"auto_priority": 0,
"aggregation_key": event_object,
"event_object": event_object,
}, {
"timestamp": cassandra.parse_date(
datetime.utcnow().strftime("%Y-%m-%d") + " 13:27:17,685"),
"msg_title": "Compacting large row test_data/series:6c6f677c32 (782001077 bytes) incrementally",
"alert_type": alert_type,
"event_type": event_type,
"auto_priority": 0,
"aggregation_key": event_object,
"event_object": event_object,
},
]}
expected_output = {"dogstreamEvents": [{"timestamp": cassandra.parse_date("2012-05-12 21:10:48,058"),
"msg_title": "Compacting [SSTableReader(path='/var/cassandra/data/test_data/series-hc-6528-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6531-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6529-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6530-Data.db')]"[0:common.MAX_TITLE_LEN],
"msg_text": "Compacting [SSTableReader(path='/var/cassandra/data/test_data/series-hc-6528-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6531-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6529-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6530-Data.db')]",
"alert_type": alert_type,
"auto_priority": 0,
"event_type": event_type,
"aggregation_key": event_object,
"event_object": event_object,
},
{"timestamp": cassandra.parse_date("2012-05-12 21:10:54,851"),
"msg_title": "Compacted to [/var/cassandra/a-hc-65-Data.db,]. 102,079,134 to 101,546,397",
"alert_type": alert_type,
"auto_priority": 0,
"event_type": event_type,
"aggregation_key": event_object,
"event_object": event_object,
},
{"timestamp": cassandra.parse_date("2012-05-13 13:15:01,927"),
"msg_title": "Compacting [SSTableReader(path='/var/cassandra/data/test_data/series-hc-6527-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6522-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6532-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6517-Data.db')]"[0:common.MAX_TITLE_LEN],
"msg_text": "Compacting [SSTableReader(path='/var/cassandra/data/test_data/series-hc-6527-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6522-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6532-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6517-Data.db')]",
"alert_type": alert_type,
"event_type": event_type,
"auto_priority": 0,
"aggregation_key": event_object,
"event_object": event_object,
},
{"timestamp": cassandra.parse_date("2012-05-13 13:27:17,685"),
"msg_title": "Compacting large row test_data/series:6c6f677c32 (782001077 bytes) incrementally",
"alert_type": alert_type,
"event_type": event_type,
"auto_priority": 0,
"aggregation_key": event_object,
"event_object": event_object,
},
{"timestamp": cassandra.parse_date(datetime.utcnow().strftime("%Y-%m-%d") + " 13:27:17,685"),
"msg_title": "Compacting large row test_data/series:6c6f677c32 (782001077 bytes) incrementally",
"alert_type": alert_type,
"event_type": event_type,
"auto_priority": 0,
"aggregation_key": event_object,
"event_object": event_object,
},
]}
self._write_log(log_data.split("\n"))


@ -44,7 +44,15 @@ class TestMongo(unittest.TestCase):
dir1 = mkdtemp()
dir2 = mkdtemp()
try:
self.p1 = subprocess.Popen(["mongod", "--dbpath", dir1, "--port", str(PORT1), "--replSet", "testset/%s:%d" % (socket.gethostname(), PORT2), "--rest"],
self.p1 = subprocess.Popen(["mongod",
"--dbpath",
dir1,
"--port",
str(PORT1),
"--replSet",
"testset/%s:%d" % (socket.gethostname(),
PORT2),
"--rest"],
executable="mongod",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
@ -53,7 +61,15 @@ class TestMongo(unittest.TestCase):
if self.p1:
# Set up replication
c1 = pymongo.Connection('localhost:%s' % PORT1, slave_okay=True)
self.p2 = subprocess.Popen(["mongod", "--dbpath", dir2, "--port", str(PORT2), "--replSet", "testset/%s:%d" % (socket.gethostname(), PORT1), "--rest"],
self.p2 = subprocess.Popen(["mongod",
"--dbpath",
dir2,
"--port",
str(PORT2),
"--replSet",
"testset/%s:%d" % (socket.gethostname(),
PORT1),
"--rest"],
executable="mongod",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)


@ -136,8 +136,8 @@ class TestRedis(unittest.TestCase):
# Assert that the keys metrics are tagged by db. just check db0, since
# it's the only one we can guarantee is there.
db_metrics = self._sort_metrics([m for m in metrics if m[0] in ['redis.keys',
'redis.expires'] and "redis_db:db14" in m[3]["dimensions"]])
db_metrics = self._sort_metrics(
[m for m in metrics if m[0] in ['redis.keys', 'redis.expires'] and "redis_db:db14" in m[3]["dimensions"]])
self.assertEquals(2, len(db_metrics))
self.assertEquals('redis.expires', db_metrics[0][0])


@ -112,8 +112,8 @@ class WinEventLogTest(unittest.TestCase):
check.check(inst2)
ev2 = check.get_events()
assert len(ev2) > 0
assert len(ev2) == len([ev for ev in self.LOG_EVENTS
if ev[1] in (win32evtlog.EVENTLOG_ERROR_TYPE, win32evtlog.EVENTLOG_INFORMATION_TYPE)])
assert len(ev2) == len([ev for ev in self.LOG_EVENTS if ev[1] in (
win32evtlog.EVENTLOG_ERROR_TYPE, win32evtlog.EVENTLOG_INFORMATION_TYPE)])
for ev in ev2:
# Make sure we only picked up our source
assert 'EVENTLOGTESTBAD' not in ev['msg_title']

tox.ini (new file, 20 lines)

@ -0,0 +1,20 @@
[tox]
envlist = py27,pep8
minversion = 1.6
skipsdist = True
[testenv]
usedevelop = True
install_command = pip install -U {opts} {packages}
setenv =
VIRTUAL_ENV={envdir}
DISCOVER_DIRECTORY=tests
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
whitelist_externals = bash
[testenv:pep8]
commands = pep8 monagent/ tests/ --max-line-length 100 --ignore E501 {posargs}
[testenv:venv]
commands = {posargs}
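
For reference, a minimal sketch of how this tox setup would typically be driven from the command line (assuming tox >= 1.6 is installed; the exact test runner invocation for the py27 environment is not shown in this diff):

    tox            # run every environment in envlist (py27 and the pep8 style check)
    tox -e pep8    # run only the pep8 environment defined above
    tox -e py27    # run only the Python 2.7 test environment

The `.tox/` entry added to .gitignore above keeps the virtualenvs that these commands create out of version control.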