[DEFAULT]
#logging: make sure that the user under whom the server runs has permission
#to write to the directory.
log_file = threshold.log
log_dir = /var/log/kiloeyes/
log_level = DEBUG
default_log_levels = kiloeyes=DEBUG
service = threshold_engine
threads = 3

[thresholdengine]
metrics_topic = metrics
alarm_topic = alarms
processor = threshold_processor
check_alarm_interval = 60

[alarmdefinitions]
doc_type = alarmdefinitions
index_strategy = fixed
index_prefix = admin
size = 10000
#query params include name and dimensions, for example:
#GET /v2.0/alarm-definitions?name=CPU percent greater than 10&dimensions=hostname:devstack,os:linux

#the alarm definition name filter;
#for example, name = cpu starts the threshold engine only with alarm
#definitions about cpu. Leave name empty to apply no filter.
name =

#dimension key/value pairs filter;
#for example, dimensions = hostname:devstack,os:linux starts the engine
#only with alarm definitions that have the matching dimensions.
dimensions =

#the time interval at which to retrieve the latest alarm definitions
check_alarm_def_interval = 120

[kafka_opts]
#The endpoint of the kafka server; multiple servers can be listed here,
#for example:
#uri = 10.100.41.114:9092,10.100.41.115:9092,10.100.41.116:9092
uri = 192.168.1.191:9092

#consumer group name
group = datapoints_group

#how many times to retry when an error occurs
max_retry = 1

#wait time between retries when kafka goes down
wait_time = 1

#use a synchronous (False) or asynchronous (True) connection to kafka
async = False

#send messages in bulk or one by one
compact = False

#The partitions this connection should listen for messages on; this
#parameter is for reading from kafka. To listen on multiple partitions,
#repeat the option. For example, if the client should listen on
#partitions 1 and 3, the configuration should look like the following:
# partitions = 1
# partitions = 3
#defaults to listening on partition 0.
partitions = 0

[es_conn]
uri = http://192.168.1.191:9200
time_id = timestamp
drop_data = False
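
#The commented block below is not read by kiloeyes; it is a minimal sketch,
#assuming Python 3 and the standard-library configparser, of how this file
#can be sanity-checked before starting the threshold engine. The path
#/etc/kiloeyes/threshold.conf is an assumption; adjust it to wherever this
#file is actually installed.
#
#   import configparser
#
#   cfg = configparser.ConfigParser()
#   # read() returns the list of files it could actually parse
#   found = cfg.read('/etc/kiloeyes/threshold.conf')
#   assert found, 'config file not found or not parseable'
#
#   # kafka brokers are given as a comma-separated host:port list
#   brokers = cfg.get('kafka_opts', 'uri').split(',')
#   # alarm evaluation interval used by the threshold engine
#   interval = cfg.getint('thresholdengine', 'check_alarm_interval')
#   print(brokers, interval)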