From cb3fc2416c0433eebb2bf95f254873f6e3f03285 Mon Sep 17 00:00:00 2001
From: Simon Pasquier
Date: Wed, 20 Jul 2016 10:12:08 +0200
Subject: [PATCH] Increase the Elasticsearch queue to 1Gb

Otherwise the collector's pipeline may block when it processes a large
amount of data (for instance when the collector starts for the first
time).

Change-Id: Id543c939891b4cec52fabb3bed07677eaf110f07
---
 .../puppet/modules/lma_collector/manifests/params.pp | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/deployment_scripts/puppet/modules/lma_collector/manifests/params.pp b/deployment_scripts/puppet/modules/lma_collector/manifests/params.pp
index c56d48e2d..f154e98d4 100644
--- a/deployment_scripts/puppet/modules/lma_collector/manifests/params.pp
+++ b/deployment_scripts/puppet/modules/lma_collector/manifests/params.pp
@@ -86,8 +86,12 @@ class lma_collector::params {
   $buffering_max_buffer_size_for_aggregator = 256 * 1024 * 1024
   $queue_full_action_for_aggregator = 'drop'
 
-  $buffering_max_file_size_for_log = 64 * 1024 * 1024
-  $buffering_max_buffer_size_for_log = 256 * 1024 * 1024
+  # The log collector should have enough room to deal with transient spikes of
+  # data otherwise it may fill up the local buffer which in turn blocks the
+  # Heka pipeline. Once the pipeline is stuck, it will have a hard time to
+  # recover from that situation. In most cases, 1Gb should be enough.
+  $buffering_max_file_size_for_log = 128 * 1024 * 1024
+  $buffering_max_buffer_size_for_log = 1024 * 1024 * 1024
   $queue_full_action_for_log = 'block'
 
   $buffering_max_file_log_metric_size = 64 * 1024 * 1024
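
The buffering parameters changed above are presumably consumed elsewhere in the
lma_collector module when the Heka output for Elasticsearch is declared. The
Puppet snippet below is a rough, hypothetical sketch of that wiring only: the
heka::output::elasticsearch define, its parameter names, and the server/port
values are assumptions for illustration and are not taken from this patch.

    # Hypothetical sketch: feed the enlarged log buffering settings into the
    # Elasticsearch output. The define and its parameter names are assumed,
    # not confirmed by this patch or by the module's actual code.
    class lma_collector::elasticsearch_output_sketch {
      include lma_collector::params

      heka::output::elasticsearch { 'elasticsearch-log':
        server            => 'localhost',  # assumed endpoint
        port              => 9200,         # assumed port
        max_file_size     => $lma_collector::params::buffering_max_file_size_for_log,
        max_buffer_size   => $lma_collector::params::buffering_max_buffer_size_for_log,
        queue_full_action => $lma_collector::params::queue_full_action_for_log,
      }
    }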