Retire this project

We've shut down the elasticsearch service and no longer need to manage it
with puppet.

Depends-On: https://review.opendev.org/c/openstack/project-config/+/839235
Change-Id: I7b94d0fc8f49b3900ab0012196ff665e3e54266d

commit 66cf2dd8ef
parent 4627e95891

.gitignore (vendored) | 5

@@ -1,5 +0,0 @@
Gemfile.lock
.bundled_gems/
log/
junit/
.vagrant/

Gemfile | 15

@@ -1,15 +0,0 @@
source 'https://rubygems.org'

if File.exists?('/home/zuul/src/git.openstack.org/openstack-infra/puppet-openstack_infra_spec_helper')
  gem_checkout_method = {:path => '/home/zuul/src/git.openstack.org/openstack-infra/puppet-openstack_infra_spec_helper'}
else
  gem_checkout_method = {:git => 'https://git.openstack.org/openstack-infra/puppet-openstack_infra_spec_helper'}
end
gem_checkout_method[:require] = false

group :development, :test, :system_tests do
  gem 'puppet-openstack_infra_spec_helper',
    gem_checkout_method
end

# vim:ft=ruby

LICENSE | 202

@@ -1,202 +0,0 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

README.md | 10

@@ -1,5 +1,9 @@
-# OpenStack Elasticsearch Module
+This project is no longer maintained.
 
-## Overview
+The contents of this repository are still available in the Git
+source code management system. To see the contents of this
+repository before it reached its end of life, please check out the
+previous commit with "git checkout HEAD^1".
 
-The module installs Elasticsearch from debian packages.
+For any further questions, please email
+service-discuss@lists.openstack.org or join #opendev on OFTC.

Rakefile | 8

@@ -1,8 +0,0 @@
require 'rubygems'
require 'puppetlabs_spec_helper/rake_tasks'
require 'puppet-lint/tasks/puppet-lint'
PuppetLint.configuration.fail_on_warnings = true
PuppetLint.configuration.send('disable_80chars')
PuppetLint.configuration.send('disable_autoloader_layout')
PuppetLint.configuration.send('disable_class_inherits_from_params_class')
PuppetLint.configuration.send('disable_class_parameter_defaults')

bindep.txt | 11

@@ -1,11 +0,0 @@
# This is a cross-platform list tracking distribution packages needed by tests;
# see http://docs.openstack.org/infra/bindep/ for additional information.

libxml2-devel [test platform:rpm]
libxml2-dev [test platform:dpkg]
libxslt-devel [test platform:rpm]
libxslt1-dev [test platform:dpkg]
ruby-devel [test platform:rpm]
ruby-dev [test platform:dpkg]
zlib1g-dev [test platform:dpkg]
zlib-devel [test platform:rpm]

@@ -1,20 +0,0 @@
{
  "_default_": {
    "_all": { "enabled": false },
    "_source": { "compress": true },
    "dynamic_templates": [
      {
        "string_template" : {
          "match" : "*",
          "mapping": { "type": "string", "index": "not_analyzed" },
          "match_mapping_type" : "string"
        }
      }
    ],
    "properties" : {
      "@fields": { "type": "object", "dynamic": true, "path": "full" },
      "@message" : { "type" : "string", "index" : "analyzed" },
      "message" : { "type" : "string", "index" : "analyzed" }
    }
  }
}

@@ -1,68 +0,0 @@
# you can override this using by setting a system property, for example -Des.logger.level=DEBUG
es.logger.level: INFO
rootLogger: ${es.logger.level}, console, file
logger:
  # log action execution errors for easier debugging
  # action: DEBUG
  # reduce the logging for aws, too much is logged under the default INFO
  com.amazonaws: WARN
  org.apache.http: INFO

  # gateway
  #gateway: DEBUG
  #index.gateway: DEBUG

  # peer shard recovery
  #indices.recovery: DEBUG

  # discovery
  #discovery: TRACE

  index.search.slowlog: TRACE, index_search_slow_log_file
  index.indexing.slowlog: TRACE, index_indexing_slow_log_file

additivity:
  index.search.slowlog: false
  index.indexing.slowlog: false

appender:
  console:
    type: console
    layout:
      type: consolePattern
      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"

  file:
    type: dailyRollingFile
    file: ${path.logs}/${cluster.name}.log
    datePattern: "'.'yyyy-MM-dd"
    layout:
      type: pattern
      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"

  # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files.
  # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html
  #file:
    #type: extrasRollingFile
    #file: ${path.logs}/${cluster.name}.log
    #rollingPolicy: timeBased
    #rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz
    #layout:
      #type: pattern
      #conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"

  index_search_slow_log_file:
    type: dailyRollingFile
    file: ${path.logs}/${cluster.name}_index_search_slowlog.log
    datePattern: "'.'yyyy-MM-dd"
    layout:
      type: pattern
      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"

  index_indexing_slow_log_file:
    type: dailyRollingFile
    file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log
    datePattern: "'.'yyyy-MM-dd"
    layout:
      type: pattern
      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"

@@ -1,22 +0,0 @@
module Puppet::Parser::Functions
  newfunction(:es_checksum, :type => :rvalue, :doc => <<-EOS
This function returns the checksum from elastic search checksum file.
*Examples:*
  es_checksum('https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-0.20.5.deb.sha1.txt')
Would return: "b51e4dc55490bc03e54d7f8f2d41affc54773206"
EOS
  ) do |arguments|

    raise(Puppet::ParseError, "es_checksum(): Wrong number of arguments " +
      "given (#{arguments.size} for 1)") if arguments.size != 1

    begin
      require 'open-uri'
      result = open(arguments[0]).read
      result.split.first
    rescue Exception => e
      Puppet.debug("Unable to obtain elastic search checksum: #{e.message}")
      nil
    end
  end
end

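The parser function above simply downloads the published .sha1.txt file and returns its first whitespace-delimited token. A minimal sketch of how a manifest could consume it, assuming the puppet/archive module is available; the URL and variable names here are illustrative and not taken from this repository:

# Illustrative only: look up the published SHA1 for a package and hand it
# to an archive resource, mirroring how the elasticsearch class below uses it.
$deb_url  = 'https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-0.20.5.deb'
$deb_sha1 = es_checksum("${deb_url}.sha1.txt")

archive { '/tmp/elasticsearch-0.20.5.deb':
  source        => $deb_url,
  extract       => false,
  checksum      => $deb_sha1,
  checksum_type => 'sha1',
}
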
@@ -1,142 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Class to install elasticsearch.
#
class elasticsearch (
  $es_template_config = {},
  $checksum           = undef,
  $heap_size          = '16g',
  $url                = 'https://download.elastic.co/elasticsearch/elasticsearch',
  $version            = '0.20.5',
) inherits elasticsearch::params {
  # Ensure: java runtime and curl
  # Curl is handy for talking to the ES API on localhost. Allows for
  # querying cluster state and deleting indexes and so on.
  ensure_packages([$::elasticsearch::params::jre_package, 'curl', $::elasticsearch::params::gem_package])

  include '::archive'

  $package_name = "elasticsearch-${version}.deb"
  $source_url = "${url}/${package_name}"
  $source_checksum = "${source_url}.sha1.txt"

  if $checksum {
    $es_checksum = $checksum
  } else {
    $es_checksum = es_checksum($source_checksum)
  }

  if $es_checksum {
    $checksum_type = 'sha1'
  } else {
    $checksum_type = 'none'
  }

  archive { "/tmp/elasticsearch-${version}.deb":
    source        => $source_url,
    extract       => false,
    checksum      => $es_checksum,
    checksum_type => $checksum_type,
  }

  # install elastic search
  package { 'elasticsearch':
    ensure   => latest,
    source   => "/tmp/elasticsearch-${version}.deb",
    provider => 'dpkg',
    require  => [
      Package[$::elasticsearch::params::jre_package],
      File['/etc/elasticsearch/elasticsearch.yml'],
      File['/etc/elasticsearch/default-mapping.json'],
      File['/etc/elasticsearch/logging.yml'],
      File['/etc/default/elasticsearch'],
      Archive["/tmp/elasticsearch-${version}.deb"],
    ]
  }

  file { '/var/lib/elasticsearch':
    ensure  => directory,
    group   => 'elasticsearch',
    owner   => 'elasticsearch',
    require => Package['elasticsearch'],
  }

  if 'path.data' in $es_template_config {
    file { $es_template_config['path.data']:
      ensure  => directory,
      owner   => 'elasticsearch',
      require => Package['elasticsearch'],
    }
  }

  file { '/etc/elasticsearch':
    ensure => directory,
    owner  => 'root',
    group  => 'root',
    mode   => '0755',
  }

  file { '/etc/elasticsearch/elasticsearch.yml':
    ensure  => present,
    content => template('elasticsearch/elasticsearch.yml.erb'),
    replace => true,
    owner   => 'root',
    group   => 'root',
    mode    => '0644',
  }

  file { '/etc/elasticsearch/logging.yml':
    ensure  => present,
    source  => 'puppet:///modules/elasticsearch/logging.yml',
    replace => true,
    owner   => 'root',
    group   => 'root',
    mode    => '0644',
  }

  file { '/etc/elasticsearch/templates':
    ensure => directory,
    owner  => 'root',
    group  => 'root',
    mode   => '0755',
  }

  file { '/etc/elasticsearch/default-mapping.json':
    ensure  => present,
    source  => 'puppet:///modules/elasticsearch/elasticsearch.mapping.json',
    replace => true,
    owner   => 'root',
    group   => 'root',
    mode    => '0644',
  }

  file { '/etc/default/elasticsearch':
    ensure  => present,
    content => template('elasticsearch/elasticsearch.default.erb'),
    replace => true,
    owner   => 'root',
    group   => 'root',
    mode    => '0644',
  }

  cron { 'cleanup-es-logs':
    command     => 'find /var/log/elasticsearch -type f -mtime +14 -delete',
    user        => 'root',
    hour        => '6',
    minute      => '7',
    environment => 'PATH=/usr/bin:/bin:/usr/sbin:/sbin',
    require     => Package['elasticsearch'],
  }
}

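For reference, a minimal sketch of declaring the class above with its parameters overridden. The parameter names come from the class; the values are hypothetical (the checksum is the example value from the es_checksum docstring):

# Hypothetical values: pin a specific package version, shrink the JVM heap,
# and supply the checksum up front so the es_checksum() lookup is skipped.
class { '::elasticsearch':
  version            => '0.90.9',
  heap_size          => '8g',
  checksum           => 'b51e4dc55490bc03e54d7f8f2d41affc54773206',
  es_template_config => {
    'cluster.name' => 'logstash',
    'path.data'    => '/srv/elasticsearch',
  },
}
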
@@ -1,34 +0,0 @@
# Params class
class elasticsearch::params (
){


  case $::osfamily {
    'Debian': {
      case $::lsbdistcodename {
        'precise': {
          $gem_package = 'rubygems'
          $jre_package = 'openjdk-7-jre-headless'
        }
        'trusty': {
          $gem_package = 'ruby'
          $jre_package = 'openjdk-7-jre-headless'
        }
        'xenial': {
          $gem_package = 'ruby'
          $jre_package = 'openjdk-8-jre-headless'
        }
        default: {
          $gem_package = 'ruby'
          $jre_package = 'openjdk-7-jre-headless'
        }
      }
    }
    default: {
      $gem_package = 'ruby'
      $jre_package = 'openjdk-7-jre-headless'
    }
  }

}

@@ -1,11 +0,0 @@
{
  "name": "openstackinfra-elasticsearch",
  "version": "0.0.1",
  "author": "Openstack CI",
  "summary": "Puppet module for Elasticsearch",
  "license": "Apache 2.0",
  "source": "https://git.openstack.org/openstack-infra/puppet-elasticsearch.git",
  "project_page": "http://docs.openstack.org/infra/system-config/",
  "issues_url": "https://storyboard.openstack.org/#!/project/758",
  "dependencies": []
}

@@ -1,23 +0,0 @@
require 'puppet-openstack_infra_spec_helper/spec_helper_acceptance'

describe 'puppet-elasticsearch module', :if => ['debian', 'ubuntu'].include?(os[:family]) do
  def pp_path
    base_path = File.dirname(__FILE__)
    File.join(base_path, 'fixtures')
  end

  def default_puppet_module
    module_path = File.join(pp_path, 'default.pp')
    File.read(module_path)
  end

  it 'should work with no errors' do
    apply_manifest(default_puppet_module, catch_failures: true)
  end

  it 'should be idempotent' do
    pending('this module is not idempotent yet')
    apply_manifest(default_puppet_module, catch_changes: true)
  end

end

@@ -1,6 +0,0 @@
class { '::elasticsearch':
  es_template_config => {
    'cluster.name' => 'acceptance-test',
    'path.data'    => '/tmp/acceptance',
  },
}

@@ -1,11 +0,0 @@
HOSTS:
  ubuntu-server-1404-x64:
    roles:
      - master
    platform: ubuntu-14.04-amd64
    box: puppetlabs/ubuntu-14.04-64-nocm
    box_url: https://vagrantcloud.com/puppetlabs/ubuntu-14.04-64-nocm
    hypervisor: vagrant
CONFIG:
  log_level: debug
  type: git

@@ -1,10 +0,0 @@
HOSTS:
  centos-70-x64:
    roles:
      - master
    platform: el-7-x86_64
    hypervisor: none
    ip: 127.0.0.1
CONFIG:
  type: foss
  set_env: false

@@ -1,10 +0,0 @@
HOSTS:
  ubuntu-14.04-amd64:
    roles:
      - master
    platform: ubuntu-14.04-amd64
    hypervisor: none
    ip: 127.0.0.1
CONFIG:
  type: foss
  set_env: false

@@ -1,10 +0,0 @@
HOSTS:
  ubuntu-16.04-amd64:
    roles:
      - master
    platform: ubuntu-16.04-amd64
    hypervisor: none
    ip: 127.0.0.1
CONFIG:
  type: foss
  set_env: false

@@ -1,38 +0,0 @@
# Run ElasticSearch as this user ID and group ID
#ES_USER=elasticsearch
#ES_GROUP=elasticsearch

# Heap Size (defaults to 256m min, 1g max)
ES_HEAP_SIZE=<%= @heap_size %>

# Heap new generation
#ES_HEAP_NEWSIZE=

# max direct memory
#ES_DIRECT_SIZE=

# Maximum number of open files, defaults to 65535.
#MAX_OPEN_FILES=65535

# Maximum locked memory size. Set to "unlimited" if you use the
# bootstrap.mlockall option in elasticsearch.yml. You must also set
# ES_HEAP_SIZE.
MAX_LOCKED_MEMORY=unlimited

# ElasticSearch log directory
#LOG_DIR=/var/log/elasticsearch

# ElasticSearch data directory
#DATA_DIR=/var/lib/elasticsearch

# ElasticSearch work directory
#WORK_DIR=/tmp/elasticsearch

# ElasticSearch configuration directory
#CONF_DIR=/etc/elasticsearch

# ElasticSearch configuration file (elasticsearch.yml)
#CONF_FILE=/etc/elasticsearch/elasticsearch.yml

# Additional Java OPTS
#ES_JAVA_OPTS=

@@ -1,439 +0,0 @@
##################### ElasticSearch Configuration Example #####################

# This file contains an overview of various configuration settings,
# targeted at operations staff. Application developers should
# consult the guide at <http://elasticsearch.org/guide>.
#
# The installation procedure is covered at
# <http://elasticsearch.org/guide/reference/setup/installation.html>.
#
# ElasticSearch comes with reasonable defaults for most settings,
# so you can try it out without bothering with configuration.
#
# Most of the time, these defaults are just fine for running a production
# cluster. If you're fine-tuning your cluster, or wondering about the
# effect of certain configuration option, please _do ask_ on the
# mailing list or IRC channel [http://elasticsearch.org/community].

# Any element in the configuration can be replaced with environment variables
# by placing them in ${...} notation. For example:
#
# node.rack: ${RACK_ENV_VAR}

# See <http://elasticsearch.org/guide/reference/setup/configuration.html>
# for information on supported formats and syntax for the configuration file.


################################### Cluster ###################################

# Cluster name identifies your cluster for auto-discovery. If you're running
# multiple clusters on the same network, make sure you're using unique names.
#
# cluster.name: elasticsearch
<% if @es_template_config.has_key?('cluster.name') then -%>
cluster.name: <%= @es_template_config['cluster.name'] %>
<% end -%>


#################################### Node #####################################

# Node names are generated dynamically on startup, so you're relieved
# from configuring them manually. You can tie this node to a specific name:
#
node.name: "<%= scope.lookupvar("::hostname") %>"

# Every node can be configured to allow or deny being eligible as the master,
# and to allow or deny to store the data.
#
# Allow this node to be eligible as a master node (enabled by default):
#
# node.master: true
#
# Allow this node to store data (enabled by default):
#
# node.data: true

# You can exploit these settings to design advanced cluster topologies.
#
# 1. You want this node to never become a master node, only to hold data.
#    This will be the "workhorse" of your cluster.
#
# node.master: false
# node.data: true
#
# 2. You want this node to only serve as a master: to not store any data and
#    to have free resources. This will be the "coordinator" of your cluster.
#
# node.master: true
# node.data: false
#
# 3. You want this node to be neither master nor data node, but
#    to act as a "search load balancer" (fetching data from nodes,
#    aggregating results, etc.)
#
# node.master: false
# node.data: false

# Use the Cluster Health API [http://localhost:9200/_cluster/health], the
# Node Info API [http://localhost:9200/_cluster/nodes] or GUI tools
# such as <http://github.com/lukas-vlcek/bigdesk> and
# <http://mobz.github.com/elasticsearch-head> to inspect the cluster state.

# A node can have generic attributes associated with it, which can later be used
# for customized shard allocation filtering, or allocation awareness. An attribute
# is a simple key value pair, similar to node.key: value, here is an example:
#
# node.rack: rack314

# By default, multiple nodes are allowed to start from the same installation location
# to disable it, set the following:
# node.max_local_storage_nodes: 1

<% if @es_template_config.has_key?('node.master') then -%>
node.master: <%= @es_template_config['node.master'] %>
<% end -%>
<% if @es_template_config.has_key?('node.data') then -%>
node.data: <%= @es_template_config['node.data'] %>
<% end -%>


#################################### Index ####################################

# You can set a number of options (such as shard/replica options, mapping
# or analyzer definitions, translog settings, ...) for indices globally,
# in this file.
#
# Note, that it makes more sense to configure index settings specifically for
# a certain index, either when creating it or by using the index templates API.
#
# See <http://elasticsearch.org/guide/reference/index-modules/> and
# <http://elasticsearch.org/guide/reference/api/admin-indices-create-index.html>
# for more information.

# Set the number of shards (splits) of an index (5 by default):
#
<% if @es_template_config.has_key?('index.number_of_shards') then -%>
index.number_of_shards: <%= @es_template_config['index.number_of_shards'] %>
<% else -%>
# index.number_of_shards: 5
<% end -%>

# Set the number of replicas (additional copies) of an index (1 by default):
#
<% if @es_template_config.has_key?('index.number_of_replicas') then -%>
index.number_of_replicas: <%= @es_template_config['index.number_of_replicas'] %>
<% else -%>
# index.number_of_replicas: 1
<% end -%>

# Note, that for development on a local machine, with small indices, it usually
# makes sense to "disable" the distributed features:
#
# index.number_of_shards: 1
# index.number_of_replicas: 0

# These settings directly affect the performance of index and search operations
# in your cluster. Assuming you have enough machines to hold shards and
# replicas, the rule of thumb is:
#
# 1. Having more *shards* enhances the _indexing_ performance and allows to
#    _distribute_ a big index across machines.
# 2. Having more *replicas* enhances the _search_ performance and improves the
#    cluster _availability_.
#
# The "number_of_shards" is a one-time setting for an index.
#
# The "number_of_replicas" can be increased or decreased anytime,
# by using the Index Update Settings API.
#
# ElasticSearch takes care about load balancing, relocating, gathering the
# results from nodes, etc. Experiment with different settings to fine-tune
# your setup.

# Use the Index Status API (<http://localhost:9200/A/_status>) to inspect
# the index status.

<% if @es_template_config.has_key?('index.store.compress.stored') then -%>
index.store.compress.stored: <%= @es_template_config['index.store.compress.stored'] %>
<% end -%>
<% if @es_template_config.has_key?('index.store.compress.tv') then -%>
index.store.compress.tv: <%= @es_template_config['index.store.compress.tv'] %>
<% end -%>
<% if @es_template_config.has_key?('indices.memory.index_buffer_size') then -%>
indices.memory.index_buffer_size: "<%= @es_template_config['indices.memory.index_buffer_size'] %>"
<% end -%>
<% if @es_template_config.has_key?('indices.breaker.fielddata.limit') then -%>
indices.breaker.fielddata.limit: "<%= @es_template_config['indices.breaker.fielddata.limit'] %>"
<% end -%>
<% if @es_template_config.has_key?('index.routing.allocation.total_shards_per_node') then -%>
index.routing.allocation.total_shards_per_node: "<%= @es_template_config['index.routing.allocation.total_shards_per_node'] %>"
<% end -%>

#################################### Paths ####################################

# Path to directory containing configuration (this file and logging.yml):
#
# path.conf: /path/to/conf

# Path to directory where to store index data allocated for this node.
#
# path.data: /path/to/data
#
# Can optionally include more than one location, causing data to be striped across
# the locations (a la RAID 0) on a file level, favouring locations with most free
# space on creation. For example:
#
# path.data: /path/to/data1,/path/to/data2
<% if @es_template_config.has_key?('path.data') then -%>
path.data: "<%= @es_template_config['path.data'] %>"
<% end -%>

# Path to temporary files:
#
# path.work: /path/to/work

# Path to log files:
#
# path.logs: /path/to/logs

# Path to where plugins are installed:
#
# path.plugins: /path/to/plugins


#################################### Plugin ###################################

# If a plugin listed here is not installed for current node, the node will not start.
#
# plugin.mandatory: mapper-attachments,lang-groovy


################################### Memory ####################################

# ElasticSearch performs poorly when JVM starts swapping: you should ensure that
# it _never_ swaps.
#
# Set this property to true to lock the memory:
#
<% if @es_template_config.has_key?('bootstrap.mlockall') then -%>
bootstrap.mlockall: <%= @es_template_config['bootstrap.mlockall'] %>
<% else -%>
# bootstrap.mlockall: true
<% end -%>

# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set
# to the same value, and that the machine has enough memory to allocate
# for ElasticSearch, leaving enough memory for the operating system itself.
#
# You should also make sure that the ElasticSearch process is allowed to lock
# the memory, eg. by using `ulimit -l unlimited`.


############################## Network And HTTP ###############################

# ElasticSearch, by default, binds itself to the 0.0.0.0 address, and listens
# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node
# communication. (the range means that if the port is busy, it will automatically
# try the next port).

# Set the bind address specifically (IPv4 or IPv6):
#
# network.bind_host: 192.168.0.1

# Set the address other nodes will use to communicate with this node. If not
# set, it is automatically derived. It must point to an actual IP address.
#
<% if @es_template_config.has_key?('network.publish_host') then -%>
network.publish_host: <%= @es_template_config['network.publish_host'] %>
<% else -%>
# network.publish_host: 192.168.0.1
<% end -%>

# Set both 'bind_host' and 'publish_host':
#
# network.host: 192.168.0.1

# Set a custom port for the node to node communication (9300 by default):
#
# transport.tcp.port: 9300

# Enable compression for all communication between nodes (disabled by default):
#
# transport.tcp.compress: true

# Set a custom port to listen for HTTP traffic:
#
# http.port: 9200

# Set a custom allowed content length:
#
# http.max_content_length: 100mb

# Disable HTTP completely:
#
# http.enabled: false

<% if @es_template_config.has_key?('http.cors.enabled') then -%>
http.cors.enabled: <%= @es_template_config['http.cors.enabled'] %>
<% end -%>

<% if @es_template_config.has_key?('http.cors.allow-origin') then -%>
http.cors.allow-origin: <%= @es_template_config['http.cors.allow-origin'] %>
<% end -%>

################################### Gateway ###################################

# The gateway allows for persisting the cluster state between full cluster
# restarts. Every change to the state (such as adding an index) will be stored
# in the gateway, and when the cluster starts up for the first time,
# it will read its state from the gateway.

# There are several types of gateway implementations. For more information,
# see <http://elasticsearch.org/guide/reference/modules/gateway>.

# The default gateway type is the "local" gateway (recommended):
#
# gateway.type: local

# Settings below control how and when to start the initial recovery process on
# a full cluster restart (to reuse as much local data as possible when using shared
# gateway).

# Allow recovery process after N nodes in a cluster are up:
#
<% if @es_template_config.has_key?('gateway.recover_after_nodes') then -%>
gateway.recover_after_nodes: <%= @es_template_config['gateway.recover_after_nodes'] %>
<% else -%>
# gateway.recover_after_nodes: 1
<% end -%>

# Set the timeout to initiate the recovery process, once the N nodes
# from previous setting are up (accepts time value):
#
<% if @es_template_config.has_key?('gateway.recover_after_time') then -%>
gateway.recover_after_time: <%= @es_template_config['gateway.recover_after_time'] %>
<% else -%>
# gateway.recover_after_time: 5m
<% end -%>

# Set how many nodes are expected in this cluster. Once these N nodes
# are up (and recover_after_nodes is met), begin recovery process immediately
# (without waiting for recover_after_time to expire):
#
<% if @es_template_config.has_key?('gateway.expected_nodes') then -%>
gateway.expected_nodes: <%= @es_template_config['gateway.expected_nodes'] %>
<% else -%>
# gateway.expected_nodes: 2
<% end -%>


############################# Recovery Throttling #############################

# These settings allow to control the process of shards allocation between
# nodes during initial recovery, replica allocation, rebalancing,
# or when adding and removing nodes.

# Set the number of concurrent recoveries happening on a node:
#
# 1. During the initial recovery
#
# cluster.routing.allocation.node_initial_primaries_recoveries: 4
#
# 2. During adding/removing nodes, rebalancing, etc
#
# cluster.routing.allocation.node_concurrent_recoveries: 2

# Set to throttle throughput when recovering (eg. 100mb, by default unlimited):
#
# indices.recovery.max_size_per_sec: 0

# Set to limit the number of open concurrent streams when
# recovering a shard from a peer:
#
# indices.recovery.concurrent_streams: 5


################################## Discovery ##################################

# Discovery infrastructure ensures nodes can be found within a cluster
# and master node is elected. Multicast discovery is the default.

# Set to ensure a node sees N other master eligible nodes to be considered
# operational within the cluster. Set this option to a higher value (2-4)
# for large clusters (>3 nodes):
#
<% if @es_template_config.has_key?('discovery.zen.minimum_master_nodes') then -%>
discovery.zen.minimum_master_nodes: <%= @es_template_config['discovery.zen.minimum_master_nodes'] %>
<% else -%>
# discovery.zen.minimum_master_nodes: 1
<% end -%>

# Set the time to wait for ping responses from other nodes when discovering.
# Set this option to a higher value on a slow or congested network
# to minimize discovery failures:
#
# discovery.zen.ping.timeout: 3s

# See <http://elasticsearch.org/guide/reference/modules/discovery/zen.html>
# for more information.

# Unicast discovery allows to explicitly control which nodes will be used
# to discover the cluster. It can be used when multicast is not present,
# or to restrict the cluster communication-wise.
#
# 1. Disable multicast discovery (enabled by default):
#
<% if @es_template_config.has_key?('discovery.zen.ping.multicast.enabled') then -%>
discovery.zen.ping.multicast.enabled: <%= @es_template_config['discovery.zen.ping.multicast.enabled'] %>
<% else -%>
# discovery.zen.ping.multicast.enabled: false
<% end -%>
#
# 2. Configure an initial list of master nodes in the cluster
#    to perform discovery when new nodes (master or data) are started:
#
# discovery.zen.ping.unicast.hosts: ["host1", "host2:port", "host3[portX-portY]"]
#
<% if @es_template_config.has_key?('discovery.zen.ping.unicast.hosts') then -%>
discovery.zen.ping.unicast.hosts: ["<%= @es_template_config['discovery.zen.ping.unicast.hosts'].join("\", \"") %>"]
<% end -%>

# EC2 discovery allows to use AWS EC2 API in order to perform discovery.
#
# You have to install the cloud-aws plugin for enabling the EC2 discovery.
#
# See <http://elasticsearch.org/guide/reference/modules/discovery/ec2.html>
# for more information.
#
# See <http://elasticsearch.org/tutorials/2011/08/22/elasticsearch-on-ec2.html>
# for a step-by-step tutorial.


################################## Slow Log ##################################

# Shard level query and fetch threshold logging.

#index.search.slowlog.threshold.query.warn: 10s
#index.search.slowlog.threshold.query.info: 5s
#index.search.slowlog.threshold.query.debug: 2s
#index.search.slowlog.threshold.query.trace: 500ms

#index.search.slowlog.threshold.fetch.warn: 1s
#index.search.slowlog.threshold.fetch.info: 800ms
#index.search.slowlog.threshold.fetch.debug: 500ms
#index.search.slowlog.threshold.fetch.trace: 200ms

#index.indexing.slowlog.threshold.index.warn: 10s
#index.indexing.slowlog.threshold.index.info: 5s
#index.indexing.slowlog.threshold.index.debug: 2s
#index.indexing.slowlog.threshold.index.trace: 500ms

################################## GC Logging ################################

#monitor.jvm.gc.ParNew.warn: 1000ms
#monitor.jvm.gc.ParNew.info: 700ms
#monitor.jvm.gc.ParNew.debug: 400ms

#monitor.jvm.gc.ConcurrentMarkSweep.warn: 10s
#monitor.jvm.gc.ConcurrentMarkSweep.info: 5s
#monitor.jvm.gc.ConcurrentMarkSweep.debug: 2s

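The template above only emits a setting when the matching key is present in the es_template_config hash. A sketch of a declaration that exercises the main keys the template checks for; the key names come from the template, while the values here are made up:

# Keys below are the ones the template tests with has_key?; the values are
# hypothetical. discovery.zen.ping.unicast.hosts must be an array because
# the template joins it into a quoted YAML list.
class { '::elasticsearch':
  es_template_config => {
    'cluster.name'                         => 'example-cluster',
    'node.master'                          => true,
    'node.data'                            => true,
    'index.number_of_replicas'             => 1,
    'bootstrap.mlockall'                   => true,
    'discovery.zen.minimum_master_nodes'   => 2,
    'discovery.zen.ping.multicast.enabled' => false,
    'discovery.zen.ping.unicast.hosts'     => ['es01', 'es02', 'es03'],
    'path.data'                            => '/var/lib/elasticsearch/data',
  },
}
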
@@ -1 +0,0 @@
include '::elasticsearch'