Retire this project
We've shutdown the log processing service and don't need to manage it with puppet any more. Depends-On: https://review.opendev.org/c/openstack/project-config/+/839235 Change-Id: I451488faf6a7502a5171d2a4299d7a4e40d96072
This commit is contained in:
parent
89bfe00dda
commit
fb7c8790dd
2
.gitignore
vendored
2
.gitignore
vendored
@ -1,2 +0,0 @@
|
|||||||
Gemfile.lock
|
|
||||||
.bundled_gems/
|
|
15
Gemfile
15
Gemfile
@ -1,15 +0,0 @@
|
|||||||
source 'https://rubygems.org'
|
|
||||||
|
|
||||||
if File.exists?('/home/zuul/src/git.openstack.org/openstack-infra/puppet-openstack_infra_spec_helper')
|
|
||||||
gem_checkout_method = {:path => '/home/zuul/src/git.openstack.org/openstack-infra/puppet-openstack_infra_spec_helper'}
|
|
||||||
else
|
|
||||||
gem_checkout_method = {:git => 'https://git.openstack.org/openstack-infra/puppet-openstack_infra_spec_helper'}
|
|
||||||
end
|
|
||||||
gem_checkout_method[:require] = false
|
|
||||||
|
|
||||||
group :development, :test, :system_tests do
|
|
||||||
gem 'puppet-openstack_infra_spec_helper',
|
|
||||||
gem_checkout_method
|
|
||||||
end
|
|
||||||
|
|
||||||
# vim:ft=ruby
|
|
202
LICENSE
202
LICENSE
@ -1,202 +0,0 @@
|
|||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work.
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following
|
|
||||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
|
||||||
replaced with your own identifying information. (Don't include
|
|
||||||
the brackets!) The text should be enclosed in the appropriate
|
|
||||||
comment syntax for the file format. We also recommend that a
|
|
||||||
file or class name and description of purpose be included on the
|
|
||||||
same "printed page" as the copyright notice for easier
|
|
||||||
identification within third-party archives.
|
|
||||||
|
|
||||||
Copyright [yyyy] [name of copyright owner]
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
10
README.md
10
README.md
@ -1,5 +1,9 @@
|
|||||||
# OpenStack Log Processor Module
|
This project is no longer maintained.
|
||||||
|
|
||||||
This module installs and configures Log Processor
|
|
||||||
|
|
||||||
|
The contents of this repository are still available in the Git
|
||||||
|
source code management system. To see the contents of this
|
||||||
|
repository before it reached its end of life, please check out the
|
||||||
|
previous commit with "git checkout HEAD^1".
|
||||||
|
|
||||||
|
For any further questions, please email
|
||||||
|
service-discuss@lists.openstack.org or join #opendev on OFTC.
|
||||||
|
8
Rakefile
8
Rakefile
@ -1,8 +0,0 @@
|
|||||||
require 'rubygems'
|
|
||||||
require 'puppetlabs_spec_helper/rake_tasks'
|
|
||||||
require 'puppet-lint/tasks/puppet-lint'
|
|
||||||
PuppetLint.configuration.fail_on_warnings = true
|
|
||||||
PuppetLint.configuration.send('disable_80chars')
|
|
||||||
PuppetLint.configuration.send('disable_autoloader_layout')
|
|
||||||
PuppetLint.configuration.send('disable_class_inherits_from_params_class')
|
|
||||||
PuppetLint.configuration.send('disable_class_parameter_defaults')
|
|
@ -1,123 +0,0 @@
|
|||||||
#! /usr/bin/crm
|
|
||||||
#
|
|
||||||
# Copyright 2013 OpenStack Foundation
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
# This script trains an OSB (Orthogonal Sparse Bigram) bayesian filter
|
|
||||||
# with log lines from test runs and classifies each line according to
|
|
||||||
# the likelyhood it indicates an error. Very little experimentation
|
|
||||||
# has been done to determine the best classifier and training method;
|
|
||||||
# further experimentation may be useful.
|
|
||||||
|
|
||||||
# The training method is TET -- Train Every Thing. This is not
|
|
||||||
# normally advised as a training method for Bayesian filters. In
|
|
||||||
# experiments, it identified about twice as many lines as being
|
|
||||||
# associated with errers as were indicated by a TOE (Train On Error)
|
|
||||||
# method. Some of them were false positives, but many were not, and
|
|
||||||
# of those, it had a much higher (pR ~= 37) confidence in them than
|
|
||||||
# TOE. TET seems to give qualitatively better results when filtering
|
|
||||||
# for higher pR values.
|
|
||||||
|
|
||||||
# Set unbuffered IO
|
|
||||||
window
|
|
||||||
|
|
||||||
# Base component of path to data files
|
|
||||||
isolate (:prefix:) /:*:_arg2:/
|
|
||||||
|
|
||||||
# Whether this run is for a SUCCESS or FAILURE result
|
|
||||||
isolate (:target:) /:*:_arg3:/
|
|
||||||
|
|
||||||
# Train each file on a newline just to make sure it exists
|
|
||||||
learn [:_nl:] <osb unique microgroom> (:*:prefix:/SUCCESS.css)
|
|
||||||
learn [:_nl:] <osb unique microgroom> (:*:prefix:/FAILURE.css)
|
|
||||||
{
|
|
||||||
# Iterate over each line
|
|
||||||
window <bychar> /\n/ /\n/
|
|
||||||
{
|
|
||||||
isolate (:stats:)
|
|
||||||
isolate (:result:)
|
|
||||||
isolate (:prob:)
|
|
||||||
isolate (:pr:)
|
|
||||||
# Save a copy of this line
|
|
||||||
isolate (:line:) /:*:_dw:/
|
|
||||||
{
|
|
||||||
{
|
|
||||||
# Remove things that look like timestamps from the beginning of the line
|
|
||||||
match (:timestamp:) /^[-.0-9 |:]+/
|
|
||||||
alter (:timestamp:) //
|
|
||||||
}
|
|
||||||
{
|
|
||||||
# Don't treat UUIDs as uniquely special.
|
|
||||||
match (:uuidtoken:) /[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{12}/
|
|
||||||
alter (:uuidtoken:) /UUIDTOKEN/
|
|
||||||
{
|
|
||||||
match (:uuidtoken:) <fromnext> /[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{12}/
|
|
||||||
alter (:uuidtoken:) /UUIDTOKEN/
|
|
||||||
# Loop to replace all TOKENS in line
|
|
||||||
liaf
|
|
||||||
}
|
|
||||||
}
|
|
||||||
{
|
|
||||||
# Don't treat IDs as uniquely special.
|
|
||||||
match (:idtoken:) /[[:xdigit:]]{32,40}/
|
|
||||||
alter (:idtoken:) /IDTOKEN/
|
|
||||||
{
|
|
||||||
match (:idtoken:) <fromnext> /[[:xdigit:]]{32,40}/
|
|
||||||
alter (:idtoken:) /IDTOKEN/
|
|
||||||
# Loop to replace all TOKENS in line
|
|
||||||
liaf
|
|
||||||
}
|
|
||||||
}
|
|
||||||
{
|
|
||||||
# Don't treat IDs as uniquely special.
|
|
||||||
match (:numtoken:) /-[[:digit:]]{7,}/
|
|
||||||
alter (:numtoken:) /-NUMTOKEN/
|
|
||||||
{
|
|
||||||
match (:numtoken:) <fromnext> /-[[:digit:]]{7,}/
|
|
||||||
alter (:numtoken:) /-NUMTOKEN/
|
|
||||||
# Loop to replace all TOKENS in line
|
|
||||||
liaf
|
|
||||||
}
|
|
||||||
}
|
|
||||||
# Train on the line
|
|
||||||
learn <osb unique microgroom> (:*:prefix:/:*:target:.css)
|
|
||||||
# Classify the line to see if it looks more like a SUCCESS or FAILURE line
|
|
||||||
classify <osb unique microgroom> (:*:prefix:/SUCCESS.css :*:prefix:/FAILURE.css) (:stats:)
|
|
||||||
{
|
|
||||||
# The stats variable looks like:
|
|
||||||
# CLASSIFY succeeds; success probability: 1.0000 pR: 304.6527
|
|
||||||
# Best match to file #0 (/tmp/crm114/console_html/SUCCESS.css) prob: 0.9933 pR: 2.1720
|
|
||||||
# Total features in input file: 20
|
|
||||||
# #0 (/tmp/crm114/console_html/SUCCESS.css): features: 3544235, hits: 901854, prob: 9.93e-01, pR: 2.17
|
|
||||||
# #1 (/tmp/crm114/console_html/FAILURE.css): features: 1, hits: 0, prob: 6.69e-03, pR: -2.17
|
|
||||||
# Pull out the filename, probability, and pR (a kind of logarithmic probability, see CRM docs)
|
|
||||||
match [:stats:] <nomultiline> /^Best match to .*\/([A-Za-z]+).css\) prob: ([-.0-9]+) pR: ([-.0-9]+)/ ( :: :result: :prob: :pr: )
|
|
||||||
{
|
|
||||||
# If this line is classified as FAILURE, negate
|
|
||||||
# the pR value (which will always be positive).
|
|
||||||
# Do this by prepending a '-' or the empty string.
|
|
||||||
{
|
|
||||||
match [:result:] /FAILURE/
|
|
||||||
alter (:result:) /-/
|
|
||||||
} alius {
|
|
||||||
alter (:result:) //
|
|
||||||
}
|
|
||||||
}
|
|
||||||
# Output the sign and pR value for this line.
|
|
||||||
output /:*:result::*:pr:\n/
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
liaf
|
|
||||||
}
|
|
158
files/geard.init
158
files/geard.init
@ -1,158 +0,0 @@
|
|||||||
#! /bin/sh
|
|
||||||
### BEGIN INIT INFO
|
|
||||||
# Provides: geard
|
|
||||||
# Required-Start: $remote_fs $syslog $named $network
|
|
||||||
# Required-Stop: $remote_fs $syslog $named $network
|
|
||||||
# Default-Start: 2 3 4 5
|
|
||||||
# Default-Stop: 0 1 6
|
|
||||||
# Short-Description: Python gearman server
|
|
||||||
# Description: Daemon that runs a native python gearman server
|
|
||||||
### END INIT INFO
|
|
||||||
|
|
||||||
# Do NOT "set -e"
|
|
||||||
|
|
||||||
# PATH should only include /usr/* if it runs after the mountnfs.sh script
|
|
||||||
PATH=/sbin:/usr/sbin:/bin:/usr/bin
|
|
||||||
DESC="Geard"
|
|
||||||
NAME=geard
|
|
||||||
DAEMON=/usr/local/bin/geard
|
|
||||||
PIDFILE=/var/run/$NAME/$NAME.pid
|
|
||||||
SCRIPTNAME=/etc/init.d/$NAME
|
|
||||||
USER=logprocessor
|
|
||||||
|
|
||||||
# Exit if the package is not installed
|
|
||||||
[ -x "$DAEMON" ] || exit 0
|
|
||||||
|
|
||||||
# Read configuration variable file if it is present
|
|
||||||
[ -r /etc/default/$NAME ] && . /etc/default/$NAME
|
|
||||||
DAEMON_ARGS="--listen-address $GEARD_LISTEN_ADDRESS -p $GEARD_PORT --pidfile $PIDFILE"
|
|
||||||
|
|
||||||
# Load the VERBOSE setting and other rcS variables
|
|
||||||
. /lib/init/vars.sh
|
|
||||||
|
|
||||||
# Define LSB log_* functions.
|
|
||||||
# Depend on lsb-base (>= 3.0-6) to ensure that this file is present.
|
|
||||||
. /lib/lsb/init-functions
|
|
||||||
|
|
||||||
#
|
|
||||||
# Function that starts the daemon/service
|
|
||||||
#
|
|
||||||
do_start()
|
|
||||||
{
|
|
||||||
# Return
|
|
||||||
# 0 if daemon has been started
|
|
||||||
# 1 if daemon was already running
|
|
||||||
# 2 if daemon could not be started
|
|
||||||
|
|
||||||
mkdir -p /var/run/$NAME
|
|
||||||
chown $USER /var/run/$NAME
|
|
||||||
start-stop-daemon --start --quiet --pidfile $PIDFILE -c $USER --exec $DAEMON --test > /dev/null \
|
|
||||||
|| return 1
|
|
||||||
start-stop-daemon --start --quiet --pidfile $PIDFILE -c $USER --exec $DAEMON -- \
|
|
||||||
$DAEMON_ARGS \
|
|
||||||
|| return 2
|
|
||||||
# Add code here, if necessary, that waits for the process to be ready
|
|
||||||
# to handle requests from services started subsequently which depend
|
|
||||||
# on this one. As a last resort, sleep for some time.
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Function that stops the daemon/service
|
|
||||||
#
|
|
||||||
do_stop()
|
|
||||||
{
|
|
||||||
# Return
|
|
||||||
# 0 if daemon has been stopped
|
|
||||||
# 1 if daemon was already stopped
|
|
||||||
# 2 if daemon could not be stopped
|
|
||||||
# other if a failure occurred
|
|
||||||
start-stop-daemon --stop --signal 9 --pidfile $PIDFILE
|
|
||||||
RETVAL="$?"
|
|
||||||
[ "$RETVAL" = 2 ] && return 2
|
|
||||||
rm -f /var/run/$NAME/*
|
|
||||||
return "$RETVAL"
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Function that stops the daemon/service
|
|
||||||
#
|
|
||||||
#do_graceful_stop()
|
|
||||||
#{
|
|
||||||
# PID=`cat $PIDFILE`
|
|
||||||
# kill -USR1 $PID
|
|
||||||
#
|
|
||||||
# # wait until really stopped
|
|
||||||
# if [ -n "${PID:-}" ]; then
|
|
||||||
# i=0
|
|
||||||
# while kill -0 "${PID:-}" 2> /dev/null; do
|
|
||||||
# if [ $i -eq '0' ]; then
|
|
||||||
# echo -n " ... waiting "
|
|
||||||
# else
|
|
||||||
# echo -n "."
|
|
||||||
# fi
|
|
||||||
# i=$(($i+1))
|
|
||||||
# sleep 1
|
|
||||||
# done
|
|
||||||
# fi
|
|
||||||
#
|
|
||||||
# rm -f /var/run/$NAME/*
|
|
||||||
#}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Function that sends a SIGHUP to the daemon/service
|
|
||||||
#
|
|
||||||
#do_reload() {
|
|
||||||
# #
|
|
||||||
# # If the daemon can reload its configuration without
|
|
||||||
# # restarting (for example, when it is sent a SIGHUP),
|
|
||||||
# # then implement that here.
|
|
||||||
# #
|
|
||||||
# start-stop-daemon --stop --signal 1 --quiet --pidfile $PIDFILE --name zuul-server
|
|
||||||
# return 0
|
|
||||||
#}
|
|
||||||
|
|
||||||
case "$1" in
|
|
||||||
start)
|
|
||||||
[ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
|
|
||||||
do_start
|
|
||||||
case "$?" in
|
|
||||||
0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
|
|
||||||
2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
|
|
||||||
esac
|
|
||||||
;;
|
|
||||||
stop)
|
|
||||||
[ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
|
|
||||||
do_stop
|
|
||||||
case "$?" in
|
|
||||||
0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
|
|
||||||
2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
|
|
||||||
esac
|
|
||||||
;;
|
|
||||||
status)
|
|
||||||
status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
|
|
||||||
;;
|
|
||||||
# reload)
|
|
||||||
# #
|
|
||||||
# # If do_reload() is not implemented then leave this commented out
|
|
||||||
# # and leave 'force-reload' as an alias for 'restart'.
|
|
||||||
# #
|
|
||||||
# log_daemon_msg "Reloading $DESC" "$NAME"
|
|
||||||
# do_reload
|
|
||||||
# log_end_msg $?
|
|
||||||
# ;;
|
|
||||||
restart|force-reload)
|
|
||||||
#
|
|
||||||
# If the "reload" option is implemented then remove the
|
|
||||||
# 'force-reload' alias
|
|
||||||
#
|
|
||||||
log_daemon_msg "Restarting $DESC" "$NAME"
|
|
||||||
do_stop
|
|
||||||
do_start
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
|
|
||||||
exit 3
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
:
|
|
@ -1,158 +0,0 @@
|
|||||||
#! /bin/sh
|
|
||||||
### BEGIN INIT INFO
|
|
||||||
# Provides: jenkins-log-client
|
|
||||||
# Required-Start: $remote_fs $syslog $named $network
|
|
||||||
# Required-Stop: $remote_fs $syslog $named $network
|
|
||||||
# Default-Start: 2 3 4 5
|
|
||||||
# Default-Stop: 0 1 6
|
|
||||||
# Short-Description: Jenkins Log Client
|
|
||||||
# Description: Service to push Jenkins logs into logstash.
|
|
||||||
### END INIT INFO
|
|
||||||
|
|
||||||
# Do NOT "set -e"
|
|
||||||
|
|
||||||
# PATH should only include /usr/* if it runs after the mountnfs.sh script
|
|
||||||
PATH=/sbin:/usr/sbin:/bin:/usr/bin
|
|
||||||
DESC="Jenkins Log Client"
|
|
||||||
NAME=jenkins-log-client
|
|
||||||
DAEMON=/usr/local/bin/log-gearman-client.py
|
|
||||||
PIDFILE=/var/run/$NAME/$NAME.pid
|
|
||||||
DAEMON_ARGS="-c /etc/logprocessor/jenkins-log-client.yaml -p $PIDFILE"
|
|
||||||
SCRIPTNAME=/etc/init.d/$NAME
|
|
||||||
USER=logprocessor
|
|
||||||
|
|
||||||
# Exit if the package is not installed
|
|
||||||
[ -x "$DAEMON" ] || exit 0
|
|
||||||
|
|
||||||
# Read configuration variable file if it is present
|
|
||||||
[ -r /etc/default/$NAME ] && . /etc/default/$NAME
|
|
||||||
|
|
||||||
# Load the VERBOSE setting and other rcS variables
|
|
||||||
. /lib/init/vars.sh
|
|
||||||
|
|
||||||
# Define LSB log_* functions.
|
|
||||||
# Depend on lsb-base (>= 3.0-6) to ensure that this file is present.
|
|
||||||
. /lib/lsb/init-functions
|
|
||||||
|
|
||||||
#
|
|
||||||
# Function that starts the daemon/service
|
|
||||||
#
|
|
||||||
do_start()
|
|
||||||
{
|
|
||||||
# Return
|
|
||||||
# 0 if daemon has been started
|
|
||||||
# 1 if daemon was already running
|
|
||||||
# 2 if daemon could not be started
|
|
||||||
|
|
||||||
mkdir -p /var/run/$NAME
|
|
||||||
chown $USER /var/run/$NAME
|
|
||||||
start-stop-daemon --start --quiet --pidfile $PIDFILE -c $USER --exec $DAEMON --test > /dev/null \
|
|
||||||
|| return 1
|
|
||||||
start-stop-daemon --start --quiet --pidfile $PIDFILE -c $USER --exec $DAEMON -- \
|
|
||||||
$DAEMON_ARGS \
|
|
||||||
|| return 2
|
|
||||||
# Add code here, if necessary, that waits for the process to be ready
|
|
||||||
# to handle requests from services started subsequently which depend
|
|
||||||
# on this one. As a last resort, sleep for some time.
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Function that stops the daemon/service
|
|
||||||
#
|
|
||||||
do_stop()
|
|
||||||
{
|
|
||||||
# Return
|
|
||||||
# 0 if daemon has been stopped
|
|
||||||
# 1 if daemon was already stopped
|
|
||||||
# 2 if daemon could not be stopped
|
|
||||||
# other if a failure occurred
|
|
||||||
start-stop-daemon --stop --signal 9 --pidfile $PIDFILE
|
|
||||||
RETVAL="$?"
|
|
||||||
[ "$RETVAL" = 2 ] && return 2
|
|
||||||
rm -f /var/run/$NAME/*
|
|
||||||
return "$RETVAL"
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Function that stops the daemon/service
|
|
||||||
#
|
|
||||||
#do_graceful_stop()
|
|
||||||
#{
|
|
||||||
# PID=`cat $PIDFILE`
|
|
||||||
# kill -USR1 $PID
|
|
||||||
#
|
|
||||||
# # wait until really stopped
|
|
||||||
# if [ -n "${PID:-}" ]; then
|
|
||||||
# i=0
|
|
||||||
# while kill -0 "${PID:-}" 2> /dev/null; do
|
|
||||||
# if [ $i -eq '0' ]; then
|
|
||||||
# echo -n " ... waiting "
|
|
||||||
# else
|
|
||||||
# echo -n "."
|
|
||||||
# fi
|
|
||||||
# i=$(($i+1))
|
|
||||||
# sleep 1
|
|
||||||
# done
|
|
||||||
# fi
|
|
||||||
#
|
|
||||||
# rm -f /var/run/$NAME/*
|
|
||||||
#}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Function that sends a SIGHUP to the daemon/service
|
|
||||||
#
|
|
||||||
#do_reload() {
|
|
||||||
# #
|
|
||||||
# # If the daemon can reload its configuration without
|
|
||||||
# # restarting (for example, when it is sent a SIGHUP),
|
|
||||||
# # then implement that here.
|
|
||||||
# #
|
|
||||||
# start-stop-daemon --stop --signal 1 --quiet --pidfile $PIDFILE --name zuul-server
|
|
||||||
# return 0
|
|
||||||
#}
|
|
||||||
|
|
||||||
case "$1" in
|
|
||||||
start)
|
|
||||||
[ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
|
|
||||||
do_start
|
|
||||||
case "$?" in
|
|
||||||
0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
|
|
||||||
2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
|
|
||||||
esac
|
|
||||||
;;
|
|
||||||
stop)
|
|
||||||
[ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
|
|
||||||
do_stop
|
|
||||||
case "$?" in
|
|
||||||
0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
|
|
||||||
2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
|
|
||||||
esac
|
|
||||||
;;
|
|
||||||
status)
|
|
||||||
status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
|
|
||||||
;;
|
|
||||||
# reload)
|
|
||||||
# #
|
|
||||||
# # If do_reload() is not implemented then leave this commented out
|
|
||||||
# # and leave 'force-reload' as an alias for 'restart'.
|
|
||||||
# #
|
|
||||||
# log_daemon_msg "Reloading $DESC" "$NAME"
|
|
||||||
# do_reload
|
|
||||||
# log_end_msg $?
|
|
||||||
# ;;
|
|
||||||
restart|force-reload)
|
|
||||||
#
|
|
||||||
# If the "reload" option is implemented then remove the
|
|
||||||
# 'force-reload' alias
|
|
||||||
#
|
|
||||||
log_daemon_msg "Restarting $DESC" "$NAME"
|
|
||||||
do_stop
|
|
||||||
do_start
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
|
|
||||||
exit 3
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
:
|
|
@ -1,244 +0,0 @@
|
|||||||
#!/usr/bin/python2
|
|
||||||
#
|
|
||||||
# Copyright 2013 Hewlett-Packard Development Company, L.P.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import argparse
|
|
||||||
import daemon
|
|
||||||
import gear
|
|
||||||
import json
|
|
||||||
import logging
|
|
||||||
import os
|
|
||||||
import os.path
|
|
||||||
import re
|
|
||||||
import signal
|
|
||||||
import socket
|
|
||||||
import threading
|
|
||||||
import time
|
|
||||||
import yaml
|
|
||||||
import zmq
|
|
||||||
|
|
||||||
|
|
||||||
try:
|
|
||||||
import daemon.pidlockfile as pidfile_mod
|
|
||||||
except ImportError:
|
|
||||||
import daemon.pidfile as pidfile_mod
|
|
||||||
|
|
||||||
|
|
||||||
class EventProcessor(threading.Thread):
    """Listen for Jenkins/Zuul "onFinalized" build events on a ZMQ SUB
    socket and submit one Gearman job per configured log file.

    Each received event is matched against the per-file job/build-queue/
    project filters; matching events are turned into ``push-log`` or
    ``push-subunit`` Gearman jobs consumed by the log workers.
    """

    def __init__(self, zmq_address, gearman_client, files, source_url):
        threading.Thread.__init__(self)
        self.files = files
        self.source_url = source_url
        self.gearman_client = gearman_client
        self.zmq_address = zmq_address
        self._connect_zmq()

    def run(self):
        while True:
            try:
                self._read_event()
            except Exception:
                # Assume that an error reading data from zmq or deserializing
                # data received from zmq indicates a zmq error and reconnect.
                # (Catch Exception, not a bare except, so SystemExit and
                # KeyboardInterrupt still propagate.)
                logging.exception("ZMQ exception.")
                self._connect_zmq()

    def _connect_zmq(self):
        """(Re)create the SUB socket subscribed to onFinalized events."""
        logging.debug("Connecting to zmq endpoint.")
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.SUB)
        event_filter = b"onFinalized"
        self.socket.setsockopt(zmq.SUBSCRIBE, event_filter)
        self.socket.connect(self.zmq_address)

    def _read_event(self):
        """Read one event off the socket and submit matching Gearman jobs."""
        string = self.socket.recv().decode('utf-8')
        # Messages look like "onFinalized {json...}"; drop the topic prefix.
        event = json.loads(string.split(None, 1)[1])
        logging.debug("Jenkins event received: " + json.dumps(event))
        for fileopts in self.files:
            output = {}
            source_url, out_event = self._parse_event(event, fileopts)
            # Skip files whose optional regex filters don't match this build.
            job_filter = fileopts.get('job-filter')
            if (job_filter and
                    not re.match(job_filter,
                                 out_event['fields']['build_name'])):
                continue
            build_queue_filter = fileopts.get('build-queue-filter')
            if (build_queue_filter and
                    not re.match(build_queue_filter,
                                 out_event['fields']['build_queue'])):
                continue
            project_filter = fileopts.get('project-filter')
            if (project_filter and
                    not re.match(project_filter,
                                 out_event['fields']['project'])):
                continue
            output['source_url'] = source_url
            output['retry'] = fileopts.get('retry-get', False)
            output['event'] = out_event
            # Subunit streams go to a dedicated Gearman function.
            if 'subunit' in fileopts.get('name'):
                job = gear.Job(b'push-subunit',
                               json.dumps(output).encode('utf8'))
            else:
                job = gear.Job(b'push-log', json.dumps(output).encode('utf8'))
            try:
                self.gearman_client.submitJob(job)
            except Exception:
                logging.exception("Exception submitting job to Gearman.")

    def _get_log_dir(self, event):
        """Return the log path for this build (LOG_PATH build parameter)."""
        parameters = event["build"].get("parameters", {})
        base = parameters.get('LOG_PATH', 'UNKNOWN')
        return base

    def _parse_fields(self, event, filename):
        """Flatten the interesting Zuul/Jenkins build data into a dict.

        Every field defaults to "UNKNOWN" when absent from the event.
        """
        fields = {}
        fields["filename"] = filename
        fields["build_name"] = event.get("name", "UNKNOWN")
        fields["build_status"] = event["build"].get("status", "UNKNOWN")
        fields["build_node"] = event["build"].get("node_name", "UNKNOWN")
        fields["build_master"] = event["build"].get("host_name", "UNKNOWN")
        parameters = event["build"].get("parameters", {})
        fields["project"] = parameters.get("ZUUL_PROJECT", "UNKNOWN")
        # The voting value is "1" for voting, "0" for non-voting
        fields["voting"] = parameters.get("ZUUL_VOTING", "UNKNOWN")
        # TODO(clarkb) can we do better without duplicated data here?
        fields["build_uuid"] = parameters.get("ZUUL_UUID", "UNKNOWN")
        fields["build_short_uuid"] = fields["build_uuid"][:7]
        fields["build_queue"] = parameters.get("ZUUL_PIPELINE", "UNKNOWN")
        fields["build_ref"] = parameters.get("ZUUL_REF", "UNKNOWN")
        fields["build_branch"] = parameters.get("ZUUL_BRANCH", "UNKNOWN")
        fields["build_zuul_url"] = parameters.get("ZUUL_URL", "UNKNOWN")
        if parameters.get("ZUUL_CHANGE"):
            fields["build_change"] = parameters.get("ZUUL_CHANGE", "UNKNOWN")
            fields["build_patchset"] = parameters.get("ZUUL_PATCHSET",
                                                      "UNKNOWN")
        elif parameters.get("ZUUL_NEWREV"):
            fields["build_newrev"] = parameters.get("ZUUL_NEWREV",
                                                    "UNKNOWN")
        # Bug fix: the original tested `["build_node"] != "UNKNOWN"`, which
        # compares a literal list against a string and is therefore always
        # True; the intent is to test the build_node field itself.
        if fields["build_node"] != "UNKNOWN":
            # Assumes node names like "ubuntu-xenial-rax-ord-12345", where
            # the two components before the trailing id form the provider
            # name — TODO confirm against the nodepool naming scheme.
            node_provider = '-'.join(
                fields["build_node"].split('-')[-3:-1])
            fields["node_provider"] = node_provider or "UNKNOWN"
        else:
            fields["node_provider"] = "UNKNOWN"
        return fields

    def _parse_event(self, event, fileopts):
        """Build (source_url, out_event) for one configured log file."""
        fields = self._parse_fields(event, fileopts['name'])
        log_dir = self._get_log_dir(event)
        source_url = fileopts.get('source-url', self.source_url) + '/' + \
            os.path.join(log_dir, fileopts['name'])
        fields["log_url"] = source_url
        out_event = {}
        out_event["fields"] = fields
        out_event["tags"] = [os.path.basename(fileopts['name'])] + \
            fileopts.get('tags', [])
        return source_url, out_event
|
|
||||||
|
|
||||||
|
|
||||||
class Server(object):
    """Top level daemon object for the log-gearman-client.

    Starts one pair of EventProcessor threads (logs + subunit) per
    configured ZMQ publisher, plus an embedded gear.Server, then idles
    in the main thread waiting for signals.
    """

    def __init__(self, config, debuglog):
        # Config init.
        self.config = config
        self.source_url = self.config['source-url']
        # Python logging output file.
        self.debuglog = debuglog
        # Populated by setup_processors(); EventProcessor threads.
        self.processors = []

    def setup_logging(self):
        """Configure process-wide logging (DEBUG to file, else CRITICAL)."""
        if self.debuglog:
            logging.basicConfig(format='%(asctime)s %(message)s',
                                filename=self.debuglog, level=logging.DEBUG)
        else:
            # Prevent leakage into the logstash log stream.
            logging.basicConfig(level=logging.CRITICAL)
        logging.debug("Log pusher starting.")

    def setup_processors(self):
        """Create two EventProcessor threads per ZMQ publisher.

        Each publisher gets a log processor (source-files) and a subunit
        processor (subunit-files), sharing one Gearman client connected
        to localhost.
        """
        for publisher in self.config['zmq-publishers']:
            gearclient = gear.Client()
            gearclient.addServer('localhost')
            gearclient.waitForServer()
            log_processor = EventProcessor(
                publisher, gearclient,
                self.config['source-files'], self.source_url)
            subunit_processor = EventProcessor(
                publisher, gearclient,
                self.config['subunit-files'], self.source_url)
            self.processors.append(log_processor)
            self.processors.append(subunit_processor)

    def wait_for_name_resolution(self, host, port):
        """Block until host resolves, retrying on EAI_AGAIN every 2s."""
        while True:
            try:
                socket.getaddrinfo(host, port)
            except socket.gaierror as e:
                if e.errno == socket.EAI_AGAIN:
                    logging.debug("Temporary failure in name resolution")
                    time.sleep(2)
                    continue
                else:
                    raise
            break

    def main(self):
        """Start the embedded gear server and all processor threads.

        Statsd reporting is optional and driven entirely by the
        STATSD_HOST / STATSD_PORT / STATSD_PREFIX environment variables.
        Never returns; parks the main thread in signal.pause().
        """
        statsd_host = os.environ.get('STATSD_HOST')
        statsd_port = int(os.environ.get('STATSD_PORT', 8125))
        statsd_prefix = os.environ.get('STATSD_PREFIX', 'logstash.geard')
        if statsd_host:
            # Wait for statsd to resolve before gear.Server tries to use it.
            self.wait_for_name_resolution(statsd_host, statsd_port)
        self.gearserver = gear.Server(
            statsd_host=statsd_host,
            statsd_port=statsd_port,
            statsd_prefix=statsd_prefix)

        self.setup_processors()
        for processor in self.processors:
            # Daemon threads so the process can exit on signal without
            # joining the (infinite-loop) processors.
            processor.daemon = True
            processor.start()
        while True:
            signal.pause()
|
|
||||||
|
|
||||||
|
|
||||||
def main():
    """Parse arguments, load the YAML config and run the log client.

    Runs in the foreground with --foreground, otherwise daemonizes
    behind a timeout-guarded PID lockfile.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config", required=True,
                        help="Path to yaml config file.")
    parser.add_argument("-d", "--debuglog",
                        help="Enable debug log. "
                             "Specifies file to write log to.")
    parser.add_argument("--foreground", action='store_true',
                        help="Run in the foreground.")
    parser.add_argument("-p", "--pidfile",
                        default="/var/run/jenkins-log-pusher/"
                                "jenkins-log-gearman-client.pid",
                        help="PID file to lock during daemonization.")
    args = parser.parse_args()

    with open(args.config, 'r') as config_stream:
        # safe_load: the config is plain data; yaml.load without an
        # explicit Loader can construct arbitrary Python objects and is
        # deprecated in newer PyYAML.
        config = yaml.safe_load(config_stream)
    server = Server(config, args.debuglog)

    if args.foreground:
        server.setup_logging()
        server.main()
    else:
        # 10s timeout acquiring the PID lock before giving up.
        pidfile = pidfile_mod.TimeoutPIDLockFile(args.pidfile, 10)
        with daemon.DaemonContext(pidfile=pidfile):
            server.setup_logging()
            server.main()
|
|
@ -1,553 +0,0 @@
|
|||||||
#!/usr/bin/python2
|
|
||||||
#
|
|
||||||
# Copyright 2013 Hewlett-Packard Development Company, L.P.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import argparse
|
|
||||||
import daemon
|
|
||||||
import gear
|
|
||||||
import json
|
|
||||||
import logging
|
|
||||||
import os
|
|
||||||
import Queue
|
|
||||||
import re
|
|
||||||
import requests
|
|
||||||
import select
|
|
||||||
import socket
|
|
||||||
import subprocess
|
|
||||||
import sys
|
|
||||||
import threading
|
|
||||||
import time
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
import paho.mqtt.publish as publish
|
|
||||||
|
|
||||||
try:
|
|
||||||
import daemon.pidlockfile as pidfile_mod
|
|
||||||
except ImportError:
|
|
||||||
import daemon.pidfile as pidfile_mod
|
|
||||||
|
|
||||||
|
|
||||||
def semi_busy_wait(seconds):
    """Sleep for at least `seconds`, re-sleeping on early wakeups.

    time.sleep() may return before the requested duration has elapsed
    (e.g. when a signal is delivered). Keep sleeping for the remaining
    time until the full duration has passed.
    """
    # Fix: compute the remainder from a fixed deadline. The original
    # subtracted the *total* elapsed time from an already-reduced
    # remainder on every loop iteration, undercounting the wait after
    # more than one early wakeup.
    deadline = time.time() + seconds
    while True:
        time.sleep(seconds)
        seconds = deadline - time.time()
        if seconds <= 0.0:
            return
|
|
||||||
|
|
||||||
|
|
||||||
class FilterException(Exception):
    """Raised by a log filter when it cannot process a line (e.g. the
    CRM114 subprocess times out or exits early)."""
|
|
||||||
|
|
||||||
|
|
||||||
class CRM114Filter(object):
    """Score log lines with an external CRM114 classifier subprocess.

    The wrapper script is spawned once per (path, build_status) pair and
    fed one line per process() call over stdin; it answers with an error
    probability on stdout. Only SUCCESS/FAILURE builds are classified —
    for any other build status the filter stays passive (self.p is None)
    and process() is a no-op returning True.
    """

    def __init__(self, script, path, build_status):
        self.p = None
        self.script = script
        self.path = path
        self.build_status = build_status
        if build_status not in ['SUCCESS', 'FAILURE']:
            # No model to train/score against; remain a pass-through.
            return
        if not os.path.exists(path):
            os.makedirs(path)
        args = [script, path, build_status]
        # All three pipes are used: lines in via stdin, scores out via
        # stdout, diagnostics via stderr.
        self.p = subprocess.Popen(args,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE,
                                  stdin=subprocess.PIPE,
                                  close_fds=True)

    def process(self, data):
        """Annotate data['error_pr'] with CRM114's score.

        Always returns True (the line is kept). Raises FilterException on
        a read timeout, a subprocess error, or early EOF.
        """
        if not self.p:
            return True
        self.p.stdin.write(data['message'].encode('utf-8') + '\n')
        # Wait up to 20 seconds for a score; silence means the subprocess
        # is wedged, so kill it rather than block the pipeline.
        (r, w, x) = select.select([self.p.stdout], [],
                                  [self.p.stdin, self.p.stdout], 20)
        if not r:
            self.p.kill()
            raise FilterException('Timeout reading from CRM114')
        r = self.p.stdout.readline()
        if not r:
            err = self.p.stderr.read()
            if err:
                raise FilterException(err)
            else:
                raise FilterException('Early EOF from CRM114')
        r = r.strip()
        data['error_pr'] = float(r)
        return True

    def _catchOSError(self, method):
        # Cleanup helper: log, but never raise, on OSError so one failed
        # close does not prevent the others.
        try:
            method()
        except OSError:
            logging.exception("Subprocess cleanup failed.")

    def close(self):
        if not self.p:
            return
        # CRM114 should die when its stdinput is closed. Close that
        # fd along with stdout and stderr then return.
        self._catchOSError(self.p.stdin.close)
        self._catchOSError(self.p.stdout.close)
        self._catchOSError(self.p.stderr.close)
        self._catchOSError(self.p.wait)
|
|
||||||
|
|
||||||
|
|
||||||
class CRM114FilterFactory(object):
    """Factory producing one CRM114Filter per processed log file.

    Normalizes the log filename so equivalent files share one model
    directory under `basepath`.
    """
    name = "CRM114"

    def __init__(self, script, basepath):
        self.script = script
        self.basepath = basepath
        # Precompile regexes
        self.re_remove_suffix = re.compile(r'(\.[^a-zA-Z]+)?(\.gz)?$')
        self.re_remove_dot = re.compile(r'\.')

    def create(self, fields):
        """Return a CRM114Filter keyed on the normalized filename."""
        # We only want the basename so that the same logfile at different
        # paths isn't treated as different
        filename = os.path.basename(fields['filename'])
        # We want to collapse any numeric or compression suffixes so that
        # nova.log and nova.log.1 and nova.log.1.gz are treated as the same
        # logical file
        filename = self.re_remove_suffix.sub(r'', filename)
        # Dots become underscores so the name is a safe directory component.
        filename = self.re_remove_dot.sub('_', filename)
        path = os.path.join(self.basepath, filename)
        return CRM114Filter(self.script, path, fields['build_status'])
|
|
||||||
|
|
||||||
|
|
||||||
class OsloSeverityFilter(object):
    """Parse oslo.log-style lines; annotate severity and drop DEBUG lines.

    process() sets data['severity'] when the line matches the oslo log
    format and returns False (drop the line) only for DEBUG entries;
    everything else — including non-matching lines — is kept.
    """
    DATEFMT = r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}((\.|\,)\d{3,6})?'
    SEVERITYFMT = '(DEBUG|INFO|WARNING|ERROR|TRACE|AUDIT|CRITICAL)'
    OSLO_LOGMATCH = (r'^(?P<date>%s)(?P<line>(?P<pid> \d+)? '
                     '(?P<severity>%s).*)' %
                     (DATEFMT, SEVERITYFMT))
    OSLORE = re.compile(OSLO_LOGMATCH)

    def process(self, data):
        """Return False to drop DEBUG lines; True otherwise."""
        msg = data['message']
        m = self.OSLORE.match(msg)
        if m:
            data['severity'] = m.group('severity')
            # Bug fix: the original compared the bound method `.lower`
            # (never equal to 'debug'), so DEBUG lines were never dropped.
            if data['severity'].lower() == 'debug':
                # Ignore debug-level lines
                return False
        return True

    def close(self):
        # Stateless filter; present for filter-interface symmetry.
        pass
|
|
||||||
|
|
||||||
|
|
||||||
class OsloSeverityFilterFactory(object):
    """Factory creating OsloSeverityFilter instances (one per log file)."""
    name = "OsloSeverity"

    def create(self, fields):
        # The filter is stateless, so `fields` is intentionally unused.
        return OsloSeverityFilter()
|
|
||||||
|
|
||||||
|
|
||||||
class SystemdSeverityFilter(object):
    '''
    Match systemd DEBUG level logs

    A line to match looks like:

    Aug 15 18:58:49.910786 hostname devstack@keystone.service[31400]: DEBUG uwsgi ...
    '''
    SYSTEMDDATE = '\w+\s+\d+\s+\d{2}:\d{2}:\d{2}((\.|\,)\d{3,6})?'
    DATEFMT = '\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}((\.|\,)\d{3,6})?'
    SEVERITYFMT = '(DEBUG|INFO|WARNING|ERROR|TRACE|AUDIT|CRITICAL)'
    SYSTEMD_LOGMATCH = '^(?P<date>%s)( (\S+) \S+\[\d+\]\: ' \
                       '(?P<severity>%s)?.*)' % (SYSTEMDDATE, SEVERITYFMT)
    SYSTEMDRE = re.compile(SYSTEMD_LOGMATCH)

    def process(self, data):
        # Keep every line except those the systemd pattern tags as DEBUG.
        match = self.SYSTEMDRE.match(data['message'])
        return not (match and match.group('severity') == 'DEBUG')

    def close(self):
        # Stateless filter; nothing to release.
        pass
|
|
||||||
|
|
||||||
|
|
||||||
class SystemdSeverityFilterFactory(object):
    """Factory creating SystemdSeverityFilter instances (one per log file)."""
    name = "SystemdSeverity"

    def create(self, fields):
        # The filter is stateless, so `fields` is intentionally unused.
        return SystemdSeverityFilter()
|
|
||||||
|
|
||||||
|
|
||||||
class LogRetriever(threading.Thread):
    """Gearman worker thread: fetch a log file over HTTP, run each line
    through the configured filters, and push surviving lines onto `logq`.

    One instance handles one Gearman job at a time in an infinite loop.
    Optionally reports per-job success/failure over MQTT.
    """

    def __init__(self, gearman_worker, filters, logq, mqtt=None):
        threading.Thread.__init__(self)
        self.gearman_worker = gearman_worker
        # Filter *factories*; concrete filters are created per job.
        self.filters = filters
        self.logq = logq
        self.mqtt = mqtt

    def run(self):
        while True:
            try:
                self._handle_event()
            except:
                logging.exception("Exception retrieving log event.")

    def _handle_event(self):
        """Process one Gearman job end-to-end.

        Downloads the source_url, filters every line, enqueues kept lines,
        then acks the job. Aborted builds are acked without download.
        On any error the job is failed via sendWorkException and (when
        configured) a failure message is published to MQTT.
        """
        fields = {}
        num_log_lines = 0
        source_url = ''
        http_session = None
        job = self.gearman_worker.getJob()
        try:
            arguments = json.loads(job.arguments.decode('utf-8'))
            source_url = arguments['source_url']
            event = arguments['event']
            logging.debug("Handling event: " + json.dumps(event))
            # Accept both the plain and the legacy '@'-prefixed key styles.
            fields = event.get('fields') or event.get('@fields')
            tags = event.get('tags') or event.get('@tags')
            if fields['build_status'] != 'ABORTED':
                # Handle events ignoring aborted builds. These builds are
                # discarded by zuul.
                file_obj, http_session = self._open_log_file_url(source_url)

                try:
                    all_filters = []
                    for f in self.filters:
                        logging.debug("Adding filter: %s" % f.name)
                        all_filters.append(f.create(fields))
                    filters = all_filters

                    # Every emitted event carries the build fields + tags;
                    # only "message" differs per line.
                    base_event = {}
                    base_event.update(fields)
                    base_event["tags"] = tags
                    for line in self._retrieve_log_line(file_obj):
                        keep_line = True
                        out_event = base_event.copy()
                        out_event["message"] = line
                        new_filters = []
                        for f in filters:
                            if not keep_line:
                                # A previous filter dropped the line; keep
                                # the filter for later lines but skip work.
                                new_filters.append(f)
                                continue
                            try:
                                keep_line = f.process(out_event)
                                new_filters.append(f)
                            except FilterException:
                                # NOTE: a filter that raises is dropped from
                                # the chain for all subsequent lines.
                                logging.exception("Exception filtering event: "
                                                  "%s" % line.encode("utf-8"))
                        filters = new_filters
                        if keep_line:
                            self.logq.put(out_event)
                            num_log_lines += 1

                    logging.debug("Pushed " + str(num_log_lines) +
                                  " log lines.")
                finally:
                    # Always release subprocesses and the HTTP connection.
                    for f in all_filters:
                        f.close()
                    if http_session:
                        http_session.close()
            job.sendWorkComplete()
            # Only send mqtt events for log files we processed.
            if self.mqtt and num_log_lines:
                msg = json.dumps({
                    'build_uuid': fields.get('build_uuid'),
                    'source_url': source_url,
                    'status': 'success',
                })
                self.mqtt.publish_single(msg, fields.get('project'),
                                         fields.get('build_change'),
                                         'retrieve_logs',
                                         fields.get('build_queue'))
        except Exception as e:
            logging.exception("Exception handling log event.")
            job.sendWorkException(str(e).encode('utf-8'))
            if self.mqtt:
                msg = json.dumps({
                    'build_uuid': fields.get('build_uuid'),
                    'source_url': source_url,
                    'status': 'failure',
                })
                self.mqtt.publish_single(msg, fields.get('project'),
                                         fields.get('build_change'),
                                         'retrieve_logs',
                                         fields.get('build_queue'))

    def _retrieve_log_line(self, file_obj, chunk_size=4096):
        """Yield decoded lines from the HTTP response; no-op when None."""
        if not file_obj:
            return
        # Response.iter_lines automatically decodes 'gzip' and 'deflate'
        # encodings.
        # https://requests.readthedocs.io/en/master/user/quickstart/#raw-response-content
        for line in file_obj.iter_lines(chunk_size, decode_unicode=True):
            yield line

    def _open_log_file_url(self, source_url):
        """Return (streaming Response or None, requests.Session).

        404s are logged and swallowed (returning file_obj=None); other
        HTTP errors are logged and swallowed too; any non-HTTP error is
        logged and re-raised.
        """
        file_obj = None
        try:
            logging.debug("Retrieving: " + source_url)
            # Use a session to persist the HTTP connection across requests
            # while downloading chunks of the log file.
            session = requests.Session()
            session.headers = {'Accept-encoding': 'deflate, gzip'}
            file_obj = session.get(source_url, stream=True)
            file_obj.raise_for_status()
        except requests.HTTPError as e:
            if e.response.status_code == 404:
                logging.info("Unable to retrieve %s: HTTP error 404" %
                             source_url)
            else:
                logging.exception("Unable to get log data.")
        except Exception:
            # Silently drop fatal errors when retrieving logs.
            # TODO (clarkb): Handle these errors.
            # Perhaps simply add a log message to file_obj?
            logging.exception("Unable to retrieve source file.")
            raise

        return file_obj, session
|
|
||||||
|
|
||||||
|
|
||||||
class StdOutLogProcessor(object):
    """Log processor that prints each queued event as a JSON line on
    stdout, optionally pretty-printed."""

    def __init__(self, logq, pretty_print=False):
        self.logq = logq
        self.pretty_print = pretty_print

    def handle_log_event(self):
        """Pop one event from the queue and write it to stdout."""
        event = self.logq.get()
        if self.pretty_print:
            rendered = json.dumps(event, sort_keys=True,
                                  indent=4, separators=(',', ': '))
        else:
            rendered = json.dumps(event)
        print(rendered)
        # Push each log event through to keep logstash up to date.
        sys.stdout.flush()
|
|
||||||
|
|
||||||
|
|
||||||
class INETLogProcessor(object):
    """Base class that ships queued log events over an INET socket.

    Subclasses set `socket_type` (SOCK_STREAM or SOCK_DGRAM). The socket
    is created lazily on the first event and recreated exactly once after
    a send failure; a second consecutive failure propagates.
    """
    socket_type = None

    def __init__(self, logq, host, port):
        self.logq = logq
        self.host = host
        self.port = port
        self.socket = None

    def _connect_socket(self):
        logging.debug("Creating socket.")
        self.socket = socket.socket(socket.AF_INET, self.socket_type)
        self.socket.connect((self.host, self.port))

    def handle_log_event(self):
        """Pop one event from the queue and send it as one JSON line."""
        log = self.logq.get()
        try:
            if self.socket is None:
                self._connect_socket()
            self.socket.sendall((json.dumps(log) + '\n').encode('utf-8'))
        except:
            logging.exception("Exception sending INET event.")
            # Logstash seems to take about a minute to start again. Wait 90
            # seconds before attempting to reconnect. If logstash is not
            # available after 90 seconds we will throw another exception and
            # die.
            semi_busy_wait(90)
            self._connect_socket()
            self.socket.sendall((json.dumps(log) + '\n').encode('utf-8'))
|
|
||||||
|
|
||||||
|
|
||||||
class UDPLogProcessor(INETLogProcessor):
    # Fire-and-forget datagram output.
    socket_type = socket.SOCK_DGRAM
|
|
||||||
|
|
||||||
|
|
||||||
class TCPLogProcessor(INETLogProcessor):
    # Reliable stream output (the usual logstash input).
    socket_type = socket.SOCK_STREAM
|
|
||||||
|
|
||||||
|
|
||||||
class PushMQTT(object):
    """Thin wrapper around paho-mqtt's publish.single for per-job status
    messages.

    Topics are "<base_topic>/<project>/<job_id-or-build_queue>/<action>",
    falling back to "<base_topic>/<project>" when neither job_id nor
    build_queue is available.
    """

    def __init__(self, hostname, base_topic, port=1883, client_id=None,
                 keepalive=60, will=None, auth=None, tls=None, qos=0):
        self.hostname = hostname
        self.port = port
        self.client_id = client_id
        # Bug fix: this was hard-coded to 60, silently ignoring the
        # caller-supplied keepalive argument.
        self.keepalive = keepalive
        self.will = will
        self.auth = auth
        self.tls = tls
        self.qos = qos
        self.base_topic = base_topic

    def _generate_topic(self, project, job_id, action):
        return '/'.join([self.base_topic, project, job_id, action])

    def publish_single(self, msg, project, job_id, action, build_queue=None):
        """Publish one message, preferring job_id over build_queue for the
        topic path."""
        if job_id:
            topic = self._generate_topic(project, job_id, action)
        elif build_queue:
            topic = self._generate_topic(project, build_queue, action)
        else:
            topic = self.base_topic + '/' + project

        publish.single(topic, msg, hostname=self.hostname,
                       port=self.port, client_id=self.client_id,
                       keepalive=self.keepalive, will=self.will,
                       auth=self.auth, tls=self.tls, qos=self.qos)
|
|
||||||
|
|
||||||
|
|
||||||
class Server(object):
    """Top level daemon object for the log-gearman-worker.

    Wires a Gearman worker (LogRetriever thread) to an output processor
    (TCP/UDP/stdout) through a bounded in-process queue, with optional
    CRM114 scoring and MQTT status reporting.
    """

    def __init__(self, config, debuglog):
        # Config init.
        self.config = config
        self.gearman_host = self.config['gearman-host']
        self.gearman_port = self.config['gearman-port']
        self.output_host = self.config['output-host']
        self.output_port = self.config['output-port']
        self.output_mode = self.config['output-mode']
        # MQTT settings are all optional; self.mqtt stays None without a host.
        mqtt_host = self.config.get('mqtt-host')
        mqtt_port = self.config.get('mqtt-port', 1883)
        mqtt_user = self.config.get('mqtt-user')
        mqtt_pass = self.config.get('mqtt-pass')
        mqtt_topic = self.config.get('mqtt-topic', 'gearman-subunit')
        mqtt_ca_certs = self.config.get('mqtt-ca-certs')
        mqtt_certfile = self.config.get('mqtt-certfile')
        mqtt_keyfile = self.config.get('mqtt-keyfile')
        # Python logging output file.
        self.debuglog = debuglog
        self.retriever = None
        # Bounded queue decouples retrieval from output and applies
        # backpressure when the output side stalls.
        self.logqueue = Queue.Queue(16384)
        self.processor = None
        self.filter_factories = []
        # Run the severity filter first so it can filter out chatty
        # logs.
        self.filter_factories.append(OsloSeverityFilterFactory())
        self.filter_factories.append(SystemdSeverityFilterFactory())
        crmscript = self.config.get('crm114-script')
        crmdata = self.config.get('crm114-data')
        if crmscript and crmdata:
            self.filter_factories.append(
                CRM114FilterFactory(crmscript, crmdata))
        # Setup MQTT
        self.mqtt = None
        if mqtt_host:
            auth = None
            if mqtt_user:
                auth = {'username': mqtt_user}
                if mqtt_pass:
                    auth['password'] = mqtt_pass
            tls = None
            if mqtt_ca_certs:
                tls = {'ca_certs': mqtt_ca_certs, 'certfile': mqtt_certfile,
                       'keyfile': mqtt_keyfile}

            self.mqtt = PushMQTT(mqtt_host, mqtt_topic, port=mqtt_port,
                                 auth=auth, tls=tls)

    def setup_logging(self):
        """Configure process-wide logging (DEBUG to file, else CRITICAL)."""
        if self.debuglog:
            logging.basicConfig(format='%(asctime)s %(message)s',
                                filename=self.debuglog, level=logging.DEBUG)
        else:
            # Prevent leakage into the logstash log stream.
            logging.basicConfig(level=logging.CRITICAL)
        logging.debug("Log pusher starting.")

    def wait_for_name_resolution(self, host, port):
        """Block until host resolves, retrying on EAI_AGAIN every 2s."""
        while True:
            try:
                socket.getaddrinfo(host, port)
            except socket.gaierror as e:
                if e.errno == socket.EAI_AGAIN:
                    logging.debug("Temporary failure in name resolution")
                    time.sleep(2)
                    continue
                else:
                    raise
            break

    def setup_retriever(self):
        """Create the Gearman worker thread registered for push-log jobs."""
        hostname = socket.gethostname()
        # NOTE(review): str + bytes here only works under Python 2 (see the
        # python2 shebang); under Python 3 this would raise TypeError.
        gearman_worker = gear.Worker(hostname + b'-pusher')
        self.wait_for_name_resolution(self.gearman_host, self.gearman_port)
        gearman_worker.addServer(self.gearman_host,
                                 self.gearman_port)
        gearman_worker.registerFunction(b'push-log')
        self.retriever = LogRetriever(gearman_worker, self.filter_factories,
                                      self.logqueue, mqtt=self.mqtt)

    def setup_processor(self):
        """Pick the output processor from output-mode (tcp/udp/stdout)."""
        if self.output_mode == "tcp":
            self.processor = TCPLogProcessor(self.logqueue,
                                             self.output_host,
                                             self.output_port)
        elif self.output_mode == "udp":
            self.processor = UDPLogProcessor(self.logqueue,
                                             self.output_host,
                                             self.output_port)
        else:
            # Note this processor will not work if the process is run as a
            # daemon. You must use the --foreground option.
            self.processor = StdOutLogProcessor(self.logqueue)

    def main(self):
        """Start the retriever thread and drain the queue forever.

        Output errors are logged and re-raised, terminating the process.
        """
        self.setup_retriever()
        self.setup_processor()

        # Daemon thread so the process can exit without joining the
        # (infinite-loop) retriever.
        self.retriever.daemon = True
        self.retriever.start()

        while True:
            try:
                self.processor.handle_log_event()
            except:
                logging.exception("Exception processing log event.")
                raise
|
|
||||||
|
|
||||||
|
|
||||||
def main():
    """Parse arguments, load the YAML config and run the log worker.

    Runs in the foreground with --foreground, otherwise daemonizes
    behind a timeout-guarded PID lockfile.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config", required=True,
                        help="Path to yaml config file.")
    parser.add_argument("-d", "--debuglog",
                        help="Enable debug log. "
                             "Specifies file to write log to.")
    parser.add_argument("--foreground", action='store_true',
                        help="Run in the foreground.")
    parser.add_argument("-p", "--pidfile",
                        default="/var/run/jenkins-log-pusher/"
                                "jenkins-log-gearman-worker.pid",
                        help="PID file to lock during daemonization.")
    args = parser.parse_args()

    with open(args.config, 'r') as config_stream:
        # safe_load: the config is plain data; yaml.load without an
        # explicit Loader can construct arbitrary Python objects and is
        # deprecated in newer PyYAML.
        config = yaml.safe_load(config_stream)
    server = Server(config, args.debuglog)

    if args.foreground:
        server.setup_logging()
        server.main()
    else:
        # 10s timeout acquiring the PID lock before giving up.
        pidfile = pidfile_mod.TimeoutPIDLockFile(args.pidfile, 10)
        with daemon.DaemonContext(pidfile=pidfile):
            server.setup_logging()
            server.main()
|
|
@ -1,73 +0,0 @@
|
|||||||
# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
|
|
||||||
# Copyright 2013 OpenStack Foundation
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
# == Class: log_processor::client
#
# Deploys and runs the jenkins-log-client daemon: installs its YAML
# config, SysV init script and defaults file, enables the service, and
# rotates its debug log daily.
#
# === Parameters
#
# [*config_file*]
#   Puppet file source for /etc/logprocessor/jenkins-log-client.yaml.
#
# [*statsd_host*]
#   Optional statsd host, consumed by the defaults-file template.
#
class log_processor::client (
  $config_file,
  $statsd_host = undef,
) {

  file { '/etc/logprocessor/jenkins-log-client.yaml':
    ensure  => present,
    owner   => 'root',
    group   => 'root',
    mode    => '0555',
    source  => $config_file,
    require => File['/etc/logprocessor'],
  }

  # Init script; depends on the client script, config and defaults
  # managed here and elsewhere in this module.
  file { '/etc/init.d/jenkins-log-client':
    ensure  => present,
    owner   => 'root',
    group   => 'root',
    mode    => '0555',
    source  => 'puppet:///modules/log_processor/jenkins-log-client.init',
    require => [
      File['/usr/local/bin/log-gearman-client.py'],
      File['/etc/logprocessor/jenkins-log-client.yaml'],
      File['/etc/default/jenkins-log-client'],
    ],
  }

  # Environment defaults (templated so statsd_host can be injected).
  file { '/etc/default/jenkins-log-client':
    ensure  => present,
    owner   => 'root',
    group   => 'root',
    mode    => '0444',
    content => template('log_processor/jenkins-log-client.default.erb'),
  }

  # Restart the daemon whenever its config changes.
  service { 'jenkins-log-client':
    enable     => true,
    hasrestart => true,
    subscribe  => File['/etc/logprocessor/jenkins-log-client.yaml'],
    require    => File['/etc/init.d/jenkins-log-client'],
  }

  include logrotate
  logrotate::file { 'log-client-debug.log':
    log     => '/var/log/logprocessor/log-client-debug.log',
    options => [
      'compress',
      'copytruncate',
      'missingok',
      'rotate 7',
      'daily',
      'notifempty',
    ],
    require => Service['jenkins-log-client'],
  }
}
|
|
@ -1,85 +0,0 @@
|
|||||||
# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
|
|
||||||
# Copyright 2017 OpenStack Foundation
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
# == Class: log_processor::geard
|
|
||||||
#
|
|
||||||
# Run geard as system service
|
|
||||||
class log_processor::geard (
|
|
||||||
$statsd_host = undef,
|
|
||||||
$geard_port = '4730',
|
|
||||||
) {
|
|
||||||
file { '/var/log/geard':
|
|
||||||
ensure => directory,
|
|
||||||
owner => 'logprocessor',
|
|
||||||
group => 'logprocessor',
|
|
||||||
mode => '0755',
|
|
||||||
require => User['logprocessor'],
|
|
||||||
}
|
|
||||||
|
|
||||||
file { '/etc/init.d/geard':
|
|
||||||
ensure => present,
|
|
||||||
owner => 'root',
|
|
||||||
group => 'root',
|
|
||||||
mode => '0555',
|
|
||||||
source => 'puppet:///modules/log_processor/geard.init',
|
|
||||||
require => [
|
|
||||||
Package['gear'],
|
|
||||||
File['/etc/default/geard'],
|
|
||||||
],
|
|
||||||
}
|
|
||||||
|
|
||||||
file { '/etc/default/geard':
|
|
||||||
ensure => present,
|
|
||||||
owner => 'root',
|
|
||||||
group => 'root',
|
|
||||||
mode => '0444',
|
|
||||||
content => template('log_processor/geard.default.erb'),
|
|
||||||
}
|
|
||||||
|
|
||||||
if ($::operatingsystem == 'Ubuntu') and ($::operatingsystemrelease >= '16.04') {
|
|
||||||
# This is a hack to make sure that systemd is aware of the new service
|
|
||||||
# before we attempt to start it.
|
|
||||||
exec { 'geard-systemd-daemon-reload':
|
|
||||||
command => '/bin/systemctl daemon-reload',
|
|
||||||
before => Service['geard'],
|
|
||||||
subscribe => File['/etc/init.d/geard'],
|
|
||||||
refreshonly => true,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
service { 'geard':
|
|
||||||
enable => true,
|
|
||||||
hasrestart => true,
|
|
||||||
subscribe => File['/etc/default/geard'],
|
|
||||||
require => [
|
|
||||||
File['/etc/init.d/geard'],
|
|
||||||
File['/var/log/geard'],
|
|
||||||
],
|
|
||||||
}
|
|
||||||
|
|
||||||
include logrotate
|
|
||||||
logrotate::file { 'rotate-geard.log':
|
|
||||||
log => '/var/log/geard/geard.log',
|
|
||||||
options => [
|
|
||||||
'compress',
|
|
||||||
'copytruncate',
|
|
||||||
'missingok',
|
|
||||||
'rotate 7',
|
|
||||||
'daily',
|
|
||||||
'notifempty',
|
|
||||||
],
|
|
||||||
require => Service['geard'],
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,148 +0,0 @@
|
|||||||
# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
|
|
||||||
# Copyright 2013 OpenStack Foundation
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
# == Class: log_processor
|
|
||||||
#
|
|
||||||
class log_processor (
|
|
||||||
) {
|
|
||||||
if ! defined(Package['python-daemon']) {
|
|
||||||
package { 'python-daemon':
|
|
||||||
ensure => present,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
group { 'logprocessor':
|
|
||||||
ensure => present,
|
|
||||||
}
|
|
||||||
user { 'logprocessor':
|
|
||||||
ensure => present,
|
|
||||||
comment => 'Log Processor User',
|
|
||||||
home => '/etc/logprocessor',
|
|
||||||
gid => 'logprocessor',
|
|
||||||
shell => '/bin/bash',
|
|
||||||
membership => 'minimum',
|
|
||||||
require => Group['logprocessor'],
|
|
||||||
}
|
|
||||||
|
|
||||||
file { '/etc/logprocessor':
|
|
||||||
ensure => directory,
|
|
||||||
owner => 'logprocessor',
|
|
||||||
group => 'logprocessor',
|
|
||||||
mode => '0755',
|
|
||||||
require => User['logprocessor'],
|
|
||||||
}
|
|
||||||
|
|
||||||
file { '/var/log/logprocessor':
|
|
||||||
ensure => directory,
|
|
||||||
owner => 'logprocessor',
|
|
||||||
group => 'logprocessor',
|
|
||||||
mode => '0755',
|
|
||||||
require => User['logprocessor'],
|
|
||||||
}
|
|
||||||
|
|
||||||
package { 'python-zmq':
|
|
||||||
ensure => present,
|
|
||||||
}
|
|
||||||
|
|
||||||
package { 'python-yaml':
|
|
||||||
ensure => present,
|
|
||||||
}
|
|
||||||
|
|
||||||
package { 'crm114':
|
|
||||||
ensure => present,
|
|
||||||
}
|
|
||||||
|
|
||||||
include pip
|
|
||||||
package { 'gear':
|
|
||||||
ensure => latest,
|
|
||||||
provider => openstack_pip,
|
|
||||||
require => Class['pip'],
|
|
||||||
}
|
|
||||||
package { 'requests':
|
|
||||||
ensure => latest,
|
|
||||||
provider => openstack_pip,
|
|
||||||
require => Class['pip'],
|
|
||||||
}
|
|
||||||
|
|
||||||
if ! defined(Package['statsd']) {
|
|
||||||
package { 'statsd':
|
|
||||||
# NOTE(cmurphy) If this is not pinned, the openstack_pip provider will
|
|
||||||
# attempt to install latest and conflict with the <3 cap from
|
|
||||||
# os-performance-tools. Unpin this when os-performance-tools raises its
|
|
||||||
# cap.
|
|
||||||
# NOTE (clarkb) we also install it here because geard can report stats
|
|
||||||
# with statsd so need it even if subunit2sql is not used.
|
|
||||||
ensure => '2.1.2',
|
|
||||||
provider => openstack_pip,
|
|
||||||
require => Class['pip']
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# Temporarily pin paho-mqtt to 1.2.3 since 1.3.0 won't support TLS on
|
|
||||||
# Trusty's Python 2.7.
|
|
||||||
if ! defined(Package['paho-mqtt']) {
|
|
||||||
package { 'paho-mqtt':
|
|
||||||
ensure => '1.2.3',
|
|
||||||
provider => openstack_pip,
|
|
||||||
require => Class['pip'],
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
file { '/var/lib/crm114':
|
|
||||||
ensure => directory,
|
|
||||||
owner => 'logprocessor',
|
|
||||||
group => 'logprocessor',
|
|
||||||
require => User['logprocessor'],
|
|
||||||
}
|
|
||||||
|
|
||||||
file { '/usr/local/bin/classify-log.crm':
|
|
||||||
ensure => present,
|
|
||||||
owner => 'root',
|
|
||||||
group => 'root',
|
|
||||||
mode => '0755',
|
|
||||||
source => 'puppet:///modules/log_processor/classify-log.crm',
|
|
||||||
require => [
|
|
||||||
Package['crm114'],
|
|
||||||
],
|
|
||||||
}
|
|
||||||
|
|
||||||
file { '/usr/local/bin/log-gearman-client.py':
|
|
||||||
ensure => present,
|
|
||||||
owner => 'root',
|
|
||||||
group => 'root',
|
|
||||||
mode => '0755',
|
|
||||||
source => 'puppet:///modules/log_processor/log-gearman-client.py',
|
|
||||||
require => [
|
|
||||||
Package['python-daemon'],
|
|
||||||
Package['python-zmq'],
|
|
||||||
Package['python-yaml'],
|
|
||||||
Package['gear'],
|
|
||||||
],
|
|
||||||
}
|
|
||||||
|
|
||||||
file { '/usr/local/bin/log-gearman-worker.py':
|
|
||||||
ensure => present,
|
|
||||||
owner => 'root',
|
|
||||||
group => 'root',
|
|
||||||
mode => '0755',
|
|
||||||
source => 'puppet:///modules/log_processor/log-gearman-worker.py',
|
|
||||||
require => [
|
|
||||||
Package['python-daemon'],
|
|
||||||
Package['python-zmq'],
|
|
||||||
Package['python-yaml'],
|
|
||||||
Package['gear'],
|
|
||||||
],
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,75 +0,0 @@
|
|||||||
# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
|
|
||||||
# Copyright 2013 OpenStack Foundation
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
# == Class: log_processor::worker
|
|
||||||
#
|
|
||||||
define log_processor::worker (
|
|
||||||
$config_file,
|
|
||||||
) {
|
|
||||||
$suffix = "-${name}"
|
|
||||||
|
|
||||||
file { "/etc/logprocessor/jenkins-log-worker${suffix}.yaml":
|
|
||||||
ensure => present,
|
|
||||||
owner => 'root',
|
|
||||||
group => 'root',
|
|
||||||
mode => '0555',
|
|
||||||
source => $config_file,
|
|
||||||
require => User['logprocessor'],
|
|
||||||
}
|
|
||||||
|
|
||||||
file { "/etc/init.d/jenkins-log-worker${suffix}":
|
|
||||||
ensure => present,
|
|
||||||
owner => 'root',
|
|
||||||
group => 'root',
|
|
||||||
mode => '0555',
|
|
||||||
content => template('log_processor/jenkins-log-worker.init.erb'),
|
|
||||||
require => [
|
|
||||||
File['/usr/local/bin/log-gearman-worker.py'],
|
|
||||||
File["/etc/logprocessor/jenkins-log-worker${suffix}.yaml"],
|
|
||||||
],
|
|
||||||
}
|
|
||||||
|
|
||||||
if ($::operatingsystem == 'Ubuntu') and ($::operatingsystemrelease >= '16.04') {
|
|
||||||
# This is a hack to make sure that systemd is aware of the new service
|
|
||||||
# before we attempt to start it.
|
|
||||||
exec { "jenkins-log-worker${suffix}-systemd-daemon-reload":
|
|
||||||
command => '/bin/systemctl daemon-reload',
|
|
||||||
before => Service["jenkins-log-worker${suffix}"],
|
|
||||||
subscribe => File["/etc/init.d/jenkins-log-worker${suffix}"],
|
|
||||||
refreshonly => true,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
service { "jenkins-log-worker${suffix}":
|
|
||||||
enable => true,
|
|
||||||
hasrestart => true,
|
|
||||||
subscribe => File["/etc/logprocessor/jenkins-log-worker${suffix}.yaml"],
|
|
||||||
require => File["/etc/init.d/jenkins-log-worker${suffix}"],
|
|
||||||
}
|
|
||||||
|
|
||||||
include logrotate
|
|
||||||
logrotate::file { "log-worker${suffix}-debug.log":
|
|
||||||
log => "/var/log/logprocessor/log-worker${suffix}-debug.log",
|
|
||||||
options => [
|
|
||||||
'compress',
|
|
||||||
'copytruncate',
|
|
||||||
'missingok',
|
|
||||||
'rotate 7',
|
|
||||||
'daily',
|
|
||||||
'notifempty',
|
|
||||||
],
|
|
||||||
require => Service["jenkins-log-worker${suffix}"],
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,14 +0,0 @@
|
|||||||
{
|
|
||||||
"name": "openstackinfra-log_processor",
|
|
||||||
"version": "0.0.1",
|
|
||||||
"author": "OpenStack CI",
|
|
||||||
"summary": "Puppet module for Log Processor",
|
|
||||||
"license": "Apache 2.0",
|
|
||||||
"source": "https://git.openstack.org/openstack-infra/puppet-log_processor.git",
|
|
||||||
"project_page": "http://docs.openstack.org/infra/system-config/",
|
|
||||||
"issues_url": "https://storyboard.openstack.org/#!/project/801",
|
|
||||||
"dependencies": [
|
|
||||||
{"name":"openstackci/pip","version_requirement":">= 0.0.1"},
|
|
||||||
{"name":"openstackci/logrotate","version_requirement":">= 0.0.1"}
|
|
||||||
]
|
|
||||||
}
|
|
@ -1,38 +0,0 @@
|
|||||||
require 'puppet-openstack_infra_spec_helper/spec_helper_acceptance'
|
|
||||||
|
|
||||||
describe 'log_processor', if: os[:family] == 'ubuntu' do
|
|
||||||
|
|
||||||
def pp_path
|
|
||||||
base_path = File.dirname(__FILE__)
|
|
||||||
File.join(base_path, 'fixtures')
|
|
||||||
end
|
|
||||||
|
|
||||||
def puppet_manifest
|
|
||||||
manifest_path = File.join(pp_path, 'default.pp')
|
|
||||||
File.read(manifest_path)
|
|
||||||
end
|
|
||||||
|
|
||||||
def postconditions_puppet_manifest
|
|
||||||
manifest_path = File.join(pp_path, 'postconditions.pp')
|
|
||||||
File.read(manifest_path)
|
|
||||||
end
|
|
||||||
|
|
||||||
it 'should work with no errors' do
|
|
||||||
apply_manifest(puppet_manifest, catch_failures: true)
|
|
||||||
end
|
|
||||||
|
|
||||||
it 'should be idempotent' do
|
|
||||||
apply_manifest(puppet_manifest, catch_changes: true)
|
|
||||||
end
|
|
||||||
|
|
||||||
it 'should start' do
|
|
||||||
apply_manifest(postconditions_puppet_manifest, catch_failures: true)
|
|
||||||
end
|
|
||||||
|
|
||||||
['jenkins-log-client', 'jenkins-log-worker-A'].each do |service|
|
|
||||||
describe service(service) do
|
|
||||||
it { should be_running }
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
end
|
|
@ -1,49 +0,0 @@
|
|||||||
$worker_config = 'gearman-host: localhost
|
|
||||||
gearman-port: 4730
|
|
||||||
output-host: localhost
|
|
||||||
output-port: 9999
|
|
||||||
output-mode: tcp
|
|
||||||
crm114-script: /usr/local/bin/classify-log.crm
|
|
||||||
crm114-data: /var/lib/crm114
|
|
||||||
mqtt-host: firehose.openstack.org
|
|
||||||
mqtt-port: 8883
|
|
||||||
mqtt-topic: gearman-logstash/localhost
|
|
||||||
mqtt-user: infra
|
|
||||||
mqtt-pass: mqtt_password
|
|
||||||
mqtt-ca-certs: /etc/logstash/mqtt-root-CA.pem.crt'
|
|
||||||
|
|
||||||
$client_config = 'source-url: http://localhost
|
|
||||||
zmq-publishers: []
|
|
||||||
subunit-files:
|
|
||||||
- name: logs/testrepository.subunit
|
|
||||||
build-queue-filter: gate
|
|
||||||
source-files:
|
|
||||||
- name: console.html
|
|
||||||
tags:
|
|
||||||
- console'
|
|
||||||
|
|
||||||
file { '/tmp/jenkins-log-client.yaml':
|
|
||||||
ensure => present,
|
|
||||||
content => $client_config,
|
|
||||||
}
|
|
||||||
|
|
||||||
file { '/etc/logprocessor/worker.yaml':
|
|
||||||
ensure => present,
|
|
||||||
owner => 'root',
|
|
||||||
group => 'root',
|
|
||||||
mode => '0644',
|
|
||||||
content => $worker_config,
|
|
||||||
require => Class['::log_processor'],
|
|
||||||
}
|
|
||||||
|
|
||||||
class { 'log_processor': }
|
|
||||||
|
|
||||||
class { 'log_processor::client':
|
|
||||||
config_file => '/tmp/jenkins-log-client.yaml',
|
|
||||||
statsd_host => 'graphite.openstack.org',
|
|
||||||
}
|
|
||||||
|
|
||||||
log_processor::worker { 'A':
|
|
||||||
config_file => '/etc/logprocessor/worker.yaml',
|
|
||||||
require => File['/etc/logprocessor/worker.yaml'],
|
|
||||||
}
|
|
@ -1,7 +0,0 @@
|
|||||||
service { 'jenkins-log-client':
|
|
||||||
ensure => running,
|
|
||||||
}
|
|
||||||
|
|
||||||
service { 'jenkins-log-worker-A':
|
|
||||||
ensure => running,
|
|
||||||
}
|
|
@ -1,11 +0,0 @@
|
|||||||
HOSTS:
|
|
||||||
ubuntu-server-1404-x64:
|
|
||||||
roles:
|
|
||||||
- master
|
|
||||||
platform: ubuntu-14.04-amd64
|
|
||||||
box: puppetlabs/ubuntu-14.04-64-nocm
|
|
||||||
box_url: https://vagrantcloud.com/puppetlabs/ubuntu-14.04-64-nocm
|
|
||||||
hypervisor: vagrant
|
|
||||||
CONFIG:
|
|
||||||
log_level: debug
|
|
||||||
type: git
|
|
@ -1,10 +0,0 @@
|
|||||||
HOSTS:
|
|
||||||
centos-70-x64:
|
|
||||||
roles:
|
|
||||||
- master
|
|
||||||
platform: el-7-x86_64
|
|
||||||
hypervisor: none
|
|
||||||
ip: 127.0.0.1
|
|
||||||
CONFIG:
|
|
||||||
type: foss
|
|
||||||
set_env: false
|
|
@ -1,10 +0,0 @@
|
|||||||
HOSTS:
|
|
||||||
ubuntu-14.04-amd64:
|
|
||||||
roles:
|
|
||||||
- master
|
|
||||||
platform: ubuntu-14.04-amd64
|
|
||||||
hypervisor: none
|
|
||||||
ip: 127.0.0.1
|
|
||||||
CONFIG:
|
|
||||||
type: foss
|
|
||||||
set_env: false
|
|
@ -1,10 +0,0 @@
|
|||||||
HOSTS:
|
|
||||||
ubuntu-16.04-amd64:
|
|
||||||
roles:
|
|
||||||
- master
|
|
||||||
platform: ubuntu-16.04-amd64
|
|
||||||
hypervisor: none
|
|
||||||
ip: 127.0.0.1
|
|
||||||
CONFIG:
|
|
||||||
type: foss
|
|
||||||
set_env: false
|
|
@ -1,7 +0,0 @@
|
|||||||
<% if @statsd_host != nil %>
|
|
||||||
export STATSD_HOST=<%= @statsd_host %>
|
|
||||||
export STATSD_PORT=8125
|
|
||||||
export STATSD_PREFIX="logstash.geard"
|
|
||||||
<% end %>
|
|
||||||
export GEARD_LISTEN_ADDRESS=::
|
|
||||||
export GEARD_PORT=<%= @geard_port %>
|
|
@ -1,5 +0,0 @@
|
|||||||
<% if @statsd_host != nil %>
|
|
||||||
export STATSD_HOST=<%= @statsd_host %>
|
|
||||||
export STATSD_PORT=8125
|
|
||||||
export STATSD_PREFIX="logstash.geard"
|
|
||||||
<% end %>
|
|
@ -1,158 +0,0 @@
|
|||||||
#! /bin/sh
|
|
||||||
### BEGIN INIT INFO
|
|
||||||
# Provides: jenkins-log-worker<%= @suffix %>
|
|
||||||
# Required-Start: $remote_fs $syslog $named $network
|
|
||||||
# Required-Stop: $remote_fs $syslog $named $network
|
|
||||||
# Default-Start: 2 3 4 5
|
|
||||||
# Default-Stop: 0 1 6
|
|
||||||
# Short-Description: Jenkins Log Worker
|
|
||||||
# Description: Service to push Jenkins logs into logstash.
|
|
||||||
### END INIT INFO
|
|
||||||
|
|
||||||
# Do NOT "set -e"
|
|
||||||
|
|
||||||
# PATH should only include /usr/* if it runs after the mountnfs.sh script
|
|
||||||
PATH=/sbin:/usr/sbin:/bin:/usr/bin
|
|
||||||
DESC="Jenkins Log Worker"
|
|
||||||
NAME=jenkins-log-worker<%= @suffix %>
|
|
||||||
DAEMON=/usr/local/bin/log-gearman-worker.py
|
|
||||||
PIDFILE=/var/run/$NAME/$NAME.pid
|
|
||||||
DAEMON_ARGS="-c /etc/logprocessor/jenkins-log-worker<%= @suffix %>.yaml -d /var/log/logprocessor/log-worker<%= @suffix %>-debug.log -p $PIDFILE"
|
|
||||||
SCRIPTNAME=/etc/init.d/$NAME
|
|
||||||
USER=logprocessor
|
|
||||||
|
|
||||||
# Exit if the package is not installed
|
|
||||||
[ -x "$DAEMON" ] || exit 0
|
|
||||||
|
|
||||||
# Read configuration variable file if it is present
|
|
||||||
[ -r /etc/default/$NAME ] && . /etc/default/$NAME
|
|
||||||
|
|
||||||
# Load the VERBOSE setting and other rcS variables
|
|
||||||
. /lib/init/vars.sh
|
|
||||||
|
|
||||||
# Define LSB log_* functions.
|
|
||||||
# Depend on lsb-base (>= 3.0-6) to ensure that this file is present.
|
|
||||||
. /lib/lsb/init-functions
|
|
||||||
|
|
||||||
#
|
|
||||||
# Function that starts the daemon/service
|
|
||||||
#
|
|
||||||
do_start()
|
|
||||||
{
|
|
||||||
# Return
|
|
||||||
# 0 if daemon has been started
|
|
||||||
# 1 if daemon was already running
|
|
||||||
# 2 if daemon could not be started
|
|
||||||
|
|
||||||
mkdir -p /var/run/$NAME
|
|
||||||
chown $USER /var/run/$NAME
|
|
||||||
start-stop-daemon --start --quiet --pidfile $PIDFILE -c $USER --exec $DAEMON --test > /dev/null \
|
|
||||||
|| return 1
|
|
||||||
start-stop-daemon --start --quiet --pidfile $PIDFILE -c $USER --exec $DAEMON -- \
|
|
||||||
$DAEMON_ARGS \
|
|
||||||
|| return 2
|
|
||||||
# Add code here, if necessary, that waits for the process to be ready
|
|
||||||
# to handle requests from services started subsequently which depend
|
|
||||||
# on this one. As a last resort, sleep for some time.
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Function that stops the daemon/service
|
|
||||||
#
|
|
||||||
do_stop()
|
|
||||||
{
|
|
||||||
# Return
|
|
||||||
# 0 if daemon has been stopped
|
|
||||||
# 1 if daemon was already stopped
|
|
||||||
# 2 if daemon could not be stopped
|
|
||||||
# other if a failure occurred
|
|
||||||
start-stop-daemon --stop --signal 9 --pidfile $PIDFILE
|
|
||||||
RETVAL="$?"
|
|
||||||
[ "$RETVAL" = 2 ] && return 2
|
|
||||||
rm -f /var/run/$NAME/*
|
|
||||||
return "$RETVAL"
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Function that stops the daemon/service
|
|
||||||
#
|
|
||||||
#do_graceful_stop()
|
|
||||||
#{
|
|
||||||
# PID=`cat $PIDFILE`
|
|
||||||
# kill -USR1 $PID
|
|
||||||
#
|
|
||||||
# # wait until really stopped
|
|
||||||
# if [ -n "${PID:-}" ]; then
|
|
||||||
# i=0
|
|
||||||
# while kill -0 "${PID:-}" 2> /dev/null; do
|
|
||||||
# if [ $i -eq '0' ]; then
|
|
||||||
# echo -n " ... waiting "
|
|
||||||
# else
|
|
||||||
# echo -n "."
|
|
||||||
# fi
|
|
||||||
# i=$(($i+1))
|
|
||||||
# sleep 1
|
|
||||||
# done
|
|
||||||
# fi
|
|
||||||
#
|
|
||||||
# rm -f /var/run/$NAME/*
|
|
||||||
#}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Function that sends a SIGHUP to the daemon/service
|
|
||||||
#
|
|
||||||
#do_reload() {
|
|
||||||
# #
|
|
||||||
# # If the daemon can reload its configuration without
|
|
||||||
# # restarting (for example, when it is sent a SIGHUP),
|
|
||||||
# # then implement that here.
|
|
||||||
# #
|
|
||||||
# start-stop-daemon --stop --signal 1 --quiet --pidfile $PIDFILE --name zuul-server
|
|
||||||
# return 0
|
|
||||||
#}
|
|
||||||
|
|
||||||
case "$1" in
|
|
||||||
start)
|
|
||||||
[ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
|
|
||||||
do_start
|
|
||||||
case "$?" in
|
|
||||||
0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
|
|
||||||
2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
|
|
||||||
esac
|
|
||||||
;;
|
|
||||||
stop)
|
|
||||||
[ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
|
|
||||||
do_stop
|
|
||||||
case "$?" in
|
|
||||||
0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
|
|
||||||
2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
|
|
||||||
esac
|
|
||||||
;;
|
|
||||||
status)
|
|
||||||
status_of_proc -p "$PIDFILE" "$DAEMON" "$NAME" && exit 0 || exit $?
|
|
||||||
;;
|
|
||||||
# reload)
|
|
||||||
# #
|
|
||||||
# # If do_reload() is not implemented then leave this commented out
|
|
||||||
# # and leave 'force-reload' as an alias for 'restart'.
|
|
||||||
# #
|
|
||||||
# log_daemon_msg "Reloading $DESC" "$NAME"
|
|
||||||
# do_reload
|
|
||||||
# log_end_msg $?
|
|
||||||
# ;;
|
|
||||||
restart|force-reload)
|
|
||||||
#
|
|
||||||
# If the "reload" option is implemented then remove the
|
|
||||||
# 'force-reload' alias
|
|
||||||
#
|
|
||||||
log_daemon_msg "Restarting $DESC" "$NAME"
|
|
||||||
do_stop
|
|
||||||
do_start
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
|
|
||||||
exit 3
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
:
|
|
Loading…
Reference in New Issue
Block a user