Switch to InfluxDB 0.9 mode

This change removes all code which supports InfluxDB 0.8.

Change-Id: Ic136094f37b2f6ebfa1fe383460c4622676e2bfd
Implements: blueprint upgrade-influxdb-grafana
This commit is contained in:
Simon Pasquier 2015-08-17 11:09:42 +02:00
parent 254eda470b
commit fd78371015
22 changed files with 30 additions and 1479 deletions

View File

@ -22,7 +22,7 @@ Requirements
| ------------------------------ | ------------------------------------------------------------- |
| Mirantis OpenStack compatibility | 6.1 or higher |
| A running Elasticsearch server | 1.4 or higher, the RESTful API must be enabled over port 9200 |
| A running InfluxDB server | 0.8.8, the RESTful API must be enabled over port 8086 |
| A running InfluxDB server | 0.9.2, the RESTful API must be enabled over port 8086 |
Limitations
@ -136,6 +136,7 @@ Release Notes
**0.8.0**
* Monitoring of the LMA collector.
* Support of InfluxDB 0.9.2 and higher.
**0.7.0**

View File

@ -144,11 +144,7 @@ case $influxdb_mode {
if $is_base_os {
if $current_node_name == $influxdb_node_name and $influxdb_mode == 'local' {
if $lma_collector['influxdb_legacy'] {
$processes = ['hekad', 'collectd', 'influxdb']
} else {
$processes = ['hekad', 'collectd', 'influxd', 'grafana-server']
}
$processes = ['hekad', 'collectd', 'influxd', 'grafana-server']
} else {
$processes = ['hekad', 'collectd']
}
@ -173,44 +169,23 @@ case $influxdb_mode {
$collectd_read_threads = 5
}
if $lma_collector['influxdb_legacy'] {
class { 'lma_collector::collectd::base_legacy':
processes => $processes,
process_matches => $process_matches,
read_threads => $collectd_read_threads,
require => Class['lma_collector'],
}
class { 'lma_collector::collectd::base':
processes => $processes,
process_matches => $process_matches,
read_threads => $collectd_read_threads,
require => Class['lma_collector'],
}
class { 'lma_collector::influxdb_legacy':
server => $influxdb_server,
database => $influxdb_database,
user => $influxdb_user,
password => $influxdb_password,
require => Class['lma_collector'],
}
class { 'lma_collector::influxdb':
server => $influxdb_server,
database => $influxdb_database,
user => $influxdb_user,
password => $influxdb_password,
require => Class['lma_collector'],
}
class { 'lma_collector::metrics::heka_monitoring_legacy':
require => Class['lma_collector']
}
} else {
class { 'lma_collector::collectd::base':
processes => $processes,
process_matches => $process_matches,
read_threads => $collectd_read_threads,
require => Class['lma_collector'],
}
class { 'lma_collector::influxdb':
server => $influxdb_server,
database => $influxdb_database,
user => $influxdb_user,
password => $influxdb_password,
require => Class['lma_collector'],
}
class { 'lma_collector::metrics::heka_monitoring':
require => Class['lma_collector']
}
class { 'lma_collector::metrics::heka_monitoring':
require => Class['lma_collector']
}
}

View File

@ -118,7 +118,7 @@ if $lma_collector['influxdb_mode'] != 'disabled' {
password => $nova['db_password'],
report_interval => 60,
downtime_factor => 2,
require => Class['lma_collector::collectd::dbi'],
require => Class['lma_collector::collectd::dbi'],
}
lma_collector::collectd::dbi_mysql_status{ 'mysql_status':
@ -134,7 +134,7 @@ if $lma_collector['influxdb_mode'] != 'disabled' {
password => $cinder['db_password'],
report_interval => 60,
downtime_factor => 2,
require => Class['lma_collector::collectd::dbi'],
require => Class['lma_collector::collectd::dbi'],
}
lma_collector::collectd::dbi_services { 'neutron':
@ -143,41 +143,24 @@ if $lma_collector['influxdb_mode'] != 'disabled' {
password => $neutron['database']['passwd'],
report_interval => 15,
downtime_factor => 4,
require => Class['lma_collector::collectd::dbi'],
require => Class['lma_collector::collectd::dbi'],
}
if $lma_collector['influxdb_legacy'] {
class { 'lma_collector::logs::metrics_legacy': }
class { 'lma_collector::logs::metrics': }
# Notifications are always collected, let's extract metrics from there
class { 'lma_collector::notifications::metrics_legacy': }
} else {
class { 'lma_collector::logs::metrics': }
# Notifications are always collected, let's extract metrics from there
class { 'lma_collector::notifications::metrics': }
}
# Notifications are always collected, let's extract metrics from there
class { 'lma_collector::notifications::metrics': }
# Enable Apache status module
class { 'lma_collector::mod_status': }
# Enable service heartbeat metrics
if $lma_collector['influxdb_legacy'] {
class { 'lma_collector::metrics::service_heartbeat_legacy':
services => ['mysql', 'rabbitmq', 'haproxy', 'memcached', 'apache']
}
} else {
class { 'lma_collector::metrics::service_heartbeat':
services => ['mysql', 'rabbitmq', 'haproxy', 'memcached', 'apache']
}
class { 'lma_collector::metrics::service_heartbeat':
services => ['mysql', 'rabbitmq', 'haproxy', 'memcached', 'apache']
}
# Service status metrics and annotations
if $lma_collector['influxdb_legacy'] {
class { 'lma_collector::metrics::service_status_legacy': }
} else {
class { 'lma_collector::metrics::service_status': }
}
class { 'lma_collector::metrics::service_status': }
}
@ -223,9 +206,9 @@ if $alerting_mode != 'disabled' {
if $use_nagios {
class { 'lma_collector::nagios':
openstack_deployment_name => $deployment_id,
url => $nagios_url,
user => $nagios_user,
password => $nagios_password,
url => $nagios_url,
user => $nagios_user,
password => $nagios_password,
}
}
}

View File

@ -1,172 +0,0 @@
-- Copyright 2015 Mirantis, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
require "string"
require "cjson"
local utils = require 'lma_utils'
local sep = '.'
local processes_map = {
ps_code = 'memory.code',
ps_count = 'count',
ps_cputime = 'cputime',
ps_data = 'memory.data',
ps_disk_octets = 'disk.bytes',
ps_disk_ops = 'disk.ops',
ps_pagefaults = 'pagefaults',
ps_rss = 'memory.rss',
ps_stacksize = 'stacksize',
ps_vm = 'memory.virtual',
}
-- Decode one collectd write_http JSON payload (an array of samples) into
-- individual Heka "metric" messages, one message per value in each sample.
-- Returns -1 when the payload is not valid JSON, 0 otherwise.
function process_message ()
    local ok, samples = pcall(cjson.decode, read_message("Payload"))
    if not ok then
        -- TODO: log error
        return -1
    end
    for _, sample in ipairs(samples) do
        -- Base name: "<type>" or "<type>.<type_instance>".
        local metric_prefix = sample['type']
        if sample['type_instance'] ~= "" then metric_prefix = metric_prefix .. sep .. sample['type_instance'] end
        local metric_source = sample['plugin']
        -- A collectd sample may carry several values; 'dsnames'/'dstypes'
        -- are parallel arrays describing each one.
        for i, value in ipairs(sample['values']) do
            local metric_name = metric_prefix
            if sample['dsnames'][i] ~= "value" then metric_name = metric_name .. sep .. sample['dsnames'][i] end
            local msg = {
                Timestamp = sample['time'] * 1e9, -- Heka expects nanoseconds
                Hostname = sample['host'],
                Logger = "collectd",
                Payload = cjson.encode(sample),
                Severity = 6,
                Type = "metric",
                Fields = {
                    hostname = sample['host'],
                    interval = sample['interval'],
                    source = metric_source,
                    type = sample['dstypes'][i],
                    value = value,
                }
            }
            -- Normalize metric name, unfortunately collectd plugins aren't
            -- always consistent on metric namespaces so we need a few if/else
            -- statements to cover all cases.
            if metric_source == 'df' then
                -- Filesystem metrics: plugin_instance is the mount point
                -- with '/' flattened to '-' (undone below for 'device').
                local mount = sample['plugin_instance']
                local entity
                if sample['type'] == 'df_inodes' then
                    entity = 'inodes'
                else -- sample['type'] == 'df_complex'
                    entity = 'space'
                end
                msg['Fields']['name'] = 'fs' .. sep .. mount .. sep .. entity .. sep .. sample['type_instance']
                msg['Fields']['device'] = '/' .. string.gsub(mount, '-', '/')
            elseif metric_source == 'disk' then
                msg['Fields']['device'] = sample['plugin_instance']
                msg['Fields']['name'] = 'disk' .. sep .. sample['plugin_instance'] .. sep .. metric_name
            elseif metric_source == 'cpu' then
                msg['Fields']['device'] = 'cpu' .. sample['plugin_instance']
                msg['Fields']['name'] = 'cpu' .. sep .. sample['plugin_instance'] .. sep .. sample['type_instance']
            elseif metric_source == 'interface' then
                msg['Fields']['device'] = sample['plugin_instance']
                msg['Fields']['name'] = 'net' .. sep .. sample['plugin_instance'] .. sep .. sample['type'] .. sep .. sample['dsnames'][i]
            elseif metric_source == 'processes' then
                if processes_map[sample['type']] then
                    -- Per-process metrics for the explicitly tracked LMA
                    -- component processes (see processes_map above).
                    msg['Fields']['name'] = 'lma_components' .. sep .. sample['plugin_instance'] .. sep .. processes_map[sample['type']]
                    if sample['dsnames'][i] ~= 'value' then
                        msg['Fields']['name'] = msg['Fields']['name'] .. sep .. sample['dsnames'][i]
                    end
                    -- For ps_cputime, convert it to percentage. We have number of microseconds within one second.
                    if sample['type'] == 'ps_cputime' then
                        msg['Fields']['value'] = value / 10000
                    end
                else
                    -- System-wide process counters.
                    msg['Fields']['name'] = 'processes'
                    if sample['type'] == 'ps_state' then
                        msg['Fields']['name'] = msg['Fields']['name'] .. sep .. 'state' .. sep .. sample['type_instance']
                    else
                        msg['Fields']['name'] = msg['Fields']['name'] .. sep .. sample['type']
                    end
                end
            elseif metric_source == 'dbi' and sample['plugin_instance'] == 'mysql_status' then
                msg['Fields']['name'] = 'mysql' .. sep .. sample['type_instance']
            elseif metric_source == 'mysql' then
                if sample['type'] == 'threads' then
                    msg['Fields']['name'] = 'mysql_' .. metric_name
                else
                    msg['Fields']['name'] = metric_name
                end
            elseif metric_source == 'check_openstack_api' then
                -- OpenStack API metrics
                -- 'plugin_instance' = <service name>
                msg['Fields']['name'] = 'openstack' .. sep .. sample['plugin_instance'] .. sep .. 'check_api'
                if sample['type_instance'] ~= nil and sample['type_instance'] ~= '' then
                    msg['Fields']['os_region'] = sample['type_instance']
                end
            elseif metric_source == 'hypervisor_stats' then
                -- OpenStack hypervisor metrics
                -- 'type_instance' = <metric name> which can end by _MB or _GB
                msg['Fields']['name'] = 'openstack' .. sep .. 'nova' .. sep
                local name, unit
                name, unit = string.match(sample['type_instance'], '^(.+)_(.B)$')
                if name then
                    -- Strip the unit suffix from the name and carry it as
                    -- the value's representation instead.
                    msg['Fields']['name'] = msg['Fields']['name'] .. name
                    msg.Fields['value'] = {value = msg.Fields['value'], representation = unit}
                else
                    msg['Fields']['name'] = msg['Fields']['name'] .. sample['type_instance']
                end
            elseif metric_source == 'rabbitmq_info' then
                msg['Fields']['name'] = 'rabbitmq' .. sep .. sample['type_instance']
            elseif metric_source == 'nova' then
                msg['Fields']['name'] = 'openstack.nova' .. sep .. sample['plugin_instance'] .. sep .. sample['type_instance']
            elseif metric_source == 'cinder' then
                msg['Fields']['name'] = 'openstack.cinder' .. sep .. sample['plugin_instance'] .. sep .. sample['type_instance']
            elseif metric_source == 'glance' then
                msg['Fields']['name'] = 'openstack.glance' .. sep .. sample['type_instance']
            elseif metric_source == 'keystone' then
                msg['Fields']['name'] = 'openstack.keystone' .. sep .. sample['type_instance']
            elseif metric_source == 'neutron' then
                msg['Fields']['name'] = 'openstack.neutron' .. sep .. sample['type_instance']
            elseif metric_source == 'memcached' then
                msg['Fields']['name'] = 'memcached' .. sep .. string.gsub(metric_name, 'memcached_', '')
            elseif metric_source == 'haproxy' then
                msg['Fields']['name'] = 'haproxy' .. sep .. sample['type_instance']
            elseif metric_source == 'apache' then
                metric_name = string.gsub(metric_name, 'apache_', '')
                msg['Fields']['name'] = 'apache' .. sep .. string.gsub(metric_name, 'scoreboard', 'workers')
            elseif metric_source == 'ceph' then
                msg['Fields']['name'] = 'ceph' .. sep .. sample['plugin_instance'] .. sep .. sample['type_instance']
            elseif metric_source == 'dbi' and sample['plugin_instance'] == 'services_nova' then
                msg['Fields']['name'] = 'openstack.nova' .. sep .. sample['type_instance']
            elseif metric_source == 'dbi' and sample['plugin_instance'] == 'services_cinder' then
                msg['Fields']['name'] = 'openstack.cinder' .. sep .. sample['type_instance']
            elseif metric_source == 'dbi' and sample['plugin_instance'] == 'agents_neutron' then
                msg['Fields']['name'] = 'openstack.neutron' .. sep .. sample['type_instance']
            elseif metric_source == 'pacemaker_resource' then
                msg['Fields']['name'] = 'pacemaker.resource' .. sep .. sample['type_instance'] .. sep .. 'active'
            else
                -- Fallback: keep the collectd-derived name unchanged.
                msg['Fields']['name'] = metric_name
            end
            utils.inject_tags(msg)
            inject_message(msg)
        end
    end
    return 0
end

View File

@ -1,80 +0,0 @@
-- Copyright 2015 Mirantis, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
require 'cjson'
require 'string'
require 'math'
local utils = require 'lma_utils'
-- Walk an array of Heka plugin reports (one table per filter/decoder) and
-- append two metrics per plugin to `datapoints`: the processed-message
-- count and the average processing duration.
--
-- NOTE: It has been written for "filters" and "decoders". If we need
-- to use it to process other part of the Heka pipeline we need to ensure
-- that JSON provides names and table with ProcessMessageCount and
-- ProcessMessageAvgDuration:
-- "decoder": {
--   ...
--  },
--  "Name": "a name",
--  "ProcessMessageCount" : {
--     "representation": "count",
--     "value": 12
--  },
--  "ProcessMessageAvgDuration" : {
--     "representation": "ns",
--     "value": 192913
--  },
--  { ... }}
function process_table(datapoints, timestamp, hostname, kind, array)
    for _, v in pairs(array) do
        if type(v) == "table" then
            -- Declared as locals: the original leaked these as globals,
            -- which Heka sandboxes preserve across restarts.
            local name = v['Name']:gsub("_" .. kind, "")
            local msgCount = v['ProcessMessageCount']['value']
            local avgDuration = v['ProcessMessageAvgDuration']['value']
            utils.add_metric(datapoints,
                string.format('%s.lma_components.hekad.%s.%s.count', hostname, kind, name),
                {timestamp, msgCount})
            utils.add_metric(datapoints,
                string.format('%s.lma_components.hekad.%s.%s.duration', hostname, kind, name),
                {timestamp, avgDuration})
        end
    end
end
-- Turn Heka's self-monitoring report (a JSON payload) into InfluxDB
-- datapoints for the "filters" and "decoders" sections.
-- Returns 0 on success, -1 on bad JSON or when nothing was produced.
function process_message ()
    local decoded_ok, report = pcall(cjson.decode, read_message("Payload"))
    if not decoded_ok then
        return -1
    end
    local host = read_message("Hostname")
    local ts_ms = math.floor(read_message("Timestamp") / 1e6)
    local points = {}
    for section, plugins in pairs(report) do
        if section == "filters" or section == "decoders" then
            -- strip the trailing "s": "filters" -> "filter"
            process_table(points, ts_ms, host, section:sub(1, -2), plugins)
        end
    end
    if #points == 0 then
        -- We should not reach this point
        return -1
    end
    inject_payload("json", "influxdb", cjson.encode(points))
    return 0
end

View File

@ -1,50 +0,0 @@
-- Copyright 2015 Mirantis, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
require 'string'
local utils = require 'lma_utils'
local msg = {
Type = "metric", -- will be prefixed by "heka.sandbox."
Timestamp = nil,
Severity = 6,
Fields = nil
}
-- Derive an HTTP response-time metric from a parsed OpenStack log record.
-- Emits one "metric" message for every record that carries http_method,
-- http_status and http_response_time fields; skips the record otherwise.
function process_message ()
    local method = read_message("Fields[http_method]")
    local status = read_message("Fields[http_status]")
    local duration = read_message("Fields[http_response_time]")
    if method == nil or status == nil or duration == nil then
        return -1
    end
    -- keep only the first 2 tokens because some services like Neutron report
    -- themselves as 'openstack.<service>.server'
    local service = string.gsub(read_message("Logger"), '(%w+)%.(%w+).*', '%1.%2')
    msg.Timestamp = read_message("Timestamp")
    msg.Fields = {
        source = read_message('Fields[programname]') or service,
        name = string.format("%s.http.%s.%s", service, method, status),
        type = utils.metric_type['GAUGE'],
        value = {value = duration, representation = 's'},
        tenant_id = read_message('Fields[tenant_id]'),
        user_id = read_message('Fields[user_id]'),
    }
    utils.inject_tags(msg)
    inject_message(msg)
    return 0
end

View File

@ -1,57 +0,0 @@
-- Copyright 2015 Mirantis, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
require "cjson"
require "string"
require "os"
local last_flush = os.time()
local datapoints = {}
local flush_count = read_config('flush_count') or 100
local flush_interval = read_config('flush_interval') or 5
-- Emit the buffered datapoints as one "influxdb" payload when the buffer
-- is non-empty and has either grown past flush_count entries or aged past
-- flush_interval seconds; otherwise leave the buffer untouched.
function flush ()
    local now = os.time()
    if #datapoints == 0 then
        return
    end
    local too_many = #datapoints > flush_count
    local too_old = now - last_flush > flush_interval
    if too_many or too_old then
        inject_payload("json", "influxdb", cjson.encode(datapoints))
        datapoints = {}
        last_flush = now
    end
end
-- Buffer one metric message as an InfluxDB 0.8 datapoint and flush
-- opportunistically. Returns -1 for messages missing a name or a value,
-- 0 otherwise.
function process_message ()
    local ts = read_message("Timestamp") / 1e6 -- InfluxDB defaults to ms
    local hostname = read_message("Fields[hostname]") or read_message("Hostname")
    local metric_name = read_message("Fields[name]")
    local value = read_message("Fields[value]")
    -- Reject when EITHER field is missing: the original used 'and', which
    -- let half-formed samples through and made string.format() below fail
    -- on a nil metric name.
    if value == nil or metric_name == nil then
        return -1
    end
    local serie_name = string.format('%s.%s', hostname, metric_name)
    datapoints[#datapoints+1] = {
        name = serie_name,
        columns = {"time", "value"},
        points = {{ts, value}}
    }
    flush()
    return 0
end
-- Periodic hook invoked by Heka; delegates to flush() so that buffered
-- datapoints are emitted even when no new messages arrive.
function timer_event(ns)
    flush()
end

View File

@ -1,77 +0,0 @@
-- Copyright 2015 Mirantis, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
require 'cjson'
require 'string'
require 'table'
require "os"
require 'math'
local floor = math.floor
local utils = require 'lma_utils'
local last_flush = os.time()
local datapoints = {}
local base_serie_name = 'annotation'
local html_break_line = '<br />'
local flush_count = read_config('flush_count') or 100
local flush_interval = read_config('flush_interval') or 5
-- Emit the buffered annotation datapoints as one "influxdb" payload when
-- the buffer is non-empty and has either grown past flush_count entries
-- or aged past flush_interval seconds; otherwise leave it untouched.
function flush ()
    local now = os.time()
    if #datapoints == 0 then
        return
    end
    local too_many = #datapoints > flush_count
    local too_old = now - last_flush > flush_interval
    if too_many or too_old then
        inject_payload("json", "influxdb", cjson.encode(datapoints))
        datapoints = {}
        last_flush = now
    end
end
-- Convert a global-status message into a Grafana annotation datapoint
-- (columns: time, title, tag, text) and buffer it for the next flush.
function process_message ()
    local ts = floor(read_message('Timestamp')/1e6) -- ms
    local msg_type = read_message('Type')
    local payload = read_message('Payload')
    local service = read_message('Fields[service]')
    -- Serie names must not contain spaces.
    local name = string.gsub(service, ' ', '_')
    local serie_name = string.format('%s.%s', base_serie_name, name)
    local title
    local text = ''
    if msg_type == 'heka.sandbox.status' then
        local status = read_message('Fields[status]')
        local prev_status = read_message('Fields[previous_status]')
        -- The payload may carry a JSON list of detail strings; join them
        -- into one HTML blob for the annotation text (empty on bad JSON).
        local ok, details = pcall(cjson.decode, payload)
        if ok then
            text = table.concat(details, html_break_line)
        end
        if prev_status ~= status then
            title = string.format('General status %s -> %s',
                utils.global_status_to_label_map[prev_status],
                utils.global_status_to_label_map[status])
        else
            title = string.format('General status remains %s',
                utils.global_status_to_label_map[status])
        end
        datapoints[#datapoints+1] = {
            name = serie_name,
            columns = {"time", "title", "tag", "text"},
            points = {{ts, title, service, text}}
        }
    end
    flush()
    return 0
end
-- Periodic hook invoked by Heka; delegates to flush() so that buffered
-- annotations are emitted even when no new status messages arrive.
function timer_event(ns)
    flush()
end

View File

@ -1,47 +0,0 @@
-- Copyright 2015 Mirantis, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
local utils = require 'lma_utils'
local msg = {
Type = "metric", -- will be prefixed by "heka.sandbox."
Timestamp = nil,
Severity = 6,
}
count = 0
-- Emit a counter metric each time a Nova instance changes state; messages
-- whose state did not change are ignored.
-- NOTE(review): a nil Fields[state] would make the name concatenation
-- below fail — presumably upstream always sets it; confirm.
function process_message ()
    local state = read_message("Fields[state]")
    local old_state = read_message("Fields[old_state]")
    if old_state ~= nil and state == old_state then
        -- nothing to do
        return 0
    end
    msg.Timestamp = read_message("Timestamp")
    msg.Fields = {
        source = read_message('Logger'),
        name = "openstack.nova.instance_state." .. state,
        -- preserve the original hostname in the Fields attribute because
        -- sandboxed filters cannot override the Hostname attribute
        hostname = read_message("Fields[hostname]"),
        type = utils.metric_type['COUNTER'],
        value = 1,
        tenant_id = read_message("Fields[tenant_id]"),
        user_id = read_message("Fields[user_id]"),
    }
    utils.inject_tags(msg)
    inject_message(msg)
    return 0
end

View File

@ -1,62 +0,0 @@
-- Copyright 2015 Mirantis, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
require 'math'
local patt = require 'patterns'
local utils = require 'lma_utils'
local msg = {
Type = "metric", -- will be prefixed by "heka.sandbox."
Timestamp = nil,
Severity = 6,
}
local event_type_to_name = {
["compute.instance.create.end"] = "openstack.nova.instance_creation_time",
["volume.create.end"] = "openstack.cinder.volume_creation_time",
}
-- Compute resource creation time (Nova instance or Cinder volume) from
-- OpenStack notifications and emit it as a gauge metric in seconds.
-- Returns -1 for untracked event types or inconsistent timestamps.
function process_message ()
    local metric_name = event_type_to_name[read_message("Fields[event_type]")]
    if not metric_name then
        return -1
    end
    local created_at = read_message("Fields[created_at]") or ''
    local launched_at = read_message("Fields[launched_at]") or ''
    -- patt.Timestamp:match() returns nil when the string cannot be parsed;
    -- presumably it yields nanoseconds given the /1e6 conversion below —
    -- confirm against the patterns module.
    created_at = patt.Timestamp:match(created_at)
    launched_at = patt.Timestamp:match(launched_at)
    -- Discard unparsable, zero-valued or out-of-order timestamps.
    if created_at == nil or launched_at == nil or created_at == 0 or launched_at == 0 or created_at > launched_at then
        return -1
    end
    msg.Timestamp = read_message("Timestamp")
    msg.Fields = {
        source = read_message('Logger'),
        name = metric_name,
        -- preserve the original hostname in the Fields attribute because
        -- sandboxed filters cannot override the Hostname attribute
        hostname = read_message("Fields[hostname]"),
        type = utils.metric_type['GAUGE'],
        -- Having a millisecond precision for creation time is good enough given
        -- that the created_at field has only a 1-second precision.
        value = {value = math.floor((launched_at - created_at)/1e6 + 0.5) / 1e3, representation = 's'},
        tenant_id = read_message("Fields[tenant_id]"),
        user_id = read_message("Fields[user_id]"),
    }
    utils.inject_tags(msg)
    inject_message(msg)
    return 0
end

View File

@ -1,176 +0,0 @@
-- Copyright 2015 Mirantis, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
-- The filter accumulates data into a table and emits regularly a message per
-- service with a payload like this:
-- {
-- "vip_active_at": 1435829917607,
-- "name": "nova",
-- "states": {
-- "check_api":{
-- "nova":{
-- "down":{
-- "value":0,
-- "group_name":"endpoint",
-- "last_seen":1433252000524
-- },
-- "up":{
-- "value":1,
-- "group_name":"endpoint",
-- "last_seen":1433252000524
-- }
-- },
-- ...
-- },
-- "workers":{
-- "scheduler":{
-- "down":{
-- "value":0,
-- "group_name":"services",
-- "last_seen":1433251999229
-- },
-- "disabled":{
-- "value":1,
-- "group_name":"services",
-- "last_seen":1433251999226
-- },
-- "up":{
-- "value":2,
-- "group_name":"services",
-- "last_seen":1433251999227
-- }
-- },
-- ...
-- },
-- "haproxy":{
-- "nova-api":{
-- "down":{
-- "value":0,
-- "group_name":"pool",
-- "last_seen":1433252000957
-- },
-- "up":{
-- "value":3,
-- "group_name":"pool",
-- "last_seen":1433252000954
-- }
-- }
-- }
-- ...
-- }
-- }
require 'cjson'
require 'string'
require 'math'
local floor = math.floor
local utils = require 'lma_utils'
_PRESERVATION_VERSION = 1
-- variables with global scope are preserved between restarts
services = {}
vip_active_at = 0
local payload_name = read_config('inject_payload_name') or 'service_status'
-- Fold incoming status metrics (worker states, API checks, haproxy backend
-- counts, VIP state) into the global `services` table, keyed by
-- service name / section / element / state. The accumulated table is
-- emitted per service by timer_event.
function process_message ()
    local ts = floor(read_message("Timestamp")/1e6) -- ms
    local metric_name = read_message("Fields[name]")
    local value = read_message("Fields[value]")
    local name
    local top_entry
    local item_name
    local group_name
    local state
    -- Track when the public VIP was last seen active; vip_active_at is
    -- reset to 0 as soon as the VIP reports inactive.
    if string.find(metric_name, '^pacemaker.resource.vip__public') then
        if value == 1 then
            vip_active_at = ts
        else
            vip_active_at = 0
        end
        return 0
    end
    -- Map the metric-name suffix to an up/down/disabled state.
    if string.find(metric_name, '%.up$') then
        state = utils.state_map.UP
    elseif string.find(metric_name, '%.down$') then
        state = utils.state_map.DOWN
    elseif string.find(metric_name, '%.disabled$') then
        state = utils.state_map.DISABLED
    end
    if string.find(metric_name, '^openstack') then
        name, group_name, item_name = string.match(metric_name, '^openstack%.([^._]+)%.([^._]+)%.([^._]+)')
        top_entry = 'workers'
        if not item_name then
            -- A service can have several API checks, by convention the service name
            -- is written down "<name>-<item>" or just "<name>".
            item_name = string.match(metric_name, '^openstack%.([^.]+)%.check_api$')
            name, _ = string.match(item_name, '^([^-]+)\-(.*)')
            if not name then
                name = item_name
            end
            top_entry = 'check_api'
            group_name = 'endpoint'
            -- retrieve the current state
            state = utils.check_api_status_to_state_map[value]
            -- and always override value to 1
            value = 1
        end
    elseif string.find(metric_name, '^haproxy%.backend') then
        top_entry = 'haproxy'
        group_name = 'pool'
        item_name = string.match(metric_name, '^haproxy%.backend%.([^.]+)%.servers')
        name = string.match(item_name, '^([^-]+)')
    end
    if not name or not item_name then
        return -1
    end
    -- table initialization for the first time we see a service
    if not services[name] then services[name] = {} end
    if not services[name][top_entry] then services[name][top_entry] = {} end
    if not services[name][top_entry][item_name] then services[name][top_entry][item_name] = {} end
    local service = services[name][top_entry][item_name]
    service[state] = {last_seen=ts, value=value, group_name=group_name}
    -- In the logic to treat check_api results like others, group by up/down
    -- and reset the counterpart w/ value=0
    if top_entry == 'check_api' then
        local invert_state
        if state == utils.state_map.UP then
            invert_state = utils.state_map.DOWN
        elseif state == utils.state_map.DOWN then
            invert_state = utils.state_map.UP
        end
        if invert_state then
            if not service[invert_state] then
                service[invert_state] = {}
            end
            service[invert_state] = {last_seen=ts, value=0, group_name=group_name}
        end
    end
    return 0
end
-- Periodically emit one JSON payload per tracked service, bundling its
-- accumulated states with the timestamp at which the public VIP was last
-- seen active.
function timer_event(ns)
    for service_name, service_states in pairs(services) do
        local report = {
            vip_active_at = vip_active_at,
            name = service_name,
            states = service_states,
        }
        inject_payload('json', payload_name, cjson.encode(report))
    end
end

View File

@ -1,59 +0,0 @@
-- Copyright 2015 Mirantis, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
require 'cjson'
require 'string'
require 'math'
local timeout = read_config("timeout") or 30
services = {}
local floor = math.floor
-- Record a heartbeat for the service named in the incoming metric.
-- Services are keyed by "<hostname>.<service>": the first sighting creates
-- an entry with status up (1); later sightings only refresh last_seen.
function process_message ()
    local now_ms = floor(read_message("Timestamp") / 1e6)
    local svc = string.match(read_message("Fields[name]"), '^[^._]+')
    local host = read_message("Fields[hostname]")
    local key = string.format('%s.%s', host, svc)
    local entry = services[key]
    if entry == nil then
        services[key] = {last_seen = now_ms, status = 1, host = host, name = svc}
    else
        entry.last_seen = now_ms
    end
    return 0
end
-- Periodic check: mark services not seen within `timeout` seconds as down
-- (status 0) and emit every known service as an InfluxDB datapoint.
-- NOTE(review): entries are never removed from `services`, so the table
-- grows with every new host/service pair — presumably acceptable for the
-- bounded set of monitored services; confirm.
function timer_event(ns)
    local current_time = floor(ns / 1e6) -- in ms
    local datapoints = {}
    for k, service in pairs(services) do
        if current_time - service.last_seen > timeout * 1000 then
            service.status = 0
        else
            service.status = 1
        end
        datapoints[#datapoints+1] = {
            name = string.format('%s.%s.status', service.host, service.name),
            columns = {"time", "value"},
            points = {{service.last_seen, service.status}}
        }
    end
    if #datapoints > 0 then
        inject_payload("json", "influxdb", cjson.encode(datapoints))
    end
end

View File

@ -1,251 +0,0 @@
-- Copyright 2015 Mirantis, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
require 'cjson'
require 'string'
require 'math'
local floor = math.floor
local max = math.max
local utils = require 'lma_utils'
_PRESERVATION_VERSION = 2
-- variables with global scope are preserved between restarts
all_service_status = {}
-- local scope variables
local timeout = read_config("timeout") or 60
local hostname
local datapoints = {}
-- Aggregate a per-service state payload (produced by the service-status
-- accumulator) into a single global status metric and, when the public
-- VIP is active, a status-change message with encoded details.
-- Returns -1 on bad JSON, 0 otherwise.
function process_message ()
    local ok, data = pcall(cjson.decode, read_message("Payload"))
    if not ok then
        return -1
    end
    local timestamp = read_message('Timestamp')
    local ts = floor(timestamp/1e6) -- in ms
    hostname = read_message("Hostname")
    local service_name = data.name
    local states = data.states
    -- -1 means "no data for this section"; max() below effectively
    -- ignores it as long as at least one section reported.
    local worker_status = -1
    local check_api_status = -1
    local haproxy_server_status = -1
    local global_status
    local events = {}
    local not_up_status = {}
    local msg_event
    if not all_service_status[service_name] then all_service_status[service_name] = {} end
    if states.workers then
        worker_status = compute_status(events, not_up_status, ts, 'workers', service_name, states.workers, true)
    end
    if states.check_api then
        check_api_status = compute_status(events, not_up_status, ts, 'check_api', service_name, states.check_api, false)
    end
    if states.haproxy then
        haproxy_server_status = compute_status(events, not_up_status, ts, 'haproxy', service_name, states.haproxy, true)
    end
    -- The worst section status wins.
    global_status = max(worker_status, check_api_status, haproxy_server_status)
    -- global service status
    utils.add_metric(datapoints,
        string.format('%s.openstack.%s.status', hostname, service_name),
        {ts, global_status})
    -- only emit status if the public vip is active
    if not expired(ts, data.vip_active_at) then
        local prev = all_service_status[service_name].global_status or utils.global_status_map.UNKNOWN
        local updated
        updated = (prev ~= global_status or #events > 0)
        -- always append not UP status elements in details
        for k, v in pairs(not_up_status) do events[#events+1] = v end
        local details = ''
        if #events > 0 then
            details = cjson.encode(events)
        end
        utils.inject_status_message(timestamp, service_name,
            global_status, prev,
            updated, details)
    end
    all_service_status[service_name].global_status = global_status
    if #datapoints > 0 then
        inject_payload("json", "influxdb", cjson.encode(datapoints))
        datapoints = {}
    end
    return 0
end
-- Return the last recorded status for the (service, section, element)
-- triple, creating the intermediate tables on the way and defaulting the
-- element to UNKNOWN when it has never been seen before.
function get_previous_status(service_name, top_entry, name)
    local service = all_service_status[service_name]
    if not service then
        service = {}
        all_service_status[service_name] = service
    end
    local section = service[top_entry]
    if not section then
        section = {}
        service[top_entry] = section
    end
    if not section[name] then
        section[name] = utils.service_status_map.UNKNOWN
    end
    return section[name]
end
-- Record the current status for the (service, section, element) triple.
-- NOTE(review): the intermediate tables are not created here; callers
-- invoke get_previous_status() first, which autovivifies them.
function set_status(service_name, top_entry, name, status)
    local service = all_service_status[service_name]
    service[top_entry][name] = status
end
-- Compute the aggregated status of one group of checks (workers, check_api
-- or haproxy backends) for a service, and emit one status metric per element
-- into the module-level 'datapoints' accumulator.
--
-- events:        (in/out) list receiving "X -> Y" state-transition strings
-- not_up_status: (in/out) list receiving descriptions of elements not UP
--                whose state did not change
-- current_time:  current timestamp in milliseconds
-- elts_name:     group key ('workers', 'check_api' or 'haproxy'), used to
--                index the per-service state cache
-- name:          service name
-- states:        per-element state counters keyed by element then by state
--                (utils.state_map.UP/DOWN/DISABLED), each with a value and a
--                last_seen timestamp
-- display_num:   when true, append "(x/y UP)" details to event strings
--
-- Returns the group status (a utils.service_status_map value).
function compute_status(events, not_up_status, current_time, elts_name, name, states, display_num)
local down_elts = {}
local down_elts_count = 0
local zero_up = {}
local zero_up_count = 0
local one_up = {}
local one_disabled = {}
local one_disabled_count = 0
local service_status = utils.service_status_map.UNKNOWN
local up_elements = {}
local total_elements = {}
-- First pass: classify each element from its (non-stale) state counters.
for worker, worker_data in pairs(states) do
if not total_elements[worker] then
total_elements[worker] = 0
end
if not up_elements[worker] then
up_elements[worker] = 0
end
for state, data in pairs(worker_data) do
if not expired(current_time, data.last_seen) then
total_elements[worker] = total_elements[worker] + data.value
if state == utils.state_map.DOWN and data.value > 0 then
down_elts[worker] = data
down_elts_count = down_elts_count + 1
end
if state == utils.state_map.UP then
if data.value > 0 then
one_up[worker] = data
else
zero_up[worker] = data
zero_up_count = zero_up_count + 1
end
up_elements[worker] = data.value
end
if state == utils.state_map.DISABLED and data.value > 0 then
one_disabled[worker] = data
one_disabled_count = one_disabled_count + 1
end
end
end
end
-- general element status
-- DOWN if any element has zero UP members, DEGRADED if some members are
-- DOWN but every element keeps at least one UP member, UP otherwise.
if zero_up_count > 0 then
service_status = utils.service_status_map.DOWN
elseif down_elts_count > 0 then
service_status = utils.service_status_map.DEGRADED
elseif down_elts_count == 0 then
service_status = utils.service_status_map.UP
end
-- elements clearly down
-- A transition is reported in 'events'; an unchanged DOWN state goes to
-- 'not_up_status' instead, so it only shows up in the details field.
for worker_name, worker in pairs(zero_up) do
local prev = get_previous_status(name, elts_name, worker_name)
local DOWN = utils.service_status_map.DOWN
local event_detail = ""
set_status(name, elts_name, worker_name, DOWN)
if display_num then
event_detail = string.format("(%s/%s UP)", up_elements[worker_name],
total_elements[worker_name])
end
if prev and prev ~= DOWN then
events[#events+1] = string.format("%s %s %s -> %s %s", worker_name,
worker.group_name,
utils.service_status_to_label_map[prev],
utils.service_status_to_label_map[DOWN],
event_detail)
else
not_up_status[#not_up_status+1] = string.format("%s %s %s %s",
worker_name,
worker.group_name,
utils.service_status_to_label_map[DOWN],
event_detail)
end
utils.add_metric(datapoints, string.format('%s.openstack.%s.%s.%s.status',
hostname, name, worker.group_name, worker_name),
{current_time, utils.service_status_map.DOWN})
end
-- elements down or degraded
-- Elements with some DOWN members: DEGRADED when they still have UP
-- members, DOWN otherwise.
for worker_name, worker in pairs(down_elts) do
local prev = get_previous_status(name, elts_name, worker_name)
local new_status
local event_detail
if one_up[worker_name] then
new_status = utils.service_status_map.DEGRADED
else
new_status = utils.service_status_map.DOWN
end
set_status(name, elts_name, worker_name, new_status)
utils.add_metric(datapoints,
string.format("%s.openstack.%s.%s.%s.status",
hostname, name, worker.group_name, worker_name),
{current_time, new_status})
if display_num then
event_detail = string.format("(%s/%s UP)", up_elements[worker_name],
total_elements[worker_name])
else
event_detail = ""
end
if prev ~= new_status then
events[#events+1] = string.format("%s %s %s -> %s %s", worker_name,
worker.group_name,
utils.service_status_to_label_map[prev],
utils.service_status_to_label_map[new_status],
event_detail)
elseif not zero_up[worker_name] then
not_up_status[#not_up_status+1] = string.format("%s %s %s %s", worker_name,
worker.group_name,
utils.service_status_to_label_map[new_status],
event_detail)
end
end
-- elements up
-- Fully healthy elements: only emit an event on a transition back to UP.
for worker_name, worker in pairs(one_up) do
if not zero_up[worker_name] and not down_elts[worker_name] then
local prev = get_previous_status(name, elts_name, worker_name)
local UP = utils.service_status_map.UP
set_status(name, elts_name, worker_name, UP)
if prev and prev ~= utils.service_status_map.UP then
events[#events+1] = string.format("%s %s %s -> %s", worker_name,
worker.group_name,
utils.service_status_to_label_map[prev],
utils.service_status_to_label_map[UP])
end
utils.add_metric(datapoints, string.format("%s.openstack.%s.%s.%s.status",
hostname, name, worker.group_name, worker_name),
{current_time, utils.service_status_map.UP})
end
end
return service_status
end
-- Return true when last_time is unset (<= 0) or older than the configured
-- timeout relative to current_time. Both arguments are in milliseconds,
-- while 'timeout' is expressed in seconds.
function expired(current_time, last_time)
    local fresh = last_time > 0 and (current_time - last_time) <= timeout * 1000
    return not fresh
end

View File

@ -1,115 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# == Class: lma_collector::collectd::base_legacy
#
# Configure the base collectd setup used with the legacy (InfluxDB 0.8)
# pipeline: the collectd daemon, its standard system plugins, and the Heka
# HTTP listener + decoder receiving the collectd write_http payloads.
#
# === Parameters
#
# [*processes*]
#   Array of process names monitored by the collectd processes plugin.
#
# [*process_matches*]
#   Array of process match definitions for the processes plugin.
#
# [*queue_limit*]
#   High/low watermark applied to the collectd write queue.
#
# [*read_threads*]
#   Number of collectd read threads.
#
class lma_collector::collectd::base_legacy (
  $processes       = undef,
  $process_matches = undef,
  $queue_limit     = $lma_collector::params::collectd_queue_limit,
  $read_threads    = $lma_collector::params::collectd_read_threads,
){
  include lma_collector::params
  include lma_collector::service

  $port = $lma_collector::params::collectd_port

  class { '::collectd':
    purge                  => true,
    recurse                => true,
    purge_config           => true,
    fqdnlookup             => false,
    interval               => $lma_collector::params::collectd_interval,
    threads                => $read_threads,
    # Bug fix: the $queue_limit parameter was declared but never used; the
    # queue limits were hard-wired to the params default, silently ignoring
    # any caller-supplied value. The default is unchanged.
    write_queue_limit_low  => $queue_limit,
    write_queue_limit_high => $queue_limit,
  }

  class { 'collectd::plugin::logfile':
    log_level => 'warning',
    log_file  => $lma_collector::params::collectd_logfile,
  }

  # Metrics are POSTed in JSON to the local Heka HTTP listener.
  $urls = {
    "http://127.0.0.1:${port}" => {
      'format'   => 'JSON',
      storerates => true
    }
  }

  if $::osfamily == 'RedHat' {
    # collectd Puppet manifest is broken for RedHat derivatives as it tries to
    # install the collectd-write_http package which doesn't exist (for CentOS
    # at least)
    collectd::plugin {'write_http':
      ensure  => present,
      content => template('collectd/plugin/write_http.conf.erb'),
    }
  }
  else {
    class { 'collectd::plugin::write_http':
      urls => $urls,
    }
  }

  class { 'collectd::plugin::cpu':
  }

  # TODO: pass this list as a parameter or add a custom fact
  class { 'collectd::plugin::df':
    mountpoints => ['/', '/boot'],
  }

  # Monitor every block device reported by facter.
  $block_devices = join(split($::blockdevices, ','), '|')
  class { 'collectd::plugin::disk':
    disks => [ "/^${ block_devices }$/" ],
  }

  class { 'collectd::plugin::interface':
    interfaces => grep(split($::interfaces, ','), '^eth\d+$')
  }

  class { 'collectd::plugin::load':
  }

  class { 'collectd::plugin::memory':
  }

  class { 'collectd::plugin::processes':
    processes       => $processes,
    process_matches => $process_matches,
  }

  class { 'collectd::plugin::swap':
  }

  class { 'collectd::plugin::users':
  }

  # Rotate the collectd log file daily (the logfile plugin does not rotate).
  file { '/etc/logrotate.d/collectd':
    ensure  => present,
    content => "${lma_collector::params::collectd_logfile} {\n  daily\n  missingok\n}"
  }

  heka::decoder::sandbox { 'collectd':
    config_dir => $lma_collector::params::config_dir,
    filename   => "${lma_collector::params::plugins_dir}/decoders/collectd_legacy.lua" ,
    notify     => Class['lma_collector::service'],
  }

  # HTTP input fed by the collectd write_http plugin configured above.
  heka::input::httplisten { 'collectd':
    config_dir => $lma_collector::params::config_dir,
    address    => '127.0.0.1',
    port       => $port,
    decoder    => 'collectd',
    require    => Heka::Decoder::Sandbox['collectd'],
    notify     => Class['lma_collector::service'],
  }
}

View File

@ -1,65 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# == Class: lma_collector::influxdb_legacy
#
# Configure the Heka plugins sending metrics and annotations to an
# InfluxDB 0.8 server: two sandbox filters batching the data, a payload
# encoder and the HTTP output writing to the 0.8 'series' endpoint.
#
# === Parameters
#
# [*server*]   InfluxDB server address (mandatory).
# [*port*]     InfluxDB HTTP API port.
# [*database*] Target database name.
# [*user*]     Username for the HTTP API.
# [*password*] Password for the HTTP API.
#
class lma_collector::influxdb_legacy (
$server = $lma_collector::params::influxdb_server,
$port = $lma_collector::params::influxdb_port,
$database = $lma_collector::params::influxdb_database,
$user = $lma_collector::params::influxdb_user,
$password = $lma_collector::params::influxdb_password,
) inherits lma_collector::params {
include lma_collector::service
validate_string($server)
# Batch incoming metric messages into InfluxDB JSON payloads.
heka::filter::sandbox { 'influxdb_accumulator':
config_dir => $lma_collector::params::config_dir,
filename => "${lma_collector::params::plugins_dir}/filters/influxdb_accumulator_legacy.lua",
message_matcher => 'Type == \'metric\' || Type == \'heka.sandbox.metric\'',
ticker_interval => 1,
config => {
flush_interval => $lma_collector::params::influxdb_flush_interval,
flush_count => $lma_collector::params::influxdb_flush_count,
},
notify => Class['lma_collector::service'],
}
# Turn status-change messages into annotation datapoints.
heka::filter::sandbox { 'influxdb_annotation':
config_dir => $lma_collector::params::config_dir,
filename => "${lma_collector::params::plugins_dir}/filters/influxdb_annotation_legacy.lua",
message_matcher => 'Type == \'heka.sandbox.status\' && Fields[updated] == TRUE',
ticker_interval => 1,
config => {
flush_interval => $lma_collector::params::influxdb_flush_interval,
flush_count => $lma_collector::params::influxdb_flush_count,
},
notify => Class['lma_collector::service'],
}
# Emit the filter payloads verbatim (they are already InfluxDB JSON).
heka::encoder::payload { 'influxdb':
config_dir => $lma_collector::params::config_dir,
notify => Class['lma_collector::service'],
}
# POST the payloads to the InfluxDB 0.8 'series' endpoint.
heka::output::http { 'influxdb':
config_dir => $lma_collector::params::config_dir,
url => "http://${server}:${port}/db/${database}/series",
message_matcher => 'Fields[payload_type] == \'json\' && Fields[payload_name] == \'influxdb\'',
username => $user,
password => $password,
timeout => $lma_collector::params::influxdb_timeout,
require => [Heka::Encoder::Payload['influxdb'], Heka::Filter::Sandbox['influxdb_accumulator']],
notify => Class['lma_collector::service'],
}
}

View File

@ -1,25 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# == Class: lma_collector::logs::metrics_legacy
#
# Configure the Heka sandbox filter which derives HTTP response time
# metrics from log messages carrying an http_response_time field
# (legacy InfluxDB 0.8 pipeline).
#
class lma_collector::logs::metrics_legacy {
  include lma_collector::params
  include lma_collector::service

  $plugins_dir = $lma_collector::params::plugins_dir

  heka::filter::sandbox { 'http_metrics_legacy':
    config_dir      => $lma_collector::params::config_dir,
    filename        => "${plugins_dir}/filters/http_metrics_legacy.lua",
    message_matcher => "Type == 'log' && Fields[http_response_time] != NIL",
    notify          => Class['lma_collector::service'],
  }
}

View File

@ -1,34 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# == Class: lma_collector::metrics::heka_monitoring_legacy
#
# Set up Heka self-monitoring: a sandbox filter consuming the internal
# 'heka.all-report' messages, plus the dashboard output which Heka
# requires in order to emit those reports at all.
#
# === Parameters
#
# [*dashboard_address*] Listen address of the Heka dashboard.
# [*dashboard_port*]    Listen port of the Heka dashboard.
#
class lma_collector::metrics::heka_monitoring_legacy (
  $dashboard_address = $lma_collector::params::dashboard_address,
  $dashboard_port    = $lma_collector::params::dashboard_port,
){
  include lma_collector::service

  heka::filter::sandbox { 'heka_monitoring':
    config_dir      => $lma_collector::params::config_dir,
    filename        => "${lma_collector::params::plugins_dir}/filters/heka_monitoring_legacy.lua",
    message_matcher => 'Type == \'heka.all-report\'',
    notify          => Class['lma_collector::service'],
  }

  # Dashboard is required to enable monitoring messages
  heka::output::dashboard { 'dashboard':
    config_dir        => $lma_collector::params::config_dir,
    dashboard_address => $dashboard_address,
    dashboard_port    => $dashboard_port,
  }
}

View File

@ -1,37 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# == Class: lma_collector::metrics::service_heartbeat_legacy
#
# Configure the Heka sandbox filter which emits heartbeat metrics for the
# monitored services (legacy InfluxDB 0.8 pipeline). The filter matches
# every metric whose name starts with one of the service names.
#
# === Parameters
#
# [*services*]
#   Array of service names to monitor (mandatory).
#
# [*timeout*]
#   Seconds without a matching metric after which the heartbeat filter
#   reports the service as failed.
#
class lma_collector::metrics::service_heartbeat_legacy (
  $services,
  $timeout = $lma_collector::params::heartbeat_timeout,
) inherits lma_collector::params {
  include lma_collector::service

  validate_array($services)

  if (size($services) > 0) {
    # Cleanup: $regexp was previously computed but never used, and the
    # message matcher rebuilt the identical join(sort(...)) expression.
    # Reuse the variable instead; the resulting matcher string is unchanged.
    $regexp = join(sort($services), '|')

    heka::filter::sandbox { 'service_heartbeat':
      config_dir      => $lma_collector::params::config_dir,
      filename        => "${lma_collector::params::plugins_dir}/filters/service_heartbeat_legacy.lua",
      message_matcher => "Fields[name] =~ /^${regexp}/",
      ticker_interval => 10,
      config          => {
        timeout => $timeout,
      },
      notify          => Class['lma_collector::service'],
    }
  }
}

View File

@ -1,49 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# == Class: lma_collector::metrics::service_status_legacy
#
# Configure the pair of Heka sandbox filters computing OpenStack service
# statuses (legacy InfluxDB 0.8 pipeline): one accumulating the raw state
# metrics, one deriving the per-service status from the accumulated data.
#
# === Parameters
#
# [*metrics_regexp*]
#   Array of regular expressions selecting the metrics used to compute
#   the service statuses.
#
# [*payload_name*]
#   Payload name linking the accumulator output to the status filter input.
#
# [*timeout*]
#   Seconds after which accumulated states are considered stale.
#
class lma_collector::metrics::service_status_legacy (
  $metrics_regexp = $lma_collector::params::service_status_metrics_regexp_legacy,
  $payload_name   = $lma_collector::params::service_status_payload_name,
  $timeout        = $lma_collector::params::service_status_timeout,
){
  include heka::params

  validate_array($metrics_regexp)

  # Bug fix: the guard previously read size(metrics_regexp) -- missing the
  # '$' sigil -- so size() was applied to the bareword string
  # 'metrics_regexp' and the condition was always true, even for an empty
  # parameter array.
  if (size($metrics_regexp) > 0){
    heka::filter::sandbox { 'service_accumulator_states':
      config_dir      => $lma_collector::params::config_dir,
      filename        => "${lma_collector::params::plugins_dir}/filters/service_accumulator_states_legacy.lua",
      message_matcher => inline_template('<%= @metrics_regexp.collect{|x| "Fields[name] =~ /%s/" % x}.join(" || ") %>'),
      ticker_interval => $lma_collector::params::service_status_interval,
      preserve_data   => true,
      config          => {
        inject_payload_name => $payload_name,
      },
      notify          => Class['lma_collector::service'],
    }

    heka::filter::sandbox { 'service_status':
      config_dir      => $lma_collector::params::config_dir,
      filename        => "${lma_collector::params::plugins_dir}/filters/service_status_legacy.lua",
      message_matcher => "Fields[payload_type] == 'json' && Fields[payload_name] == '${payload_name}'",
      preserve_data   => true,
      config          => {
        timeout => $timeout,
      },
      notify          => Class['lma_collector::service'],
    }
  }
}

View File

@ -1,34 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# == Class: lma_collector::notifications::metrics_legacy
#
# Configure the Heka sandbox filters which derive metrics from OpenStack
# notifications (legacy InfluxDB 0.8 pipeline): resource creation time
# and instance state changes.
#
class lma_collector::notifications::metrics_legacy {
  include lma_collector::params
  include lma_collector::service

  $config_dir  = $lma_collector::params::config_dir
  $plugins_dir = $lma_collector::params::plugins_dir

  # Filter to compute resource's creation time metric
  heka::filter::sandbox { 'resource_creation_time_legacy':
    config_dir      => $config_dir,
    filename        => "${plugins_dir}/filters/resource_creation_time_legacy.lua",
    message_matcher => "Type == 'notification' && Fields[event_type] =~ /^(compute.instance|volume).create.end$/",
    notify          => Class['lma_collector::service'],
  }

  # Filter to compute the instance state change metric
  heka::filter::sandbox { 'instance_state_legacy':
    config_dir      => $config_dir,
    filename        => "${plugins_dir}/filters/instance_state_legacy.lua",
    message_matcher => "Type == 'notification' && Fields[event_type] == 'compute.instance.update' && Fields[state] != NIL",
    notify          => Class['lma_collector::service'],
  }
}

View File

@ -113,15 +113,6 @@ class lma_collector::params {
$annotations_serie_name = 'annotations'
# Catch-all metrics used to compute OpenStack service statuses
$service_status_metrics_regexp_legacy = [
'^openstack.(nova|cinder|neutron).(services|agents).*(up|down|disabled)$',
# Exception for mysqld backend because the MySQL service status is
# computed by a dedicated filter and this avoids to send an annoying
# status Heka message.
'^haproxy.backend.(horizon|nova|cinder|neutron|ceilometer|keystone|swift|heat|glance|radosgw)(-.+)?.servers.(down|up)$',
'^pacemaker.resource.vip__public.active$',
'^openstack.*check_api$'
]
$service_status_metrics_matcher = join([
'(Type == \'metric\' || Type == \'heka.sandbox.metric\') && ',
'(Fields[name] =~ /^openstack_(nova|cinder|neutron)_(services|agents)$/ || ',

View File

@ -97,15 +97,6 @@ attributes:
regex: *not_empty_parameter
restrictions: *disable_influxdb_parameters
influxdb_legacy:
value: true
label: 'Use InfluxDB version 0.8'
weight: 90
type: "checkbox"
restrictions:
- condition: "true"
action: "hide"
alerting_mode:
type: "radio"
weight: 90