Add a dedicated manifest to configure collectd

This removes code duplication, and works around limitations we previously
had to deal with, caused by the fact that the collectd Puppet resources
don't play well when they are created at different times from several
manifests.

Change-Id: I52fabb1fb5795a33f552168553a148b1520fc496
This commit is contained in:
Simon Pasquier 2016-08-11 17:39:27 +02:00
parent 16b288b57a
commit 38ec02fe46
9 changed files with 319 additions and 398 deletions

View File

@ -332,56 +332,6 @@ if hiera('lma::collector::elasticsearch::server', false) {
}
if hiera('lma::collector::influxdb::server', false) {
# TODO(all): this class is also applied by other role-specific manifests.
# This is sub-optimal and error prone. It needs to be fixed by having all
# collectd resources managed by a single manifest.
class { 'lma_collector::collectd::base':
processes => ['hekad', 'collectd'],
# Purge the default configuration shipped with the collectd package
purge => true,
require => Class['lma_collector'],
}
if $is_mysql_server {
class { 'lma_collector::collectd::mysql':
username => hiera('lma::collector::monitor::mysql_username'),
password => hiera('lma::collector::monitor::mysql_password'),
socket => hiera('lma::collector::monitor::mysql_socket'),
require => Class['lma_collector::collectd::base'],
}
lma_collector::collectd::dbi_mysql_status { 'mysql_status':
username => hiera('lma::collector::monitor::mysql_username'),
dbname => hiera('lma::collector::monitor::mysql_db'),
password => hiera('lma::collector::monitor::mysql_password'),
require => Class['lma_collector::collectd::base'],
}
}
if ($is_rabbitmq or $is_mysql_server) and ! $is_controller {
if $is_mysql_server {
$mysql_resource = {
'p_mysqld' => 'mysqld',
}
}
else {
$mysql_resource = {}
}
if $is_rabbitmq {
$rabbitmq_resource = {
'p_rabbitmq-server' => 'rabbitmq',
}
}
else {
$rabbitmq_resource = {}
}
class { 'lma_collector::collectd::pacemaker':
resources => merge($rabbitmq_resource, $mysql_resource),
hostname => $::hostname,
}
}
class { 'lma_collector::influxdb':
server => hiera('lma::collector::influxdb::server'),
port => hiera('lma::collector::influxdb::port'),
@ -409,17 +359,6 @@ if $is_rabbitmq and (hiera('lma::collector::elasticsearch::server', false) or hi
if hiera('lma::collector::influxdb::server', false) {
class { 'lma_collector::notifications::metrics': }
# If the node has the controller role, the collectd Python plugins will be
# configured in controller.pp. This limitation is imposed by the upstream
# collectd Puppet module.
unless $is_controller {
class { 'lma_collector::collectd::rabbitmq':
username => 'nova',
password => $rabbit['password'],
require => Class['lma_collector::collectd::base'],
}
}
}
}

View File

@ -1,39 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

notice('fuel-plugin-lma-collector: ceph_osd.pp')

# collectd metrics are only configured when an InfluxDB backend is defined.
if hiera('lma::collector::influxdb::server', false) {
  $node_profiles = hiera_hash('lma::collector::node_profiles')

  # The collectd Puppet module only supports configuring Python plugins from
  # a single manifest: a later run would overwrite the Python plugin
  # configuration produced by an earlier one. Controller nodes already get
  # Python plugins from their own manifest, so the ceph_osd plugin is only
  # installed on nodes that are not controllers.
  if $node_profiles['controller'] {
    notice('ceph_osd_perf not configured to avoid messing of collectd python plugin configuration!')
  } else {
    class { 'lma_collector::collectd::base':
      processes => ['hekad', 'collectd'],
    }
    class { 'lma_collector::collectd::ceph_osd': }
  }

  # For the same single-manifest reason, nodes combining the compute and
  # ceph-osd roles get their libvirt_check plugin configured here instead of
  # in the compute.pp manifest.
  if $node_profiles['compute'] {
    class { 'lma_collector::collectd::libvirt_check': }
  }
}

View File

@ -0,0 +1,300 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Dedicated manifest that declares ALL collectd resources for every node
# role. The upstream collectd Puppet module does not cope with resources
# being created from several manifests at different times, so everything
# collectd-related is centralized here and role-specific parts are gated on
# the node profile flags below.
notice('fuel-plugin-lma-collector: collectd.pp')
# collectd metrics are only useful when an InfluxDB backend is configured.
if hiera('lma::collector::influxdb::server', false) {
prepare_network_config(hiera_hash('network_scheme', {}))
$management_vip = hiera('management_vip')
$mgmt_address = get_network_role_property('management', 'ipaddr')
$lma_collector = hiera_hash('lma_collector')
# One boolean per role so each collectd plugin can be enabled independently.
$node_profiles = hiera_hash('lma::collector::node_profiles')
$is_controller = $node_profiles['controller']
$is_base_os = $node_profiles['base_os']
$is_mysql_server = $node_profiles['mysql']
$is_rabbitmq = $node_profiles['rabbitmq']
$is_compute = $node_profiles['compute']
$is_ceph_osd = $node_profiles['ceph_osd']
$is_elasticsearch_node = $node_profiles['elasticsearch']
$is_influxdb_node = $node_profiles['influxdb']
$nova = hiera_hash('nova', {})
$neutron = hiera_hash('quantum_settings', {})
$cinder = hiera_hash('cinder', {})
$haproxy_socket = '/var/lib/haproxy/stats'
# NOTE(review): $mgmt_address, $lma_collector, $is_base_os, $neutron and
# $cinder do not appear to be referenced anywhere in this manifest — confirm
# whether they can be dropped.
$storage_options = hiera_hash('storage', {})
# Ceph monitoring is enabled as soon as any storage service uses Ceph.
if $storage_options['volumes_ceph'] or $storage_options['images_ceph'] or
$storage_options['objects_ceph'] or $storage_options['ephemeral_ceph']{
$ceph_enabled = true
} else {
$ceph_enabled = false
}
# On controller and RabbitMQ nodes the metric_collector service is managed
# by Pacemaker, so override the provider of the service resource declared
# elsewhere.
if $is_controller or $is_rabbitmq {
Service<| title == 'metric_collector' |> {
provider => 'pacemaker'
}
}
# Elasticsearch runs in a JVM, so it must be tracked by matching the 'java'
# executable instead of an exact process name.
if $is_elasticsearch_node {
$process_matches = [{name => 'elasticsearch', regex => 'java'}]
} else {
$process_matches = undef
}
# InfluxDB-Grafana nodes also monitor their backend daemons.
if $is_influxdb_node {
$processes = ['influxd', 'grafana-server', 'hekad', 'collectd']
} else {
$processes = ['hekad', 'collectd']
}
if $is_controller {
# collectd plugins on controller do many network I/O operations, so
# it is recommended to increase this value
$read_threads = 10
} else {
$read_threads = 5
}
# Common collectd configuration applied to every node.
class { 'lma_collector::collectd::base':
processes => $processes,
process_matches => $process_matches,
# Purge the default configuration shipped with the collectd package
purge => true,
read_threads => $read_threads,
}
# MySQL server monitoring (status queries through DBI plus the native
# collectd MySQL plugin).
if $is_mysql_server {
class { 'lma_collector::collectd::mysql':
username => hiera('lma::collector::monitor::mysql_username'),
password => hiera('lma::collector::monitor::mysql_password'),
socket => hiera('lma::collector::monitor::mysql_socket'),
require => Class['lma_collector::collectd::base'],
}
lma_collector::collectd::dbi_mysql_status { 'mysql_status':
username => hiera('lma::collector::monitor::mysql_username'),
dbname => hiera('lma::collector::monitor::mysql_db'),
password => hiera('lma::collector::monitor::mysql_password'),
require => Class['lma_collector::collectd::base'],
}
}
# RabbitMQ monitoring; falls back to the 'nova' user when no user is
# provided in the 'rabbit' hash.
if $is_rabbitmq {
$rabbit = hiera_hash('rabbit')
if $rabbit['user'] {
$rabbitmq_user = $rabbit['user']
}
else {
$rabbitmq_user = 'nova'
}
class { 'lma_collector::collectd::rabbitmq':
username => $rabbitmq_user,
password => $rabbit['password'],
require => Class['lma_collector::collectd::base'],
}
}
# Configure Pacemaker plugin
if $is_controller {
# Collect extended metrics only from the node holding the management VIP
# to avoid duplicate samples from every controller.
$pacemaker_master_resource = 'vip__management'
$controller_resources = {
'vip__public' => 'vip__public',
'vip__management' => 'vip__management',
'vip__vrouter_pub' => 'vip__vrouter_pub',
'vip__vrouter' => 'vip__vrouter',
'p_haproxy' => 'haproxy',
}
} else {
$pacemaker_master_resource = undef
$controller_resources = {}
}
# Deal with detach-* plugins
if $is_mysql_server {
$mysql_resource = {
'p_mysqld' => 'mysqld',
}
}
else {
$mysql_resource = {}
}
if $is_rabbitmq {
$rabbitmq_resource = {
'p_rabbitmq-server' => 'rabbitmq',
}
}
else {
$rabbitmq_resource = {}
}
# Only declare the Pacemaker plugin when there is at least one Pacemaker
# resource to track on this node.
$resources = merge($controller_resources, $mysql_resource, $rabbitmq_resource)
if ! empty($resources) {
class { 'lma_collector::collectd::pacemaker':
resources => $resources,
notify_resource => $pacemaker_master_resource,
hostname => $::fqdn,
require => Class['lma_collector::collectd::base'],
}
}
if $is_controller {
# Configure OpenStack plugins
$openstack_service_config = {
user => 'nova',
password => $nova['user_password'],
tenant => 'services',
keystone_url => "http://${management_vip}:5000/v2.0",
pacemaker_master_resource => $pacemaker_master_resource,
require => Class['lma_collector::collectd::base'],
}
$openstack_services = {
'nova' => $openstack_service_config,
'cinder' => $openstack_service_config,
'glance' => $openstack_service_config,
'keystone' => $openstack_service_config,
'neutron' => $openstack_service_config,
}
create_resources(lma_collector::collectd::openstack, $openstack_services)
# FIXME(elemoine) use the special attribute * when Fuel uses a Puppet version
# that supports it.
class { 'lma_collector::collectd::openstack_checks':
user => $openstack_service_config[user],
password => $openstack_service_config[password],
tenant => $openstack_service_config[tenant],
keystone_url => $openstack_service_config[keystone_url],
pacemaker_master_resource => $openstack_service_config[pacemaker_master_resource],
require => Class['lma_collector::collectd::base'],
}
# FIXME(elemoine) use the special attribute * when Fuel uses a Puppet version
# that supports it.
class { 'lma_collector::collectd::hypervisor':
user => $openstack_service_config[user],
password => $openstack_service_config[password],
tenant => $openstack_service_config[tenant],
keystone_url => $openstack_service_config[keystone_url],
pacemaker_master_resource => $openstack_service_config[pacemaker_master_resource],
# Fuel sets cpu_allocation_ratio to 8.0 in nova.conf
cpu_allocation_ratio => 8.0,
require => Class['lma_collector::collectd::base'],
}
# HAProxy frontends are renamed to stable metric names so dashboards don't
# depend on the Fuel release's proxy naming.
class { 'lma_collector::collectd::haproxy':
socket => $haproxy_socket,
# Ignore internal stats ('Stats' for 6.1, 'stats' for 7.0), lma proxies and
# Nova EC2
proxy_ignore => ['Stats', 'stats', 'lma', 'nova-api-1'],
proxy_names => {
'ceilometer' => 'ceilometer-api',
'cinder-api' => 'cinder-api',
'glance-api' => 'glance-api',
'glance-registry' => 'glance-registry-api',
'heat-api' => 'heat-api',
'heat-api-cfn' => 'heat-cfn-api',
'heat-api-cloudwatch' => 'heat-cloudwatch-api',
'horizon' => 'horizon-web',
'horizon-ssl' => 'horizon-https',
'keystone-1' => 'keystone-public-api',
'keystone-2' => 'keystone-admin-api',
'murano' => 'murano-api',
'mysqld' => 'mysqld-tcp',
'neutron' => 'neutron-api',
# starting with Mitaka (and later)
'nova-api' => 'nova-api',
# before Mitaka
'nova-api-2' => 'nova-api',
'nova-novncproxy' => 'nova-novncproxy-websocket',
'nova-metadata-api' => 'nova-metadata-api',
'sahara' => 'sahara-api',
'swift' => 'swift-api',
},
require => Class['lma_collector::collectd::base'],
}
if $ceph_enabled {
class { 'lma_collector::collectd::ceph_mon':
require => Class['lma_collector::collectd::base'],
}
}
class { 'lma_collector::collectd::memcached':
host => get_network_role_property('mgmt/memcache', 'ipaddr'),
require => Class['lma_collector::collectd::base'],
}
# Enable the Apache status module
class { 'fuel_lma_collector::mod_status': }
class { 'lma_collector::collectd::apache':
require => Class['lma_collector::collectd::base'],
}
# VIP checks
$influxdb_server = hiera('lma::collector::influxdb::server')
$influxdb_port = hiera('lma::collector::influxdb::port')
class { 'lma_collector::collectd::http_check':
urls => {
'influxdb' => "http://${influxdb_server}:${influxdb_port}/ping",
},
expected_codes => {
# /ping returns 204 No Content when InfluxDB is healthy.
'influxdb' => 204
},
timeout => 1,
max_retries => 3,
pacemaker_master_resource => $pacemaker_master_resource,
require => Class['lma_collector::collectd::base'],
}
}
# Compute
if $is_compute {
class { 'lma_collector::collectd::libvirt':
require => Class['lma_collector::collectd::base'],
}
class { 'lma_collector::collectd::libvirt_check':
require => Class['lma_collector::collectd::base'],
}
}
# Ceph OSD
if $is_ceph_osd {
class { 'lma_collector::collectd::ceph_osd':
require => Class['lma_collector::collectd::base'],
}
}
# InfluxDB
if $is_influxdb_node {
class { 'lma_collector::collectd::influxdb':
username => 'root',
password => hiera('lma::collector::influxdb::root_password'),
address => hiera('lma::collector::influxdb::listen_address'),
port => hiera('lma::collector::influxdb::influxdb_port', 8086),
require => Class['lma_collector::collectd::base'],
}
}
# Elasticsearch
if $is_elasticsearch_node {
class { 'lma_collector::collectd::elasticsearch':
address => hiera('lma::collector::elasticsearch::listen_address'),
port => hiera('lma::collector::elasticsearch::rest_port', 9200),
require => Class['lma_collector::collectd::base'],
}
}
# Backend nodes front their services with a local HAProxy; monitor it too.
if $is_influxdb_node or $is_elasticsearch_node {
class { 'lma_collector::collectd::haproxy':
socket => $haproxy_socket,
require => Class['lma_collector::collectd::base'],
}
}
}

View File

@ -26,19 +26,6 @@ if hiera('lma::collector::influxdb::server', false) {
class { 'lma_collector::logs::counter':
hostname => $::hostname,
}
class { 'lma_collector::collectd::base':
processes => ['hekad', 'collectd'],
}
class { 'lma_collector::collectd::libvirt': }
# Due to limitation of Python collectd plugin implementation, the
# libvirt_check is configured by ceph_osd manifests if it is a ceph-osd node.
$node_profiles = hiera_hash('lma::collector::node_profiles')
if ! $node_profiles['ceph_osd'] {
class { 'lma_collector::collectd::libvirt_check': }
}
}
if $ceilometer['enabled'] {

View File

@ -14,23 +14,12 @@
notice('fuel-plugin-lma-collector: controller.pp')
prepare_network_config(hiera_hash('network_scheme', {}))
$messaging_address = get_network_role_property('mgmt/messaging', 'ipaddr')
$memcache_address = get_network_role_property('mgmt/memcache', 'ipaddr')
$network_metadata = hiera_hash('network_metadata')
$node_profiles = hiera_hash('lma::collector::node_profiles')
$is_rabbitmq = $node_profiles['rabbitmq']
$is_mysql_server = $node_profiles['mysql']
$ceilometer = hiera_hash('ceilometer', {})
$lma_collector = hiera_hash('lma_collector')
$rabbit = hiera_hash('rabbit')
$management_vip = hiera('management_vip')
$storage_options = hiera_hash('storage', {})
$murano = hiera_hash('murano')
$sahara = hiera_hash('sahara')
$contrail = hiera('contrail', false)
if $ceilometer['enabled'] {
$notification_topics = ['notifications', 'lma_notifications']
@ -39,13 +28,6 @@ else {
$notification_topics = ['lma_notifications']
}
if $rabbit['user'] {
$rabbitmq_user = $rabbit['user']
}
else {
$rabbitmq_user = 'nova'
}
# Make sure the Log and Metric collector services are configured with the
# "pacemaker" provider
Service<| title == 'log_collector' |> {
@ -268,187 +250,19 @@ if hiera('lma::collector::elasticsearch::server', false) or hiera('lma::collecto
# Metrics
if hiera('lma::collector::influxdb::server', false) {
$nova = hiera_hash('nova', {})
$neutron = hiera_hash('quantum_settings', {})
$cinder = hiera_hash('cinder', {})
$haproxy_socket = '/var/lib/haproxy/stats'
if $storage_options['volumes_ceph'] or $storage_options['images_ceph'] or
$storage_options['objects_ceph'] or $storage_options['ephemeral_ceph']{
$ceph_enabled = true
} else {
$ceph_enabled = false
}
class { 'lma_collector::logs::counter':
hostname => $::hostname,
}
class { 'lma_collector::collectd::base':
processes => ['hekad', 'collectd'],
# collectd plugins on controller do many network I/O operations, so
# it is recommended to increase this value
read_threads => 10,
}
# All collectd Python plugins must be configured in the same manifest.
# This limitation is imposed by the upstream collectd Puppet module.
# That's why we declare the RabbitMQ plugin if it is running on the
# controller.
if $is_rabbitmq {
class { 'lma_collector::collectd::rabbitmq':
username => 'nova',
password => $rabbit['password'],
require => Class['lma_collector::collectd::base'],
}
}
$pacemaker_master_resource = 'vip__management'
# Deal with detach-* plugins
if $is_mysql_server {
$mysql_resource = {
'p_mysqld' => 'mysqld',
}
}
else {
$mysql_resource = {}
}
if $is_rabbitmq {
$rabbitmq_resource = {
'p_rabbitmq-server' => 'rabbitmq',
}
}
else {
$rabbitmq_resource = {}
}
class { 'lma_collector::collectd::pacemaker':
resources => merge({
'vip__public' => 'vip__public',
'vip__management' => 'vip__management',
'vip__vrouter_pub' => 'vip__vrouter_pub',
'vip__vrouter' => 'vip__vrouter',
'p_haproxy' => 'haproxy',
}, $mysql_resource, $rabbitmq_resource),
notify_resource => $pacemaker_master_resource,
hostname => $::fqdn,
}
$openstack_service_config = {
user => 'nova',
password => $nova['user_password'],
tenant => 'services',
keystone_url => "http://${management_vip}:5000/v2.0",
pacemaker_master_resource => $pacemaker_master_resource,
}
$openstack_services = {
'nova' => $openstack_service_config,
'cinder' => $openstack_service_config,
'glance' => $openstack_service_config,
'keystone' => $openstack_service_config,
'neutron' => $openstack_service_config,
}
create_resources(lma_collector::collectd::openstack, $openstack_services)
# FIXME(elemoine) use the special attribute * when Fuel uses a Puppet version
# that supports it.
class { 'lma_collector::collectd::openstack_checks':
user => $openstack_service_config[user],
password => $openstack_service_config[password],
tenant => $openstack_service_config[tenant],
keystone_url => $openstack_service_config[keystone_url],
pacemaker_master_resource => $openstack_service_config[pacemaker_master_resource],
}
# FIXME(elemoine) use the special attribute * when Fuel uses a Puppet version
# that supports it.
class { 'lma_collector::collectd::hypervisor':
user => $openstack_service_config[user],
password => $openstack_service_config[password],
tenant => $openstack_service_config[tenant],
keystone_url => $openstack_service_config[keystone_url],
pacemaker_master_resource => $openstack_service_config[pacemaker_master_resource],
# Fuel sets cpu_allocation_ratio to 8.0 in nova.conf
cpu_allocation_ratio => 8.0,
}
class { 'lma_collector::collectd::haproxy':
socket => $haproxy_socket,
# Ignore internal stats ('Stats' for 6.1, 'stats' for 7.0), lma proxies and
# Nova EC2
proxy_ignore => ['Stats', 'stats', 'lma', 'nova-api-1'],
proxy_names => {
'ceilometer' => 'ceilometer-api',
'cinder-api' => 'cinder-api',
'glance-api' => 'glance-api',
'glance-registry' => 'glance-registry-api',
'heat-api' => 'heat-api',
'heat-api-cfn' => 'heat-cfn-api',
'heat-api-cloudwatch' => 'heat-cloudwatch-api',
'horizon' => 'horizon-web',
'horizon-ssl' => 'horizon-https',
'keystone-1' => 'keystone-public-api',
'keystone-2' => 'keystone-admin-api',
'murano' => 'murano-api',
'mysqld' => 'mysqld-tcp',
'neutron' => 'neutron-api',
# starting with Mitaka (and later)
'nova-api' => 'nova-api',
# before Mitaka
'nova-api-2' => 'nova-api',
'nova-novncproxy' => 'nova-novncproxy-websocket',
'nova-metadata-api' => 'nova-metadata-api',
'sahara' => 'sahara-api',
'swift' => 'swift-api',
},
}
if $ceph_enabled {
class { 'lma_collector::collectd::ceph_mon': }
}
class { 'lma_collector::collectd::memcached':
host => $memcache_address,
}
class { 'lma_collector::collectd::apache': }
# TODO(all): This class is still called to ensure the sandbox deletion
# when upgrading the plugin. Can be removed for next release after 0.10.0.
class { 'lma_collector::logs::http_metrics': }
class { 'lma_collector::logs::aggregated_http_metrics': }
# Enable the Apache status module
class { 'fuel_lma_collector::mod_status': }
# AFD filters
class { 'lma_collector::afd::api': }
class { 'lma_collector::afd::workers': }
# VIP checks
if hiera('lma::collector::influxdb::server', false) {
$influxdb_server = hiera('lma::collector::influxdb::server')
$influxdb_port = hiera('lma::collector::influxdb::port')
$influxdb_url = "http://${influxdb_server}:${influxdb_port}/ping"
}
$vip_urls = {
'influxdb' => $influxdb_url,
}
$expected_codes = {
'influxdb' => 204,
}
class { 'lma_collector::collectd::http_check':
urls => delete_undef_values($vip_urls),
expected_codes => $expected_codes,
timeout => 1,
max_retries => 3,
pacemaker_master_resource => $pacemaker_master_resource,
}
}
$alerting_mode = $lma_collector['alerting_mode']

View File

@ -1,66 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

notice('fuel-plugin-lma-collector: lma_backends.pp')

prepare_network_config(hiera_hash('network_scheme', {}))

# collectd monitoring of the backend nodes is only relevant when an InfluxDB
# endpoint is configured.
if hiera('lma::collector::influxdb::server', false) {
  $network_metadata = hiera_hash('network_metadata')
  $node_profiles = hiera_hash('lma::collector::node_profiles')
  $influxdb_node = $node_profiles['influxdb']
  $elasticsearch_node = $node_profiles['elasticsearch']

  # Elasticsearch runs inside a JVM, so it is tracked by matching the 'java'
  # executable rather than by exact process name.
  if $elasticsearch_node {
    $process_matches = [{name => 'elasticsearch', regex => 'java'}]
  } else {
    $process_matches = undef
  }

  # InfluxDB-Grafana nodes also monitor their own backend daemons in
  # addition to the LMA collector processes.
  if $influxdb_node {
    $processes = ['influxd', 'grafana-server', 'hekad', 'collectd']
  } else {
    $processes = ['hekad', 'collectd']
  }

  class { 'lma_collector::collectd::base':
    processes       => $processes,
    process_matches => $process_matches,
  }

  if $influxdb_node {
    class { 'lma_collector::collectd::influxdb':
      username => 'root',
      password => hiera('lma::collector::influxdb::root_password'),
      address  => hiera('lma::collector::influxdb::listen_address'),
      port     => hiera('lma::collector::influxdb::influxdb_port', 8086),
    }
  }

  if $elasticsearch_node {
    class { 'lma_collector::collectd::elasticsearch':
      address => hiera('lma::collector::elasticsearch::listen_address'),
      port    => hiera('lma::collector::elasticsearch::rest_port', 9200),
    }
  }

  if $network_metadata['vips']['influxdb'] or $network_metadata['vips']['es_vip_mgmt'] {
    # Only when used with the version 0.9 (and higher) of the
    # Elasticsearch-Kibana and InfluxDB-Grafana plugins
    class { 'lma_collector::collectd::haproxy':
      socket => '/var/lib/haproxy/stats',
    }
  }
}

View File

@ -14,6 +14,7 @@
#
class lma_collector::afd::api () {
include lma_collector::params
include lma_collector::service::metric
$lua_modules_dir = $lma_collector::params::lua_modules_dir

View File

@ -14,6 +14,7 @@
#
class lma_collector::afd::workers () {
include lma_collector::params
include lma_collector::service::metric
$lua_modules_dir = $lma_collector::params::lua_modules_dir

View File

@ -49,13 +49,26 @@
reexecute_on:
- deploy_changes
- id: lma-collectd
type: puppet
version: 2.0.0
requires: [lma-base]
required_for: [post_deployment_end]
role: '*'
parameters:
puppet_manifest: puppet/manifests/collectd.pp
puppet_modules: puppet/modules:/etc/puppet/modules
timeout: 600
reexecute_on:
- deploy_changes
# All tasks lma-main-* must be executed before lma-aggregator. So we don't
# need to add a requirement to post_deployment_end because it is implied
# by the one to lma-aggregator.
- id: lma-main-controller
type: puppet
version: 2.0.0
requires: [lma-base]
requires: [lma-base, lma-collectd]
required_for: [lma-aggregator]
role: [controller, primary-controller]
parameters:
@ -68,7 +81,7 @@
- id: lma-main-compute
type: puppet
version: 2.0.0
requires: [lma-base]
requires: [lma-base, lma-collectd]
required_for: [lma-aggregator]
role: [compute]
parameters:
@ -81,7 +94,7 @@
- id: lma-main-cinder
type: puppet
version: 2.0.0
requires: [lma-base]
requires: [lma-base, lma-collectd]
required_for: [lma-aggregator]
role: [cinder]
parameters:
@ -91,23 +104,10 @@
reexecute_on:
- deploy_changes
- id: lma-main-ceph-osd
type: puppet
version: 2.0.0
requires: [lma-base]
required_for: [lma-aggregator]
role: [ceph-osd]
parameters:
puppet_manifest: puppet/manifests/ceph_osd.pp
puppet_modules: puppet/modules:/etc/puppet/modules
timeout: 600
reexecute_on:
- deploy_changes
- id: lma-aggregator
type: puppet
version: 2.0.0
requires: [lma-base]
requires: [lma-base, lma-collectd]
required_for: [post_deployment_end]
role: '*'
parameters:
@ -137,22 +137,6 @@
reexecute_on:
- deploy_changes
- id: lma-backends
type: puppet
version: 2.0.0
requires: [lma-base]
required_for: [lma-cleanup-apt-config]
role:
- primary-elasticsearch_kibana
- elasticsearch_kibana
- primary-influxdb_grafana
- influxdb_grafana
parameters:
puppet_manifest: puppet/manifests/lma_backends.pp
puppet_modules: puppet/modules:/etc/puppet/modules
timeout: 600
reexecute_on:
- deploy_changes
# This task must be executed at the very end of the deployment.
- id: lma-cleanup-apt-config