Playbooks to help with Telemetry testing.

* Sync NTP across nodes
* Install and set up the at daemon
* Raise HAProxy "defaults" maxconn
* Change the Ceilometer polling interval and switch between reduced metrics and "*" (all) metrics
* Sync Ceilometer polling across Controllers+Computes
* Adjust virtlogd max open files
* Adjust Gnocchi metricd workers and processing_delay
* Adjust httpd prefork settings
* Adjust Ceilometer rabbitmq messaging parameters
* Adjust Nova filter_scheduler parameters for "evenly" distributed instances
* Adjust Gnocchi API Processes/Threads
* CentOS tiny flavor (1 vCPU, 192MiB memory, 8G disk)
* Enable/Disable Telemetry Services
* Utility playbook to disable polling (useful to let the system
  "catch up" on a backlog of measures)
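
Example invocations (both taken verbatim from the playbook headers below):

  ansible-playbook -i hosts browbeat/adjustment-haproxy.yml -e 'old_maxconn=4096 new_maxconn=8192'
  ansible-playbook -i hosts browbeat/telemetry-polling.yml -e 'polling_interval=600 reduced_metrics=true'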

Change-Id: I119a5cfb479c6a742673244cd70a0df0cd2a869a
akrzos 2017-07-13 14:20:44 -04:00
parent 330a5c9129
commit 9d822e71c4
33 changed files with 1005 additions and 282 deletions

View File

@ -1,10 +1,11 @@
[defaults]
fact_caching_timeout = 86400
fact_caching = jsonfile
fact_caching_connection = /tmp/browbeat_fact_cache
gathering = smart
roles_path = ./browbeat/roles:./install/roles:
timeout = 30
callback_whitelist = profile_tasks
[ssh_connection]
# Load the specific ssh config file in this directory
ssh_args = -F ssh-config

View File

@ -0,0 +1,24 @@
---
#
# Playbook to adjust Apache prefork settings
#
# Example:
#
# ansible-playbook -i hosts browbeat/adjustment-httpd.yml -e 'httpd_startservers=8 httpd_minspareservers=5 httpd_maxspareservers=20 httpd_serverlimit=256 httpd_maxclients=256 httpd_maxrequestsperchild=4000'
#
- hosts: controller
remote_user: "{{ host_remote_user }}"
gather_facts: false
vars_files:
- ../install/group_vars/all.yml
vars:
# Defaults per Pike (OSP12)
httpd_startservers: 8
httpd_minspareservers: 5
httpd_maxspareservers: 20
httpd_serverlimit: 256
httpd_maxclients: 256
httpd_maxrequestsperchild: 4000
roles:
- apache-config

View File

@ -1,22 +1,67 @@
---
#
# Playbook to adjust Ceilometer configuration items and restart either the
# notification agent or the collector.
#
# Examples:
# ansible-playbook -i hosts browbeat/adjustment-ceilometer.yml -e "rabbit_qos_prefetch_count=64"
# ansible-playbook -i hosts browbeat/adjustment-ceilometer.yml -e "rabbit_qos_prefetch_count=64 executor_thread_pool_size=64"
# ansible-playbook -i hosts browbeat/adjustment-ceilometer.yml -e "rabbit_qos_prefetch_count=64 restart_notification=true"
# ansible-playbook -i hosts browbeat/adjustment-ceilometer.yml -e "rabbit_qos_prefetch_count=64 restart_collector=true"
#
# * Note: a variable that is left unset leaves that configuration item unchanged. Running the
# playbook with no variables set resets all configuration items to the defaults (Ocata):
#
# ansible-playbook -i hosts browbeat/adjustment-ceilometer.yml
#
- hosts: controller
remote_user: "{{ host_remote_user }}"
gather_facts: false
vars_files:
- ../install/group_vars/all.yml
vars:
restart_notification: false
restart_collector: false
# Create initial blank configuration list
ceilometer_configuration: []
# Defaults
default_rabbit_qos_prefetch_count: 0
default_executor_thread_pool_size: 64
# Each configuration item needs to be a list so it can be merged
rabbit_qos_prefetch_count_item: []
executor_thread_pool_size_item: []
pre_tasks:
- name: Set default rabbit_qos_prefetch_count and executor_thread_pool_size
set_fact:
ceilometer_configuration:
- section: oslo_messaging_rabbit
option: rabbit_qos_prefetch_count
value: "{{default_rabbit_qos_prefetch_count}}"
- section: DEFAULT
option: executor_thread_pool_size
value: "{{default_executor_thread_pool_size}}"
when: rabbit_qos_prefetch_count is undefined and executor_thread_pool_size is undefined
- name: Set rabbit_qos_prefetch_count configuration for Ceilometer
set_fact:
rabbit_qos_prefetch_count_item:
- section: oslo_messaging_rabbit
option: rabbit_qos_prefetch_count
value: "{{rabbit_qos_prefetch_count}}"
when: rabbit_qos_prefetch_count is defined
- name: Set executor_thread_pool_size configuration for Ceilometer
set_fact:
executor_thread_pool_size_item:
- section: DEFAULT
option: executor_thread_pool_size
value: "{{executor_thread_pool_size}}"
when: executor_thread_pool_size is defined
- name: Merge configuration items
set_fact:
ceilometer_configuration: "{{ceilometer_configuration + rabbit_qos_prefetch_count_item + executor_thread_pool_size_item }}"
roles:
- ceilometer-config
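
For example, invoking the playbook with -e "rabbit_qos_prefetch_count=64" skips the
defaults pre_task, so the merged list handed to the ceilometer-config role contains a
single entry (a sketch of the resulting fact):

ceilometer_configuration:
- section: oslo_messaging_rabbit
  option: rabbit_qos_prefetch_count
  value: "64"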

View File

@ -0,0 +1,20 @@
---
#
# Playbook to adjust Gnocchi API wsgi settings
#
# Examples:
# ansible-playbook -i hosts browbeat/adjustment-gnocchi-wsgi.yml -e "gnocchi_api_processes=24"
# ansible-playbook -i hosts browbeat/adjustment-gnocchi-wsgi.yml -e "gnocchi_api_processes=24 gnocchi_api_threads=6"
#
- hosts: controller
remote_user: "{{ host_remote_user }}"
gather_facts: false
vars_files:
- ../install/group_vars/all.yml
vars:
gnocchi_api_processes: 12
gnocchi_api_threads: 1
roles:
- gnocchi-api-config

View File

@ -0,0 +1,43 @@
---
#
# Playbook to adjust Gnocchi config options
#
# Example:
#
# ansible-playbook -i hosts browbeat/adjustment-gnocchi.yml -e 'metricd_workers=12 metric_processing_delay=60 processing_replicas=3'
#
- hosts: controller
remote_user: "{{ host_remote_user }}"
gather_facts: false
vars_files:
- ../install/group_vars/all.yml
vars:
metricd_workers: 12
metric_processing_delay: 60
processing_replicas: 3
tasks:
- name: Configure Gnocchi Options
become: true
ini_file:
dest: /etc/gnocchi/gnocchi.conf
mode: 0640
group: gnocchi
section: "{{item.section}}"
option: "{{item.option}}"
value: "{{item.value}}"
backup: yes
with_items:
- section: metricd
option: workers
value: "{{metricd_workers}}"
- section: metricd
option: metric_processing_delay
value: "{{metric_processing_delay}}"
- section: metricd
option: processing_replicas
value: "{{processing_replicas}}"
- name: Restart openstack-gnocchi-metricd
become: true
command: systemctl restart openstack-gnocchi-metricd

View File

@ -0,0 +1,31 @@
---
#
# Playbook to raise the maximum number of connections (maxconn) in the haproxy "defaults" section (vs "global")
#
# Examples:
#
# ansible-playbook -i hosts browbeat/adjustment-haproxy.yml -e 'old_maxconn=4096 new_maxconn=8192'
#
- hosts: controller
remote_user: "{{ host_remote_user }}"
gather_facts: false
vars_files:
- ../install/group_vars/all.yml
vars:
old_maxconn: 4096
new_maxconn: 8192
tasks:
- name: Adjusting haproxy maxconn
become: true
replace:
dest: /etc/haproxy/haproxy.cfg
regexp: " maxconn {{old_maxconn}}"
replace: " maxconn {{new_maxconn}}"
backup: true
- name: Reload haproxy
become: true
command: systemctl reload haproxy
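
For reference, a minimal sketch of the haproxy.cfg stanza this playbook edits; only the
maxconn value comes from the playbook defaults, the surrounding options are illustrative:

defaults
    mode tcp
    maxconn 4096
    timeout client 2m
    timeout server 2m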

View File

@ -1,12 +1,17 @@
---
#
# Playbook to adjust Nova allocation ratios
#
# Examples:
# ansible-playbook -i hosts browbeat/adjustment-nova-allocation.yml -e "cpu_allocation_ratio=24"
# ansible-playbook -i hosts browbeat/adjustment-nova-allocation.yml -e "cpu_allocation_ratio=24 ram_allocation_ratio=10.0"
# ansible-playbook -i hosts browbeat/adjustment-nova-allocation.yml -e "cpu_allocation_ratio=24 ram_allocation_ratio=10.0 disk_allocation_ratio=10.0"
#
# In order for new settings to take effect, you need to restart the Nova services
# by adding the variable restart_nova=true to the extra vars.
#
# ansible-playbook -i hosts browbeat/adjustment-nova-allocation.yml -e "cpu_allocation_ratio=24 ram_allocation_ratio=10.0 disk_allocation_ratio=10.0 restart_nova=true"
#
# * Note: a variable that is left unset leaves that configuration item unchanged. Running the
# playbook with no variables set resets all configuration items to the defaults (cpu/ram/disk - 16/1/1)
#
@ -14,9 +19,12 @@
#
- hosts: controller
remote_user: "{{ host_remote_user }}"
gather_facts: false
vars_files:
- ../install/group_vars/all.yml
vars:
restart_nova: false
# Create initial blank configuration list
nova_configuration: []
# Defaults

View File

@ -0,0 +1,98 @@
---
#
# Playbook to adjust Nova Scheduler settings to avoid over-scheduling hosts
# with greater memory in uneven memory environments.
#
# Examples:
# ansible-playbook -i hosts browbeat/adjustment-nova-scheduler.yml -e 'max_instances_per_host=350'
# ansible-playbook -i hosts browbeat/adjustment-nova-scheduler.yml -e 'max_instances_per_host=350 ram_weight_multiplier=0'
# ansible-playbook -i hosts browbeat/adjustment-nova-scheduler.yml -e 'max_instances_per_host=350 ram_weight_multiplier=0 enabled_filters="NumInstancesFilter,RetryFilter,RamFilter,ComputeFilter"'
# ansible-playbook -i hosts browbeat/adjustment-nova-scheduler.yml -e 'max_instances_per_host=350 ram_weight_multiplier=0 enabled_filters="NumInstancesFilter,RetryFilter,RamFilter,ComputeFilter" host_subset_size=4'
#
# In order for new settings to take effect, you need to restart the Nova services
# by adding the variable restart_nova=true to the extra vars.
#
# ansible-playbook -i hosts browbeat/adjustment-nova-scheduler.yml -e 'max_instances_per_host=350 ram_weight_multiplier=0 enabled_filters="NumInstancesFilter,RetryFilter,RamFilter,ComputeFilter" host_subset_size=4 restart_nova=true'
#
# * Note: a variable that is left unset leaves that configuration item unchanged. Running the
# playbook with no variables set resets all configuration items to the defaults (Ocata)
#
# ansible-playbook -i hosts browbeat/adjustment-nova-scheduler.yml
#
- hosts: controller
remote_user: "{{ host_remote_user }}"
gather_facts: false
vars_files:
- ../install/group_vars/all.yml
vars:
restart_nova: false
# Create initial blank configuration list
nova_configuration: []
# Defaults
default_max_instances_per_host: 50
default_ram_weight_multiplier: 1.0
default_enabled_filters: "RetryFilter,AvailabilityZoneFilter,RamFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter"
default_host_subset_size: 1
# Each configuration item needs to be a list so it can be merged
max_instances_per_host_item: []
ram_weight_multiplier_item: []
enabled_filters_item: []
host_subset_size_item: []
pre_tasks:
- name: Set default max_instances_per_host, ram_weight_multiplier, enabled_filters, and host_subset_size
set_fact:
nova_configuration:
- section: filter_scheduler
option: max_instances_per_host
value: "{{default_max_instances_per_host}}"
- section: filter_scheduler
option: ram_weight_multiplier
value: "{{default_ram_weight_multiplier}}"
- section: filter_scheduler
option: enabled_filters
value: "{{default_enabled_filters}}"
- section: filter_scheduler
option: host_subset_size
value: "{{default_host_subset_size}}"
when: max_instances_per_host is undefined and ram_weight_multiplier is undefined and enabled_filters is undefined and host_subset_size is undefined
- name: Set max_instances_per_host configuration for Nova
set_fact:
max_instances_per_host_item:
- section: filter_scheduler
option: max_instances_per_host
value: "{{max_instances_per_host}}"
when: max_instances_per_host is defined
- name: Set ram_weight_multiplier configuration for Nova
set_fact:
ram_weight_multiplier_item:
- section: filter_scheduler
option: ram_weight_multiplier
value: "{{ram_weight_multiplier}}"
when: ram_weight_multiplier is defined
- name: Set enabled_filters configuration for Nova
set_fact:
enabled_filters_item:
- section: filter_scheduler
option: enabled_filters
value: "{{enabled_filters}}"
when: enabled_filters is defined
- name: Set host_subset_size configuration for Nova
set_fact:
host_subset_size_item:
- section: filter_scheduler
option: host_subset_size
value: "{{host_subset_size}}"
when: host_subset_size is defined
- name: Merge configuration items
set_fact:
nova_configuration: "{{nova_configuration + max_instances_per_host_item + ram_weight_multiplier_item + enabled_filters_item + host_subset_size_item }}"
roles:
- nova-config

View File

@ -0,0 +1,28 @@
---
#
# Playbook to install and enable atd
#
# This allows you to synchronize a script/command across multiple machines.
# Example: synchronized restarting of ceilometer polling across computes
# and controllers.
#
- hosts: overcloud
remote_user: "{{ host_remote_user }}"
gather_facts: false
vars_files:
- ../install/group_vars/all.yml
roles:
- repo
tasks:
- name: Install at
yum:
name: at
become: true
- name: Start atd
service:
name: atd
enabled: true
state: restarted
become: true
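
Once atd is running everywhere, the same command can be queued for the same wall-clock
minute on every node (assuming NTP was synced first), for example with an ad-hoc command;
the service name and time here mirror the sync-ceilometer-polling playbook below:

ansible -i hosts overcloud -b -m shell -a "echo 'systemctl restart openstack-ceilometer-polling.service' | at 18:25"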

View File

@ -0,0 +1,17 @@
---
#
# Playbook to force ntp time sync
#
# Example:
#
# ansible-playbook -i hosts browbeat/ntp-sync.yml -e 'ntp_server=clock.walkabout.com'
#
- hosts: overcloud
remote_user: "{{ host_remote_user }}"
vars_files:
- ../install/group_vars/all.yml
tasks:
- name: Sync NTP Time
command: ntpdate -u {{ntp_server}}
become: true

View File

@ -0,0 +1,18 @@
---
#
# Tasks to deploy new prefork.conf settings for httpd
#
- name: Push new prefork.conf
become: true
template:
src: prefork.conf.j2
dest: /etc/httpd/conf.modules.d/prefork.conf
mode: 0644
owner: root
group: root
backup: true
- name: Restart httpd
become: true
command: systemctl restart httpd

View File

@ -0,0 +1,17 @@
# Deployed by Browbeat
<IfModule mpm_prefork_module>
StartServers {{httpd_startservers}}
MinSpareServers {{httpd_minspareservers}}
MaxSpareServers {{httpd_maxspareservers}}
ServerLimit {{httpd_serverlimit}}
MaxClients {{httpd_maxclients}}
MaxRequestsPerChild {{httpd_maxrequestsperchild}}
</IfModule>
# Defaults:
# httpd_startservers: 8
# httpd_minspareservers: 5
# httpd_maxspareservers: 20
# httpd_serverlimit: 256
# httpd_maxclients: 256
# httpd_maxrequestsperchild: 4000

View File

@ -1,49 +0,0 @@
---
#
# Ceilometer change backend handlers
#
- name: pacemaker default unmanaged
become: true
command: pcs property set is-managed-default=false
when: pacemaker_controlled
- name: restart ceilometer services
become: true
service: name={{item}} state=restarted
with_items:
- openstack-ceilometer-api
- openstack-ceilometer-central
- openstack-ceilometer-collector
- openstack-ceilometer-notification
when: pacemaker_controlled
- name: restart gnocchi services
become: true
service: name={{item}} state=restarted
with_items:
- openstack-gnocchi-metricd
- openstack-gnocchi-statsd
- name: pacemaker default managed
become: true
command: pcs property set is-managed-default=true
when: pacemaker_controlled
- name: pacemaker cleanup ceilometer
become: true
command: pcs resource cleanup {{item}}
with_items:
- openstack-ceilometer-api
- openstack-ceilometer-central
- openstack-ceilometer-collector
- openstack-ceilometer-notification
when: inventory_hostname == groups['controller'][0] and pacemaker_controlled
- name: pacemaker cleanup gnocchi
become: true
command: pcs resource cleanup {{item}}
with_items:
- openstack-gnocchi-metricd
- openstack-gnocchi-statsd
when: inventory_hostname == groups['controller'][0] and pacemaker_controlled

View File

@ -1,34 +0,0 @@
---
#
# Ceilometer tasks for browbeat
# * Change backend between database and gnocchi
#
- name: Get current backend(s)
become: true
command: crudini --get /etc/ceilometer/ceilometer.conf DEFAULT meter_dispatchers
register: current_backend
- debug: msg="Current Backend={{current_backend.stdout}}"
- name: Configure Ceilometer Backend
become: true
ini_file:
dest: /etc/ceilometer/ceilometer.conf
mode: 0640
section: DEFAULT
option: meter_dispatchers
value: "{{ ceilometer_backend }}"
backup: yes
notify:
- pacemaker default unmanaged
- restart ceilometer services
- restart gnocchi services
- pacemaker default managed
- pacemaker cleanup ceilometer
- pacemaker cleanup gnocchi
- name: Configure for gnocchi
become: true
when: "('{{ceilometer_backend}}' == 'gnocchi') and (inventory_hostname == groups['controller'][0])"
shell: gnocchi-upgrade --create-legacy-resource-types

View File

@ -0,0 +1,27 @@
---
#
# Configure ceilometer.conf tasks
#
- name: Configure ceilometer.conf
become: true
ini_file:
dest: /etc/ceilometer/ceilometer.conf
mode: 0640
group: ceilometer
section: "{{ item.section }}"
option: "{{ item.option }}"
value: "{{ item.value }}"
backup: yes
with_items:
- "{{ceilometer_configuration}}"
- name: Restart Ceilometer Agent Notification
become: true
command: systemctl restart openstack-ceilometer-notification
when: restart_notification
- name: Restart Ceilometer Collector
become: true
command: systemctl restart openstack-ceilometer-collector
when: restart_collector

View File

@ -1,24 +0,0 @@
---
#
# Ceilometer Interval handlers
#
- name: pacemaker unmanage openstack-ceilometer-central
become: true
command: pcs resource unmanage openstack-ceilometer-central
when: inventory_hostname in groups['controller']
- name: restart openstack-ceilometer-central
become: true
service: name=openstack-ceilometer-central state=restarted
when: inventory_hostname in groups['controller']
- name: pacemaker manage openstack-ceilometer-central
become: true
command: pcs resource manage openstack-ceilometer-central
when: inventory_hostname in groups['controller']
- name: restart openstack-ceilometer-compute
become: true
service: name=openstack-ceilometer-compute state=restarted
when: inventory_hostname in groups['compute']

View File

@ -1,19 +0,0 @@
---
#
# Ceilometer interval tasks for Browbeat
#
- name: Deploy pipeline.yaml files
become: true
template:
src: pipeline.yaml.j2
dest: /etc/ceilometer/pipeline.yaml
owner: root
group: ceilometer
mode: 0640
backup: true
notify:
- pacemaker unmanage openstack-ceilometer-central
- restart openstack-ceilometer-central
- pacemaker manage openstack-ceilometer-central
- restart openstack-ceilometer-compute

View File

@ -1,92 +0,0 @@
---
sources:
- name: meter_source
interval: {{ceilometer_interval}}
meters:
- "*"
sinks:
- meter_sink
- name: cpu_source
interval: {{ceilometer_interval}}
meters:
- "cpu"
sinks:
- cpu_sink
- cpu_delta_sink
- name: disk_source
interval: {{ceilometer_interval}}
meters:
- "disk.read.bytes"
- "disk.read.requests"
- "disk.write.bytes"
- "disk.write.requests"
- "disk.device.read.bytes"
- "disk.device.read.requests"
- "disk.device.write.bytes"
- "disk.device.write.requests"
sinks:
- disk_sink
- name: network_source
interval: {{ceilometer_interval}}
meters:
- "network.incoming.bytes"
- "network.incoming.packets"
- "network.outgoing.bytes"
- "network.outgoing.packets"
sinks:
- network_sink
sinks:
- name: meter_sink
transformers:
publishers:
- notifier://
- name: cpu_sink
transformers:
- name: "rate_of_change"
parameters:
target:
name: "cpu_util"
unit: "%"
type: "gauge"
scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))"
publishers:
- notifier://
- name: cpu_delta_sink
transformers:
- name: "delta"
parameters:
target:
name: "cpu.delta"
growth_only: True
publishers:
- notifier://
- name: disk_sink
transformers:
- name: "rate_of_change"
parameters:
source:
map_from:
name: "(disk\\.device|disk)\\.(read|write)\\.(bytes|requests)"
unit: "(B|request)"
target:
map_to:
name: "\\1.\\2.\\3.rate"
unit: "\\1/s"
type: "gauge"
publishers:
- notifier://
- name: network_sink
transformers:
- name: "rate_of_change"
parameters:
source:
map_from:
name: "network\\.(incoming|outgoing)\\.(bytes|packets)"
unit: "(B|packet)"
target:
map_to:
name: "network.\\1.\\2.rate"
unit: "\\1/s"
type: "gauge"
publishers:
- notifier://

View File

@ -0,0 +1,26 @@
---
#
# Deploy the Ceilometer polling.yaml file
#
- name: Deploy polling.yaml file
become: true
template:
src: polling.yaml.j2
dest: /etc/ceilometer/polling.yaml
owner: root
group: ceilometer
mode: 0640
backup: true
when: not reduced_metrics | bool
- name: Deploy the reduced metrics polling.yaml file
become: true
template:
src: reduced_polling.yaml.j2
dest: /etc/ceilometer/polling.yaml
owner: root
group: ceilometer
mode: 0640
backup: true
when: reduced_metrics | bool

View File

@ -0,0 +1,6 @@
---
sources:
- name: some_pollsters
interval: {{polling_interval}}
meters:
- "*"
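
Rendered with polling_interval=300 (the default set in telemetry-polling.yml further
below), this template produces:

---
sources:
- name: some_pollsters
  interval: 300
  meters:
  - "*"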

View File

@ -0,0 +1,26 @@
---
sources:
- name: some_pollsters
interval: {{polling_interval}}
meters:
- cpu
- memory.usage
- network.incoming.bytes
- network.incoming.packets
- network.outgoing.bytes
- network.outgoing.packets
- disk.read.bytes
- disk.read.requests
- disk.write.bytes
- disk.write.requests
- hardware.cpu.util
- hardware.memory.used
- hardware.memory.total
- hardware.memory.buffer
- hardware.memory.cached
- hardware.memory.swap.avail
- hardware.memory.swap.total
- hardware.system_stats.io.outgoing.blocks
- hardware.system_stats.io.incoming.blocks
- hardware.network.ip.incoming.datagrams
- hardware.network.ip.outgoing.datagrams

View File

@ -0,0 +1,23 @@
---
#
# Tasks to reconfigure Gnocchi API wsgi service
#
- name: Get internal API address
become: true
shell: "grep {{inventory_hostname}}.internalapi.localdomain /etc/hosts | awk '{print $1}'"
register: internal_api_ip
- name: Push new 10-gnocchi_wsgi.conf
become: true
template:
src: gnocchi_wsgi.conf.j2
dest: /etc/httpd/conf.d/10-gnocchi_wsgi.conf
mode: 0640
owner: root
group: root
backup: true
- name: Restart Gnocchi API (httpd)
become: true
command: systemctl restart httpd

View File

@ -0,0 +1,26 @@
# Browbeat Deployed Gnocchi API wsgi config
<VirtualHost {{internal_api_ip.stdout}}:8041>
ServerName {{inventory_hostname}}.internalapi.localdomain
## Vhost docroot
DocumentRoot "/var/www/cgi-bin/gnocchi"
## Directories, there should at least be a declaration for /var/www/cgi-bin/gnocchi
<Directory "/var/www/cgi-bin/gnocchi">
Options Indexes FollowSymLinks MultiViews
AllowOverride None
Require all granted
</Directory>
## Logging
ErrorLog "/var/log/httpd/gnocchi_wsgi_error.log"
ServerSignature Off
CustomLog "/var/log/httpd/gnocchi_wsgi_access.log" combined
SetEnvIf X-Forwarded-Proto https HTTPS=1
WSGIApplicationGroup %{GLOBAL}
WSGIDaemonProcess gnocchi display-name=gnocchi_wsgi group=gnocchi processes={{gnocchi_api_processes}} threads={{gnocchi_api_threads}} user=gnocchi
WSGIProcessGroup gnocchi
WSGIScriptAlias / "/var/www/cgi-bin/gnocchi/app"
</VirtualHost>

View File

@ -1,42 +0,0 @@
---
#
# Nova handlers for browbeat adjustment
#
- name: unmanage nova services
become: true
command: pcs resource unmanage {{ item }}
with_items:
- openstack-nova-api
- openstack-nova-scheduler
- openstack-nova-conductor
ignore_errors: true
when: pacemaker_controlled
- name: restart nova services
become: true
service: name={{ item }} state=restarted
with_items:
- openstack-nova-api
- openstack-nova-scheduler
- openstack-nova-conductor
- name: manage nova services
become: true
command: pcs resource manage {{ item }}
with_items:
- openstack-nova-api
- openstack-nova-scheduler
- openstack-nova-conductor
ignore_errors: true
when: pacemaker_controlled
- name: cleanup nova services
become: true
command: pcs resource cleanup {{ item }}
with_items:
- openstack-nova-api
- openstack-nova-scheduler
- openstack-nova-conductor
ignore_errors: true
when: pacemaker_controlled

View File

@ -8,14 +8,24 @@
ini_file:
dest: /etc/nova/nova.conf
mode: 0640
group: nova
section: "{{ item.section }}"
option: "{{ item.option }}"
value: "{{ item.value }}"
backup: yes
with_items:
- "{{nova_configuration}}"
- name: Restart Nova Services
become: true
service:
name: "{{ item }}"
state: restarted
with_items:
- openstack-nova-scheduler
- openstack-nova-api
- openstack-nova-conductor
- openstack-nova-novncproxy
- openstack-nova-consoleauth
- httpd
when: restart_nova

View File

@ -0,0 +1,20 @@
---
#
# Tasks to kick a task off at a specific time using at daemon
#
- name: Create job file
become: true
shell: "echo '#!/bin/bash\n {{the_task}} '>/root/browbeat-sync.sh"
- name: Set execute on file
become: true
file:
path: /root/browbeat-sync.sh
owner: root
group: root
mode: 0744
- name: Create at job
become: true
command: "at -f /root/browbeat-sync.sh {{task_time}}"
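
With the_task and task_time values from the sync-ceilometer-polling example below, these
three tasks are equivalent to running the following on each node (a sketch):

echo '#!/bin/bash
systemctl restart openstack-ceilometer-polling.service' > /root/browbeat-sync.sh
chmod 0744 /root/browbeat-sync.sh
at -f /root/browbeat-sync.sh 18:25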

View File

@ -0,0 +1,47 @@
---
#
# Playbook to prevent virtlogd from running out of files when scaling instances
# on a compute node.
#
# With OS defaults, virtlogd will prevent more than 252 instances per compute
# node due to the maximum number of files it can open.
#
# Example:
#
# ansible-playbook -i hosts browbeat/scale-virtlogd.yml -e 'max_open_files=10000'
#
- hosts: compute
remote_user: "{{ host_remote_user }}"
gather_facts: false
vars_files:
- ../install/group_vars/all.yml
vars:
max_open_files: 10000
tasks:
# Virtlogd
- name: Replace max open files setting for virtlogd
become: true
replace:
dest: "/usr/lib/systemd/system/virtlogd.service"
regexp: "LimitNOFILE=[0-9]+"
replace: "LimitNOFILE={{max_open_files}}"
backup: true
ignore_errors: true
register: replace_output
- name: Override max open files for virtlogd
become: true
lineinfile:
dest: "/usr/lib/systemd/system/virtlogd.service"
line: "LimitNOFILE={{max_open_files}}"
insertafter: "OOMScoreAdjust=-900"
when: replace_output.changed != true
- name: Issue daemon-reload
become: true
command: systemctl daemon-reload
- name: Restart virtlogd
become: true
command: systemctl restart virtlogd
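
A quick way to verify the new limit took effect (not part of the playbook):

systemctl show virtlogd --property=LimitNOFILE
ls /proc/$(pidof virtlogd)/fd | wc -l    # current number of open files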

View File

@ -0,0 +1,44 @@
---
#
# Playbook to sync ceilometer polling across the controller and compute nodes
#
# Example:
#
# ansible-playbook -i hosts browbeat/sync-ceilometer-polling.yml -e 'task_time=18:25'
#
# Pike
- hosts: controller,compute
remote_user: "{{ host_remote_user }}"
gather_facts: false
vars_files:
- ../install/group_vars/all.yml
pre_tasks:
- name: Get OSP Version
slurp:
src: "/etc/rhosp-release"
register: osp_version
become: true
- name: (Ocata) Set Controller Task
set_fact:
the_task: "systemctl restart openstack-ceilometer-central.service"
when: "('Ocata' in osp_version['content'] | b64decode) and ('controller' in group_names)"
- name: (Ocata) Set Compute Task
set_fact:
the_task: "systemctl restart openstack-ceilometer-compute.service"
when: "('Ocata' in osp_version['content'] | b64decode) and ('compute' in group_names)"
- name: (Pike) Set Controller Task
set_fact:
the_task: "systemctl restart openstack-ceilometer-polling.service"
when: "('Pike' in osp_version['content'] | b64decode) and ('controller' in group_names)"
- name: (Pike) Set Compute Task
set_fact:
the_task: "systemctl restart openstack-ceilometer-polling.service"
when: "('Pike' in osp_version['content'] | b64decode) and ('compute' in group_names)"
roles:
- run-task-at

View File

@ -0,0 +1,44 @@
---
#
# Playbook to quickly disable polling across all overcloud nodes.
#
# Cuts off the flow of measures into the backlog.
#
- hosts: controller, compute
remote_user: "{{ host_remote_user }}"
gather_facts: false
vars_files:
- ../install/group_vars/all.yml
tasks:
- name: Get OSP Version
slurp:
src: "/etc/rhosp-release"
register: osp_version
become: true
- name: (Ocata) Set Controller Polling Daemon
set_fact:
polling_daemon: "openstack-ceilometer-central.service"
when: "('Ocata' in osp_version['content'] | b64decode) and ('controller' in group_names)"
- name: (Ocata) Set Compute Polling Daemon
set_fact:
polling_daemon: "openstack-ceilometer-compute.service"
when: "('Ocata' in osp_version['content'] | b64decode) and ('compute' in group_names)"
- name: (Pike) Set Controller Polling Daemon
set_fact:
polling_daemon: "openstack-ceilometer-polling.service"
when: "('Pike' in osp_version['content'] | b64decode) and ('controller' in group_names)"
- name: (Pike) Set Compute Polling Daemon
set_fact:
polling_daemon: "openstack-ceilometer-polling.service"
when: "('Pike' in osp_version['content'] | b64decode) and ('compute' in group_names)"
- name: Stopping Ceilometer Polling Daemon
become: true
command: "systemctl stop {{item}}"
with_items:
- "{{polling_daemon}}"
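
Once the backlog has drained, polling can be resumed by starting the daemon again, e.g.
(daemon name per the version/group logic above):

ansible -i hosts overcloud -b -m shell -a "systemctl start openstack-ceilometer-polling.service"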

View File

@ -0,0 +1,150 @@
---
#
# Playbook to disable Telemetry Services
#
- hosts: controller, compute
remote_user: "{{ host_remote_user }}"
gather_facts: false
vars_files:
- ../install/group_vars/all.yml
tasks:
- name: Get OSP Version
slurp:
src: "/etc/rhosp-release"
register: osp_version
become: true
- name: (Ocata) Set Telemetry Controller Services
set_fact:
controller_services:
- openstack-aodh-evaluator.service
- openstack-aodh-listener.service
- openstack-aodh-notifier.service
- openstack-ceilometer-notification
- openstack-ceilometer-collector
- openstack-gnocchi-metricd.service
- openstack-gnocchi-statsd.service
- openstack-ceilometer-central.service
when: "'Ocata' in osp_version['content'] | b64decode"
- name: (Ocata) Set Compute Polling Daemon
set_fact:
compute_services:
- "openstack-ceilometer-compute.service"
when: "'Ocata' in osp_version['content'] | b64decode"
- name: (Pike) Set Telemetry Controller Services
set_fact:
controller_services:
- openstack-aodh-evaluator.service
- openstack-aodh-listener.service
- openstack-aodh-notifier.service
- openstack-ceilometer-notification
- openstack-gnocchi-metricd.service
- openstack-gnocchi-statsd.service
- openstack-ceilometer-polling.service
when: "'Pike' in osp_version['content'] | b64decode"
- name: (Pike) Set Compute Polling Daemon
set_fact:
compute_services:
- "openstack-ceilometer-polling.service"
when: "'Pike' in osp_version['content'] | b64decode"
- name: Copy HTTPD wsgi service config files in order to temporarily disable them
become: true
copy:
remote_src: true
src: "{{item.src}}"
dest: "{{item.dest}}"
with_items:
- src: /etc/httpd/conf.d/10-aodh_wsgi.conf
dest: /root/10-aodh_wsgi.conf
- src: /etc/httpd/conf.d/10-ceilometer_wsgi.conf
dest: /root/10-ceilometer_wsgi.conf
- src: /etc/httpd/conf.d/10-gnocchi_wsgi.conf
dest: /root/10-gnocchi_wsgi.conf
- src: /etc/httpd/conf.d/10-panko_wsgi.conf
dest: /root/10-panko_wsgi.conf
when: "'controller' in group_names"
- name: Delete HTTPD wsgi service config files in order to temporarily disable them
become: true
file:
path: "{{item}}"
state: absent
with_items:
- "/etc/httpd/conf.d/10-aodh_wsgi.conf"
- "/etc/httpd/conf.d/10-ceilometer_wsgi.conf"
- "/etc/httpd/conf.d/10-gnocchi_wsgi.conf"
- "/etc/httpd/conf.d/10-panko_wsgi.conf"
when: "'controller' in group_names"
- name: Stopping Telemetry Controller Services
become: true
command: "systemctl stop {{item}}"
with_items: "{{controller_services}}"
when: "'controller' in group_names"
- name: Stopping Telemetry Compute Services
become: true
command: "systemctl stop {{item}}"
with_items: "{{compute_services}}"
when: "'compute' in group_names"
- name: Setting Nova Notification Driver to noop
become: true
ini_file:
dest: "{{item.dest}}"
mode: 0640
group: "{{item.group}}"
section: oslo_messaging_notifications
option: driver
value: noop
backup: yes
with_items:
- dest: /etc/nova/nova.conf
group: nova
- name: Setting Notification Driver to noop
become: true
ini_file:
dest: "{{item.dest}}"
mode: 0640
group: "{{item.group}}"
section: oslo_messaging_notifications
option: driver
value: noop
backup: yes
with_items:
- dest: /etc/cinder/cinder.conf
group: cinder
- dest: /etc/glance/glance-api.conf
group: glance
- dest: /etc/heat/heat.conf
group: heat
- dest: /etc/keystone/keystone.conf
group: keystone
- dest: /etc/neutron/neutron.conf
group: neutron
when: "'controller' in group_names"
- name: Restart Controller Services to disable notifications
become: true
command: "systemctl restart {{item}}"
with_items:
- openstack-cinder-scheduler.service
- openstack-glance-api.service
- openstack-heat-engine.service
- neutron-server.service
- openstack-nova-api.service
- openstack-nova-conductor.service
- openstack-nova-scheduler.service
- httpd
when: "'controller' in group_names"
- name: Restart Nova Compute Service to disable notifications
become: true
command: "systemctl restart openstack-nova-compute.service"
when: "'compute' in group_names"

View File

@ -0,0 +1,155 @@
---
#
# Playbook to reverse disable Telemetry Services playbook
#
- hosts: controller, compute
remote_user: "{{ host_remote_user }}"
gather_facts: false
vars_files:
- ../install/group_vars/all.yml
tasks:
- name: Get OSP Version
slurp:
src: "/etc/rhosp-release"
register: osp_version
become: true
- name: (Ocata) Set Telemetry Controller Services
set_fact:
controller_services:
- openstack-aodh-evaluator.service
- openstack-aodh-listener.service
- openstack-aodh-notifier.service
- openstack-ceilometer-notification
- openstack-ceilometer-collector
- openstack-gnocchi-metricd.service
- openstack-gnocchi-statsd.service
- openstack-ceilometer-central.service
when: "'Ocata' in osp_version['content'] | b64decode"
- name: (Ocata) Set Compute Polling Daemon
set_fact:
compute_services:
- "openstack-ceilometer-compute.service"
when: "'Ocata' in osp_version['content'] | b64decode"
- name: (Pike) Set Telemetry Controller Services
set_fact:
controller_services:
- openstack-aodh-evaluator.service
- openstack-aodh-listener.service
- openstack-aodh-notifier.service
- openstack-ceilometer-notification
- openstack-gnocchi-metricd.service
- openstack-gnocchi-statsd.service
- openstack-ceilometer-polling.service
when: "'Pike' in osp_version['content'] | b64decode"
- name: (Pike) Set Compute Polling Daemon
set_fact:
compute_services:
- "openstack-ceilometer-polling.service"
when: "'Pike' in osp_version['content'] | b64decode"
- name: Copy HTTPD wsgi service config files back to /etc/httpd/conf.d
become: true
copy:
remote_src: true
src: "{{item.src}}"
dest: "{{item.dest}}"
with_items:
- src: /root/10-aodh_wsgi.conf
dest: /etc/httpd/conf.d/10-aodh_wsgi.conf
- src: /root/10-ceilometer_wsgi.conf
dest: /etc/httpd/conf.d/10-ceilometer_wsgi.conf
- src: /root/10-gnocchi_wsgi.conf
dest: /etc/httpd/conf.d/10-gnocchi_wsgi.conf
- src: /root/10-panko_wsgi.conf
dest: /etc/httpd/conf.d/10-panko_wsgi.conf
when: "'controller' in group_names"
- name: Clean Up HTTPD wsgi service config files in /root
become: true
file:
path: "{{item}}"
state: absent
with_items:
- "/root/10-aodh_wsgi.conf"
- "/root/10-ceilometer_wsgi.conf"
- "/root/10-gnocchi_wsgi.conf"
- "/root/10-panko_wsgi.conf"
when: "'controller' in group_names"
- name: Starting Telemetry Controller Services
become: true
command: "systemctl start {{item}}"
with_items: "{{controller_services}}"
when: "'controller' in group_names"
- name: Starting Telemetry Compute Service(s)
become: true
command: "systemctl start {{item}}"
with_items: "{{compute_services}}"
when: "'compute' in group_names"
- name: Setting Nova Notification Driver to messagingv2
become: true
ini_file:
dest: "{{item.dest}}"
mode: 0640
group: "{{item.group}}"
section: oslo_messaging_notifications
option: driver
value: messagingv2
backup: yes
with_items:
- dest: /etc/nova/nova.conf
group: nova
- name: Setting Notification Driver to messagingv2/messaging
become: true
ini_file:
dest: "{{item.dest}}"
mode: 0640
group: "{{item.group}}"
section: oslo_messaging_notifications
option: driver
value: "{{item.value}}"
backup: yes
with_items:
- dest: /etc/cinder/cinder.conf
group: cinder
value: messagingv2
- dest: /etc/glance/glance-api.conf
group: glance
value: messagingv2
- dest: /etc/heat/heat.conf
group: heat
value: messaging
- dest: /etc/keystone/keystone.conf
group: keystone
value: messaging
- dest: /etc/neutron/neutron.conf
group: neutron
value:
when: "'controller' in group_names"
- name: Restart Controller Services to enable notifications
become: true
command: "systemctl restart {{item}}"
with_items:
- openstack-cinder-scheduler.service
- openstack-glance-api.service
- openstack-heat-engine.service
- neutron-server.service
- openstack-nova-api.service
- openstack-nova-conductor.service
- openstack-nova-scheduler.service
- httpd
when: "'controller' in group_names"
- name: Restart Nova Compute Services to enable notifications
become: true
command: "systemctl restart openstack-nova-compute.service"
when: "'compute' in group_names"

View File

@ -0,0 +1,25 @@
---
#
# Playbook to change telemetry's polling yaml
#
# Deploy polling.yaml with specific interval:
#
# ansible-playbook -i hosts browbeat/telemetry-polling.yml -e 'polling_interval=600'
#
# Deploy with reduced metrics:
#
# ansible-playbook -i hosts browbeat/telemetry-polling.yml -e 'polling_interval=600 reduced_metrics=true'
#
# Remember to sync polling daemons afterwards (Restarting openstack-ceilometer-[central,compute])
#
- hosts: controller,compute
remote_user: "{{ host_remote_user }}"
gather_facts: false
vars_files:
- ../install/group_vars/all.yml
vars:
polling_interval: 300
reduced_metrics: false
roles:
- ceilometer-polling

View File

@ -8,6 +8,10 @@ browbeat_flavors:
cpu: 1
memory: 64
disk: 1
- name: m1.tiny-centos
cpu: 1
memory: 192
disk: 8
- name: m1.tiny
cpu: 1
memory: 512
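
For the new m1.tiny-centos entry, the equivalent manual command (a sketch of what the
install playbooks do with these values) would be:

openstack flavor create --vcpus 1 --ram 192 --disk 8 m1.tiny-centos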