Adding ObjectStorage Node type

+ Distinguishes between Object and Ceph Storage Nodes
+ Collectd config for ObjectStorage Nodes
+ Small fixes to generate_tripleo_hostfile.sh and controller collectd config
+ Added ObjectStorage Nodes to the Dashboards

Change-Id: I6f59a743bc9dda141ea96f8559b137828ffa72b9
This commit is contained in:
akrzos 2017-01-03 11:12:10 -05:00
parent 8e3afe378c
commit 1f8059f18a
14 changed files with 355 additions and 54 deletions

View File

@ -27,18 +27,37 @@ if [ ${#controller_id} -lt 1 ]; then
echo "Error: Controller ID is not reporting correctly. Please see check the openstack-heat-api on the undercloud."
exit 1
fi
objectstorage_id=$(ssh -tt -o "UserKnownHostsFile /dev/null" -o "StrictHostKeyChecking no" stack@${tripleo_ip_address} ". ~/stackrc; openstack stack resource show overcloud ObjectStorage > >(grep physical_resource_id) 2>/dev/null" | awk '{print $4}')
if [ ${#objectstorage_id} -lt 1 ]; then
echo "Error: ObjectStorage ID is not reporting correctly. Please see check the openstack-heat-api on the undercloud."
exit 1
fi
cephstorage_id=$(ssh -tt -o "UserKnownHostsFile /dev/null" -o "StrictHostKeyChecking no" stack@${tripleo_ip_address} ". ~/stackrc; openstack stack resource show overcloud CephStorage > >(grep physical_resource_id) 2>/dev/null" | awk '{print $4}')
if [ ${#cephstorage_id} -lt 1 ]; then
echo "Error: CephStorage ID is not reporting correctly. Please see check the openstack-heat-api on the undercloud."
exit 1
fi
compute_id=$(ssh -tt -o "UserKnownHostsFile /dev/null" -o "StrictHostKeyChecking no" stack@${tripleo_ip_address} ". ~/stackrc; openstack stack resource show overcloud Compute > >(grep physical_resource_id) 2>/dev/null" | awk '{print $4}')
if [ ${#controller_id} -lt 1 ]; then
if [ ${#compute_id} -lt 1 ]; then
echo "Error: Compute ID is not reporting correctly. Please see check the openstack-heat-api on the undercloud."
exit 1
fi
controller_ids=$(ssh -tt -o "UserKnownHostsFile /dev/null" -o "StrictHostKeyChecking no" stack@${tripleo_ip_address} ". ~/stackrc; openstack stack resource list ${controller_id} > >(grep -i controller) 2>/dev/null" | awk '{print $2}')
if [ ${#controller_id} -lt 1 ]; then
if [ ${#controller_ids} -lt 1 ]; then
echo "Error: Controller IDs is not reporting correctly. Please see check the openstack-heat-api on the undercloud."
exit 1
fi
objectstorage_ids=$(ssh -tt -o "UserKnownHostsFile /dev/null" -o "StrictHostKeyChecking no" stack@${tripleo_ip_address} ". ~/stackrc; openstack stack resource list ${objectstorage_id} > >(grep -i objectstorage) 2>/dev/null" | awk '{print $2}')
if [ ${#objectstorage_ids} -lt 1 ]; then
echo "Info: No ObjectStorage resources."
fi
cephstorage_ids=$(ssh -tt -o "UserKnownHostsFile /dev/null" -o "StrictHostKeyChecking no" stack@${tripleo_ip_address} ". ~/stackrc; openstack stack resource list ${cephstorage_id} > >(grep -i cephstorage) 2>/dev/null" | awk '{print $2}')
if [ ${#cephstorage_ids} -lt 1 ]; then
echo "Info: No CephStorage resources."
fi
compute_ids=$(ssh -tt -o "UserKnownHostsFile /dev/null" -o "StrictHostKeyChecking no" stack@${tripleo_ip_address} ". ~/stackrc; openstack stack resource list ${compute_id} > >(grep -i compute) 2>/dev/null" | awk '{print $2}')
if [ ${#controller_id} -lt 1 ]; then
if [ ${#compute_ids} -lt 1 ]; then
echo "Error: Compute IDs is not reporting correctly. Please see check the openstack-heat-api on the undercloud."
exit 1
fi
@ -53,6 +72,24 @@ do
controller_uuids+=$(ssh -tt -o "UserKnownHostsFile /dev/null" -o "StrictHostKeyChecking no" stack@${tripleo_ip_address} ". ~/stackrc; openstack stack resource show ${controller_id} ${controller} > >(grep -oP \"'nova_server_resource': u'([a-z0-9]+-[a-z0-9]+-[a-z0-9]+-[a-z0-9]+-[a-z0-9]+)'\") 2>/dev/null" | awk '{print $2}' | grep -oP [a-z0-9]+-[a-z0-9]+-[a-z0-9]+-[a-z0-9]+-[a-z0-9]+)
fi
done
objectstorage_uuids=()
for objectstorage in ${objectstorage_ids}
do
if [[ ${version_tripleo} -lt 2 ]] ; then
objectstorage_uuids+=$(ssh -tt -o "UserKnownHostsFile /dev/null" -o "StrictHostKeyChecking no" stack@${tripleo_ip_address} ". ~/stackrc; heat resource-show ${objectstorage_id} ${objectstorage} | grep -i nova_server_resource" | awk '{print $4}')
else
objectstorage_uuids+=$(ssh -tt -o "UserKnownHostsFile /dev/null" -o "StrictHostKeyChecking no" stack@${tripleo_ip_address} ". ~/stackrc; openstack stack resource show ${objectstorage_id} ${objectstorage} > >(grep -oP \"'nova_server_resource': u'([a-z0-9]+-[a-z0-9]+-[a-z0-9]+-[a-z0-9]+-[a-z0-9]+)'\") 2>/dev/null" | awk '{print $2}' | grep -oP [a-z0-9]+-[a-z0-9]+-[a-z0-9]+-[a-z0-9]+-[a-z0-9]+)
fi
done
cephstorage_uuids=()
for cephstorage in ${cephstorage_ids}
do
if [[ ${version_tripleo} -lt 2 ]] ; then
cephstorage_uuids+=$(ssh -tt -o "UserKnownHostsFile /dev/null" -o "StrictHostKeyChecking no" stack@${tripleo_ip_address} ". ~/stackrc; heat resource-show ${cephstorage_id} ${cephstorage} | grep -i nova_server_resource" | awk '{print $4}')
else
cephstorage_uuids+=$(ssh -tt -o "UserKnownHostsFile /dev/null" -o "StrictHostKeyChecking no" stack@${tripleo_ip_address} ". ~/stackrc; openstack stack resource show ${cephstorage_id} ${cephstorage} > >(grep -oP \"'nova_server_resource': u'([a-z0-9]+-[a-z0-9]+-[a-z0-9]+-[a-z0-9]+-[a-z0-9]+)'\") 2>/dev/null" | awk '{print $2}' | grep -oP [a-z0-9]+-[a-z0-9]+-[a-z0-9]+-[a-z0-9]+-[a-z0-9]+)
fi
done
compute_uuids=()
for compute in ${compute_ids}
do
@ -101,10 +138,14 @@ for line in $nodes; do
IP=$(echo $line | awk '{print $12}' | cut -d "=" -f2)
if grep -q $uuid <<< {$controller_uuids}; then
controller_hn+=("$host")
elif grep -q $uuid <<< {$objectstorage_uuids}; then
objectstorage_hn+=("$host")
elif grep -q $uuid <<< {$cephstorage_uuids}; then
cephstorage_hn+=("$host")
elif grep -q $uuid <<< {$compute_uuids}; then
compute_hn+=("$host")
else
ceph_hn+=("$host")
other_hn+=("$host")
fi
echo "" | tee -a ${ssh_config_file}
echo "Host ${host}" | tee -a ${ssh_config_file}
@ -129,6 +170,20 @@ if [[ ${#controller_hn} -gt 0 ]]; then
echo "${ct}" | tee -a ${ansible_inventory_file}
done
fi
if [[ ${#objectstorage_hn} -gt 0 ]]; then
echo "" | tee -a ${ansible_inventory_file}
echo "[objectstorage]" | tee -a ${ansible_inventory_file}
for objectstorage in ${objectstorage_hn[@]}; do
echo "${objectstorage}" | tee -a ${ansible_inventory_file}
done
fi
if [[ ${#cephstorage_hn} -gt 0 ]]; then
echo "" | tee -a ${ansible_inventory_file}
echo "[cephstorage]" | tee -a ${ansible_inventory_file}
for cephstorage in ${cephstorage_hn[@]}; do
echo "${cephstorage}" | tee -a ${ansible_inventory_file}
done
fi
if [[ ${#compute_hn} -gt 0 ]]; then
echo "" | tee -a ${ansible_inventory_file}
echo "[compute]" | tee -a ${ansible_inventory_file}
@ -136,11 +191,11 @@ if [[ ${#compute_hn} -gt 0 ]]; then
echo "${c}" | tee -a ${ansible_inventory_file}
done
fi
if [[ ${#ceph_hn} -gt 0 ]]; then
if [[ ${#other_hn} -gt 0 ]]; then
echo "" | tee -a ${ansible_inventory_file}
echo "[ceph]" | tee -a ${ansible_inventory_file}
for ceph in ${ceph_hn[@]}; do
echo "${ceph}" | tee -a ${ansible_inventory_file}
echo "[other]" | tee -a ${ansible_inventory_file}
for other in ${other_hn[@]}; do
echo "${other}" | tee -a ${ansible_inventory_file}
done
fi
echo "" | tee -a ${ansible_inventory_file}

View File

@ -8,7 +8,8 @@
# Or use tags:
# ansible-playbook -i hosts install/collectd-openstack.yml --tag "undercloud"
# ansible-playbook -i hosts install/collectd-openstack.yml --tag "controller"
# ansible-playbook -i hosts install/collectd-openstack.yml --tag "ceph"
# ansible-playbook -i hosts install/collectd-openstack.yml --tag "objectstorage"
# ansible-playbook -i hosts install/collectd-openstack.yml --tag "cephstorage"
# ansible-playbook -i hosts install/collectd-openstack.yml --tag "compute"
#
@ -45,21 +46,37 @@
ignore_errors: true
tags: controller
- hosts: ceph
- hosts: objectstorage
remote_user: "{{ host_remote_user }}"
vars:
config_type: ceph
config_type: objectstorage
roles:
- { role: common, when: collectd_ceph }
- { role: epel, when: collectd_ceph }
- { role: collectd-openstack, when: collectd_ceph }
- { role: common, when: collectd_objectstorage }
- { role: epel, when: collectd_objectstorage }
- { role: collectd-openstack, when: collectd_objectstorage }
tasks:
- name: Collectd off if not collectd_ceph
- name: Collectd off if not collectd_objectstorage
service: name=collectd state=stopped enabled=false
become: true
when: not collectd_ceph
when: not collectd_objectstorage
ignore_errors: true
tags: ceph
tags: objectstorage
- hosts: cephstorage
remote_user: "{{ host_remote_user }}"
vars:
config_type: cephstorage
roles:
- { role: common, when: collectd_cephstorage }
- { role: epel, when: collectd_cephstorage }
- { role: collectd-openstack, when: collectd_cephstorage }
tasks:
- name: Collectd off if not collectd_cephstorage
service: name=collectd state=stopped enabled=false
become: true
when: not collectd_cephstorage
ignore_errors: true
tags: cephstorage
- hosts: compute
remote_user: "{{ host_remote_user }}"

View File

@ -29,12 +29,15 @@
- template_name: openstack
template_node_type: controller
process_list_name: OpenStack-Controller
- template_name: openstack
template_node_type: objectstorage
process_list_name: OpenStack-ObjectStorage
- template_name: openstack
template_node_type: cephstorage
process_list_name: OpenStack-CephStorage
- template_name: openstack
template_node_type: compute
process_list_name: OpenStack-Compute
- template_name: openstack
template_node_type: ceph
process_list_name: OpenStack-Ceph
- template_name: openstack
template_node_type: "*"
process_list_name: OpenStack

View File

@ -68,7 +68,8 @@ collectd_interval: 10
# Run collectd on specific openstack nodes:
collectd_undercloud: true
collectd_controller: true
collectd_ceph: true
collectd_objectstorage: true
collectd_cephstorage: true
collectd_compute: false
# Collect plugins configuration:

View File

@ -85,10 +85,6 @@ PreCacheChain "PreCache"
IgnoreSelected false
</Plugin>
#<Plugin exec>
# Exec nobody "/usr/local/bin/collectd-redis.sh"
#</Plugin>
{%if gnocchi_status_python_plugin %}
{%if inventory_hostname == groups['controller'][0] %}
<LoadPlugin python>

View File

@ -0,0 +1,147 @@
# Installed by Browbeat Ansible Installer
# Config type: {{config_type}}
# Interval default is 10s
Interval {{collectd_interval}}
# Hostname for this machine, if not defined, use gethostname(2) system call
Hostname "{{inventory_hostname}}"
# Loaded Plugins:
LoadPlugin "logfile"
<Plugin "logfile">
LogLevel "info"
File "/var/log/collectd.log"
Timestamp true
</Plugin>
LoadPlugin write_graphite
LoadPlugin cpu
LoadPlugin conntrack
LoadPlugin df
LoadPlugin disk
LoadPlugin exec
LoadPlugin interface
LoadPlugin irq
LoadPlugin load
LoadPlugin match_regex
LoadPlugin memory
LoadPlugin numa
LoadPlugin processes
LoadPlugin swap
LoadPlugin tail
LoadPlugin turbostat
LoadPlugin unixsock
LoadPlugin uptime
# Open unix domain socket for collectdctl
<Plugin unixsock>
SocketFile "/var/run/collectd-unixsock"
SocketGroup "collectd"
SocketPerms "0770"
DeleteSocket true
</Plugin>
PreCacheChain "PreCache"
<Chain "PreCache">
<Rule "ignore_tap">
<Match "regex">
Plugin "^interface$"
PluginInstance "^tap*"
</Match>
Target "stop"
</Rule>
<Rule "ignore_interfaces_q">
<Match "regex">
Plugin "^interface$"
PluginInstance "^q.*"
</Match>
Target "stop"
</Rule>
Target "return"
</Chain>
# Graphite Host Configuration
<Plugin write_graphite>
<Carbon>
Host "{{graphite_host}}"
Port "2003"
Prefix "{{graphite_prefix}}."
Protocol "tcp"
LogSendErrors true
StoreRates true
AlwaysAppendDS false
EscapeCharacter "_"
</Carbon>
</Plugin>
<Plugin df>
ValuesPercentage true
</Plugin>
<Plugin disk>
Disk "/^[hsv]d[a-z]+[0-9]?$/"
IgnoreSelected false
</Plugin>
# (akrzos) Including the version of OpenStack that the process was verified as running after
# OpenStack Installation with a comment at the end of each Process/ProcessMatch statement.
# A Minus before the version means the process was not found in that version. (Ex -9)
<Plugin processes>
# Collectd (Browbeat Installed)
ProcessMatch "collectd" "/usr/sbin/collectd"
# OVS (OpenStack Installed)
ProcessMatch "ovs-vswitchd" "ovs-vswitchd.+openvswitch" # 10
ProcessMatch "ovsdb-server" "ovsdb-server.+openvswitch" # 10
# Swift (OpenStack Installed)
ProcessMatch "swift-account-auditor" "python.+swift-account-auditor" # 10
ProcessMatch "swift-account-reaper" "python.+swift-account-reaper" # 10
ProcessMatch "swift-account-replicator" "python.+swift-account-replicator" # 10
ProcessMatch "swift-account-server" "python.+swift-account-server" # 10
ProcessMatch "swift-container-auditor" "python.+swift-container-auditor" # 10
ProcessMatch "swift-container-updater" "python.+swift-container-updater" # 10
ProcessMatch "swift-container-replicator" "python.+swift-container-replicator" # 10
ProcessMatch "swift-container-server" "python.+swift-container-server" # 10
ProcessMatch "swift-object-auditor" "python.+swift-object-auditor" # 10
ProcessMatch "swift-object-updater" "python.+swift-object-updater" # 10
ProcessMatch "swift-object-replicator" "python.+swift-object-replicator" # 10
ProcessMatch "swift-object-server" "python.+swift-object-server" # 10
</Plugin>
<Plugin swap>
ReportBytes true
ValuesPercentage true
</Plugin>
# Tail plugin configuration
<Plugin "tail">
# Swift logs all into the same file
<File "/var/log/swift/swift.log">
Instance "swift"
<Match>
Regex "account-server: ERROR "
DSType "CounterInc"
Type "counter"
Instance "account-server"
</Match>
<Match>
Regex "container-server: ERROR "
DSType "CounterInc"
Type "counter"
Instance "container-server"
</Match>
<Match>
Regex "object-server: ERROR "
DSType "CounterInc"
Type "counter"
Instance "object-server"
</Match>
</File>
</Plugin>
# Include other collectd configuration files
Include "/etc/collectd.d"

View File

@ -1,5 +1,5 @@
{% set vars = {'panel_idx': 0, 'initial': 0} %}
{% set dashboard_groups = ['undercloud', 'controller', 'ceph', 'compute'] %}
{% set dashboard_groups = ['undercloud', 'controller', 'objectstorage', 'cephstorage', 'compute'] %}
{
"dashboard": {
"id": null,

View File

@ -1,5 +1,5 @@
{% set vars = {'panel_idx': 0, 'initial1': 0, 'initial2': 0, 'initial3': 0} %}
{% set dashboard_groups = ['undercloud', 'controller', 'ceph', 'compute'] %}
{% set dashboard_groups = ['undercloud', 'controller', 'objectstorage', 'cephstorage', 'compute'] %}
{
"dashboard": {
"id": null,
@ -409,9 +409,26 @@
"includeAll": false,
"multi": false,
"multiFormat": "glob",
"name": "ceph_disk",
"name": "objectstorage_disk",
"options": [],
"query": "{{dashboard_cloud_name}}.*ceph*.disk-*",
"query": "{{dashboard_cloud_name}}.*objectstorage*.disk-*",
"refresh": true,
"type": "query"
},
{
"allFormat": "glob",
"current": {
"text": "None",
"value": "",
"isNone": true
},
"datasource": null,
"includeAll": false,
"multi": false,
"multiFormat": "glob",
"name": "cephstorage_disk",
"options": [],
"query": "{{dashboard_cloud_name}}.*cephstorage*.disk-*",
"refresh": true,
"type": "query"
},

View File

@ -1,5 +1,5 @@
{% set vars = {'panel_idx': 0, 'initial1': 0, 'initial2': 0, 'initial3': 0} %}
{% set dashboard_groups = ['undercloud', 'controller', 'ceph', 'compute'] %}
{% set dashboard_groups = ['undercloud', 'controller', 'objectstorage', 'cephstorage', 'compute'] %}
{
"dashboard": {
"id": null,
@ -401,9 +401,26 @@
"includeAll": true,
"multi": true,
"multiFormat": "glob",
"name": "ceph_disk",
"name": "objectstorage_disk",
"options": [],
"query": "{{dashboard_cloud_name}}.*ceph*.disk-*",
"query": "{{dashboard_cloud_name}}.*objectstorage*.disk-*",
"refresh": true,
"type": "query",
"regex": "/disk-[a-z]*$/"
},
{
"allFormat": "glob",
"current": {
"text": "all",
"value": "all"
},
"datasource": null,
"includeAll": true,
"multi": true,
"multiFormat": "glob",
"name": "cephstorage_disk",
"options": [],
"query": "{{dashboard_cloud_name}}.*cephstorage*.disk-*",
"refresh": true,
"type": "query",
"regex": "/disk-[a-z]*$/"

View File

@ -1,5 +1,5 @@
{% set vars = {'panel_idx': 0, 'initial': 0} %}
{% set dashboard_groups = ['undercloud', 'controller', 'ceph', 'compute'] %}
{% set dashboard_groups = ['undercloud', 'controller', 'objectstorage', 'cephstorage', 'compute'] %}
{
"dashboard": {
"id": null,

View File

@ -1,5 +1,5 @@
{% set vars = {'panel_idx': 0, 'initial': 0} %}
{% set dashboard_groups = ['undercloud', 'controller', 'ceph', 'compute'] %}
{% set dashboard_groups = ['undercloud', 'controller', 'objectstorage', 'cephstorage', 'compute'] %}
{
"dashboard": {
"id": null,

View File

@ -1,5 +1,5 @@
{% set vars = {'panel_idx': 0, 'initial': 0, 'initial2': 0} %}
{% set dashboard_groups = ['undercloud', 'controller', 'ceph', 'compute'] %}
{% set dashboard_groups = ['undercloud', 'controller', 'objectstorage', 'cephstorage', 'compute'] %}
{
"dashboard": {
"id": null,
@ -302,9 +302,26 @@
"includeAll": false,
"multi": false,
"multiFormat": "glob",
"name": "ceph_interface",
"name": "objectstorage_interface",
"options": [],
"query": "{{dashboard_cloud_name}}.*ceph*.interface-*",
"query": "{{dashboard_cloud_name}}.*objectstorage*.interface-*",
"refresh": true,
"type": "query"
},
{
"allFormat": "glob",
"current": {
"text": "None",
"value": "",
"isNone": true
},
"datasource": null,
"includeAll": false,
"multi": false,
"multiFormat": "glob",
"name": "cephstorage_interface",
"options": [],
"query": "{{dashboard_cloud_name}}.*cephstorage*.interface-*",
"refresh": true,
"type": "query"
},

View File

@ -3952,8 +3952,13 @@
},
{
"selected": false,
"text": "ceph",
"value": "ceph"
"text": "cephstorage",
"value": "cephstorage"
},
{
"selected": false,
"text": "objectstorage",
"value": "objectstorage"
},
{
"selected": true,
@ -3961,7 +3966,7 @@
"value": "*"
}
],
"query": "undercloud,controller,compute,ceph,*",
"query": "undercloud,controller,objectstorage,cephstorage,compute,*",
"refresh": false,
"type": "custom"
},

View File

@ -242,6 +242,46 @@ per_process_panels:
processes:
- collectd
#
# This dashboard should only contain OpenStack ObjectStorage Node processes
#
OpenStack-ObjectStorage:
- name: "Swift"
processes:
- swift-account-auditor
- swift-account-reaper
- swift-account-replicator
- swift-account-server
- swift-container-auditor
- swift-container-replicator
- swift-container-server
- swift-container-updater
- swift-object-auditor
- swift-object-replicator
- swift-object-server
- swift-object-updater
- name: "Open vSwitch"
processes:
- ovs-vswitchd
- ovsdb-server
- name: "Collectd"
processes:
- collectd
#
# This dashboard should only contain OpenStack CephStorage Node processes
#
OpenStack-CephStorage:
- name: "Ceph"
processes:
- ceph-osd
- ceph-mon
- salt-minion
- diamond
- name: "Collectd"
processes:
- collectd
#
# This dashboard should only contain OpenStack Compute Node processes
#
@ -273,20 +313,6 @@ per_process_panels:
processes:
- collectd
#
# This dashboard should only contain OpenStack Ceph Node processes
#
OpenStack-Ceph:
- name: "Ceph"
processes:
- ceph-osd
- ceph-mon
- salt-minion
- diamond
- name: "Collectd"
processes:
- collectd
#
# This dashboard "OpenStack" aims to be comprehensive with all processes across:
# Undercloud, Controller, ObjectStorage, CephStorage, Compute, etc... Nodes