Merge "Enable yamllint checks"

This commit is contained in:
Zuul 2020-05-21 18:43:20 +00:00 committed by Gerrit Code Review
commit 3691681d84
12 changed files with 55 additions and 59 deletions

View File

@ -114,7 +114,7 @@ conf:
- filesystem
- fsstat
processes: ['.*']
cpu.metrics: ["percentages"]
cpu.metrics: ["percentages"]
core.metrics: ["percentages"]
process.include_top_n:
by_cpu: 5

View File

@ -19,7 +19,7 @@ images:
apache_proxy: docker.io/httpd:2.4
memory_init: docker.io/openstackhelm/heat:newton-ubuntu_xenial
elasticsearch: docker.io/openstackhelm/elasticsearch-s3:7_1_0-20191119
curator: docker.io/bobrik/curator:5.8.1
curator: docker.io/bobrik/curator:5.8.1
ceph_key_placement: docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20191216
s3_bucket: docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20191216
s3_user: docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20191216
@ -909,7 +909,7 @@ storage:
enabled: true
pvc:
name: pvc-elastic
access_mode: [ "ReadWriteOnce" ]
access_mode: ["ReadWriteOnce"]
requests:
storage: 5Gi
storage_class: general
@ -917,7 +917,7 @@ storage:
enabled: true
pvc:
name: pvc-elastic
access_mode: [ "ReadWriteOnce" ]
access_mode: ["ReadWriteOnce"]
requests:
storage: 1Gi
storage_class: general

View File

@ -420,8 +420,8 @@ conf:
search metric: 'rule:admin_or_creator or rule:metric_owner'
list metric: ''
list all metric: 'role:admin'
get measures: 'rule:admin_or_creator or rule:metric_owner'
post measures: 'rule:admin_or_creator'
get measures: 'rule:admin_or_creator or rule:metric_owner'
post measures: 'rule:admin_or_creator'
gnocchi:
DEFAULT:
debug: false

View File

@ -277,7 +277,7 @@ storage:
enabled: true
pvc:
name: kafka-pvc
access_mode: [ "ReadWriteOnce" ]
access_mode: ["ReadWriteOnce"]
requests:
storage: 5Gi
storage_class: general

View File

@ -3,32 +3,32 @@ conf:
- name: local-persistent-volume-0
reclaim_policy: Delete
storage_capacity: "1Gi"
access_modes: [ "ReadWriteOnce" ]
access_modes: ["ReadWriteOnce"]
local_path: /srv/local-volume-0
- name: local-persistent-volume-1
reclaim_policy: Delete
storage_capacity: "1Gi"
access_modes: [ "ReadWriteOnce" ]
access_modes: ["ReadWriteOnce"]
local_path: /srv/local-volume-1
- name: local-persistent-volume-2
reclaim_policy: Delete
storage_capacity: "1Gi"
access_modes: [ "ReadWriteOnce" ]
access_modes: ["ReadWriteOnce"]
local_path: /srv/local-volume-2
- name: local-persistent-volume-3
reclaim_policy: Delete
storage_capacity: "1Gi"
access_modes: [ "ReadWriteOnce" ]
access_modes: ["ReadWriteOnce"]
local_path: /srv/local-volume-3
- name: local-persistent-volume-4
reclaim_policy: Delete
storage_capacity: "1Gi"
access_modes: [ "ReadWriteOnce" ]
access_modes: ["ReadWriteOnce"]
local_path: /srv/local-volume-4
- name: local-persistent-volume-5
reclaim_policy: Delete
storage_capacity: "1Gi"
access_modes: [ "ReadWriteOnce" ]
access_modes: ["ReadWriteOnce"]
local_path: /srv/local-volume-5
manifests:
storage_class: true

View File

@ -166,7 +166,7 @@ secrets:
storage:
enabled: true
pvc:
access_mode: [ "ReadWriteOnce" ]
access_mode: ["ReadWriteOnce"]
requests:
storage: 5Gi
storage_class: general

View File

@ -243,7 +243,7 @@ storage:
enabled: true
pvc:
name: prometheus-pvc
access_mode: [ "ReadWriteOnce" ]
access_mode: ["ReadWriteOnce"]
requests:
storage: 5Gi
storage_class: general

View File

@ -31,7 +31,7 @@ conf:
description: 'no ceph active mgr is present or all ceph mgr are down'
summary: 'no ceph active mgr is present'
- alert: ceph_monitor_quorum_low
expr: ceph_mon_quorum_count < 3
expr: ceph_mon_quorum_count < 3
for: 5m
labels:
severity: page
@ -39,7 +39,7 @@ conf:
description: 'ceph monitor quorum has been less than 3 for more than 5 minutes'
summary: 'ceph high availability is at risk'
- alert: ceph_monitor_quorum_absent
expr: absent(avg_over_time(ceph_mon_quorum_status[5m]))
expr: absent(avg_over_time(ceph_mon_quorum_status[5m]))
labels:
severity: page
annotations:
@ -60,7 +60,7 @@ conf:
description: 'ceph placement group degradation is more than 80 percent'
summary: 'ceph placement groups degraded'
- alert: ceph_osd_down_pct_high
expr: avg_over_time(ceph_osd_down_percent[5m]) > 80
expr: avg_over_time(ceph_osd_down_percent[5m]) > 80
labels:
severity: critical
annotations:

View File

@ -354,7 +354,7 @@ conf:
description: 'Pod {{$labels.pod}} in namespace {{$labels.namespace}} has a CreateContainerConfigError error for more than 10 minutes'
summary: 'Pod {{$labels.pod}} in namespace {{$labels.namespace}} in error status'
- alert: replicaset_missing_replicas
expr: kube_replicaset_spec_replicas - kube_replicaset_status_ready_replicas > 0
expr: kube_replicaset_spec_replicas - kube_replicaset_status_ready_replicas > 0
for: 10m
labels:
severity: page

View File

@ -30,7 +30,7 @@ conf:
description: 'Mariadb has high table lock waits of {{ $value }} percentage'
summary: 'Mariadb table lock waits are high'
- alert: mariadb_node_not_ready
expr: mysql_global_status_wsrep_ready != 1
expr: mysql_global_status_wsrep_ready != 1
for: 10m
labels:
severity: warning
@ -38,7 +38,7 @@ conf:
description: '{{$labels.job}} on {{$labels.instance}} is not ready.'
summary: 'Galera cluster node not ready'
- alert: mariadb_galera_node_out_of_sync
expr: mysql_global_status_wsrep_local_state != 4 AND mysql_global_variables_wsrep_desync == 0
expr: mysql_global_status_wsrep_local_state != 4 AND mysql_global_variables_wsrep_desync == 0
for: 10m
labels:
severity: warning
@ -46,7 +46,7 @@ conf:
description: '{{$labels.job}} on {{$labels.instance}} is not in sync ({{$value}} != 4)'
summary: 'Galera cluster node out of sync'
- alert: mariadb_innodb_replication_fallen_behind
expr: (mysql_global_variables_innodb_replication_delay > 30) AND on (instance) (predict_linear(mysql_global_variables_innodb_replication_delay[5m], 60*2) > 0)
expr: (mysql_global_variables_innodb_replication_delay > 30) AND on (instance) (predict_linear(mysql_global_variables_innodb_replication_delay[5m], 60*2) > 0)
for: 10m
labels:
severity: warning
@ -64,7 +64,7 @@ conf:
description: Openstack exporter is not collecting metrics or is not available for past 10 minutes
title: Openstack exporter is not collecting metrics or is not available
- alert: os_glance_api_availability
expr: openstack_check_glance_api != 1
expr: openstack_check_glance_api != 1
for: 5m
labels:
severity: page
@ -72,7 +72,7 @@ conf:
description: 'Glance API is not available at {{$labels.url}} for more than 5 minutes'
summary: 'Glance API is not available at {{$labels.url}}'
- alert: os_nova_api_availability
expr: openstack_check_nova_api != 1
expr: openstack_check_nova_api != 1
for: 5m
labels:
severity: page
@ -80,7 +80,7 @@ conf:
description: 'Nova API is not available at {{$labels.url}} for more than 5 minutes'
summary: 'Nova API is not available at {{$labels.url}}'
- alert: os_keystone_api_availability
expr: openstack_check_keystone_api != 1
expr: openstack_check_keystone_api != 1
for: 5m
labels:
severity: page
@ -88,7 +88,7 @@ conf:
description: 'Keystone API is not available at {{$labels.url}} for more than 5 minutes'
summary: 'Keystone API is not available at {{$labels.url}}'
- alert: os_neutron_api_availability
expr: openstack_check_neutron_api != 1
expr: openstack_check_neutron_api != 1
for: 5m
labels:
severity: page
@ -96,7 +96,7 @@ conf:
description: 'Neutron API is not available at {{$labels.url}} for more than 5 minutes'
summary: 'Neutron API is not available at {{$labels.url}}'
- alert: os_neutron_metadata_agent_availability
expr: openstack_services_neutron_metadata_agent_down_total > 0
expr: openstack_services_neutron_metadata_agent_down_total > 0
for: 5m
labels:
severity: page
@ -104,7 +104,7 @@ conf:
description: 'One or more neutron metadata_agents are not available for more than 5 minutes'
summary: 'One or more neutron metadata_agents are not available'
- alert: os_neutron_openvswitch_agent_availability
expr: openstack_services_neutron_openvswitch_agent_down_total > 0
expr: openstack_services_neutron_openvswitch_agent_down_total > 0
for: 5m
labels:
severity: page
@ -112,7 +112,7 @@ conf:
description: 'One or more neutron openvswitch agents are not available for more than 5 minutes'
summary: 'One or more neutron openvswitch agents are not available'
- alert: os_neutron_dhcp_agent_availability
expr: openstack_services_neutron_dhcp_agent_down_total > 0
expr: openstack_services_neutron_dhcp_agent_down_total > 0
for: 5m
labels:
severity: page
@ -120,7 +120,7 @@ conf:
description: 'One or more neutron dhcp agents are not available for more than 5 minutes'
summary: 'One or more neutron dhcp agents are not available'
- alert: os_neutron_l3_agent_availability
expr: openstack_services_neutron_l3_agent_down_total > 0
expr: openstack_services_neutron_l3_agent_down_total > 0
for: 5m
labels:
severity: page
@ -128,7 +128,7 @@ conf:
description: 'One or more neutron L3 agents are not available for more than 5 minutes'
summary: 'One or more neutron L3 agents are not available'
- alert: os_swift_api_availability
expr: openstack_check_swift_api != 1
expr: openstack_check_swift_api != 1
for: 5m
labels:
severity: page
@ -136,7 +136,7 @@ conf:
description: 'Swift API is not available at {{$labels.url}} for more than 5 minutes'
summary: 'Swift API is not available at {{$labels.url}}'
- alert: os_cinder_api_availability
expr: openstack_check_cinder_api != 1
expr: openstack_check_cinder_api != 1
for: 5m
labels:
severity: page
@ -144,7 +144,7 @@ conf:
description: 'Cinder API is not available at {{$labels.url}} for more than 5 minutes'
summary: 'Cinder API is not available at {{$labels.url}}'
- alert: os_cinder_scheduler_availability
expr: openstack_services_cinder_cinder_scheduler != 1
expr: openstack_services_cinder_cinder_scheduler != 1
for: 5m
labels:
severity: page
@ -152,7 +152,7 @@ conf:
description: 'Cinder scheduler is not available for more than 5 minutes'
summary: 'Cinder scheduler is not available'
- alert: os_heat_api_availability
expr: openstack_check_heat_api != 1
expr: openstack_check_heat_api != 1
for: 5m
labels:
severity: page
@ -160,7 +160,7 @@ conf:
description: 'Heat API is not available at {{$labels.url}} for more than 5 minutes'
summary: 'Heat API is not available at {{$labels.url}}'
- alert: os_nova_compute_disabled
expr: openstack_services_nova_compute_disabled_total > 0
expr: openstack_services_nova_compute_disabled_total > 0
for: 5m
labels:
severity: page
@ -168,7 +168,7 @@ conf:
description: 'nova-compute is disabled on certain hosts for more than 5 minutes'
summary: 'Openstack compute service nova-compute is disabled on some hosts'
- alert: os_nova_conductor_disabled
expr: openstack_services_nova_conductor_disabled_total > 0
expr: openstack_services_nova_conductor_disabled_total > 0
for: 5m
labels:
severity: page
@ -176,7 +176,7 @@ conf:
description: 'nova-conductor is disabled on certain hosts for more than 5 minutes'
summary: 'Openstack compute service nova-conductor is disabled on some hosts'
- alert: os_nova_consoleauth_disabled
expr: openstack_services_nova_consoleauth_disabled_total > 0
expr: openstack_services_nova_consoleauth_disabled_total > 0
for: 5m
labels:
severity: page
@ -184,7 +184,7 @@ conf:
description: 'nova-consoleauth is disabled on certain hosts for more than 5 minutes'
summary: 'Openstack compute service nova-consoleauth is disabled on some hosts'
- alert: os_nova_scheduler_disabled
expr: openstack_services_nova_scheduler_disabled_total > 0
expr: openstack_services_nova_scheduler_disabled_total > 0
for: 5m
labels:
severity: page
@ -192,7 +192,7 @@ conf:
description: 'nova-scheduler is disabled on certain hosts for more than 5 minutes'
summary: 'Openstack compute service nova-scheduler is disabled on some hosts'
- alert: os_nova_compute_down
expr: openstack_services_nova_compute_down_total > 0
expr: openstack_services_nova_compute_down_total > 0
for: 5m
labels:
severity: page
@ -200,7 +200,7 @@ conf:
description: 'nova-compute is down on certain hosts for more than 5 minutes'
summary: 'Openstack compute service nova-compute is down on some hosts'
- alert: os_nova_conductor_down
expr: openstack_services_nova_conductor_down_total > 0
expr: openstack_services_nova_conductor_down_total > 0
for: 5m
labels:
severity: page
@ -208,7 +208,7 @@ conf:
description: 'nova-conductor is down on certain hosts for more than 5 minutes'
summary: 'Openstack compute service nova-conductor is down on some hosts'
- alert: os_nova_consoleauth_down
expr: openstack_services_nova_consoleauth_down_total > 0
expr: openstack_services_nova_consoleauth_down_total > 0
for: 5m
labels:
severity: page
@ -216,7 +216,7 @@ conf:
description: 'nova-consoleauth is down on certain hosts for more than 5 minutes'
summary: 'Openstack compute service nova-consoleauth is down on some hosts'
- alert: os_nova_scheduler_down
expr: openstack_services_nova_scheduler_down_total > 0
expr: openstack_services_nova_scheduler_down_total > 0
for: 5m
labels:
severity: page
@ -258,7 +258,7 @@ conf:
description: 'RabbitMQ at {{ $labels.instance }} has {{ $value }} partitions'
summary: 'RabbitMQ Network partitions detected'
- alert: rabbitmq_down
expr: min(rabbitmq_up) by(instance) != 1
expr: min(rabbitmq_up) by(instance) != 1
for: 10m
labels:
severity: page
@ -266,7 +266,7 @@ conf:
description: 'RabbitMQ Server instance {{ $labels.instance }} is down'
summary: 'The RabbitMQ Server instance at {{ $labels.instance }} has been down the last 10 mins'
- alert: rabbitmq_file_descriptor_usage_high
expr: fd_used * 100 /fd_total > 80
expr: fd_used * 100 /fd_total > 80
for: 10m
labels:
severity: warning
@ -274,7 +274,7 @@ conf:
description: 'RabbitMQ Server instance {{ $labels.instance }} has high file descriptor usage of {{ $value }} percent.'
summary: 'RabbitMQ file descriptors usage is high for last 10 mins'
- alert: rabbitmq_node_disk_free_alarm
expr: node_disk_free_alarm > 0
expr: node_disk_free_alarm > 0
for: 10m
labels:
severity: warning
@ -282,7 +282,7 @@ conf:
description: 'RabbitMQ Server instance {{ $labels.instance }} has low disk free space available.'
summary: 'RabbitMQ disk space usage is high'
- alert: rabbitmq_node_memory_alarm
expr: node_mem_alarm > 0
expr: node_mem_alarm > 0
for: 10m
labels:
severity: warning
@ -290,7 +290,7 @@ conf:
description: 'RabbitMQ Server instance {{ $labels.instance }} has low free memory.'
summary: 'RabbitMQ memory usage is high'
- alert: rabbitmq_less_than_3_nodes
expr: running < 3
expr: running < 3
for: 10m
labels:
severity: warning
@ -298,7 +298,7 @@ conf:
description: 'RabbitMQ Server has less than 3 nodes running.'
summary: 'RabbitMQ server is at risk of losing data'
- alert: rabbitmq_queue_messages_returned_high
expr: queue_messages_returned_total/queue_messages_published_total * 100 > 50
expr: queue_messages_returned_total/queue_messages_published_total * 100 > 50
for: 5m
labels:
severity: warning
@ -306,7 +306,7 @@ conf:
description: 'RabbitMQ Server is returning more than 50 percent of messages received.'
summary: 'RabbitMQ server is returning more than 50 percent of messages received.'
- alert: rabbitmq_consumers_low_utilization
expr: queue_consumer_utilisation < .4
expr: queue_consumer_utilisation < .4
for: 5m
labels:
severity: warning
@ -314,7 +314,7 @@ conf:
description: 'RabbitMQ consumers message consumption speed is low'
summary: 'RabbitMQ consumers message consumption speed is low'
- alert: rabbitmq_high_message_load
expr: queue_messages_total > 17000 or increase(queue_messages_total[5m]) > 4000
expr: queue_messages_total > 17000 or increase(queue_messages_total[5m]) > 4000
for: 5m
labels:
severity: warning

View File

@ -6,14 +6,10 @@ yaml-files:
- '.yamllint'
rules:
braces:
level: warning
brackets:
level: warning
colons:
level: warning
commas:
level: warning
braces: enable
brackets: enable
colons: enable
commas: enable
comments:
level: warning
comments-indentation:

View File

@ -205,7 +205,7 @@ storage:
enabled: true
pvc:
name: zookeeper-pvc
access_mode: [ "ReadWriteOnce" ]
access_mode: ["ReadWriteOnce"]
requests:
storage: 5Gi
storage_class: general