
Validate the playbooks metadata structure

This patch adds a custom ansible-lint rule to enforce the structure of
the validation playbooks:

*ValidationHasMetadataRule*:
Throws an ansible-lint error if:
- the *hosts* key is empty or not found,
- the *vars* dictionary is missing,
- the *metadata* dictionary is missing under *vars*,
- the *name*/*description*/*groups* keys are missing or have the wrong
  data type,
- the validation belongs to one or several groups NOT in the official list of
  groups (groups.yaml); the expected structure is shown below.
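
For reference, this is the playbook structure the new rule expects, taken
from the rule's own *info* example (the group names are placeholders):

    ---
    - hosts: localhost
      vars:
        metadata:
          name: Validation Name
          description: >
            A full description of the validation.
          groups:
            - group1
            - group2
            - group3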

*YAMLLINT*:
- Enable the yamllint check in the tox *linters* environment
- Fix the yamllint errors detected so far (work in progress); the typical
  changes are sketched below
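
Most of the yamllint fixes in the diff follow the same two patterns: truthy
values are normalized to lowercase booleans, and list items are indented
under their parent key. A minimal before/after sketch, using keys that appear
in the playbooks touched by this patch:

    # before
    gather_facts: yes
    changed_when: False
    groups:
    - post-deployment

    # after
    gather_facts: true
    changed_when: false
    groups:
      - post-deployment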

Change-Id: If233286aa9f4299f02f13dc34f1e8c05d89df851
Signed-off-by: Gael Chamoulaud <gchamoul@redhat.com>
Gael Chamoulaud, 2 years ago (parent commit e50e1a067d)
Changed files (number of changed lines in parentheses):

  1. .ansible-lint (2)
  2. .ansible-lint_rules/ValidationHasMetadataRule.py (138)
  3. .yamllint (11)
  4. playbooks/ceph-dependencies-installed.yaml (2)
  5. playbooks/check-latest-packages-version.yaml (2)
  6. playbooks/collect-flavors-and-verify-profiles.yaml (4)
  7. playbooks/controller-token.yaml (2)
  8. playbooks/controller-ulimits.yaml (2)
  9. playbooks/default-node-count.yaml (2)
  10. playbooks/neutron-sanity-check.yaml (12)
  11. playbooks/no-op-firewall-nova-driver.yaml (2)
  12. playbooks/ntp.yaml (2)
  13. playbooks/openshift-hw-requirements.yaml (6)
  14. playbooks/openshift-nw-requirements.yaml (2)
  15. playbooks/openstack-endpoints.yaml (6)
  16. playbooks/repos.yaml (2)
  17. playbooks/stonith-exists.yaml (3)
  18. playbooks/tls-everywhere-post-deployment.yaml (2)
  19. playbooks/tls-everywhere-pre-deployment.yaml (2)
  20. playbooks/tls-everywhere-prep.yaml (2)
  21. playbooks/undercloud-cpu.yaml (2)
  22. playbooks/undercloud-disk-space-pre-upgrade.yaml (6)
  23. playbooks/undercloud-disk-space.yaml (10)
  24. playbooks/undercloud-heat-purge-deleted.yaml (4)
  25. playbooks/undercloud-neutron-sanity-check.yaml (14)
  26. playbooks/undercloud-ram.yaml (2)
  27. playbooks/undercloud-selinux-mode.yaml (2)
  28. playbooks/undercloud-tokenflush.yaml (2)
  29. roles/advanced-format-512e-support/tasks/main.yml (2)
  30. roles/ceph/defaults/main.yml (1)
  31. roles/ceph/tasks/ceph-ansible-installed.yaml (9)
  32. roles/ceph/tasks/ceph-health.yaml (101)
  33. roles/check-latest-packages-version/molecule/default/prepare.yml (2)
  34. roles/check-network-gateway/molecule/default/playbook.yml (2)
  35. roles/check-network-gateway/molecule/default/prepare.yml (2)
  36. roles/check-network-gateway/tasks/main.yml (8)
  37. roles/collect-flavors-and-verify-profiles/vars/main.yml (4)
  38. roles/container-status/tasks/main.yaml (36)
  39. roles/containerized-undercloud-docker/tasks/main.yml (4)
  40. roles/controller-token/molecule/default/playbook.yml (2)
  41. roles/controller-token/tasks/main.yml (2)
  42. roles/controller-token/vars/main.yml (2)
  43. roles/controller-ulimits/molecule/default/playbook.yml (2)
  44. roles/controller-ulimits/tasks/main.yml (4)
  45. roles/ctlplane-ip-range/molecule/default/playbook.yml (2)
  46. roles/ctlplane-ip-range/molecule/default/prepare.yml (2)
  47. roles/ctlplane-ip-range/tasks/main.yml (4)
  48. roles/default-node-count/vars/main.yml (2)
  49. roles/dhcp-validations/tasks/dhcp-introspection.yaml (6)
  50. roles/dhcp-validations/tasks/dhcp-provisioning.yaml (2)
  51. roles/dns/tasks/main.yml (2)
  52. roles/haproxy/molecule/default/playbook.yml (2)
  53. roles/healthcheck-service-status/tasks/main.yml (4)
  54. roles/image-serve/molecule/default/molecule.yml (4)
  55. roles/image-serve/molecule/default/playbook.yml (4)
  56. roles/image-serve/molecule/default/prepare.yml (2)
  57. roles/image-serve/tasks/main.yaml (2)
  58. roles/image-serve/vars/main.yml (1)
  59. roles/mysql-open-files-limit/tasks/main.yml (2)
  60. roles/neutron-sanity-check/tasks/main.yml (34)
  61. roles/no-op-firewall-nova-driver/molecule/default/playbook.yml (4)
  62. roles/no-op-firewall-nova-driver/vars/main.yml (2)
  63. roles/node-health/tasks/main.yml (2)
  64. roles/nova-event-callback/tasks/main.yml (14)
  65. roles/nova-status/molecule/default/playbook.yml (2)
  66. roles/nova-status/molecule/default/prepare.yml (2)
  67. roles/nova-status/tasks/main.yml (2)
  68. roles/ntp/tasks/main.yml (26)
  69. roles/ntp/vars/main.yml (2)
  70. roles/openshift-on-openstack/defaults/main.yml (4)
  71. roles/openshift-on-openstack/tasks/openshift-hw-requirements.yaml (10)
  72. roles/openshift-on-openstack/tasks/openshift-nw-requirements.yaml (106)
  73. roles/openstack-endpoints/tasks/main.yml (6)
  74. roles/ovs-dpdk-pmd/tasks/main.yml (6)
  75. roles/pacemaker-status/tasks/main.yml (8)
  76. roles/rabbitmq-limits/molecule/default/playbook.yml (2)
  77. roles/rabbitmq-limits/molecule/default/prepare.yml (2)
  78. roles/repos/molecule/default/playbook.yml (4)
  79. roles/repos/tasks/main.yml (10)
  80. roles/service-status/meta/main.yml (27)
  81. roles/service-status/tasks/main.yaml (2)
  82. roles/stonith-exists/molecule/default/prepare.yml (2)
  83. roles/stonith-exists/tasks/main.yml (4)
  84. roles/stonith-exists/vars/main.yml (3)
  85. roles/tls-everywhere/tasks/common.yaml (10)
  86. roles/tls-everywhere/tasks/overcloud-post-deployment.yaml (6)
  87. roles/tls-everywhere/tasks/pre-deployment-containerized.yaml (16)
  88. roles/tls-everywhere/tasks/pre-deployment-non-containerized.yaml (13)
  89. roles/tls-everywhere/tasks/pre-deployment.yaml (34)
  90. roles/tls-everywhere/tasks/prep.yaml (16)
  91. roles/undercloud-debug/defaults/main.yml (2)
  92. roles/undercloud-debug/molecule/default/playbook.yml (6)
  93. roles/undercloud-debug/tasks/main.yml (4)
  94. roles/undercloud-disk-space/defaults/main.yml (11)
  95. roles/undercloud-disk-space/tasks/main.yml (8)
  96. roles/undercloud-heat-purge-deleted/molecule/default/molecule.yml (4)
  97. roles/undercloud-heat-purge-deleted/molecule/default/playbook.yml (2)
  98. roles/undercloud-heat-purge-deleted/molecule/default/prepare.yml (2)
  99. roles/undercloud-heat-purge-deleted/tasks/main.yml (2)
  100. roles/undercloud-heat-purge-deleted/vars/main.yaml (4)

2
.ansible-lint

@ -2,6 +2,8 @@ exclude_paths:
- releasenotes/
parseable: true
quiet: false
rulesdir:
- .ansible-lint_rules/
skip_list:
# Lines should be no longer than 120 chars.
- '204'

138
.ansible-lint_rules/ValidationHasMetadataRule.py

@@ -0,0 +1,138 @@
import os

import six
import yaml

from ansiblelint import AnsibleLintRule


class ValidationHasMetadataRule(AnsibleLintRule):
    id = '750'
    shortdesc = 'Validation playbook must have mandatory metadata'

    info = """
---
- hosts: localhost
  vars:
    metadata:
      name: Validation Name
      description: >
        A full description of the validation.
      groups:
        - group1
        - group2
        - group3
"""

    description = (
        "The Validation playbook must have mandatory metadata:\n"
        "```{}```".format(info)
    )

    severity = 'HIGH'
    tags = ['metadata']

    no_vars_found = "The validation playbook must contain a 'vars' dictionary"
    no_meta_found = (
        "The validation playbook must contain "
        "a 'metadata' dictionary under vars"
    )
    no_groups_found = \
        "*metadata* should contain a list of group (groups)"
    unknown_groups_found = (
        "Unknown group(s) '{}' found! "
        "The official list of groups are '{}'. "
        "To add a new validation group, please add it in the groups.yaml "
        "file at the root of the tripleo-validations project."
    )

    def get_groups(self):
        """Returns a list of group names supported by
        tripleo-validations by reading 'groups.yaml'
        file located in the base directory.
        """
        results = []
        grp_file_path = os.path.abspath('groups.yaml')

        with open(grp_file_path, "r") as grps:
            contents = yaml.safe_load(grps)

        for grp_name, grp_desc in sorted(contents.items()):
            results.append(grp_name)

        return results

    def matchplay(self, file, data):
        results = []
        path = file['path']

        if file['type'] == 'playbook':
            if path.startswith("playbooks/") or \
               path.find("tripleo-validations/playbooks/") > 0:
                # *hosts* line check
                hosts = data.get('hosts', None)
                if not hosts:
                    return [({
                        path: data
                    }, "No *hosts* key found in the playbook")]

                # *vars* lines check
                vars = data.get('vars', None)
                if not vars:
                    return [({
                        path: data
                    }, self.no_vars_found)]
                else:
                    if not isinstance(vars, dict):
                        return [({path: data}, '*vars* should be a dictionary')]

                # *metadata* lines check
                metadata = data['vars'].get('metadata', None)
                if metadata:
                    if not isinstance(metadata, dict):
                        return [(
                            {path: data},
                            '*metadata* should be a dictionary')]
                else:
                    return [({path: data}, self.no_meta_found)]

                # *metadata>[name|description] lines check
                for info in ['name', 'description']:
                    if not metadata.get(info, None):
                        results.append((
                            {path: data},
                            '*metadata* should contain a %s key' % info))
                        continue
                    if not isinstance(metadata.get(info),
                                      six.string_types):
                        results.append((
                            {path: data},
                            '*%s* should be a string' % info))

                # *metadata>groups* lines check
                if not metadata.get('groups', None):
                    results.append((
                        {path: data},
                        self.no_groups_found))
                else:
                    if not isinstance(metadata.get('groups'), list):
                        results.append((
                            {path: data},
                            '*groups* should be a list'))
                    else:
                        groups = metadata.get('groups')
                        group_list = self.get_groups()
                        unknown_groups_list = list(
                            set(groups) - set(group_list))
                        if unknown_groups_list:
                            results.append((
                                {path: data},
                                self.unknown_groups_found.format(
                                    unknown_groups_list,
                                    group_list)
                            ))

                return results
        return results
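
As a quick illustration of the rule above, a hypothetical playbook such as the
following, placed under playbooks/, would be flagged with the no_meta_found
message because its *vars* section carries no *metadata* dictionary:

    ---
    - hosts: undercloud
      vars:
        some_setting: 42
      roles:
        - undercloud-ram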

11
.yamllint

@@ -0,0 +1,11 @@
---
extends: default

rules:
  line-length:
    # matches hardcoded 160 value from ansible-lint
    max: 160

ignore: |
  zuul.d/*.yaml
  releasenotes/notes/*.yaml

2
playbooks/ceph-dependencies-installed.yaml

@ -10,7 +10,7 @@
fail_without_deps: true
tripleo_delegate_to: "{{ groups['overcloud'] | default([]) }}"
packages:
- lvm2
- lvm2
tasks:
- include_role:
name: ceph

2
playbooks/check-latest-packages-version.yaml

@ -1,6 +1,6 @@
---
- hosts: undercloud
gather_facts: yes
gather_facts: true
vars:
metadata:
name: Check if latest version of packages is installed

4
playbooks/collect-flavors-and-verify-profiles.yaml

@ -7,7 +7,7 @@
This validation checks the flavors assigned to roles exist and have the
correct capabilities set.
groups:
- pre-deployment
- pre-upgrade
- pre-deployment
- pre-upgrade
roles:
- collect-flavors-and-verify-profiles

2
playbooks/controller-token.yaml

@ -7,7 +7,7 @@
This validation checks that keystone admin token is disabled on both
undercloud and overcloud controller after deployment.
groups:
- post-deployment
- post-deployment
keystone_conf_file: "/var/lib/config-data/puppet-generated/keystone/etc/keystone/keystone.conf"
roles:
- controller-token

2
playbooks/controller-ulimits.yaml

@ -6,7 +6,7 @@
description: >
This will check the ulimits of each controller.
groups:
- post-deployment
- post-deployment
nofiles_min: 1024
nproc_min: 2048
roles:

2
playbooks/default-node-count.yaml

@ -7,6 +7,6 @@
This validation checks that the nodes and hypervisor statistics
add up.
groups:
- pre-deployment
- pre-deployment
roles:
- default-node-count

12
playbooks/neutron-sanity-check.yaml

@ -17,12 +17,12 @@
# will be passed to the Neutron services. The order is important
# here: the values in later files take precedence.
configs:
- /etc/neutron/neutron.conf
- /usr/share/neutron/neutron-dist.conf
- /etc/neutron/metadata_agent.ini
- /etc/neutron/dhcp_agent.ini
- /etc/neutron/fwaas_driver.ini
- /etc/neutron/l3_agent.ini
- /etc/neutron/neutron.conf
- /usr/share/neutron/neutron-dist.conf
- /etc/neutron/metadata_agent.ini
- /etc/neutron/dhcp_agent.ini
- /etc/neutron/fwaas_driver.ini
- /etc/neutron/l3_agent.ini
roles:
- neutron-sanity-check

2
playbooks/no-op-firewall-nova-driver.yaml

@ -7,7 +7,7 @@
When using Neutron, the `firewall_driver` option in Nova must be set to
`NoopFirewallDriver`.
groups:
- post-deployment
- post-deployment
nova_conf_path: "/var/lib/config-data/puppet-generated/nova_libvirt/etc/nova/nova.conf"
roles:
- no-op-firewall-nova-driver

2
playbooks/ntp.yaml

@ -9,6 +9,6 @@
The deployment should configure and run chronyd. This validation verifies
that it is indeed running and connected to an NTP server on all nodes.
groups:
- post-deployment
- post-deployment
roles:
- ntp

6
playbooks/openshift-hw-requirements.yaml

@ -12,7 +12,7 @@
- Are images named centos or rhel available?
- Are there sufficient compute resources available for a default setup? (1 Master node, 1 Infra node, 2 App nodes)
groups:
- openshift-on-openstack
- openshift-on-openstack
min_total_ram_testing: 16384 # 4 per node
min_total_vcpus_testing: 4 # 1 per node
min_total_disk_testing: 93 # Master: 40, others: 17 per node
@ -23,8 +23,8 @@
min_node_disk_testing: 40 # Minimum disk per node for testing
min_node_ram_prod: 16384 # Minimum ram per node for production
min_node_disk_prod: 42 # Minimum disk per node for production
resource_reqs_testing: False
resource_reqs_prod: False
resource_reqs_testing: false
resource_reqs_prod: false
tasks:
- include_role:
name: openshift-on-openstack

2
playbooks/openshift-nw-requirements.yaml

@ -7,7 +7,7 @@
Checks if an external network has been configured on the overcloud as
required for an OpenShift deployment on top of OpenStack.
groups:
- openshift-on-openstack
- openshift-on-openstack
tasks:
- include_role:
name: openshift-on-openstack

6
playbooks/openstack-endpoints.yaml

@ -8,8 +8,8 @@
This validation gets the PublicVip address from the deployment and
tries to access Horizon and get a Keystone token.
groups:
- post-deployment
- pre-upgrade
- post-upgrade
- post-deployment
- pre-upgrade
- post-upgrade
roles:
- openstack-endpoints

2
playbooks/repos.yaml

@ -1,6 +1,6 @@
---
- hosts: undercloud, overcloud
gather_facts: yes
gather_facts: true
vars:
metadata:
name: Check correctness of current repositories

3
playbooks/stonith-exists.yaml

@ -7,7 +7,8 @@
Verify that stonith devices are configured for your OpenStack Platform HA cluster.
We don't configure stonith device with TripleO Installer. Because the hardware
configuration may be differ in each environment and requires different fence agents.
How to configure fencing please read https://access.redhat.com/documentation/en/red-hat-openstack-platform/8/paged/director-installation-and-usage/86-fencing-the-controller-nodes
How to configure fencing please read
https://access.redhat.com/documentation/en/red-hat-openstack-platform/8/paged/director-installation-and-usage/86-fencing-the-controller-nodes
groups:
- post-deployment
roles:

2
playbooks/tls-everywhere-post-deployment.yaml

@ -8,7 +8,7 @@
and that all certs being tracked by certmonger are in the
MONITORING state.
groups:
- post-deployment
- post-deployment
tasks:
- include_role:
name: tls-everywhere

2
playbooks/tls-everywhere-pre-deployment.yaml

@ -7,7 +7,7 @@
Checks that the undercloud has novajoin set up corectly and
that we are ready to do the overcloud deploy with tls-everywhere.
groups:
- pre-deployment
- pre-deployment
tasks:
- include_role:
name: tls-everywhere

2
playbooks/tls-everywhere-prep.yaml

@ -7,7 +7,7 @@
Checks that the undercloud is ready to set up novajoin and
to register to IdM as a client as part of undercloud-install.
groups:
- prep
- prep
tasks:
- include_role:
name: tls-everywhere

2
playbooks/undercloud-cpu.yaml

@ -1,6 +1,6 @@
---
- hosts: undercloud
gather_facts: yes
gather_facts: true
vars:
metadata:
name: Verify undercloud fits the CPU core requirements

6
playbooks/undercloud-disk-space-pre-upgrade.yaml

@ -11,10 +11,10 @@
groups:
- pre-upgrade
volumes:
- {mount: /var/lib/docker, min_size: 10}
- {mount: /var/lib/docker, min_size: 10}
- {mount: /var/lib/config-data, min_size: 3}
- {mount: /var, min_size: 16}
- {mount: /, min_size: 20}
- {mount: /var, min_size: 16}
- {mount: /, min_size: 20}
roles:
- undercloud-disk-space

10
playbooks/undercloud-disk-space.yaml

@ -12,12 +12,12 @@
- prep
- pre-introspection
volumes:
- {mount: /var/lib/docker, min_size: 10}
- {mount: /var/lib/docker, min_size: 10}
- {mount: /var/lib/config-data, min_size: 3}
- {mount: /var/log, min_size: 3}
- {mount: /usr, min_size: 5}
- {mount: /var, min_size: 20}
- {mount: /, min_size: 25}
- {mount: /var/log, min_size: 3}
- {mount: /usr, min_size: 5}
- {mount: /var, min_size: 20}
- {mount: /, min_size: 25}
roles:
- undercloud-disk-space

4
playbooks/undercloud-heat-purge-deleted.yaml

@ -8,8 +8,8 @@
heat database can grow very large. This validation checks that
the purge_deleted crontab has been set up.
groups:
- pre-upgrade
- pre-deployment
- pre-upgrade
- pre-deployment
cron_check: "heat-manage purge_deleted"
roles:
- undercloud-heat-purge-deleted

14
playbooks/undercloud-neutron-sanity-check.yaml

@ -17,13 +17,13 @@
# will be passed to the Neutron services. The order is important
# here: the values in later files take precedence.
configs:
- /etc/neutron/neutron.conf
- /usr/share/neutron/neutron-dist.conf
- /etc/neutron/metadata_agent.ini
- /etc/neutron/dhcp_agent.ini
- /etc/neutron/plugins/ml2/openvswitch_agent.ini
- /etc/neutron/fwaas_driver.ini
- /etc/neutron/l3_agent.ini
- /etc/neutron/neutron.conf
- /usr/share/neutron/neutron-dist.conf
- /etc/neutron/metadata_agent.ini
- /etc/neutron/dhcp_agent.ini
- /etc/neutron/plugins/ml2/openvswitch_agent.ini
- /etc/neutron/fwaas_driver.ini
- /etc/neutron/l3_agent.ini
roles:
- neutron-sanity-check

2
playbooks/undercloud-ram.yaml

@ -1,6 +1,6 @@
---
- hosts: undercloud
gather_facts: yes
gather_facts: true
vars:
metadata:
name: Verify the undercloud fits the RAM requirements

2
playbooks/undercloud-selinux-mode.yaml

@ -1,6 +1,6 @@
---
- hosts: undercloud
gather_facts: yes
gather_facts: true
vars:
metadata:
name: Undercloud SELinux Enforcing Mode Check

2
playbooks/undercloud-tokenflush.yaml

@ -8,7 +8,7 @@
keystone database can grow very large. This validation checks that
the keystone token_flush crontab has been set up.
groups:
- pre-introspection
- pre-introspection
cron_check: "keystone-manage token_flush"
roles:
- undercloud-tokenflush

2
roles/advanced-format-512e-support/tasks/main.yml

@ -2,7 +2,7 @@
- name: List the available drives
register: drive_list
command: "ls /sys/class/block/"
changed_when: False
changed_when: false
- name: Detect whether the drive uses Advanced Format
advanced_format: drive={{ item }}

1
roles/ceph/defaults/main.yml

@ -4,4 +4,3 @@ fail_without_deps: false
fail_on_ceph_health_err: false
osd_percentage_min: 0
ceph_ansible_repo: "centos-ceph-nautilus"

9
roles/ceph/tasks/ceph-ansible-installed.yaml

@ -2,9 +2,9 @@
- name: Check if ceph-ansible is installed
shell: rpm -q ceph-ansible || true
args:
warn: no
changed_when: False
ignore_errors: True
warn: false
changed_when: false
ignore_errors: true
register: ceph_ansible_installed
- name: Warn about missing ceph-ansible
@ -24,7 +24,7 @@
- name: Get ceph-ansible repository
shell: "yum info ceph-ansible | awk '/From repo/ {print $4}'"
register: repo
changed_when: False
changed_when: false
- name: Fail if ceph-ansible doesn't belong to the specified repo
fail:
@ -32,4 +32,3 @@
when:
- (repo.stdout | length == 0 or repo.stdout != "{{ ceph_ansible_repo }}")
- fail_without_ceph_ansible|default(false)|bool

101
roles/ceph/tasks/ceph-health.yaml

@ -4,64 +4,65 @@
shell: hiera -c /etc/puppet/hiera.yaml enabled_services | egrep -sq ceph_mon
ignore_errors: true
register: ceph_mon_enabled
changed_when: False
changed_when: false
- when:
- ceph_mon_enabled is succeeded
- when: "ceph_mon_enabled is succeeded"
block:
- name: Set container_cli fact from the inventory
set_fact:
container_cli: "{{ hostvars[inventory_hostname].container_cli|default('podman') }}"
- name: Set container_cli fact from the inventory
set_fact:
container_cli: "{{ hostvars[inventory_hostname].container_cli|default('podman') }}"
- name: Set container filter format
set_fact:
container_filter_format: !unsafe "--format '{{ .Names }}'"
- name: Set container filter format
set_fact:
container_filter_format: !unsafe "--format '{{ .Names }}'"
- name: Set ceph_mon_container name
become: true
shell: "{{ container_cli }} ps {{ container_filter_format }} | grep ceph-mon"
register: ceph_mon_container
changed_when: False
- name: Set ceph_mon_container name
become: true
shell: "{{ container_cli }} ps {{ container_filter_format }} | grep ceph-mon"
register: ceph_mon_container
changed_when: false
- name: Set ceph cluster name
become: true
shell: find /etc/ceph -name '*.conf' -prune -print -quit | xargs basename -s '.conf'
register: ceph_cluster_name
changed_when: False
- name: Set ceph cluster name
become: true
shell: find /etc/ceph -name '*.conf' -prune -print -quit | xargs basename -s '.conf'
register: ceph_cluster_name
changed_when: false
- name: Get ceph health
become: true
shell: "{{ container_cli }} exec {{ ceph_mon_container.stdout }} ceph --cluster {{ ceph_cluster_name.stdout }} health | awk '{print $1}'"
register: ceph_health
- name: Get ceph health
become: true
shell: "{{ container_cli }} exec {{ ceph_mon_container.stdout }} ceph --cluster {{ ceph_cluster_name.stdout }} health | awk '{print $1}'"
register: ceph_health
- name: Check ceph health
warn:
msg: Ceph is in {{ ceph_health.stdout }} state.
when:
- ceph_health.stdout != 'HEALTH_OK'
- not fail_on_ceph_health_err|default(true)|bool
- name: Check ceph health
warn:
msg: Ceph is in {{ ceph_health.stdout }} state.
when:
- ceph_health.stdout != 'HEALTH_OK'
- not fail_on_ceph_health_err|default(true)|bool
- name: Fail if ceph health is HEALTH_ERR
fail:
msg: Ceph is in {{ ceph_health.stdout }} state.
when:
- ceph_health.stdout == 'HEALTH_ERR'
- fail_on_ceph_health_err|default(true)|bool
- name: Fail if ceph health is HEALTH_ERR
fail:
msg: Ceph is in {{ ceph_health.stdout }} state.
when:
- ceph_health.stdout == 'HEALTH_ERR'
- fail_on_ceph_health_err|default(true)|bool
- when:
- osd_percentage_min|default(0) > 0
block:
- name: set jq osd percentage filter
set_fact:
jq_osd_percentage_filter: '( (.num_in_osds) / (.num_osds) ) * 100'
- when:
- osd_percentage_min|default(0) > 0
block:
- name: set jq osd percentage filter
set_fact:
jq_osd_percentage_filter: '( (.num_in_osds) / (.num_osds) ) * 100'
- name: Get OSD stat percentage
become: true
shell: "{{ container_cli }} exec {{ ceph_mon_container.stdout }} ceph --cluster {{ ceph_cluster_name.stdout }} osd stat -f json | jq '{{ jq_osd_percentage_filter }}'"
register: ceph_osd_in_percentage
- name: Get OSD stat percentage
become: true
shell: >-
"{{ container_cli }}" exec "{{ ceph_mon_container.stdout }}" ceph
--cluster "{{ ceph_cluster_name.stdout }}" osd stat -f json | jq '{{ jq_osd_percentage_filter }}'
register: ceph_osd_in_percentage
- name: Fail if there is an unacceptable percentage of in OSDs
fail:
msg: "Only {{ ceph_osd_in_percentage.stdout|float }}% of OSDs are in, but {{ osd_percentage_min|default(0) }}% are required"
when:
- ceph_osd_in_percentage.stdout|float < osd_percentage_min|default(0)
- name: Fail if there is an unacceptable percentage of in OSDs
fail:
msg: "Only {{ ceph_osd_in_percentage.stdout|float }}% of OSDs are in, but {{ osd_percentage_min|default(0) }}% are required"
when:
- ceph_osd_in_percentage.stdout|float < osd_percentage_min|default(0)

2
roles/check-latest-packages-version/molecule/default/prepare.yml

@ -17,7 +17,7 @@
- name: Prepare
hosts: all
gather_facts: no
gather_facts: false
tasks:
- name: install patch rpm

2
roles/check-network-gateway/molecule/default/playbook.yml

@ -17,7 +17,7 @@
- name: Converge
hosts: all
gather_facts: no
gather_facts: false
tasks:
- name: successful check with ctlplane-subnet

2
roles/check-network-gateway/molecule/default/prepare.yml

@ -17,7 +17,7 @@
- name: Prepare
hosts: all
gather_facts: no
gather_facts: false
tasks:
- name: install hiera

8
roles/check-network-gateway/tasks/main.yml

@ -5,12 +5,12 @@
name: "tripleo_undercloud_conf_file"
- name: Get the local_subnet name from the undercloud_conf file
become: True
become: true
validations_read_ini:
path: "{{ tripleo_undercloud_conf_file }}"
section: DEFAULT
key: local_subnet
ignore_missing_file: True
ignore_missing_file: true
register: local_subnet
- name: Get gateway value from the undercloud.conf file
@ -19,7 +19,7 @@
path: "{{ tripleo_undercloud_conf_file }}"
section: "{% if local_subnet.value %}{{ local_subnet.value }}{% else %}ctlplane-subnet{% endif %}"
key: gateway
ignore_missing_file: True
ignore_missing_file: true
register: gateway
- name: Get local_ip value from the undercloud.conf file
@ -28,7 +28,7 @@
path: "{{ tripleo_undercloud_conf_file }}"
section: DEFAULT
key: local_ip
ignore_missing_file: True
ignore_missing_file: true
register: local_ip
- name: Test network_gateway if different from local_ip

4
roles/collect-flavors-and-verify-profiles/vars/main.yml

@ -5,5 +5,5 @@ metadata:
This validation checks the flavors assigned to roles exist and have the
correct capabilities set.
groups:
- pre-deployment
- pre-upgrade
- pre-deployment
- pre-upgrade

36
roles/container-status/tasks/main.yaml

@ -8,29 +8,29 @@
- when: "'Undercloud' in group_names"
block:
- name: Set container_cli fact from undercloud.conf
block:
- name: Get the path of tripleo undercloud config file
become: true
hiera:
name: "tripleo_undercloud_conf_file"
- name: Set container_cli fact from undercloud.conf
block:
- name: Get the path of tripleo undercloud config file
become: true
hiera:
name: "tripleo_undercloud_conf_file"
- name: Get container client from undercloud.conf
validations_read_ini:
path: "{{ tripleo_undercloud_conf_file }}"
section: DEFAULT
key: container_cli
ignore_missing_file: true
register: container_cli
- name: Get container client from undercloud.conf
validations_read_ini:
path: "{{ tripleo_undercloud_conf_file }}"
section: DEFAULT
key: container_cli
ignore_missing_file: true
register: container_cli
- name: Set uc_container_cli for the Undercloud
set_fact:
uc_container_cli: "{{ container_cli.value|default('podman', true) }}"
when: uc_container_cli is not defined
- name: Set uc_container_cli for the Undercloud
set_fact:
uc_container_cli: "{{ container_cli.value|default('podman', true) }}"
when: uc_container_cli is not defined
- name: Get failed containers for podman
changed_when: false
become: True
become: true
command: >
{% if oc_container_cli is defined %}{{ oc_container_cli }}{% else %}{{ uc_container_cli }}{% endif %}
{% raw %}

4
roles/containerized-undercloud-docker/tasks/main.yml

@ -2,7 +2,7 @@
- name: gather docker facts
docker_facts:
container_filter: status=running
become: yes
become: true
- name: compare running containers to list
set_fact:
@ -25,6 +25,6 @@
state: started # Port should be open
delay: 0 # No wait before first check (sec)
timeout: 3 # Stop checking after timeout (sec)
ignore_errors: yes
ignore_errors: true
loop: "{{ open_ports }}"
when: ctlplane_ip is defined

2
roles/controller-token/molecule/default/playbook.yml

@ -17,7 +17,7 @@
- name: Converge
hosts: all
gather_facts: no
gather_facts: false
tasks:
- name: pass validation

2
roles/controller-token/tasks/main.yml

@ -5,7 +5,7 @@
path: "{{ keystone_conf_file }}"
section: DEFAULT
key: admin_token
ignore_missing_file: True
ignore_missing_file: true
register: token_result
- name: Check if token value is disabled.

2
roles/controller-token/vars/main.yml

@ -5,4 +5,4 @@ metadata:
This validation checks that keystone admin token is disabled on both
undercloud and overcloud controller after deployment.
groups:
- post-deployment
- post-deployment

2
roles/controller-ulimits/molecule/default/playbook.yml

@ -17,7 +17,7 @@
- name: Converge
hosts: all
gather_facts: no
gather_facts: false
vars:
nofiles_min: 102400

4
roles/controller-ulimits/tasks/main.yml

@ -4,7 +4,7 @@
# NOTE: `ulimit` is a shell builtin so we have to invoke it like this:
command: sh -c "ulimit -n"
register: nofilesval
changed_when: False
changed_when: false
- name: Check nofiles limit
fail:
@ -18,7 +18,7 @@
# NOTE: `ulimit` is a shell builtin so we have to invoke it like this:
command: sh -c "ulimit -u"
register: nprocval
changed_when: False
changed_when: false
- name: Check nproc limit
fail:

2
roles/ctlplane-ip-range/molecule/default/playbook.yml

@ -17,7 +17,7 @@
- name: Converge
hosts: all
gather_facts: no
gather_facts: false
tasks:
- name: prepare directory tree for hiera

2
roles/ctlplane-ip-range/molecule/default/prepare.yml

@ -17,7 +17,7 @@
- name: Prepare
hosts: all
gather_facts: no
gather_facts: false
tasks:
- name: install hiera

4
roles/ctlplane-ip-range/tasks/main.yml

@ -10,7 +10,7 @@
path: "{{ tripleo_undercloud_conf_file }}"
section: ctlplane-subnet
key: dhcp_start
ignore_missing_file: True
ignore_missing_file: true
default: "192.0.2.5"
register: dhcp_start
@ -20,7 +20,7 @@
path: "{{ tripleo_undercloud_conf_file }}"
section: ctlplane-subnet
key: dhcp_end
ignore_missing_file: True
ignore_missing_file: true
default: "192.0.2.24"
register: dhcp_end

2
roles/default-node-count/vars/main.yml

@ -5,4 +5,4 @@ metadata:
This validation checks that the nodes and hypervisor statistics
add up.
groups:
- pre-deployment
- pre-deployment

6
roles/dhcp-validations/tasks/dhcp-introspection.yaml

@ -1,6 +1,6 @@
---
- name: Look up the introspection interface
become: True
become: true
validations_read_ini:
path: "{{ ironic_inspector_conf }}"
section: iptables
@ -8,7 +8,7 @@
register: interface
- name: Look up the introspection interface from the deprecated option
become: True
become: true
validations_read_ini:
path: "{{ ironic_inspector_conf }}"
section: firewall
@ -17,4 +17,4 @@
- name: Look for rogue DHCP servers
script: files/rogue_dhcp.py {{ interface.value or interface_deprecated.value or 'br-ctlplane' }}
changed_when: False
changed_when: false

2
roles/dhcp-validations/tasks/dhcp-provisioning.yaml

@ -7,7 +7,7 @@
path: "{{ tripleo_undercloud_conf_file }}"
section: DEFAULT
key: local_interface
ignore_missing_file: True
ignore_missing_file: true
register: local_interface
- name: Look for DHCP responses

2
roles/dns/tasks/main.yml

@ -1,4 +1,4 @@
---
- name: Ensure DNS resolution works
command: "getent hosts {{ server_to_lookup }}"
changed_when: False
changed_when: false

2
roles/haproxy/molecule/default/playbook.yml

@ -17,7 +17,7 @@
- name: Converge
hosts: all
gather_facts: no
gather_facts: false
vars:
haproxy_config_file: /haproxy.cfg

4
roles/healthcheck-service-status/tasks/main.yml

@ -2,7 +2,7 @@
- name: Get the healthcheck services list enabled on node
shell: >
systemctl list-unit-files | grep "^tripleo.*healthcheck.*enabled" | awk -F'.' '{print $1}'
changed_when: False
changed_when: false
register: healthcheck_services_list
when: inflight_healthcheck_services | length < 1
@ -23,7 +23,7 @@
until:
- systemd_healthcheck_state.status.ExecMainPID != '0'
- systemd_healthcheck_state.status.ActiveState in ['inactive', 'failed']
ignore_errors: True
ignore_errors: true
register: systemd_healthcheck_state
with_items: "{{ hc_services }}"

4
roles/image-serve/molecule/default/molecule.yml

@ -8,7 +8,7 @@ platforms:
- name: centos7
hostname: centos7
image: centos:7
override_command: True
override_command: true
command: python -m SimpleHTTPServer 8787
pkg_extras: python-setuptools python-enum34 python-netaddr epel-release ruby PyYAML
easy_install:
@ -20,7 +20,7 @@ platforms:
- name: fedora28
hostname: fedora28
image: fedora:28
override_command: True
override_command: true
command: python3 -m http.server 8787
pkg_extras: python*-setuptools python*-enum python*-netaddr ruby PyYAML
environment:

4
roles/image-serve/molecule/default/playbook.yml

@ -17,7 +17,7 @@
- name: Converge
hosts: all
gather_facts: no
gather_facts: false
tasks:
- name: detect wrong port
@ -39,7 +39,7 @@
block:
- name: run validation for 404
include_role:
name: image-serve
name: image-serve
rescue:
- name: Clear host errors
meta: clear_host_errors

2
roles/image-serve/molecule/default/prepare.yml

@ -17,7 +17,7 @@
- name: Prepare
hosts: all
gather_facts: no
gather_facts: false
tasks:
- name: install hiera

2
roles/image-serve/tasks/main.yaml

@ -10,7 +10,7 @@
path: "{{ tripleo_undercloud_conf_file }}"
section: DEFAULT
key: local_ip
ignore_missing_file: True
ignore_missing_file: true
register: local_ip
- name: Set container registry host

1
roles/image-serve/vars/main.yml

@ -1,3 +1,4 @@
---
metadata:
name: Image-serve availability
description: Verify that image-serve service is ready

2
roles/mysql-open-files-limit/tasks/main.yml

@ -9,7 +9,7 @@
"{{ container_cli|default('podman', true) }}" exec -u root
$("{{ container_cli|default('podman', true) }}" ps -q --filter "name=mysql|galera-bundle" | head -1)
/bin/bash -c 'ulimit -n'
changed_when: False
changed_when: false
register: mysqld_open_files_limit
- name: Test the open-files-limit value

34
roles/neutron-sanity-check/tasks/main.yml

@ -7,24 +7,24 @@
- when: "'Undercloud' in group_names"
block:
- name: Get the path of tripleo undercloud config file
become: true
hiera:
name: "tripleo_undercloud_conf_file"
- name: Get the path of tripleo undercloud config file
become: true
hiera:
name: "tripleo_undercloud_conf_file"
- name: Get the Container CLI from the undercloud.conf file
become: true
validations_read_ini:
path: "{{ tripleo_undercloud_conf_file }}"
section: DEFAULT
key: container_cli
ignore_missing_file: true
register: container_cli
- name: Get the Container CLI from the undercloud.conf file
become: true
validations_read_ini:
path: "{{ tripleo_undercloud_conf_file }}"
section: DEFAULT
key: container_cli
ignore_missing_file: true
register: container_cli
- name: Set uc_container_cli and container_name for the Undercloud
set_fact:
uc_container_cli: "{{ container_cli.value|default('podman', true) }}"
container_name: "neutron_ovs_agent"
- name: Set uc_container_cli and container_name for the Undercloud
set_fact:
uc_container_cli: "{{ container_cli.value|default('podman', true) }}"
container_name: "neutron_ovs_agent"
- name: Run neutron-sanity-check
command: >
@ -35,7 +35,7 @@
become: true
register: nsc_return
ignore_errors: true
changed_when: False
changed_when: false
- name: Detect errors
set_fact:

4
roles/no-op-firewall-nova-driver/molecule/default/playbook.yml

@ -17,7 +17,7 @@
- name: Converge
hosts: all
gather_facts: no
gather_facts: false
vars:
nova_conf_path: "/nova.conf"
@ -48,7 +48,7 @@
section: DEFAULT
option: firewall_driver
value: CHANGEME
backup: yes
backup: true
- include_role:
name: no-op-firewall-nova-driver

2
roles/no-op-firewall-nova-driver/vars/main.yml

@ -5,4 +5,4 @@ metadata:
When using Neutron, the `firewall_driver` option in Nova must be set to
`NoopFirewallDriver`.
groups:
- post-deployment
- post-deployment

2
roles/node-health/tasks/main.yml

@ -6,7 +6,7 @@
- name: Ping all overcloud nodes
icmp_ping:
host: "{{ item }}"
host: "{{ item }}"
with_items: "{{ oc_ips.results | map(attribute='ansible_facts.ansible_host') | list }}"
ignore_errors: true
register: ping_results

14
roles/nova-event-callback/tasks/main.yml

@ -1,6 +1,6 @@
---
- name: Get VIF Plugging setting values from nova.conf
become: True
become: true
validations_read_ini:
path: "{{ nova_config_file }}"
section: DEFAULT
@ -21,14 +21,14 @@
with_items: "{{ nova_config_result.results }}"
- name: Get auth_url value from hiera
become: True
become: true
command: hiera -c /etc/puppet/hiera.yaml neutron::server::notifications::auth_url
ignore_errors: True
changed_when: False
ignore_errors: true
changed_when: false
register: auth_url
- name: Get auth_url value from neutron.conf
become: True
become: true
validations_read_ini:
path: "{{ neutron_config_file }}"
section: nova
@ -45,7 +45,7 @@
failed_when: "neutron_auth_url_result.value != auth_url.stdout"
- name: Get Notify Nova settings values from neutron.conf
become: True
become: true
validations_read_ini:
path: "{{ neutron_config_file }}"
section: DEFAULT
@ -63,7 +63,7 @@
with_items: "{{ neutron_notify_nova_result.results }}"
- name: Get Tenant Name setting value from neutron.conf
become: True
become: true
validations_read_ini:
path: "{{ neutron_config_file }}"
section: nova

2
roles/nova-status/molecule/default/playbook.yml

@ -17,7 +17,7 @@
- name: Converge
hosts: all
gather_facts: no
gather_facts: false
tasks:
- name: working detection

2
roles/nova-status/molecule/default/prepare.yml

@ -17,7 +17,7 @@
- name: Prepare
hosts: all
gather_facts: no
gather_facts: false
tasks:
- name: Populate successful podman CLI

2
roles/nova-status/tasks/main.yml

@ -7,7 +7,7 @@
- name: Check nova upgrade status
become: true
command: "{{ container_cli }} exec -u root nova_api nova-status upgrade check"
changed_when: False
changed_when: false
register: nova_upgrade_check
- name: Warn if at least one check encountered an issue

26
roles/ntp/tasks/main.yml

@ -1,26 +1,26 @@
---
- name: Get if chrony is enabled
become: True
become: true
hiera:
name: "chrony_enabled"
- when: chrony_enabled|bool
block:
- name: Populate service facts
service_facts: # needed to make yaml happy
- name: Populate service facts
service_facts: # needed to make yaml happy
- name: Fail if chronyd service is not running
fail:
msg: "Chronyd service is not running"
when: "ansible_facts.services['chronyd.service'].state != 'running'"
- name: Fail if chronyd service is not running
fail:
msg: "Chronyd service is not running"
when: "ansible_facts.services['chronyd.service'].state != 'running'"
- name: Run chronyc
become: True
command: chronyc -a 'burst 4/4'
changed_when: False
- name: Run chronyc
become: true
command: chronyc -a 'burst 4/4'
changed_when: false
# ntpstat returns 0 if synchronised and non-zero otherwise:
- name: Run ntpstat
# ntpstat returns 0 if synchronised and non-zero otherwise:
command: ntpstat
changed_when: False
changed_when: false
when: not chrony_enabled|bool

2
roles/ntp/vars/main.yml

@ -7,4 +7,4 @@ metadata:
The deployment should configure and run chronyd. This validation verifies
that it is indeed running and connected to an NTP server on all nodes.
groups:
- post-deployment
- post-deployment

4
roles/openshift-on-openstack/defaults/main.yml

@ -9,5 +9,5 @@ min_node_ram_testing: 4096 # Minimum ram per node for testing
min_node_disk_testing: 40 # Minimum disk per node for testing
min_node_ram_prod: 16384 # Minimum ram per node for production
min_node_disk_prod: 42 # Minimum disk per node for production
resource_reqs_testing: False
resource_reqs_prod: False
resource_reqs_testing: false
resource_reqs_prod: false

10
roles/openshift-on-openstack/tasks/openshift-hw-requirements.yaml

@ -23,7 +23,7 @@
domain:
id: default
password: "{{ overcloud_admin_password }}"
return_content: yes
return_content: true
status_code: 201
register: keystone_result
no_log: true
@ -53,7 +53,7 @@
headers:
X-Auth-Token: "{{ auth_token }}"
Accept: application/vnd.openstack.compute.v2.1+json
return_content: yes
return_content: true
follow_redirects: all
register: flavors_result_testing
@ -64,7 +64,7 @@
headers:
X-Auth-Token: "{{ auth_token }}"
Accept: application/vnd.openstack.compute.v2.1+json
return_content: yes
return_content: true
follow_redirects: all
register: flavors_result_prod
@ -89,7 +89,7 @@
headers:
X-Auth-Token: "{{ auth_token }}"
Accept: application/vnd.openstack.compute.v2.1+json
return_content: yes
return_content: true
follow_redirects: all
register: hypervisors_result
@ -116,7 +116,7 @@
method: GET
headers:
X-Auth-Token: "{{ auth_token }}"
return_content: yes
return_content: true
follow_redirects: all
register: images

106
roles/openshift-on-openstack/tasks/openshift-nw-requirements.yaml

@ -1,7 +1,7 @@
---
- name: Set fact to identify if the overcloud was deployed
set_fact:
overcloud_deployed: "{{ groups['overcloud'] is defined }}"