
Validate the playbooks' metadata structure

This patch adds a custom ansible-lint rule to enforce the structure of
the validation playbooks:

*ValidationHasMetadataRule*:
Throws an ansible-lint error if:
- the *hosts* key is empty or not found,
- the *vars* dictionary is missing,
- the *metadata* dict is missing in *vars*,
- the *name*/*description*/*groups* keys are missing or have the wrong
  data type,
- the validation belongs to one or several groups NOT in the official list
  of groups (groups.yaml).
A compliant playbook is sketched below.
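
For reference, a playbook that satisfies the rule carries its metadata
under *vars* as in the following sketch (the host, validation name,
description, group and role name are illustrative only; the required keys
and layout mirror the rule's *info* example):

---
- hosts: undercloud
  vars:
    metadata:
      name: Example validation
      description: >
        A full description of what the validation checks.
      groups:
        - pre-deployment
  roles:
    - example-validation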

*YAMLLINT*:
- Enable the yamllint check in the tox linters environment
- WIP: Fix the detected yamllint errors (a typical fix is shown below)
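
Most of the yamllint/ansible-lint errors fixed in this series are of the
"truthy" and indentation kind visible in the hunks below, for example:

# before
gather_facts: yes
changed_when: False

# after
gather_facts: true
changed_when: false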

Change-Id: If233286aa9f4299f02f13dc34f1e8c05d89df851
Signed-off-by: Gael Chamoulaud <gchamoul@redhat.com>
(cherry picked from commit e50e1a067d)
Signed-off-by: Gael Chamoulaud <gchamoul@redhat.com>
Gael Chamoulaud (Strider), 2 months ago
Parent commit: 02ebd6b335
100 changed files with 511 additions and 387 deletions
  1. +2 -0  .ansible-lint
  2. +138 -0  .ansible-lint_rules/ValidationHasMetadataRule.py
  3. +11 -0  .yamllint
  4. +1 -1  playbooks/ceph-dependencies-installed.yaml
  5. +1 -1  playbooks/check-latest-packages-version.yaml
  6. +2 -2  playbooks/collect-flavors-and-verify-profiles.yaml
  7. +1 -1  playbooks/controller-token.yaml
  8. +1 -1  playbooks/controller-ulimits.yaml
  9. +1 -1  playbooks/default-node-count.yaml
  10. +6 -6  playbooks/neutron-sanity-check.yaml
  11. +1 -1  playbooks/no-op-firewall-nova-driver.yaml
  12. +1 -1  playbooks/ntp.yaml
  13. +3 -3  playbooks/openshift-hw-requirements.yaml
  14. +1 -1  playbooks/openshift-nw-requirements.yaml
  15. +3 -3  playbooks/openstack-endpoints.yaml
  16. +1 -1  playbooks/repos.yaml
  17. +2 -1  playbooks/stonith-exists.yaml
  18. +1 -1  playbooks/tls-everywhere-post-deployment.yaml
  19. +1 -1  playbooks/tls-everywhere-pre-deployment.yaml
  20. +1 -1  playbooks/tls-everywhere-prep.yaml
  21. +1 -1  playbooks/undercloud-cpu.yaml
  22. +3 -3  playbooks/undercloud-disk-space-pre-upgrade.yaml
  23. +5 -5  playbooks/undercloud-disk-space.yaml
  24. +2 -2  playbooks/undercloud-heat-purge-deleted.yaml
  25. +7 -7  playbooks/undercloud-neutron-sanity-check.yaml
  26. +1 -1  playbooks/undercloud-ram.yaml
  27. +1 -1  playbooks/undercloud-selinux-mode.yaml
  28. +1 -1  playbooks/undercloud-tokenflush.yaml
  29. +1 -1  roles/advanced-format-512e-support/tasks/main.yml
  30. +0 -1  roles/ceph/defaults/main.yml
  31. +4 -5  roles/ceph/tasks/ceph-ansible-installed.yaml
  32. +51 -50  roles/ceph/tasks/ceph-health.yaml
  33. +1 -1  roles/check-latest-packages-version/molecule/default/prepare.yml
  34. +1 -1  roles/check-network-gateway/molecule/default/playbook.yml
  35. +1 -1  roles/check-network-gateway/molecule/default/prepare.yml
  36. +4 -4  roles/check-network-gateway/tasks/main.yml
  37. +2 -2  roles/collect-flavors-and-verify-profiles/vars/main.yml
  38. +18 -18  roles/container-status/tasks/main.yaml
  39. +2 -2  roles/containerized-undercloud-docker/tasks/main.yml
  40. +1 -1  roles/controller-token/molecule/default/playbook.yml
  41. +1 -1  roles/controller-token/tasks/main.yml
  42. +1 -1  roles/controller-token/vars/main.yml
  43. +1 -1  roles/controller-ulimits/molecule/default/playbook.yml
  44. +2 -2  roles/controller-ulimits/tasks/main.yml
  45. +1 -1  roles/ctlplane-ip-range/molecule/default/playbook.yml
  46. +1 -1  roles/ctlplane-ip-range/molecule/default/prepare.yml
  47. +2 -2  roles/ctlplane-ip-range/tasks/main.yml
  48. +1 -1  roles/default-node-count/vars/main.yml
  49. +3 -3  roles/dhcp-validations/tasks/dhcp-introspection.yaml
  50. +1 -1  roles/dhcp-validations/tasks/dhcp-provisioning.yaml
  51. +1 -1  roles/dns/tasks/main.yml
  52. +1 -1  roles/haproxy/molecule/default/playbook.yml
  53. +2 -2  roles/healthcheck-service-status/tasks/main.yml
  54. +2 -2  roles/image-serve/molecule/default/molecule.yml
  55. +2 -2  roles/image-serve/molecule/default/playbook.yml
  56. +1 -1  roles/image-serve/molecule/default/prepare.yml
  57. +1 -1  roles/image-serve/tasks/main.yaml
  58. +1 -0  roles/image-serve/vars/main.yml
  59. +1 -1  roles/mysql-open-files-limit/tasks/main.yml
  60. +17 -17  roles/neutron-sanity-check/tasks/main.yml
  61. +2 -2  roles/no-op-firewall-nova-driver/molecule/default/playbook.yml
  62. +1 -1  roles/no-op-firewall-nova-driver/vars/main.yml
  63. +1 -1  roles/node-health/tasks/main.yml
  64. +7 -7  roles/nova-event-callback/tasks/main.yml
  65. +1 -1  roles/nova-status/molecule/default/playbook.yml
  66. +1 -1  roles/nova-status/molecule/default/prepare.yml
  67. +1 -1  roles/nova-status/tasks/main.yml
  68. +13 -13  roles/ntp/tasks/main.yml
  69. +1 -1  roles/ntp/vars/main.yml
  70. +2 -2  roles/openshift-on-openstack/defaults/main.yml
  71. +5 -5  roles/openshift-on-openstack/tasks/openshift-hw-requirements.yaml
  72. +53 -53  roles/openshift-on-openstack/tasks/openshift-nw-requirements.yaml
  73. +3 -3  roles/openstack-endpoints/tasks/main.yml
  74. +3 -3  roles/ovs-dpdk-pmd/tasks/main.yml
  75. +4 -4  roles/pacemaker-status/tasks/main.yml
  76. +1 -1  roles/rabbitmq-limits/molecule/default/playbook.yml
  77. +1 -1  roles/rabbitmq-limits/molecule/default/prepare.yml
  78. +2 -2  roles/repos/molecule/default/playbook.yml
  79. +5 -5  roles/repos/tasks/main.yml
  80. +0 -27  roles/service-status/meta/main.yml
  81. +1 -1  roles/service-status/tasks/main.yaml
  82. +1 -1  roles/stonith-exists/molecule/default/prepare.yml
  83. +2 -2  roles/stonith-exists/tasks/main.yml
  84. +2 -1  roles/stonith-exists/vars/main.yml
  85. +5 -5  roles/tls-everywhere/tasks/common.yaml
  86. +3 -3  roles/tls-everywhere/tasks/overcloud-post-deployment.yaml
  87. +8 -8  roles/tls-everywhere/tasks/pre-deployment-containerized.yaml
  88. +6 -7  roles/tls-everywhere/tasks/pre-deployment-non-containerized.yaml
  89. +17 -17  roles/tls-everywhere/tasks/pre-deployment.yaml
  90. +8 -8  roles/tls-everywhere/tasks/prep.yaml
  91. +1 -1  roles/undercloud-debug/defaults/main.yml
  92. +3 -3  roles/undercloud-debug/molecule/default/playbook.yml
  93. +2 -2  roles/undercloud-debug/tasks/main.yml
  94. +5 -6  roles/undercloud-disk-space/defaults/main.yml
  95. +4 -4  roles/undercloud-disk-space/tasks/main.yml
  96. +2 -2  roles/undercloud-heat-purge-deleted/molecule/default/molecule.yml
  97. +1 -1  roles/undercloud-heat-purge-deleted/molecule/default/playbook.yml
  98. +1 -1  roles/undercloud-heat-purge-deleted/molecule/default/prepare.yml
  99. +1 -1  roles/undercloud-heat-purge-deleted/tasks/main.yml
  100. +2 -2  roles/undercloud-heat-purge-deleted/vars/main.yaml

+ 2
- 0
.ansible-lint View File

@@ -2,6 +2,8 @@ exclude_paths:
- releasenotes/
parseable: true
quiet: false
rulesdir:
- .ansible-lint_rules/
skip_list:
# Lines should be no longer than 120 chars.
- '204'
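
Reassembled, the resulting .ansible-lint would read roughly as follows
(the indentation is an assumption, since the diff view strips it; the two
added lines are the rulesdir key and its entry):

exclude_paths:
  - releasenotes/
parseable: true
quiet: false
rulesdir:
  - .ansible-lint_rules/
skip_list:
  # Lines should be no longer than 120 chars.
  - '204'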


+ 138
- 0
.ansible-lint_rules/ValidationHasMetadataRule.py View File

@@ -0,0 +1,138 @@
import os
import six
import yaml

from ansiblelint import AnsibleLintRule


class ValidationHasMetadataRule(AnsibleLintRule):
    id = '750'
    shortdesc = 'Validation playbook must have mandatory metadata'

    info = """
---
- hosts: localhost
  vars:
    metadata:
      name: Validation Name
      description: >
        A full description of the validation.
      groups:
        - group1
        - group2
        - group3
"""

    description = (
        "The Validation playbook must have mandatory metadata:\n"
        "```{}```".format(info)
    )

    severity = 'HIGH'
    tags = ['metadata']

    no_vars_found = "The validation playbook must contain a 'vars' dictionary"
    no_meta_found = (
        "The validation playbook must contain "
        "a 'metadata' dictionary under vars"
    )
    no_groups_found = \
        "*metadata* should contain a list of group (groups)"

    unknown_groups_found = (
        "Unknown group(s) '{}' found! "
        "The official list of groups is '{}'. "
        "To add a new validation group, please add it in the groups.yaml "
        "file at the root of the tripleo-validations project."
    )

    def get_groups(self):
        """Returns a list of group names supported by
        tripleo-validations by reading 'groups.yaml'
        file located in the base directory.
        """
        results = []

        grp_file_path = os.path.abspath('groups.yaml')

        with open(grp_file_path, "r") as grps:
            contents = yaml.safe_load(grps)

        for grp_name, grp_desc in sorted(contents.items()):
            results.append(grp_name)

        return results

    def matchplay(self, file, data):
        results = []
        path = file['path']

        if file['type'] == 'playbook':
            if path.startswith("playbooks/") or \
                    path.find("tripleo-validations/playbooks/") > 0:

                # *hosts* line check
                hosts = data.get('hosts', None)
                if not hosts:
                    return [({
                        path: data
                    }, "No *hosts* key found in the playbook")]

                # *vars* lines check
                vars = data.get('vars', None)
                if not vars:
                    return [({
                        path: data
                    }, self.no_vars_found)]
                else:
                    if not isinstance(vars, dict):
                        return [({path: data}, '*vars* should be a dictionary')]

                    # *metadata* lines check
                    metadata = data['vars'].get('metadata', None)
                    if metadata:
                        if not isinstance(metadata, dict):
                            return [(
                                {path: data},
                                '*metadata* should be a dictionary')]
                    else:
                        return [({path: data}, self.no_meta_found)]

                    # *metadata>[name|description] lines check
                    for info in ['name', 'description']:
                        if not metadata.get(info, None):
                            results.append((
                                {path: data},
                                '*metadata* should contain a %s key' % info))
                            continue
                        if not isinstance(metadata.get(info),
                                          six.string_types):
                            results.append((
                                {path: data},
                                '*%s* should be a string' % info))

                    # *metadata>groups* lines check
                    if not metadata.get('groups', None):
                        results.append((
                            {path: data},
                            self.no_groups_found))
                    else:
                        if not isinstance(metadata.get('groups'), list):
                            results.append((
                                {path: data},
                                '*groups* should be a list'))
                        else:
                            groups = metadata.get('groups')
                            group_list = self.get_groups()
                            unknown_groups_list = list(
                                set(groups) - set(group_list))
                            if unknown_groups_list:
                                results.append((
                                    {path: data},
                                    self.unknown_groups_found.format(
                                        unknown_groups_list,
                                        group_list)
                                ))
                    return results

        return results
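
get_groups() above expects a groups.yaml file at the project root whose
top-level keys are the group names. A minimal sketch with hypothetical
descriptions (only the top-level keys matter to the rule; the exact value
format under each group is an assumption):

---
pre-deployment:
  - description: >-
      Validations to run before deploying the overcloud.
post-deployment:
  - description: >-
      Validations to run once the overcloud has been deployed.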

+ 11
- 0
.yamllint View File

@@ -0,0 +1,11 @@
---
extends: default

rules:
  line-length:
    # matches hardcoded 160 value from ansible-lint
    max: 160

ignore: |
  zuul.d/*.yaml
  releasenotes/notes/*.yaml

+ 1
- 1
playbooks/ceph-dependencies-installed.yaml View File

@@ -10,7 +10,7 @@
fail_without_deps: true
tripleo_delegate_to: "{{ groups['overcloud'] | default([]) }}"
packages:
- lvm2
- lvm2
tasks:
- include_role:
name: ceph


+ 1
- 1
playbooks/check-latest-packages-version.yaml View File

@@ -1,6 +1,6 @@
---
- hosts: undercloud
gather_facts: yes
gather_facts: true
vars:
metadata:
name: Check if latest version of packages is installed


+ 2
- 2
playbooks/collect-flavors-and-verify-profiles.yaml View File

@@ -7,7 +7,7 @@
This validation checks the flavors assigned to roles exist and have the
correct capabilities set.
groups:
- pre-deployment
- pre-upgrade
- pre-deployment
- pre-upgrade
roles:
- collect-flavors-and-verify-profiles

+ 1
- 1
playbooks/controller-token.yaml View File

@@ -7,7 +7,7 @@
This validation checks that keystone admin token is disabled on both
undercloud and overcloud controller after deployment.
groups:
- post-deployment
- post-deployment
keystone_conf_file: "/var/lib/config-data/puppet-generated/keystone/etc/keystone/keystone.conf"
roles:
- controller-token

+ 1
- 1
playbooks/controller-ulimits.yaml View File

@@ -6,7 +6,7 @@
description: >
This will check the ulimits of each controller.
groups:
- post-deployment
- post-deployment
nofiles_min: 1024
nproc_min: 2048
roles:


+ 1
- 1
playbooks/default-node-count.yaml View File

@@ -7,6 +7,6 @@
This validation checks that the nodes and hypervisor statistics
add up.
groups:
- pre-deployment
- pre-deployment
roles:
- default-node-count

+ 6
- 6
playbooks/neutron-sanity-check.yaml View File

@@ -17,12 +17,12 @@
# will be passed to the Neutron services. The order is important
# here: the values in later files take precedence.
configs:
- /etc/neutron/neutron.conf
- /usr/share/neutron/neutron-dist.conf
- /etc/neutron/metadata_agent.ini
- /etc/neutron/dhcp_agent.ini
- /etc/neutron/fwaas_driver.ini
- /etc/neutron/l3_agent.ini
- /etc/neutron/neutron.conf
- /usr/share/neutron/neutron-dist.conf
- /etc/neutron/metadata_agent.ini
- /etc/neutron/dhcp_agent.ini
- /etc/neutron/fwaas_driver.ini
- /etc/neutron/l3_agent.ini

roles:
- neutron-sanity-check

+ 1
- 1
playbooks/no-op-firewall-nova-driver.yaml View File

@@ -7,7 +7,7 @@
When using Neutron, the `firewall_driver` option in Nova must be set to
`NoopFirewallDriver`.
groups:
- post-deployment
- post-deployment
nova_conf_path: "/var/lib/config-data/puppet-generated/nova_libvirt/etc/nova/nova.conf"
roles:
- no-op-firewall-nova-driver

+ 1
- 1
playbooks/ntp.yaml View File

@@ -9,6 +9,6 @@
The deployment should configure and run chronyd. This validation verifies
that it is indeed running and connected to an NTP server on all nodes.
groups:
- post-deployment
- post-deployment
roles:
- ntp

+ 3
- 3
playbooks/openshift-hw-requirements.yaml View File

@@ -12,7 +12,7 @@
- Are images named centos or rhel available?
- Are there sufficient compute resources available for a default setup? (1 Master node, 1 Infra node, 2 App nodes)
groups:
- openshift-on-openstack
- openshift-on-openstack
min_total_ram_testing: 16384 # 4 per node
min_total_vcpus_testing: 4 # 1 per node
min_total_disk_testing: 93 # Master: 40, others: 17 per node
@@ -23,8 +23,8 @@
min_node_disk_testing: 40 # Minimum disk per node for testing
min_node_ram_prod: 16384 # Minimum ram per node for production
min_node_disk_prod: 42 # Minimum disk per node for production
resource_reqs_testing: False
resource_reqs_prod: False
resource_reqs_testing: false
resource_reqs_prod: false
tasks:
- include_role:
name: openshift-on-openstack


+ 1
- 1
playbooks/openshift-nw-requirements.yaml View File

@@ -7,7 +7,7 @@
Checks if an external network has been configured on the overcloud as
required for an OpenShift deployment on top of OpenStack.
groups:
- openshift-on-openstack
- openshift-on-openstack
tasks:
- include_role:
name: openshift-on-openstack


+ 3
- 3
playbooks/openstack-endpoints.yaml View File

@@ -8,8 +8,8 @@
This validation gets the PublicVip address from the deployment and
tries to access Horizon and get a Keystone token.
groups:
- post-deployment
- pre-upgrade
- post-upgrade
- post-deployment
- pre-upgrade
- post-upgrade
roles:
- openstack-endpoints

+ 1
- 1
playbooks/repos.yaml View File

@@ -1,6 +1,6 @@
---
- hosts: undercloud, overcloud
gather_facts: yes
gather_facts: true
vars:
metadata:
name: Check correctness of current repositories


+ 2
- 1
playbooks/stonith-exists.yaml View File

@@ -7,7 +7,8 @@
Verify that stonith devices are configured for your OpenStack Platform HA cluster.
We don't configure stonith device with TripleO Installer. Because the hardware
configuration may be differ in each environment and requires different fence agents.
How to configure fencing please read https://access.redhat.com/documentation/en/red-hat-openstack-platform/8/paged/director-installation-and-usage/86-fencing-the-controller-nodes
How to configure fencing please read
https://access.redhat.com/documentation/en/red-hat-openstack-platform/8/paged/director-installation-and-usage/86-fencing-the-controller-nodes
groups:
- post-deployment
roles:


+ 1
- 1
playbooks/tls-everywhere-post-deployment.yaml View File

@@ -8,7 +8,7 @@
and that all certs being tracked by certmonger are in the
MONITORING state.
groups:
- post-deployment
- post-deployment
tasks:
- include_role:
name: tls-everywhere


+ 1
- 1
playbooks/tls-everywhere-pre-deployment.yaml View File

@@ -7,7 +7,7 @@
Checks that the undercloud has novajoin set up corectly and
that we are ready to do the overcloud deploy with tls-everywhere.
groups:
- pre-deployment
- pre-deployment
tasks:
- include_role:
name: tls-everywhere


+ 1
- 1
playbooks/tls-everywhere-prep.yaml View File

@@ -7,7 +7,7 @@
Checks that the undercloud is ready to set up novajoin and
to register to IdM as a client as part of undercloud-install.
groups:
- prep
- prep
tasks:
- include_role:
name: tls-everywhere


+ 1
- 1
playbooks/undercloud-cpu.yaml View File

@@ -1,6 +1,6 @@
---
- hosts: undercloud
gather_facts: yes
gather_facts: true
vars:
metadata:
name: Verify undercloud fits the CPU core requirements


+ 3
- 3
playbooks/undercloud-disk-space-pre-upgrade.yaml View File

@@ -11,10 +11,10 @@
groups:
- pre-upgrade
volumes:
- {mount: /var/lib/docker, min_size: 10}
- {mount: /var/lib/docker, min_size: 10}
- {mount: /var/lib/config-data, min_size: 3}
- {mount: /var, min_size: 16}
- {mount: /, min_size: 20}
- {mount: /var, min_size: 16}
- {mount: /, min_size: 20}

roles:
- undercloud-disk-space

+ 5
- 5
playbooks/undercloud-disk-space.yaml View File

@@ -12,12 +12,12 @@
- prep
- pre-introspection
volumes:
- {mount: /var/lib/docker, min_size: 10}
- {mount: /var/lib/docker, min_size: 10}
- {mount: /var/lib/config-data, min_size: 3}
- {mount: /var/log, min_size: 3}
- {mount: /usr, min_size: 5}
- {mount: /var, min_size: 20}
- {mount: /, min_size: 25}
- {mount: /var/log, min_size: 3}
- {mount: /usr, min_size: 5}
- {mount: /var, min_size: 20}
- {mount: /, min_size: 25}

roles:
- undercloud-disk-space

+ 2
- 2
playbooks/undercloud-heat-purge-deleted.yaml View File

@@ -8,8 +8,8 @@
heat database can grow very large. This validation checks that
the purge_deleted crontab has been set up.
groups:
- pre-upgrade
- pre-deployment
- pre-upgrade
- pre-deployment
cron_check: "heat-manage purge_deleted"
roles:
- undercloud-heat-purge-deleted

+ 7
- 7
playbooks/undercloud-neutron-sanity-check.yaml View File

@@ -17,13 +17,13 @@
# will be passed to the Neutron services. The order is important
# here: the values in later files take precedence.
configs:
- /etc/neutron/neutron.conf
- /usr/share/neutron/neutron-dist.conf
- /etc/neutron/metadata_agent.ini
- /etc/neutron/dhcp_agent.ini
- /etc/neutron/plugins/ml2/openvswitch_agent.ini
- /etc/neutron/fwaas_driver.ini
- /etc/neutron/l3_agent.ini
- /etc/neutron/neutron.conf
- /usr/share/neutron/neutron-dist.conf
- /etc/neutron/metadata_agent.ini
- /etc/neutron/dhcp_agent.ini
- /etc/neutron/plugins/ml2/openvswitch_agent.ini
- /etc/neutron/fwaas_driver.ini
- /etc/neutron/l3_agent.ini

roles:
- neutron-sanity-check

+ 1
- 1
playbooks/undercloud-ram.yaml View File

@@ -1,6 +1,6 @@
---
- hosts: undercloud
gather_facts: yes
gather_facts: true
vars:
metadata:
name: Verify the undercloud fits the RAM requirements


+ 1
- 1
playbooks/undercloud-selinux-mode.yaml View File

@@ -1,6 +1,6 @@
---
- hosts: undercloud
gather_facts: yes
gather_facts: true
vars:
metadata:
name: Undercloud SELinux Enforcing Mode Check


+ 1
- 1
playbooks/undercloud-tokenflush.yaml View File

@@ -8,7 +8,7 @@
keystone database can grow very large. This validation checks that
the keystone token_flush crontab has been set up.
groups:
- pre-introspection
- pre-introspection
cron_check: "keystone-manage token_flush"
roles:
- undercloud-tokenflush

+ 1
- 1
roles/advanced-format-512e-support/tasks/main.yml View File

@@ -2,7 +2,7 @@
- name: List the available drives
register: drive_list
command: "ls /sys/class/block/"
changed_when: False
changed_when: false

- name: Detect whether the drive uses Advanced Format
advanced_format: drive={{ item }}


+ 0
- 1
roles/ceph/defaults/main.yml View File

@@ -4,4 +4,3 @@ fail_without_deps: false
fail_on_ceph_health_err: false
osd_percentage_min: 0
ceph_ansible_repo: "centos-ceph-nautilus"


+ 4
- 5
roles/ceph/tasks/ceph-ansible-installed.yaml View File

@@ -2,9 +2,9 @@
- name: Check if ceph-ansible is installed
shell: rpm -q ceph-ansible || true
args:
warn: no
changed_when: False
ignore_errors: True
warn: false
changed_when: false
ignore_errors: true
register: ceph_ansible_installed

- name: Warn about missing ceph-ansible
@@ -24,7 +24,7 @@
- name: Get ceph-ansible repository
shell: "yum info ceph-ansible | awk '/From repo/ {print $4}'"
register: repo
changed_when: False
changed_when: false

- name: Fail if ceph-ansible doesn't belong to the specified repo
fail:
@@ -32,4 +32,3 @@
when:
- (repo.stdout | length == 0 or repo.stdout != "{{ ceph_ansible_repo }}")
- fail_without_ceph_ansible|default(false)|bool


+ 51
- 50
roles/ceph/tasks/ceph-health.yaml View File

@@ -4,64 +4,65 @@
shell: hiera -c /etc/puppet/hiera.yaml enabled_services | egrep -sq ceph_mon
ignore_errors: true
register: ceph_mon_enabled
changed_when: False
changed_when: false

- when:
- ceph_mon_enabled is succeeded
- when: "ceph_mon_enabled is succeeded"
block:
- name: Set container_cli fact from the inventory
set_fact:
container_cli: "{{ hostvars[inventory_hostname].container_cli|default('podman') }}"
- name: Set container_cli fact from the inventory
set_fact:
container_cli: "{{ hostvars[inventory_hostname].container_cli|default('podman') }}"

- name: Set container filter format
set_fact:
container_filter_format: !unsafe "--format '{{ .Names }}'"
- name: Set container filter format
set_fact:
container_filter_format: !unsafe "--format '{{ .Names }}'"

- name: Set ceph_mon_container name
become: true
shell: "{{ container_cli }} ps {{ container_filter_format }} | grep ceph-mon"
register: ceph_mon_container
changed_when: False
- name: Set ceph_mon_container name
become: true
shell: "{{ container_cli }} ps {{ container_filter_format }} | grep ceph-mon"
register: ceph_mon_container
changed_when: false

- name: Set ceph cluster name
become: true
shell: find /etc/ceph -name '*.conf' -prune -print -quit | xargs basename -s '.conf'
register: ceph_cluster_name
changed_when: False
- name: Set ceph cluster name
become: true
shell: find /etc/ceph -name '*.conf' -prune -print -quit | xargs basename -s '.conf'
register: ceph_cluster_name
changed_when: false

- name: Get ceph health
become: true
shell: "{{ container_cli }} exec {{ ceph_mon_container.stdout }} ceph --cluster {{ ceph_cluster_name.stdout }} health | awk '{print $1}'"
register: ceph_health
- name: Get ceph health
become: true
shell: "{{ container_cli }} exec {{ ceph_mon_container.stdout }} ceph --cluster {{ ceph_cluster_name.stdout }} health | awk '{print $1}'"
register: ceph_health

- name: Check ceph health
warn:
msg: Ceph is in {{ ceph_health.stdout }} state.
when:
- ceph_health.stdout != 'HEALTH_OK'
- not fail_on_ceph_health_err|default(true)|bool
- name: Check ceph health
warn:
msg: Ceph is in {{ ceph_health.stdout }} state.
when:
- ceph_health.stdout != 'HEALTH_OK'
- not fail_on_ceph_health_err|default(true)|bool

- name: Fail if ceph health is HEALTH_ERR
fail:
msg: Ceph is in {{ ceph_health.stdout }} state.
when:
- ceph_health.stdout == 'HEALTH_ERR'
- fail_on_ceph_health_err|default(true)|bool
- name: Fail if ceph health is HEALTH_ERR
fail:
msg: Ceph is in {{ ceph_health.stdout }} state.
when:
- ceph_health.stdout == 'HEALTH_ERR'
- fail_on_ceph_health_err|default(true)|bool

- when:
- osd_percentage_min|default(0) > 0
block:
- name: set jq osd percentage filter
set_fact:
jq_osd_percentage_filter: '( (.num_in_osds) / (.num_osds) ) * 100'
- when:
- osd_percentage_min|default(0) > 0
block:
- name: set jq osd percentage filter
set_fact:
jq_osd_percentage_filter: '( (.num_in_osds) / (.num_osds) ) * 100'

- name: Get OSD stat percentage
become: true
shell: "{{ container_cli }} exec {{ ceph_mon_container.stdout }} ceph --cluster {{ ceph_cluster_name.stdout }} osd stat -f json | jq '{{ jq_osd_percentage_filter }}'"
register: ceph_osd_in_percentage
- name: Get OSD stat percentage
become: true
shell: >-
"{{ container_cli }}" exec "{{ ceph_mon_container.stdout }}" ceph
--cluster "{{ ceph_cluster_name.stdout }}" osd stat -f json | jq '{{ jq_osd_percentage_filter }}'
register: ceph_osd_in_percentage

- name: Fail if there is an unacceptable percentage of in OSDs
fail:
msg: "Only {{ ceph_osd_in_percentage.stdout|float }}% of OSDs are in, but {{ osd_percentage_min|default(0) }}% are required"
when:
- ceph_osd_in_percentage.stdout|float < osd_percentage_min|default(0)
- name: Fail if there is an unacceptable percentage of in OSDs
fail:
msg: "Only {{ ceph_osd_in_percentage.stdout|float }}% of OSDs are in, but {{ osd_percentage_min|default(0) }}% are required"
when:
- ceph_osd_in_percentage.stdout|float < osd_percentage_min|default(0)

+ 1
- 1
roles/check-latest-packages-version/molecule/default/prepare.yml View File

@@ -17,7 +17,7 @@

- name: Prepare
hosts: all
gather_facts: no
gather_facts: false

tasks:
- name: install patch rpm


+ 1
- 1
roles/check-network-gateway/molecule/default/playbook.yml View File

@@ -17,7 +17,7 @@

- name: Converge
hosts: all
gather_facts: no
gather_facts: false

tasks:
- name: successful check with ctlplane-subnet


+ 1
- 1
roles/check-network-gateway/molecule/default/prepare.yml View File

@@ -17,7 +17,7 @@

- name: Prepare
hosts: all
gather_facts: no
gather_facts: false

tasks:
- name: install hiera


+ 4
- 4
roles/check-network-gateway/tasks/main.yml View File

@@ -5,12 +5,12 @@
name: "tripleo_undercloud_conf_file"

- name: Get the local_subnet name from the undercloud_conf file
become: True
become: true
validations_read_ini:
path: "{{ tripleo_undercloud_conf_file }}"
section: DEFAULT
key: local_subnet
ignore_missing_file: True
ignore_missing_file: true
register: local_subnet

- name: Get gateway value from the undercloud.conf file
@@ -19,7 +19,7 @@
path: "{{ tripleo_undercloud_conf_file }}"
section: "{% if local_subnet.value %}{{ local_subnet.value }}{% else %}ctlplane-subnet{% endif %}"
key: gateway
ignore_missing_file: True
ignore_missing_file: true
register: gateway

- name: Get local_ip value from the undercloud.conf file
@@ -28,7 +28,7 @@
path: "{{ tripleo_undercloud_conf_file }}"
section: DEFAULT
key: local_ip
ignore_missing_file: True
ignore_missing_file: true
register: local_ip

- name: Test network_gateway if different from local_ip


+ 2
- 2
roles/collect-flavors-and-verify-profiles/vars/main.yml View File

@@ -5,5 +5,5 @@ metadata:
This validation checks the flavors assigned to roles exist and have the
correct capabilities set.
groups:
- pre-deployment
- pre-upgrade
- pre-deployment
- pre-upgrade

+ 18
- 18
roles/container-status/tasks/main.yaml View File

@@ -8,29 +8,29 @@

- when: "'Undercloud' in group_names"
block:
- name: Set container_cli fact from undercloud.conf
block:
- name: Get the path of tripleo undercloud config file
become: true
hiera:
name: "tripleo_undercloud_conf_file"
- name: Set container_cli fact from undercloud.conf
block:
- name: Get the path of tripleo undercloud config file
become: true
hiera:
name: "tripleo_undercloud_conf_file"

- name: Get container client from undercloud.conf
validations_read_ini:
path: "{{ tripleo_undercloud_conf_file }}"
section: DEFAULT
key: container_cli
ignore_missing_file: true
register: container_cli
- name: Get container client from undercloud.conf
validations_read_ini:
path: "{{ tripleo_undercloud_conf_file }}"
section: DEFAULT
key: container_cli
ignore_missing_file: true
register: container_cli

- name: Set uc_container_cli for the Undercloud
set_fact:
uc_container_cli: "{{ container_cli.value|default('podman', true) }}"
when: uc_container_cli is not defined
- name: Set uc_container_cli for the Undercloud
set_fact:
uc_container_cli: "{{ container_cli.value|default('podman', true) }}"
when: uc_container_cli is not defined

- name: Get failed containers for podman
changed_when: false
become: True
become: true
command: >
{% if oc_container_cli is defined %}{{ oc_container_cli }}{% else %}{{ uc_container_cli }}{% endif %}
{% raw %}


+ 2
- 2
roles/containerized-undercloud-docker/tasks/main.yml View File

@@ -2,7 +2,7 @@
- name: gather docker facts
docker_facts:
container_filter: status=running
become: yes
become: true

- name: compare running containers to list
set_fact:
@@ -25,6 +25,6 @@
state: started # Port should be open
delay: 0 # No wait before first check (sec)
timeout: 3 # Stop checking after timeout (sec)
ignore_errors: yes
ignore_errors: true
loop: "{{ open_ports }}"
when: ctlplane_ip is defined

+ 1
- 1
roles/controller-token/molecule/default/playbook.yml View File

@@ -17,7 +17,7 @@

- name: Converge
hosts: all
gather_facts: no
gather_facts: false

tasks:
- name: pass validation


+ 1
- 1
roles/controller-token/tasks/main.yml View File

@@ -5,7 +5,7 @@
path: "{{ keystone_conf_file }}"
section: DEFAULT
key: admin_token
ignore_missing_file: True
ignore_missing_file: true
register: token_result

- name: Check if token value is disabled.


+ 1
- 1
roles/controller-token/vars/main.yml View File

@@ -5,4 +5,4 @@ metadata:
This validation checks that keystone admin token is disabled on both
undercloud and overcloud controller after deployment.
groups:
- post-deployment
- post-deployment

+ 1
- 1
roles/controller-ulimits/molecule/default/playbook.yml View File

@@ -17,7 +17,7 @@

- name: Converge
hosts: all
gather_facts: no
gather_facts: false

vars:
nofiles_min: 102400


+ 2
- 2
roles/controller-ulimits/tasks/main.yml View File

@@ -4,7 +4,7 @@
# NOTE: `ulimit` is a shell builtin so we have to invoke it like this:
command: sh -c "ulimit -n"
register: nofilesval
changed_when: False
changed_when: false

- name: Check nofiles limit
fail:
@@ -18,7 +18,7 @@
# NOTE: `ulimit` is a shell builtin so we have to invoke it like this:
command: sh -c "ulimit -u"
register: nprocval
changed_when: False
changed_when: false

- name: Check nproc limit
fail:


+ 1
- 1
roles/ctlplane-ip-range/molecule/default/playbook.yml View File

@@ -17,7 +17,7 @@

- name: Converge
hosts: all
gather_facts: no
gather_facts: false

tasks:
- name: prepare directory tree for hiera


+ 1
- 1
roles/ctlplane-ip-range/molecule/default/prepare.yml View File

@@ -17,7 +17,7 @@

- name: Prepare
hosts: all
gather_facts: no
gather_facts: false

tasks:
- name: install hiera


+ 2
- 2
roles/ctlplane-ip-range/tasks/main.yml View File

@@ -10,7 +10,7 @@
path: "{{ tripleo_undercloud_conf_file }}"
section: ctlplane-subnet
key: dhcp_start
ignore_missing_file: True
ignore_missing_file: true
default: "192.0.2.5"
register: dhcp_start

@@ -20,7 +20,7 @@
path: "{{ tripleo_undercloud_conf_file }}"
section: ctlplane-subnet
key: dhcp_end
ignore_missing_file: True
ignore_missing_file: true
default: "192.0.2.24"
register: dhcp_end



+ 1
- 1
roles/default-node-count/vars/main.yml View File

@@ -5,4 +5,4 @@ metadata:
This validation checks that the nodes and hypervisor statistics
add up.
groups:
- pre-deployment
- pre-deployment

+ 3
- 3
roles/dhcp-validations/tasks/dhcp-introspection.yaml View File

@@ -1,6 +1,6 @@
---
- name: Look up the introspection interface
become: True
become: true
validations_read_ini:
path: "{{ ironic_inspector_conf }}"
section: iptables
@@ -8,7 +8,7 @@
register: interface

- name: Look up the introspection interface from the deprecated option
become: True
become: true
validations_read_ini:
path: "{{ ironic_inspector_conf }}"
section: firewall
@@ -17,4 +17,4 @@

- name: Look for rogue DHCP servers
script: files/rogue_dhcp.py {{ interface.value or interface_deprecated.value or 'br-ctlplane' }}
changed_when: False
changed_when: false

+ 1
- 1
roles/dhcp-validations/tasks/dhcp-provisioning.yaml View File

@@ -7,7 +7,7 @@
path: "{{ tripleo_undercloud_conf_file }}"
section: DEFAULT
key: local_interface
ignore_missing_file: True
ignore_missing_file: true
register: local_interface

- name: Look for DHCP responses


+ 1
- 1
roles/dns/tasks/main.yml View File

@@ -1,4 +1,4 @@
---
- name: Ensure DNS resolution works
command: "getent hosts {{ server_to_lookup }}"
changed_when: False
changed_when: false

+ 1
- 1
roles/haproxy/molecule/default/playbook.yml View File

@@ -17,7 +17,7 @@

- name: Converge
hosts: all
gather_facts: no
gather_facts: false

vars:
haproxy_config_file: /haproxy.cfg


+ 2
- 2
roles/healthcheck-service-status/tasks/main.yml View File

@@ -2,7 +2,7 @@
- name: Get the healthcheck services list enabled on node
shell: >
systemctl list-unit-files | grep "^tripleo.*healthcheck.*enabled" | awk -F'.' '{print $1}'
changed_when: False
changed_when: false
register: healthcheck_services_list
when: inflight_healthcheck_services | length < 1

@@ -23,7 +23,7 @@
until:
- systemd_healthcheck_state.status.ExecMainPID != '0'
- systemd_healthcheck_state.status.ActiveState in ['inactive', 'failed']
ignore_errors: True
ignore_errors: true
register: systemd_healthcheck_state
with_items: "{{ hc_services }}"



+ 2
- 2
roles/image-serve/molecule/default/molecule.yml View File

@@ -8,7 +8,7 @@ platforms:
- name: centos7
hostname: centos7
image: centos:7
override_command: True
override_command: true
command: python -m SimpleHTTPServer 8787
pkg_extras: python-setuptools python-enum34 python-netaddr epel-release ruby PyYAML
easy_install:
@@ -20,7 +20,7 @@ platforms:
- name: fedora28
hostname: fedora28
image: fedora:28
override_command: True
override_command: true
command: python3 -m http.server 8787
pkg_extras: python*-setuptools python*-enum python*-netaddr ruby PyYAML
environment:


+ 2
- 2
roles/image-serve/molecule/default/playbook.yml View File

@@ -17,7 +17,7 @@

- name: Converge
hosts: all
gather_facts: no
gather_facts: false

tasks:
- name: detect wrong port
@@ -39,7 +39,7 @@
block:
- name: run validation for 404
include_role:
name: image-serve
name: image-serve
rescue:
- name: Clear host errors
meta: clear_host_errors


+ 1
- 1
roles/image-serve/molecule/default/prepare.yml View File

@@ -17,7 +17,7 @@

- name: Prepare
hosts: all
gather_facts: no
gather_facts: false

tasks:
- name: install hiera


+ 1
- 1
roles/image-serve/tasks/main.yaml View File

@@ -10,7 +10,7 @@
path: "{{ tripleo_undercloud_conf_file }}"
section: DEFAULT
key: local_ip
ignore_missing_file: True
ignore_missing_file: true
register: local_ip

- name: Set container registry host


+ 1
- 0
roles/image-serve/vars/main.yml View File

@@ -1,3 +1,4 @@
---
metadata:
name: Image-serve availability
description: Verify that image-serve service is ready


+ 1
- 1
roles/mysql-open-files-limit/tasks/main.yml View File

@@ -9,7 +9,7 @@
"{{ container_cli|default('podman', true) }}" exec -u root
$("{{ container_cli|default('podman', true) }}" ps -q --filter "name=mysql|galera-bundle" | head -1)
/bin/bash -c 'ulimit -n'
changed_when: False
changed_when: false
register: mysqld_open_files_limit

- name: Test the open-files-limit value


+ 17
- 17
roles/neutron-sanity-check/tasks/main.yml View File

@@ -7,24 +7,24 @@

- when: "'Undercloud' in group_names"
block:
- name: Get the path of tripleo undercloud config file
become: true
hiera:
name: "tripleo_undercloud_conf_file"
- name: Get the path of tripleo undercloud config file
become: true
hiera:
name: "tripleo_undercloud_conf_file"

- name: Get the Container CLI from the undercloud.conf file
become: true
validations_read_ini:
path: "{{ tripleo_undercloud_conf_file }}"
section: DEFAULT
key: container_cli
ignore_missing_file: true
register: container_cli
- name: Get the Container CLI from the undercloud.conf file
become: true
validations_read_ini:
path: "{{ tripleo_undercloud_conf_file }}"
section: DEFAULT
key: container_cli
ignore_missing_file: true
register: container_cli

- name: Set uc_container_cli and container_name for the Undercloud
set_fact:
uc_container_cli: "{{ container_cli.value|default('podman', true) }}"
container_name: "neutron_ovs_agent"
- name: Set uc_container_cli and container_name for the Undercloud
set_fact:
uc_container_cli: "{{ container_cli.value|default('podman', true) }}"
container_name: "neutron_ovs_agent"

- name: Run neutron-sanity-check
command: >
@@ -35,7 +35,7 @@
become: true
register: nsc_return
ignore_errors: true
changed_when: False
changed_when: false

- name: Detect errors
set_fact:


+ 2
- 2
roles/no-op-firewall-nova-driver/molecule/default/playbook.yml View File

@@ -17,7 +17,7 @@

- name: Converge
hosts: all
gather_facts: no
gather_facts: false

vars:
nova_conf_path: "/nova.conf"
@@ -48,7 +48,7 @@
section: DEFAULT
option: firewall_driver
value: CHANGEME
backup: yes
backup: true

- include_role:
name: no-op-firewall-nova-driver


+ 1
- 1
roles/no-op-firewall-nova-driver/vars/main.yml View File

@@ -5,4 +5,4 @@ metadata:
When using Neutron, the `firewall_driver` option in Nova must be set to
`NoopFirewallDriver`.
groups:
- post-deployment
- post-deployment

+ 1
- 1
roles/node-health/tasks/main.yml View File

@@ -6,7 +6,7 @@

- name: Ping all overcloud nodes
icmp_ping:
host: "{{ item }}"
host: "{{ item }}"
with_items: "{{ oc_ips.results | map(attribute='ansible_facts.ansible_host') | list }}"
ignore_errors: true
register: ping_results


+ 7
- 7
roles/nova-event-callback/tasks/main.yml View File

@@ -1,6 +1,6 @@
---
- name: Get VIF Plugging setting values from nova.conf
become: True
become: true
validations_read_ini:
path: "{{ nova_config_file }}"
section: DEFAULT
@@ -21,14 +21,14 @@
with_items: "{{ nova_config_result.results }}"

- name: Get auth_url value from hiera
become: True
become: true
command: hiera -c /etc/puppet/hiera.yaml neutron::server::notifications::auth_url
ignore_errors: True
changed_when: False
ignore_errors: true
changed_when: false
register: auth_url

- name: Get auth_url value from neutron.conf
become: True
become: true
validations_read_ini:
path: "{{ neutron_config_file }}"
section: nova
@@ -45,7 +45,7 @@
failed_when: "neutron_auth_url_result.value != auth_url.stdout"

- name: Get Notify Nova settings values from neutron.conf
become: True
become: true
validations_read_ini:
path: "{{ neutron_config_file }}"
section: DEFAULT
@@ -63,7 +63,7 @@
with_items: "{{ neutron_notify_nova_result.results }}"

- name: Get Tenant Name setting value from neutron.conf
become: True
become: true
validations_read_ini:
path: "{{ neutron_config_file }}"
section: nova


+ 1
- 1
roles/nova-status/molecule/default/playbook.yml View File

@@ -17,7 +17,7 @@

- name: Converge
hosts: all
gather_facts: no
gather_facts: false

tasks:
- name: working detection


+ 1
- 1
roles/nova-status/molecule/default/prepare.yml View File

@@ -17,7 +17,7 @@

- name: Prepare
hosts: all
gather_facts: no
gather_facts: false

tasks:
- name: Populate successful podman CLI


+ 1
- 1
roles/nova-status/tasks/main.yml View File

@@ -7,7 +7,7 @@
- name: Check nova upgrade status
become: true
command: "{{ container_cli }} exec -u root nova_api nova-status upgrade check"
changed_when: False
changed_when: false
register: nova_upgrade_check

- name: Warn if at least one check encountered an issue


+ 13
- 13
roles/ntp/tasks/main.yml View File

@@ -1,26 +1,26 @@
---
- name: Get if chrony is enabled
become: True
become: true
hiera:
name: "chrony_enabled"

- when: chrony_enabled|bool
block:
- name: Populate service facts
service_facts: # needed to make yaml happy
- name: Populate service facts
service_facts: # needed to make yaml happy

- name: Fail if chronyd service is not running
fail:
msg: "Chronyd service is not running"
when: "ansible_facts.services['chronyd.service'].state != 'running'"
- name: Fail if chronyd service is not running
fail:
msg: "Chronyd service is not running"
when: "ansible_facts.services['chronyd.service'].state != 'running'"

- name: Run chronyc
become: True
command: chronyc -a 'burst 4/4'
changed_when: False
- name: Run chronyc
become: true
command: chronyc -a 'burst 4/4'
changed_when: false

# ntpstat returns 0 if synchronised and non-zero otherwise:
- name: Run ntpstat
# ntpstat returns 0 if synchronised and non-zero otherwise:
command: ntpstat
changed_when: False
changed_when: false
when: not chrony_enabled|bool

+ 1
- 1
roles/ntp/vars/main.yml View File

@@ -7,4 +7,4 @@ metadata:
The deployment should configure and run chronyd. This validation verifies
that it is indeed running and connected to an NTP server on all nodes.
groups:
- post-deployment
- post-deployment

+ 2
- 2
roles/openshift-on-openstack/defaults/main.yml View File

@@ -9,5 +9,5 @@ min_node_ram_testing: 4096 # Minimum ram per node for testing
min_node_disk_testing: 40 # Minimum disk per node for testing
min_node_ram_prod: 16384 # Minimum ram per node for production
min_node_disk_prod: 42 # Minimum disk per node for production
resource_reqs_testing: False
resource_reqs_prod: False
resource_reqs_testing: false
resource_reqs_prod: false

+ 5
- 5
roles/openshift-on-openstack/tasks/openshift-hw-requirements.yaml View File

@@ -23,7 +23,7 @@
domain:
id: default
password: "{{ overcloud_admin_password }}"
return_content: yes
return_content: true
status_code: 201
register: keystone_result
no_log: true
@@ -53,7 +53,7 @@
headers:
X-Auth-Token: "{{ auth_token }}"
Accept: application/vnd.openstack.compute.v2.1+json
return_content: yes
return_content: true
follow_redirects: all
register: flavors_result_testing

@@ -64,7 +64,7 @@
headers:
X-Auth-Token: "{{ auth_token }}"
Accept: application/vnd.openstack.compute.v2.1+json
return_content: yes
return_content: true
follow_redirects: all
register: flavors_result_prod

@@ -89,7 +89,7 @@
headers:
X-Auth-Token: "{{ auth_token }}"
Accept: application/vnd.openstack.compute.v2.1+json
return_content: yes
return_content: true
follow_redirects: all
register: hypervisors_result

@@ -116,7 +116,7 @@
method: GET
headers:
X-Auth-Token: "{{ auth_token }}"
return_content: yes
return_content: true
follow_redirects: all
register: images



+ 53
- 53
roles/openshift-on-openstack/tasks/openshift-nw-requirements.yaml View File

@@ -1,7 +1,7 @@
---
- name: Set fact to identify if the overcloud was deployed
set_fact:
overcloud_deployed: "{{ groups['overcloud'] is defined }}"
overcloud_deployed: "{{ groups['overcloud'] is defined }}"

- name: Warn if no overcloud deployed yet
warn:
@@ -12,62 +12,62 @@

- when: overcloud_deployed|bool
block:
# Get auth token and service catalog from Keystone and extract service urls.
- name: Get token and catalog from Keystone
uri:
url: "{{ overcloud_keystone_url
| urlsplit('scheme') }}://{{ overcloud_keystone_url
| urlsplit('netloc') }}/v3/auth/tokens"
method: POST
body_format: json
body:
auth:
scope:
project:
name: admin
domain:
id: default
identity:
methods:
- password
password:
user:
# Get auth token and service catalog from Keystone and extract service urls.
- name: Get token and catalog from Keystone
uri:
url: "{{ overcloud_keystone_url
| urlsplit('scheme') }}://{{ overcloud_keystone_url
| urlsplit('netloc') }}/v3/auth/tokens"
method: POST
body_format: json
body:
auth:
scope:
project:
name: admin
domain:
id: default
password: "{{ overcloud_admin_password }}"
return_content: yes
status_code: 201
register: keystone_result
no_log: true
when: overcloud_keystone_url|default('')
identity:
methods:
- password
password:
user:
name: admin
domain:
id: default
password: "{{ overcloud_admin_password }}"
return_content: true
status_code: 201
register: keystone_result
no_log: true
when: overcloud_keystone_url|default('')

- name: Set auth token
set_fact: token="{{ keystone_result.x_subject_token }}"
- name: Set auth token
set_fact: token="{{ keystone_result.x_subject_token }}"

- name: Get Neutron URL from catalog
set_fact: neutron_url="{{ keystone_result.json.token
| json_query("catalog[?name=='neutron'].endpoints")
| first
| selectattr('interface', 'equalto', 'public')
| map(attribute='url') | first }}"
- name: Get Neutron URL from catalog
set_fact: neutron_url="{{ keystone_result.json.token
| json_query("catalog[?name=='neutron'].endpoints")
| first
| selectattr('interface', 'equalto', 'public')
| map(attribute='url') | first }}"

# Get overcloud networks from Neutron and check if there is
# a network with a common name for external networks.
- name: Get networks from Neutron
uri:
url: "{{ neutron_url }}/v2.0/networks?router:external=true"
method: GET
headers:
X-Auth-Token: "{{ token }}"
return_content: yes
follow_redirects: all
register: networks_result
# Get overcloud networks from Neutron and check if there is
# a network with a common name for external networks.
- name: Get networks from Neutron
uri:
url: "{{ neutron_url }}/v2.0/networks?router:external=true"
method: GET
headers:
X-Auth-Token: "{{ token }}"
return_content: true
follow_redirects: all
register: networks_result

- name: Warn if there are no matching networks
warn:
msg: |
No external network found. It is strongly recommended that you
configure an external Neutron network with a floating IP address
pool.
when: networks_result.json.networks | length == 0
- name: Warn if there are no matching networks
warn:
msg: |
No external network found. It is strongly recommended that you
configure an external Neutron network with a floating IP address
pool.
when: networks_result.json.networks | length == 0

+ 3
- 3
roles/openstack-endpoints/tasks/main.yml View File

@@ -1,7 +1,7 @@
---
- name: Set fact to identify if the overcloud was deployed
set_fact:
overcloud_deployed: "{{ groups['overcloud'] is defined }}"
overcloud_deployed: "{{ groups['overcloud'] is defined }}"

# Check that the Horizon endpoint exists
- name: Fail if the HorizonPublic endpoint is not defined
@@ -30,7 +30,7 @@

# Check that we can obtain an auth token from horizon
- name: Check Keystone
no_log: True
no_log: true
uri:
url: "{{ overcloud_keystone_url | urlsplit('scheme') }}://{{ overcloud_keystone_url | urlsplit('netloc') }}/v3/auth/tokens"
method: POST
@@ -46,7 +46,7 @@
domain:
name: Default
password: "{{ overcloud_admin_password }}"
return_content: yes
return_content: true
status_code: 201
register: auth_token
when: overcloud_keystone_url|default('')


+ 3
- 3
roles/ovs-dpdk-pmd/tasks/main.yml View File

@@ -1,12 +1,12 @@
---
- name: Get OVS DPDK PMD cores mask value
become_method: sudo
become: True
become: true
register: pmd_cpu_mask
command: ovs-vsctl --no-wait get Open_vSwitch . other_config:pmd-cpu-mask
changed_when: False
changed_when: false

- name: Run OVS DPDK PMD cores check
become: True
become: true
ovs_dpdk_pmd_cpus_check:
pmd_cpu_mask: "{{ pmd_cpu_mask.stdout }}"

+ 4
- 4
roles/pacemaker-status/tasks/main.yml View File

@@ -1,10 +1,10 @@
---
- name: Check pacemaker service is running
become: True
become: true
command: "/usr/bin/systemctl show pacemaker --property ActiveState"
register: check_service
changed_when: False
ignore_errors: True
changed_when: false
ignore_errors: true

- when: "check_service.stdout == 'ActiveState=active'"
block:
@@ -12,7 +12,7 @@
become: true
command: pcs status xml
register: pcs_status
changed_when: False
changed_when: false
- name: Check pacemaker status
pacemaker:
status: "{{ pcs_status.stdout }}"

+ 1
- 1
roles/rabbitmq-limits/molecule/default/playbook.yml View File

@@ -17,7 +17,7 @@

- name: Converge
hosts: all
gather_facts: no
gather_facts: false

tasks:
- name: working detection


+ 1
- 1
roles/rabbitmq-limits/molecule/default/prepare.yml View File

@@ -17,7 +17,7 @@

- name: Prepare
hosts: all
gather_facts: no
gather_facts: false

tasks:
- name: Populate successful podman CLI


+ 2
- 2
roles/repos/molecule/default/playbook.yml View File

@@ -30,7 +30,7 @@
name: faulty
description: really faulty repository
baseurl: http://this.repository.do-not.exists/like-not-at-all
enabled: yes
enabled: true

- name: execute role
include_role:
@@ -56,7 +56,7 @@
name: faulty-bis
description: faulty repository with working DNS
baseurl: http://download.fedoraproject.org/pub/fedora/blah
enabled: yes
enabled: true

- name: execute role
include_role:


+ 5
- 5
roles/repos/tasks/main.yml View File

@@ -1,11 +1,11 @@
---
- name: List repositories
become: True
become: true
shell: |
{{ ansible_pkg_mgr }} repolist enabled -v 2>&1 || exit 0
args:
warn: no
changed_when: False
warn: false
changed_when: false
register: repositories

- name: Fail if we detect error in repolist output
@@ -16,7 +16,7 @@
repositories.stdout is regex('(cannot|could not|failure)', ignorecase=True)

- name: Find repository IDs
changed_when: False
changed_when: false
shell: 'echo "{{ repositories.stdout }}" | grep Repo-id | sed "s/Repo-id.*://" | tr -d " "'
register: repository_ids

@@ -25,5 +25,5 @@
msg: Found unwanted repository {{ item.0 }} enabled
when: item.0 == item.1
with_nested:
- [ 'epel/x86_64' ]
- ['epel/x86_64']
- "{{ repository_ids.stdout_lines }}"

+ 0
- 27
roles/service-status/meta/main.yml View File

@@ -1,27 +0,0 @@
galaxy_info:
author: TripleO Validations Team
company: Red Hat
license: Apache
min_ansible_version: 2.4

platforms:
- name: CentOS
versions:
- 7
- name: RHEL
versions:
- 7

categories:
- cloud
- baremetal
- system
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.

dependencies: []

+ 1
- 1
roles/service-status/tasks/main.yaml View File

@@ -4,7 +4,7 @@
systemctl list-units --failed --plain --no-legend --no-pager |
awk '{print $1}'
register: systemd_status
changed_when: False
changed_when: false

- name: Fails if we find failed units
assert:


+ 1
- 1
roles/stonith-exists/molecule/default/prepare.yml View File

@@ -17,7 +17,7 @@

- name: Prepare
hosts: all
gather_facts: no
gather_facts: false

tasks:
- name: Populate successful stonith


+ 2
- 2
roles/stonith-exists/tasks/main.yml View File

@@ -1,13 +1,13 @@
---
- name: Check if we are in HA cluster environment
become: True
become: true
register: pcs_cluster_status
command: pcs cluster status
failed_when: false
changed_when: false

- name: Get all currently configured stonith devices
become: True
become: true
command: "pcs stonith"
register: stonith_devices
changed_when: false


+ 2
- 1
roles/stonith-exists/vars/main.yml View File

@@ -5,6 +5,7 @@ metadata:
Verify that stonith devices are configured for your OpenStack Platform HA cluster.
We don't configure stonith device with TripleO Installer. Because the hardware
configuration may be differ in each environment and requires different fence agents.
How to configure fencing please read https://access.redhat.com/documentation/en/red-hat-openstack-platform/8/paged/director-installation-and-usage/86-fencing-the-controller-nodes
How to configure fencing please read
https://access.redhat.com/documentation/en/red-hat-openstack-platform/8/paged/director-installation-and-usage/86-fencing-the-controller-nodes
groups:
- post-deployment

+ 5
- 5
roles/tls-everywhere/tasks/common.yaml View File

@@ -127,19 +127,19 @@
path: "/etc/ipa/default.conf"
section: global
key: realm
ignore_missing_file: False
ignore_missing_file: false
register: ipa_realm
check_mode: no
check_mode: false

- name: Set fact for IdM/FreeIPA host entry
set_fact:
host_entry: "{{ ansible_fqdn }}@{{ ipa_realm.value }}"
when: ipa_conf_stat.stat.exists
when: ipa_conf_stat.stat.exists

- name: Set fact for IdM/FreeIPA host principal
set_fact:
host_principal: "host/{{ host_entry }}"
when: ipa_conf_stat.stat.exists
when: ipa_conf_stat.stat.exists

# Kerberos keytab related tasks
- name: Check for kerberos host keytab
@@ -182,7 +182,7 @@
changed_when: false
become: true
when: krb5_keytab_stat.stat.exists
check_mode: no
check_mode: false

- name: Set facts for host principals in /etc/krb5.keytab
set_fact:


+ 3
- 3
roles/tls-everywhere/tasks/overcloud-post-deployment.yaml View File

@@ -4,7 +4,7 @@
become: true
hiera:
name: "certmonger_user_enabled"
check_mode: no
check_mode: false

- name: Set facts for certmonger user service not enabled
set_fact:
@@ -36,7 +36,7 @@
become: true
changed_when: false
register: all_certnames
check_mode: no
check_mode: false

# Get status of all certificates and trim the leading whitespaces
- name: Get status of all certificates
@@ -47,7 +47,7 @@
loop_control:
loop_var: certname
register: all_cert_status
check_mode: no
check_mode: false

- name: Gather certificates that are not in MONITORING status
set_fact:


+ 8
- 8
roles/tls-everywhere/tasks/pre-deployment-containerized.yaml View File

@@ -3,7 +3,7 @@
- name: Verify that join.conf exists (containzerized)
command: "{{ command_prefix }} exec novajoin_server test -e /etc/novajoin/join.conf"
register: containerized_join_conf_st
changed_when: False
changed_when: false
become: true

- name: Fail if join.conf is not present (containerized)
@@ -21,9 +21,9 @@
path: "{{ joinconf_location }}"
section: DEFAULT
key: keytab
ignore_missing_file: True
ignore_missing_file: true
register: novajoin_keytab_path
check_mode: no
check_mode: false

- name: Get novajoin server port from join.conf
become: true
@@ -31,9 +31,9 @@
path: "{{ joinconf_location }}"
section: DEFAULT
key: join_listen_port
ignore_missing_file: True
ignore_missing_file: true
register: novajoin_server_port
check_mode: no
check_mode: false

- name: Get novajoin server host from join.conf
become: true
@@ -41,9 +41,9 @@
path: "{{ joinconf_location }}"
section: DEFAULT
key: join_listen
ignore_missing_file: True
ignore_missing_file: true
register: novajoin_server_host
check_mode: no
check_mode: false

### verify that the keytab and principal are usable ###
# TODO(alee): We need to move this to a subfile so we can run
@@ -91,7 +91,7 @@
command: "{{ command_prefix }} exec novajoin_server kdestroy -c {{ item }}"