From 02ebd6b3353f179d117f16deb91684504236d33d Mon Sep 17 00:00:00 2001 From: Gael Chamoulaud Date: Thu, 19 Dec 2019 10:03:20 +0100 Subject: [PATCH] Validate the playbooks metadata structure This patch adds a custom ansible-lint rule to enforce the structure of the validations playbooks: *ValidationHasMetadataRule*: Throw an ansible-lint error if: - the *hosts* key is empty or not found, - *vars* dictionary is missing, - *metadata* dict is missing in *vars* - *name*/*description*/*groups* keys are missing or found with a wrong data type - the validation belongs to one or several groups NOT in the official list of groups (groups.yaml) *YAMLLINT*: - Enable yamllint check in tox linters - WIP Fix detected yamllint errors Change-Id: If233286aa9f4299f02f13dc34f1e8c05d89df851 Signed-off-by: Gael Chamoulaud (cherry picked from commit e50e1a067de6d359d8e95c909859c30cf5d6912e) Signed-off-by: Gael Chamoulaud --- .ansible-lint | 2 + .../ValidationHasMetadataRule.py | 138 ++++++++++++++++++ .yamllint | 11 ++ playbooks/ceph-dependencies-installed.yaml | 2 +- playbooks/check-latest-packages-version.yaml | 2 +- .../collect-flavors-and-verify-profiles.yaml | 4 +- playbooks/controller-token.yaml | 2 +- playbooks/controller-ulimits.yaml | 2 +- playbooks/default-node-count.yaml | 2 +- playbooks/neutron-sanity-check.yaml | 12 +- playbooks/no-op-firewall-nova-driver.yaml | 2 +- playbooks/ntp.yaml | 2 +- playbooks/openshift-hw-requirements.yaml | 6 +- playbooks/openshift-nw-requirements.yaml | 2 +- playbooks/openstack-endpoints.yaml | 6 +- playbooks/repos.yaml | 2 +- playbooks/stonith-exists.yaml | 3 +- playbooks/tls-everywhere-post-deployment.yaml | 2 +- playbooks/tls-everywhere-pre-deployment.yaml | 2 +- playbooks/tls-everywhere-prep.yaml | 2 +- playbooks/undercloud-cpu.yaml | 2 +- .../undercloud-disk-space-pre-upgrade.yaml | 6 +- playbooks/undercloud-disk-space.yaml | 10 +- playbooks/undercloud-heat-purge-deleted.yaml | 4 +- .../undercloud-neutron-sanity-check.yaml | 14 +- 
playbooks/undercloud-ram.yaml | 2 +- playbooks/undercloud-selinux-mode.yaml | 2 +- playbooks/undercloud-tokenflush.yaml | 2 +- .../tasks/main.yml | 2 +- roles/ceph/defaults/main.yml | 1 - roles/ceph/tasks/ceph-ansible-installed.yaml | 9 +- roles/ceph/tasks/ceph-health.yaml | 109 +++++++------- .../molecule/default/prepare.yml | 2 +- .../molecule/default/playbook.yml | 2 +- .../molecule/default/prepare.yml | 2 +- roles/check-network-gateway/tasks/main.yml | 8 +- .../vars/main.yml | 4 +- roles/container-status/tasks/main.yaml | 36 ++--- .../tasks/main.yml | 4 +- .../molecule/default/playbook.yml | 2 +- roles/controller-token/tasks/main.yml | 2 +- roles/controller-token/vars/main.yml | 2 +- .../molecule/default/playbook.yml | 2 +- roles/controller-ulimits/tasks/main.yml | 4 +- .../molecule/default/playbook.yml | 2 +- .../molecule/default/prepare.yml | 2 +- roles/ctlplane-ip-range/tasks/main.yml | 4 +- roles/default-node-count/vars/main.yml | 2 +- .../tasks/dhcp-introspection.yaml | 6 +- .../tasks/dhcp-provisioning.yaml | 2 +- roles/dns/tasks/main.yml | 2 +- roles/haproxy/molecule/default/playbook.yml | 2 +- .../healthcheck-service-status/tasks/main.yml | 4 +- .../image-serve/molecule/default/molecule.yml | 4 +- .../image-serve/molecule/default/playbook.yml | 4 +- .../image-serve/molecule/default/prepare.yml | 2 +- roles/image-serve/tasks/main.yaml | 2 +- roles/image-serve/vars/main.yml | 1 + roles/mysql-open-files-limit/tasks/main.yml | 2 +- roles/neutron-sanity-check/tasks/main.yml | 34 ++--- .../molecule/default/playbook.yml | 4 +- .../no-op-firewall-nova-driver/vars/main.yml | 2 +- roles/node-health/tasks/main.yml | 2 +- roles/nova-event-callback/tasks/main.yml | 14 +- .../nova-status/molecule/default/playbook.yml | 2 +- .../nova-status/molecule/default/prepare.yml | 2 +- roles/nova-status/tasks/main.yml | 2 +- roles/ntp/tasks/main.yml | 26 ++-- roles/ntp/vars/main.yml | 2 +- .../openshift-on-openstack/defaults/main.yml | 4 +- 
.../tasks/openshift-hw-requirements.yaml | 10 +- .../tasks/openshift-nw-requirements.yaml | 106 +++++++------- roles/openstack-endpoints/tasks/main.yml | 6 +- roles/ovs-dpdk-pmd/tasks/main.yml | 6 +- roles/pacemaker-status/tasks/main.yml | 8 +- .../molecule/default/playbook.yml | 2 +- .../molecule/default/prepare.yml | 2 +- roles/repos/molecule/default/playbook.yml | 4 +- roles/repos/tasks/main.yml | 10 +- roles/service-status/meta/main.yml | 27 ---- roles/service-status/tasks/main.yaml | 2 +- .../molecule/default/prepare.yml | 2 +- roles/stonith-exists/tasks/main.yml | 4 +- roles/stonith-exists/vars/main.yml | 3 +- roles/tls-everywhere/tasks/common.yaml | 10 +- .../tasks/overcloud-post-deployment.yaml | 6 +- .../tasks/pre-deployment-containerized.yaml | 16 +- .../pre-deployment-non-containerized.yaml | 13 +- .../tls-everywhere/tasks/pre-deployment.yaml | 34 ++--- roles/tls-everywhere/tasks/prep.yaml | 16 +- roles/undercloud-debug/defaults/main.yml | 2 +- .../molecule/default/playbook.yml | 6 +- roles/undercloud-debug/tasks/main.yml | 4 +- roles/undercloud-disk-space/defaults/main.yml | 11 +- roles/undercloud-disk-space/tasks/main.yml | 8 +- .../molecule/default/molecule.yml | 4 +- .../molecule/default/playbook.yml | 2 +- .../molecule/default/prepare.yml | 2 +- .../tasks/main.yml | 2 +- .../vars/main.yaml | 4 +- roles/undercloud-process-count/tasks/main.yml | 34 ++--- roles/undercloud-selinux-mode/tasks/main.yml | 4 +- .../undercloud-service-status/tasks/main.yml | 2 +- .../molecule/default/molecule.yml | 4 +- .../molecule/default/playbook.yml | 2 +- .../molecule/default/prepare.yml | 2 +- roles/undercloud-tokenflush/tasks/main.yml | 2 +- roles/undercloud-tokenflush/vars/main.yaml | 2 +- .../molecule/default/playbook.yml | 2 +- .../molecule/default/prepare.yml | 5 +- roles/validate-selinux/vars/main.yml | 2 +- tox.ini | 3 +- zuul.d/playbooks/pre.yml | 2 +- zuul.d/playbooks/run.yml | 2 +- 114 files changed, 551 insertions(+), 423 deletions(-) create mode 100644 
.ansible-lint_rules/ValidationHasMetadataRule.py create mode 100644 .yamllint delete mode 100644 roles/service-status/meta/main.yml diff --git a/.ansible-lint b/.ansible-lint index 882705bc8..a8796493c 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -2,6 +2,8 @@ exclude_paths: - releasenotes/ parseable: true quiet: false +rulesdir: + - .ansible-lint_rules/ skip_list: # Lines should be no longer than 120 chars. - '204' diff --git a/.ansible-lint_rules/ValidationHasMetadataRule.py b/.ansible-lint_rules/ValidationHasMetadataRule.py new file mode 100644 index 000000000..8af343b90 --- /dev/null +++ b/.ansible-lint_rules/ValidationHasMetadataRule.py @@ -0,0 +1,138 @@ +import os +import six +import yaml + +from ansiblelint import AnsibleLintRule + + +class ValidationHasMetadataRule(AnsibleLintRule): + id = '750' + shortdesc = 'Validation playbook must have mandatory metadata' + + info = """ +--- +- hosts: localhost + vars: + metadata: + name: Validation Name + description: > + A full description of the validation. + groups: + - group1 + - group2 + - group3 +""" + + description = ( + "The Validation playbook must have mandatory metadata:\n" + "```{}```".format(info) + ) + + severity = 'HIGH' + tags = ['metadata'] + + no_vars_found = "The validation playbook must contain a 'vars' dictionary" + no_meta_found = ( + "The validation playbook must contain " + "a 'metadata' dictionary under vars" + ) + no_groups_found = \ + "*metadata* should contain a list of group (groups)" + + unknown_groups_found = ( + "Unknown group(s) '{}' found! " + "The official list of groups are '{}'. " + "To add a new validation group, please add it in the groups.yaml " + "file at the root of the tripleo-validations project." + ) + + def get_groups(self): + """Returns a list of group names supported by + tripleo-validations by reading 'groups.yaml' + file located in the base directory. 
+ """ + results = [] + + grp_file_path = os.path.abspath('groups.yaml') + + with open(grp_file_path, "r") as grps: + contents = yaml.safe_load(grps) + + for grp_name, grp_desc in sorted(contents.items()): + results.append(grp_name) + + return results + + def matchplay(self, file, data): + results = [] + path = file['path'] + + if file['type'] == 'playbook': + if path.startswith("playbooks/") or \ + path.find("tripleo-validations/playbooks/") > 0: + + # *hosts* line check + hosts = data.get('hosts', None) + if not hosts: + return [({ + path: data + }, "No *hosts* key found in the playbook")] + + # *vars* lines check + vars = data.get('vars', None) + if not vars: + return [({ + path: data + }, self.no_vars_found)] + else: + if not isinstance(vars, dict): + return [({path: data}, '*vars* should be a dictionary')] + + # *metadata* lines check + metadata = data['vars'].get('metadata', None) + if metadata: + if not isinstance(metadata, dict): + return [( + {path: data}, + '*metadata* should be a dictionary')] + else: + return [({path: data}, self.no_meta_found)] + + # *metadata>[name|description] lines check + for info in ['name', 'description']: + if not metadata.get(info, None): + results.append(( + {path: data}, + '*metadata* should contain a %s key' % info)) + continue + if not isinstance(metadata.get(info), + six.string_types): + results.append(( + {path: data}, + '*%s* should be a string' % info)) + + # *metadata>groups* lines check + if not metadata.get('groups', None): + results.append(( + {path: data}, + self.no_groups_found)) + else: + if not isinstance(metadata.get('groups'), list): + results.append(( + {path: data}, + '*groups* should be a list')) + else: + groups = metadata.get('groups') + group_list = self.get_groups() + unknown_groups_list = list( + set(groups) - set(group_list)) + if unknown_groups_list: + results.append(( + {path: data}, + self.unknown_groups_found.format( + unknown_groups_list, + group_list) + )) + return results + + return results diff 
--git a/.yamllint b/.yamllint new file mode 100644 index 000000000..7abc6068c --- /dev/null +++ b/.yamllint @@ -0,0 +1,11 @@ +--- +extends: default + +rules: + line-length: + # matches hardcoded 160 value from ansible-lint + max: 160 + +ignore: | + zuul.d/*.yaml + releasenotes/notes/*.yaml diff --git a/playbooks/ceph-dependencies-installed.yaml b/playbooks/ceph-dependencies-installed.yaml index 827e7e081..ecfdbc9e9 100644 --- a/playbooks/ceph-dependencies-installed.yaml +++ b/playbooks/ceph-dependencies-installed.yaml @@ -10,7 +10,7 @@ fail_without_deps: true tripleo_delegate_to: "{{ groups['overcloud'] | default([]) }}" packages: - - lvm2 + - lvm2 tasks: - include_role: name: ceph diff --git a/playbooks/check-latest-packages-version.yaml b/playbooks/check-latest-packages-version.yaml index e850171d1..ff3e3ac3a 100644 --- a/playbooks/check-latest-packages-version.yaml +++ b/playbooks/check-latest-packages-version.yaml @@ -1,6 +1,6 @@ --- - hosts: undercloud - gather_facts: yes + gather_facts: true vars: metadata: name: Check if latest version of packages is installed diff --git a/playbooks/collect-flavors-and-verify-profiles.yaml b/playbooks/collect-flavors-and-verify-profiles.yaml index 8cd5761f2..1f728eab2 100644 --- a/playbooks/collect-flavors-and-verify-profiles.yaml +++ b/playbooks/collect-flavors-and-verify-profiles.yaml @@ -7,7 +7,7 @@ This validation checks the flavors assigned to roles exist and have the correct capabilities set. groups: - - pre-deployment - - pre-upgrade + - pre-deployment + - pre-upgrade roles: - collect-flavors-and-verify-profiles diff --git a/playbooks/controller-token.yaml b/playbooks/controller-token.yaml index f2e74a6a3..9e0cc84ee 100644 --- a/playbooks/controller-token.yaml +++ b/playbooks/controller-token.yaml @@ -7,7 +7,7 @@ This validation checks that keystone admin token is disabled on both undercloud and overcloud controller after deployment. 
groups: - - post-deployment + - post-deployment keystone_conf_file: "/var/lib/config-data/puppet-generated/keystone/etc/keystone/keystone.conf" roles: - controller-token diff --git a/playbooks/controller-ulimits.yaml b/playbooks/controller-ulimits.yaml index 2ded38d3f..dcd5cc8a5 100644 --- a/playbooks/controller-ulimits.yaml +++ b/playbooks/controller-ulimits.yaml @@ -6,7 +6,7 @@ description: > This will check the ulimits of each controller. groups: - - post-deployment + - post-deployment nofiles_min: 1024 nproc_min: 2048 roles: diff --git a/playbooks/default-node-count.yaml b/playbooks/default-node-count.yaml index e2e79dbd8..8438fd70e 100644 --- a/playbooks/default-node-count.yaml +++ b/playbooks/default-node-count.yaml @@ -7,6 +7,6 @@ This validation checks that the nodes and hypervisor statistics add up. groups: - - pre-deployment + - pre-deployment roles: - default-node-count diff --git a/playbooks/neutron-sanity-check.yaml b/playbooks/neutron-sanity-check.yaml index 6398a6be8..59578f0ec 100644 --- a/playbooks/neutron-sanity-check.yaml +++ b/playbooks/neutron-sanity-check.yaml @@ -17,12 +17,12 @@ # will be passed to the Neutron services. The order is important # here: the values in later files take precedence. 
configs: - - /etc/neutron/neutron.conf - - /usr/share/neutron/neutron-dist.conf - - /etc/neutron/metadata_agent.ini - - /etc/neutron/dhcp_agent.ini - - /etc/neutron/fwaas_driver.ini - - /etc/neutron/l3_agent.ini + - /etc/neutron/neutron.conf + - /usr/share/neutron/neutron-dist.conf + - /etc/neutron/metadata_agent.ini + - /etc/neutron/dhcp_agent.ini + - /etc/neutron/fwaas_driver.ini + - /etc/neutron/l3_agent.ini roles: - neutron-sanity-check diff --git a/playbooks/no-op-firewall-nova-driver.yaml b/playbooks/no-op-firewall-nova-driver.yaml index 537128c8c..fe374f698 100644 --- a/playbooks/no-op-firewall-nova-driver.yaml +++ b/playbooks/no-op-firewall-nova-driver.yaml @@ -7,7 +7,7 @@ When using Neutron, the `firewall_driver` option in Nova must be set to `NoopFirewallDriver`. groups: - - post-deployment + - post-deployment nova_conf_path: "/var/lib/config-data/puppet-generated/nova_libvirt/etc/nova/nova.conf" roles: - no-op-firewall-nova-driver diff --git a/playbooks/ntp.yaml b/playbooks/ntp.yaml index 47eea2685..39eb9429d 100644 --- a/playbooks/ntp.yaml +++ b/playbooks/ntp.yaml @@ -9,6 +9,6 @@ The deployment should configure and run chronyd. This validation verifies that it is indeed running and connected to an NTP server on all nodes. groups: - - post-deployment + - post-deployment roles: - ntp diff --git a/playbooks/openshift-hw-requirements.yaml b/playbooks/openshift-hw-requirements.yaml index c9cfc0862..1c49b6f09 100644 --- a/playbooks/openshift-hw-requirements.yaml +++ b/playbooks/openshift-hw-requirements.yaml @@ -12,7 +12,7 @@ - Are images named centos or rhel available? - Are there sufficient compute resources available for a default setup? 
(1 Master node, 1 Infra node, 2 App nodes) groups: - - openshift-on-openstack + - openshift-on-openstack min_total_ram_testing: 16384 # 4 per node min_total_vcpus_testing: 4 # 1 per node min_total_disk_testing: 93 # Master: 40, others: 17 per node @@ -23,8 +23,8 @@ min_node_disk_testing: 40 # Minimum disk per node for testing min_node_ram_prod: 16384 # Minimum ram per node for production min_node_disk_prod: 42 # Minimum disk per node for production - resource_reqs_testing: False - resource_reqs_prod: False + resource_reqs_testing: false + resource_reqs_prod: false tasks: - include_role: name: openshift-on-openstack diff --git a/playbooks/openshift-nw-requirements.yaml b/playbooks/openshift-nw-requirements.yaml index fb1e4f105..b829b2b25 100644 --- a/playbooks/openshift-nw-requirements.yaml +++ b/playbooks/openshift-nw-requirements.yaml @@ -7,7 +7,7 @@ Checks if an external network has been configured on the overcloud as required for an OpenShift deployment on top of OpenStack. groups: - - openshift-on-openstack + - openshift-on-openstack tasks: - include_role: name: openshift-on-openstack diff --git a/playbooks/openstack-endpoints.yaml b/playbooks/openstack-endpoints.yaml index 025dd6f62..408a0811b 100644 --- a/playbooks/openstack-endpoints.yaml +++ b/playbooks/openstack-endpoints.yaml @@ -8,8 +8,8 @@ This validation gets the PublicVip address from the deployment and tries to access Horizon and get a Keystone token. 
groups: - - post-deployment - - pre-upgrade - - post-upgrade + - post-deployment + - pre-upgrade + - post-upgrade roles: - openstack-endpoints diff --git a/playbooks/repos.yaml b/playbooks/repos.yaml index ef6380d9f..3ae221856 100644 --- a/playbooks/repos.yaml +++ b/playbooks/repos.yaml @@ -1,6 +1,6 @@ --- - hosts: undercloud, overcloud - gather_facts: yes + gather_facts: true vars: metadata: name: Check correctness of current repositories diff --git a/playbooks/stonith-exists.yaml b/playbooks/stonith-exists.yaml index 89c54ddc8..bf4070e43 100644 --- a/playbooks/stonith-exists.yaml +++ b/playbooks/stonith-exists.yaml @@ -7,7 +7,8 @@ Verify that stonith devices are configured for your OpenStack Platform HA cluster. We don't configure stonith device with TripleO Installer. Because the hardware configuration may be differ in each environment and requires different fence agents. - How to configure fencing please read https://access.redhat.com/documentation/en/red-hat-openstack-platform/8/paged/director-installation-and-usage/86-fencing-the-controller-nodes + How to configure fencing please read + https://access.redhat.com/documentation/en/red-hat-openstack-platform/8/paged/director-installation-and-usage/86-fencing-the-controller-nodes groups: - post-deployment roles: diff --git a/playbooks/tls-everywhere-post-deployment.yaml b/playbooks/tls-everywhere-post-deployment.yaml index 2867a2d92..e694cacaa 100644 --- a/playbooks/tls-everywhere-post-deployment.yaml +++ b/playbooks/tls-everywhere-post-deployment.yaml @@ -8,7 +8,7 @@ and that all certs being tracked by certmonger are in the MONITORING state. 
groups: - - post-deployment + - post-deployment tasks: - include_role: name: tls-everywhere diff --git a/playbooks/tls-everywhere-pre-deployment.yaml b/playbooks/tls-everywhere-pre-deployment.yaml index 6e1a5b615..11f4cdafa 100644 --- a/playbooks/tls-everywhere-pre-deployment.yaml +++ b/playbooks/tls-everywhere-pre-deployment.yaml @@ -7,7 +7,7 @@ Checks that the undercloud has novajoin set up corectly and that we are ready to do the overcloud deploy with tls-everywhere. groups: - - pre-deployment + - pre-deployment tasks: - include_role: name: tls-everywhere diff --git a/playbooks/tls-everywhere-prep.yaml b/playbooks/tls-everywhere-prep.yaml index 5fac06f27..57374a48a 100644 --- a/playbooks/tls-everywhere-prep.yaml +++ b/playbooks/tls-everywhere-prep.yaml @@ -7,7 +7,7 @@ Checks that the undercloud is ready to set up novajoin and to register to IdM as a client as part of undercloud-install. groups: - - prep + - prep tasks: - include_role: name: tls-everywhere diff --git a/playbooks/undercloud-cpu.yaml b/playbooks/undercloud-cpu.yaml index e1765ae4f..772b4dbd2 100644 --- a/playbooks/undercloud-cpu.yaml +++ b/playbooks/undercloud-cpu.yaml @@ -1,6 +1,6 @@ --- - hosts: undercloud - gather_facts: yes + gather_facts: true vars: metadata: name: Verify undercloud fits the CPU core requirements diff --git a/playbooks/undercloud-disk-space-pre-upgrade.yaml b/playbooks/undercloud-disk-space-pre-upgrade.yaml index ef7c96490..bf95dbd77 100644 --- a/playbooks/undercloud-disk-space-pre-upgrade.yaml +++ b/playbooks/undercloud-disk-space-pre-upgrade.yaml @@ -11,10 +11,10 @@ groups: - pre-upgrade volumes: - - {mount: /var/lib/docker, min_size: 10} + - {mount: /var/lib/docker, min_size: 10} - {mount: /var/lib/config-data, min_size: 3} - - {mount: /var, min_size: 16} - - {mount: /, min_size: 20} + - {mount: /var, min_size: 16} + - {mount: /, min_size: 20} roles: - undercloud-disk-space diff --git a/playbooks/undercloud-disk-space.yaml b/playbooks/undercloud-disk-space.yaml index 
b9dc55b1b..8ee78c87e 100644 --- a/playbooks/undercloud-disk-space.yaml +++ b/playbooks/undercloud-disk-space.yaml @@ -12,12 +12,12 @@ - prep - pre-introspection volumes: - - {mount: /var/lib/docker, min_size: 10} + - {mount: /var/lib/docker, min_size: 10} - {mount: /var/lib/config-data, min_size: 3} - - {mount: /var/log, min_size: 3} - - {mount: /usr, min_size: 5} - - {mount: /var, min_size: 20} - - {mount: /, min_size: 25} + - {mount: /var/log, min_size: 3} + - {mount: /usr, min_size: 5} + - {mount: /var, min_size: 20} + - {mount: /, min_size: 25} roles: - undercloud-disk-space diff --git a/playbooks/undercloud-heat-purge-deleted.yaml b/playbooks/undercloud-heat-purge-deleted.yaml index 5eb942ab2..2ef02ebbb 100644 --- a/playbooks/undercloud-heat-purge-deleted.yaml +++ b/playbooks/undercloud-heat-purge-deleted.yaml @@ -8,8 +8,8 @@ heat database can grow very large. This validation checks that the purge_deleted crontab has been set up. groups: - - pre-upgrade - - pre-deployment + - pre-upgrade + - pre-deployment cron_check: "heat-manage purge_deleted" roles: - undercloud-heat-purge-deleted diff --git a/playbooks/undercloud-neutron-sanity-check.yaml b/playbooks/undercloud-neutron-sanity-check.yaml index 32f8415f1..25c4074b5 100644 --- a/playbooks/undercloud-neutron-sanity-check.yaml +++ b/playbooks/undercloud-neutron-sanity-check.yaml @@ -17,13 +17,13 @@ # will be passed to the Neutron services. The order is important # here: the values in later files take precedence. 
configs: - - /etc/neutron/neutron.conf - - /usr/share/neutron/neutron-dist.conf - - /etc/neutron/metadata_agent.ini - - /etc/neutron/dhcp_agent.ini - - /etc/neutron/plugins/ml2/openvswitch_agent.ini - - /etc/neutron/fwaas_driver.ini - - /etc/neutron/l3_agent.ini + - /etc/neutron/neutron.conf + - /usr/share/neutron/neutron-dist.conf + - /etc/neutron/metadata_agent.ini + - /etc/neutron/dhcp_agent.ini + - /etc/neutron/plugins/ml2/openvswitch_agent.ini + - /etc/neutron/fwaas_driver.ini + - /etc/neutron/l3_agent.ini roles: - neutron-sanity-check diff --git a/playbooks/undercloud-ram.yaml b/playbooks/undercloud-ram.yaml index 3536f47af..7aa04944b 100644 --- a/playbooks/undercloud-ram.yaml +++ b/playbooks/undercloud-ram.yaml @@ -1,6 +1,6 @@ --- - hosts: undercloud - gather_facts: yes + gather_facts: true vars: metadata: name: Verify the undercloud fits the RAM requirements diff --git a/playbooks/undercloud-selinux-mode.yaml b/playbooks/undercloud-selinux-mode.yaml index bc42a3c94..5d211b0bb 100644 --- a/playbooks/undercloud-selinux-mode.yaml +++ b/playbooks/undercloud-selinux-mode.yaml @@ -1,6 +1,6 @@ --- - hosts: undercloud - gather_facts: yes + gather_facts: true vars: metadata: name: Undercloud SELinux Enforcing Mode Check diff --git a/playbooks/undercloud-tokenflush.yaml b/playbooks/undercloud-tokenflush.yaml index ad946ff97..49641a5bd 100644 --- a/playbooks/undercloud-tokenflush.yaml +++ b/playbooks/undercloud-tokenflush.yaml @@ -8,7 +8,7 @@ keystone database can grow very large. This validation checks that the keystone token_flush crontab has been set up. 
groups: - - pre-introspection + - pre-introspection cron_check: "keystone-manage token_flush" roles: - undercloud-tokenflush diff --git a/roles/advanced-format-512e-support/tasks/main.yml b/roles/advanced-format-512e-support/tasks/main.yml index 7b579e853..0d507e0f9 100644 --- a/roles/advanced-format-512e-support/tasks/main.yml +++ b/roles/advanced-format-512e-support/tasks/main.yml @@ -2,7 +2,7 @@ - name: List the available drives register: drive_list command: "ls /sys/class/block/" - changed_when: False + changed_when: false - name: Detect whether the drive uses Advanced Format advanced_format: drive={{ item }} diff --git a/roles/ceph/defaults/main.yml b/roles/ceph/defaults/main.yml index 3af2f954b..301b69b8d 100644 --- a/roles/ceph/defaults/main.yml +++ b/roles/ceph/defaults/main.yml @@ -4,4 +4,3 @@ fail_without_deps: false fail_on_ceph_health_err: false osd_percentage_min: 0 ceph_ansible_repo: "centos-ceph-nautilus" - diff --git a/roles/ceph/tasks/ceph-ansible-installed.yaml b/roles/ceph/tasks/ceph-ansible-installed.yaml index 404dbfa2f..e173ab91e 100644 --- a/roles/ceph/tasks/ceph-ansible-installed.yaml +++ b/roles/ceph/tasks/ceph-ansible-installed.yaml @@ -2,9 +2,9 @@ - name: Check if ceph-ansible is installed shell: rpm -q ceph-ansible || true args: - warn: no - changed_when: False - ignore_errors: True + warn: false + changed_when: false + ignore_errors: true register: ceph_ansible_installed - name: Warn about missing ceph-ansible @@ -24,7 +24,7 @@ - name: Get ceph-ansible repository shell: "yum info ceph-ansible | awk '/From repo/ {print $4}'" register: repo - changed_when: False + changed_when: false - name: Fail if ceph-ansible doesn't belong to the specified repo fail: @@ -32,4 +32,3 @@ when: - (repo.stdout | length == 0 or repo.stdout != "{{ ceph_ansible_repo }}") - fail_without_ceph_ansible|default(false)|bool - diff --git a/roles/ceph/tasks/ceph-health.yaml b/roles/ceph/tasks/ceph-health.yaml index 0e9b6b2cb..0936c4e15 100644 --- 
a/roles/ceph/tasks/ceph-health.yaml +++ b/roles/ceph/tasks/ceph-health.yaml @@ -4,64 +4,65 @@ shell: hiera -c /etc/puppet/hiera.yaml enabled_services | egrep -sq ceph_mon ignore_errors: true register: ceph_mon_enabled - changed_when: False + changed_when: false -- when: - - ceph_mon_enabled is succeeded +- when: "ceph_mon_enabled is succeeded" block: - - name: Set container_cli fact from the inventory - set_fact: - container_cli: "{{ hostvars[inventory_hostname].container_cli|default('podman') }}" - - - name: Set container filter format - set_fact: - container_filter_format: !unsafe "--format '{{ .Names }}'" - - - name: Set ceph_mon_container name - become: true - shell: "{{ container_cli }} ps {{ container_filter_format }} | grep ceph-mon" - register: ceph_mon_container - changed_when: False - - - name: Set ceph cluster name - become: true - shell: find /etc/ceph -name '*.conf' -prune -print -quit | xargs basename -s '.conf' - register: ceph_cluster_name - changed_when: False - - - name: Get ceph health - become: true - shell: "{{ container_cli }} exec {{ ceph_mon_container.stdout }} ceph --cluster {{ ceph_cluster_name.stdout }} health | awk '{print $1}'" - register: ceph_health - - - name: Check ceph health - warn: - msg: Ceph is in {{ ceph_health.stdout }} state. - when: - - ceph_health.stdout != 'HEALTH_OK' - - not fail_on_ceph_health_err|default(true)|bool - - - name: Fail if ceph health is HEALTH_ERR - fail: - msg: Ceph is in {{ ceph_health.stdout }} state. 
- when: - - ceph_health.stdout == 'HEALTH_ERR' - - fail_on_ceph_health_err|default(true)|bool - - - when: - - osd_percentage_min|default(0) > 0 - block: - - name: set jq osd percentage filter + - name: Set container_cli fact from the inventory set_fact: - jq_osd_percentage_filter: '( (.num_in_osds) / (.num_osds) ) * 100' + container_cli: "{{ hostvars[inventory_hostname].container_cli|default('podman') }}" - - name: Get OSD stat percentage + - name: Set container filter format + set_fact: + container_filter_format: !unsafe "--format '{{ .Names }}'" + + - name: Set ceph_mon_container name become: true - shell: "{{ container_cli }} exec {{ ceph_mon_container.stdout }} ceph --cluster {{ ceph_cluster_name.stdout }} osd stat -f json | jq '{{ jq_osd_percentage_filter }}'" - register: ceph_osd_in_percentage + shell: "{{ container_cli }} ps {{ container_filter_format }} | grep ceph-mon" + register: ceph_mon_container + changed_when: false - - name: Fail if there is an unacceptable percentage of in OSDs - fail: - msg: "Only {{ ceph_osd_in_percentage.stdout|float }}% of OSDs are in, but {{ osd_percentage_min|default(0) }}% are required" + - name: Set ceph cluster name + become: true + shell: find /etc/ceph -name '*.conf' -prune -print -quit | xargs basename -s '.conf' + register: ceph_cluster_name + changed_when: false + + - name: Get ceph health + become: true + shell: "{{ container_cli }} exec {{ ceph_mon_container.stdout }} ceph --cluster {{ ceph_cluster_name.stdout }} health | awk '{print $1}'" + register: ceph_health + + - name: Check ceph health + warn: + msg: Ceph is in {{ ceph_health.stdout }} state. when: - - ceph_osd_in_percentage.stdout|float < osd_percentage_min|default(0) + - ceph_health.stdout != 'HEALTH_OK' + - not fail_on_ceph_health_err|default(true)|bool + + - name: Fail if ceph health is HEALTH_ERR + fail: + msg: Ceph is in {{ ceph_health.stdout }} state. 
+ when: + - ceph_health.stdout == 'HEALTH_ERR' + - fail_on_ceph_health_err|default(true)|bool + + - when: + - osd_percentage_min|default(0) > 0 + block: + - name: set jq osd percentage filter + set_fact: + jq_osd_percentage_filter: '( (.num_in_osds) / (.num_osds) ) * 100' + + - name: Get OSD stat percentage + become: true + shell: >- + "{{ container_cli }}" exec "{{ ceph_mon_container.stdout }}" ceph + --cluster "{{ ceph_cluster_name.stdout }}" osd stat -f json | jq '{{ jq_osd_percentage_filter }}' + register: ceph_osd_in_percentage + + - name: Fail if there is an unacceptable percentage of in OSDs + fail: + msg: "Only {{ ceph_osd_in_percentage.stdout|float }}% of OSDs are in, but {{ osd_percentage_min|default(0) }}% are required" + when: + - ceph_osd_in_percentage.stdout|float < osd_percentage_min|default(0) diff --git a/roles/check-latest-packages-version/molecule/default/prepare.yml b/roles/check-latest-packages-version/molecule/default/prepare.yml index 4d1b75233..c55cfc709 100644 --- a/roles/check-latest-packages-version/molecule/default/prepare.yml +++ b/roles/check-latest-packages-version/molecule/default/prepare.yml @@ -17,7 +17,7 @@ - name: Prepare hosts: all - gather_facts: no + gather_facts: false tasks: - name: install patch rpm diff --git a/roles/check-network-gateway/molecule/default/playbook.yml b/roles/check-network-gateway/molecule/default/playbook.yml index e017708aa..52d9d4ebf 100644 --- a/roles/check-network-gateway/molecule/default/playbook.yml +++ b/roles/check-network-gateway/molecule/default/playbook.yml @@ -17,7 +17,7 @@ - name: Converge hosts: all - gather_facts: no + gather_facts: false tasks: - name: successful check with ctlplane-subnet diff --git a/roles/check-network-gateway/molecule/default/prepare.yml b/roles/check-network-gateway/molecule/default/prepare.yml index ab488467a..70f54da03 100644 --- a/roles/check-network-gateway/molecule/default/prepare.yml +++ b/roles/check-network-gateway/molecule/default/prepare.yml @@ -17,7 +17,7 
@@ - name: Prepare hosts: all - gather_facts: no + gather_facts: false tasks: - name: install hiera diff --git a/roles/check-network-gateway/tasks/main.yml b/roles/check-network-gateway/tasks/main.yml index e29db4230..5d86b5042 100644 --- a/roles/check-network-gateway/tasks/main.yml +++ b/roles/check-network-gateway/tasks/main.yml @@ -5,12 +5,12 @@ name: "tripleo_undercloud_conf_file" - name: Get the local_subnet name from the undercloud_conf file - become: True + become: true validations_read_ini: path: "{{ tripleo_undercloud_conf_file }}" section: DEFAULT key: local_subnet - ignore_missing_file: True + ignore_missing_file: true register: local_subnet - name: Get gateway value from the undercloud.conf file @@ -19,7 +19,7 @@ path: "{{ tripleo_undercloud_conf_file }}" section: "{% if local_subnet.value %}{{ local_subnet.value }}{% else %}ctlplane-subnet{% endif %}" key: gateway - ignore_missing_file: True + ignore_missing_file: true register: gateway - name: Get local_ip value from the undercloud.conf file @@ -28,7 +28,7 @@ path: "{{ tripleo_undercloud_conf_file }}" section: DEFAULT key: local_ip - ignore_missing_file: True + ignore_missing_file: true register: local_ip - name: Test network_gateway if different from local_ip diff --git a/roles/collect-flavors-and-verify-profiles/vars/main.yml b/roles/collect-flavors-and-verify-profiles/vars/main.yml index db4a2ec26..5b4af78b1 100644 --- a/roles/collect-flavors-and-verify-profiles/vars/main.yml +++ b/roles/collect-flavors-and-verify-profiles/vars/main.yml @@ -5,5 +5,5 @@ metadata: This validation checks the flavors assigned to roles exist and have the correct capabilities set. 
groups: - - pre-deployment - - pre-upgrade + - pre-deployment + - pre-upgrade diff --git a/roles/container-status/tasks/main.yaml b/roles/container-status/tasks/main.yaml index 64271864c..279a0d466 100644 --- a/roles/container-status/tasks/main.yaml +++ b/roles/container-status/tasks/main.yaml @@ -8,29 +8,29 @@ - when: "'Undercloud' in group_names" block: - - name: Set container_cli fact from undercloud.conf - block: - - name: Get the path of tripleo undercloud config file - become: true - hiera: - name: "tripleo_undercloud_conf_file" + - name: Set container_cli fact from undercloud.conf + block: + - name: Get the path of tripleo undercloud config file + become: true + hiera: + name: "tripleo_undercloud_conf_file" - - name: Get container client from undercloud.conf - validations_read_ini: - path: "{{ tripleo_undercloud_conf_file }}" - section: DEFAULT - key: container_cli - ignore_missing_file: true - register: container_cli + - name: Get container client from undercloud.conf + validations_read_ini: + path: "{{ tripleo_undercloud_conf_file }}" + section: DEFAULT + key: container_cli + ignore_missing_file: true + register: container_cli - - name: Set uc_container_cli for the Undercloud - set_fact: - uc_container_cli: "{{ container_cli.value|default('podman', true) }}" - when: uc_container_cli is not defined + - name: Set uc_container_cli for the Undercloud + set_fact: + uc_container_cli: "{{ container_cli.value|default('podman', true) }}" + when: uc_container_cli is not defined - name: Get failed containers for podman changed_when: false - become: True + become: true command: > {% if oc_container_cli is defined %}{{ oc_container_cli }}{% else %}{{ uc_container_cli }}{% endif %} {% raw %} diff --git a/roles/containerized-undercloud-docker/tasks/main.yml b/roles/containerized-undercloud-docker/tasks/main.yml index 01bbae2fb..8d3f24f13 100644 --- a/roles/containerized-undercloud-docker/tasks/main.yml +++ b/roles/containerized-undercloud-docker/tasks/main.yml @@ -2,7 
+2,7 @@ - name: gather docker facts docker_facts: container_filter: status=running - become: yes + become: true - name: compare running containers to list set_fact: @@ -25,6 +25,6 @@ state: started # Port should be open delay: 0 # No wait before first check (sec) timeout: 3 # Stop checking after timeout (sec) - ignore_errors: yes + ignore_errors: true loop: "{{ open_ports }}" when: ctlplane_ip is defined diff --git a/roles/controller-token/molecule/default/playbook.yml b/roles/controller-token/molecule/default/playbook.yml index 0333d430e..49be315b0 100644 --- a/roles/controller-token/molecule/default/playbook.yml +++ b/roles/controller-token/molecule/default/playbook.yml @@ -17,7 +17,7 @@ - name: Converge hosts: all - gather_facts: no + gather_facts: false tasks: - name: pass validation diff --git a/roles/controller-token/tasks/main.yml b/roles/controller-token/tasks/main.yml index b01d82b1c..e2e7bba80 100644 --- a/roles/controller-token/tasks/main.yml +++ b/roles/controller-token/tasks/main.yml @@ -5,7 +5,7 @@ path: "{{ keystone_conf_file }}" section: DEFAULT key: admin_token - ignore_missing_file: True + ignore_missing_file: true register: token_result - name: Check if token value is disabled. diff --git a/roles/controller-token/vars/main.yml b/roles/controller-token/vars/main.yml index 8f853b102..ff770158a 100644 --- a/roles/controller-token/vars/main.yml +++ b/roles/controller-token/vars/main.yml @@ -5,4 +5,4 @@ metadata: This validation checks that keystone admin token is disabled on both undercloud and overcloud controller after deployment. 
groups: - - post-deployment + - post-deployment diff --git a/roles/controller-ulimits/molecule/default/playbook.yml b/roles/controller-ulimits/molecule/default/playbook.yml index 05c451918..c71c231cb 100644 --- a/roles/controller-ulimits/molecule/default/playbook.yml +++ b/roles/controller-ulimits/molecule/default/playbook.yml @@ -17,7 +17,7 @@ - name: Converge hosts: all - gather_facts: no + gather_facts: false vars: nofiles_min: 102400 diff --git a/roles/controller-ulimits/tasks/main.yml b/roles/controller-ulimits/tasks/main.yml index 2c91845bb..8eddb5baf 100644 --- a/roles/controller-ulimits/tasks/main.yml +++ b/roles/controller-ulimits/tasks/main.yml @@ -4,7 +4,7 @@ # NOTE: `ulimit` is a shell builtin so we have to invoke it like this: command: sh -c "ulimit -n" register: nofilesval - changed_when: False + changed_when: false - name: Check nofiles limit fail: @@ -18,7 +18,7 @@ # NOTE: `ulimit` is a shell builtin so we have to invoke it like this: command: sh -c "ulimit -u" register: nprocval - changed_when: False + changed_when: false - name: Check nproc limit fail: diff --git a/roles/ctlplane-ip-range/molecule/default/playbook.yml b/roles/ctlplane-ip-range/molecule/default/playbook.yml index 5287cd61e..36542a07e 100644 --- a/roles/ctlplane-ip-range/molecule/default/playbook.yml +++ b/roles/ctlplane-ip-range/molecule/default/playbook.yml @@ -17,7 +17,7 @@ - name: Converge hosts: all - gather_facts: no + gather_facts: false tasks: - name: prepare directory tree for hiera diff --git a/roles/ctlplane-ip-range/molecule/default/prepare.yml b/roles/ctlplane-ip-range/molecule/default/prepare.yml index 40f2dabd8..832276934 100644 --- a/roles/ctlplane-ip-range/molecule/default/prepare.yml +++ b/roles/ctlplane-ip-range/molecule/default/prepare.yml @@ -17,7 +17,7 @@ - name: Prepare hosts: all - gather_facts: no + gather_facts: false tasks: - name: install hiera diff --git a/roles/ctlplane-ip-range/tasks/main.yml b/roles/ctlplane-ip-range/tasks/main.yml index 
4d0bfb8fe..5da9392f7 100644 --- a/roles/ctlplane-ip-range/tasks/main.yml +++ b/roles/ctlplane-ip-range/tasks/main.yml @@ -10,7 +10,7 @@ path: "{{ tripleo_undercloud_conf_file }}" section: ctlplane-subnet key: dhcp_start - ignore_missing_file: True + ignore_missing_file: true default: "192.0.2.5" register: dhcp_start @@ -20,7 +20,7 @@ path: "{{ tripleo_undercloud_conf_file }}" section: ctlplane-subnet key: dhcp_end - ignore_missing_file: True + ignore_missing_file: true default: "192.0.2.24" register: dhcp_end diff --git a/roles/default-node-count/vars/main.yml b/roles/default-node-count/vars/main.yml index 95dee3432..744ae8351 100644 --- a/roles/default-node-count/vars/main.yml +++ b/roles/default-node-count/vars/main.yml @@ -5,4 +5,4 @@ metadata: This validation checks that the nodes and hypervisor statistics add up. groups: - - pre-deployment + - pre-deployment diff --git a/roles/dhcp-validations/tasks/dhcp-introspection.yaml b/roles/dhcp-validations/tasks/dhcp-introspection.yaml index 37ac6df46..63ef1db0b 100644 --- a/roles/dhcp-validations/tasks/dhcp-introspection.yaml +++ b/roles/dhcp-validations/tasks/dhcp-introspection.yaml @@ -1,6 +1,6 @@ --- - name: Look up the introspection interface - become: True + become: true validations_read_ini: path: "{{ ironic_inspector_conf }}" section: iptables @@ -8,7 +8,7 @@ register: interface - name: Look up the introspection interface from the deprecated option - become: True + become: true validations_read_ini: path: "{{ ironic_inspector_conf }}" section: firewall @@ -17,4 +17,4 @@ - name: Look for rogue DHCP servers script: files/rogue_dhcp.py {{ interface.value or interface_deprecated.value or 'br-ctlplane' }} - changed_when: False + changed_when: false diff --git a/roles/dhcp-validations/tasks/dhcp-provisioning.yaml b/roles/dhcp-validations/tasks/dhcp-provisioning.yaml index c1addddae..7e5dec1e5 100644 --- a/roles/dhcp-validations/tasks/dhcp-provisioning.yaml +++ b/roles/dhcp-validations/tasks/dhcp-provisioning.yaml @@ 
-7,7 +7,7 @@ path: "{{ tripleo_undercloud_conf_file }}" section: DEFAULT key: local_interface - ignore_missing_file: True + ignore_missing_file: true register: local_interface - name: Look for DHCP responses diff --git a/roles/dns/tasks/main.yml b/roles/dns/tasks/main.yml index b78292bb5..aefea0804 100644 --- a/roles/dns/tasks/main.yml +++ b/roles/dns/tasks/main.yml @@ -1,4 +1,4 @@ --- - name: Ensure DNS resolution works command: "getent hosts {{ server_to_lookup }}" - changed_when: False + changed_when: false diff --git a/roles/haproxy/molecule/default/playbook.yml b/roles/haproxy/molecule/default/playbook.yml index a1a008424..9960f9788 100644 --- a/roles/haproxy/molecule/default/playbook.yml +++ b/roles/haproxy/molecule/default/playbook.yml @@ -17,7 +17,7 @@ - name: Converge hosts: all - gather_facts: no + gather_facts: false vars: haproxy_config_file: /haproxy.cfg diff --git a/roles/healthcheck-service-status/tasks/main.yml b/roles/healthcheck-service-status/tasks/main.yml index ae9603a77..2ee55205f 100644 --- a/roles/healthcheck-service-status/tasks/main.yml +++ b/roles/healthcheck-service-status/tasks/main.yml @@ -2,7 +2,7 @@ - name: Get the healthcheck services list enabled on node shell: > systemctl list-unit-files | grep "^tripleo.*healthcheck.*enabled" | awk -F'.' 
'{print $1}' - changed_when: False + changed_when: false register: healthcheck_services_list when: inflight_healthcheck_services | length < 1 @@ -23,7 +23,7 @@ until: - systemd_healthcheck_state.status.ExecMainPID != '0' - systemd_healthcheck_state.status.ActiveState in ['inactive', 'failed'] - ignore_errors: True + ignore_errors: true register: systemd_healthcheck_state with_items: "{{ hc_services }}" diff --git a/roles/image-serve/molecule/default/molecule.yml b/roles/image-serve/molecule/default/molecule.yml index 656d826fd..82496abe9 100644 --- a/roles/image-serve/molecule/default/molecule.yml +++ b/roles/image-serve/molecule/default/molecule.yml @@ -8,7 +8,7 @@ platforms: - name: centos7 hostname: centos7 image: centos:7 - override_command: True + override_command: true command: python -m SimpleHTTPServer 8787 pkg_extras: python-setuptools python-enum34 python-netaddr epel-release ruby PyYAML easy_install: @@ -20,7 +20,7 @@ platforms: - name: fedora28 hostname: fedora28 image: fedora:28 - override_command: True + override_command: true command: python3 -m http.server 8787 pkg_extras: python*-setuptools python*-enum python*-netaddr ruby PyYAML environment: diff --git a/roles/image-serve/molecule/default/playbook.yml b/roles/image-serve/molecule/default/playbook.yml index 36c487c96..ffe0169ba 100644 --- a/roles/image-serve/molecule/default/playbook.yml +++ b/roles/image-serve/molecule/default/playbook.yml @@ -17,7 +17,7 @@ - name: Converge hosts: all - gather_facts: no + gather_facts: false tasks: - name: detect wrong port @@ -39,7 +39,7 @@ block: - name: run validation for 404 include_role: - name: image-serve + name: image-serve rescue: - name: Clear host errors meta: clear_host_errors diff --git a/roles/image-serve/molecule/default/prepare.yml b/roles/image-serve/molecule/default/prepare.yml index 2fd21898e..e2dc73221 100644 --- a/roles/image-serve/molecule/default/prepare.yml +++ b/roles/image-serve/molecule/default/prepare.yml @@ -17,7 +17,7 @@ - name: 
Prepare hosts: all - gather_facts: no + gather_facts: false tasks: - name: install hiera diff --git a/roles/image-serve/tasks/main.yaml b/roles/image-serve/tasks/main.yaml index eeb52352b..7ce364241 100644 --- a/roles/image-serve/tasks/main.yaml +++ b/roles/image-serve/tasks/main.yaml @@ -10,7 +10,7 @@ path: "{{ tripleo_undercloud_conf_file }}" section: DEFAULT key: local_ip - ignore_missing_file: True + ignore_missing_file: true register: local_ip - name: Set container registry host diff --git a/roles/image-serve/vars/main.yml b/roles/image-serve/vars/main.yml index b546b9599..e8a11f1a4 100644 --- a/roles/image-serve/vars/main.yml +++ b/roles/image-serve/vars/main.yml @@ -1,3 +1,4 @@ +--- metadata: name: Image-serve availability description: Verify that image-serve service is ready diff --git a/roles/mysql-open-files-limit/tasks/main.yml b/roles/mysql-open-files-limit/tasks/main.yml index df93dcec1..439fa50ba 100644 --- a/roles/mysql-open-files-limit/tasks/main.yml +++ b/roles/mysql-open-files-limit/tasks/main.yml @@ -9,7 +9,7 @@ "{{ container_cli|default('podman', true) }}" exec -u root $("{{ container_cli|default('podman', true) }}" ps -q --filter "name=mysql|galera-bundle" | head -1) /bin/bash -c 'ulimit -n' - changed_when: False + changed_when: false register: mysqld_open_files_limit - name: Test the open-files-limit value diff --git a/roles/neutron-sanity-check/tasks/main.yml b/roles/neutron-sanity-check/tasks/main.yml index 19081a17d..d062946f8 100644 --- a/roles/neutron-sanity-check/tasks/main.yml +++ b/roles/neutron-sanity-check/tasks/main.yml @@ -7,24 +7,24 @@ - when: "'Undercloud' in group_names" block: - - name: Get the path of tripleo undercloud config file - become: true - hiera: - name: "tripleo_undercloud_conf_file" + - name: Get the path of tripleo undercloud config file + become: true + hiera: + name: "tripleo_undercloud_conf_file" - - name: Get the Container CLI from the undercloud.conf file - become: true - validations_read_ini: - path: "{{ 
tripleo_undercloud_conf_file }}" - section: DEFAULT - key: container_cli - ignore_missing_file: true - register: container_cli + - name: Get the Container CLI from the undercloud.conf file + become: true + validations_read_ini: + path: "{{ tripleo_undercloud_conf_file }}" + section: DEFAULT + key: container_cli + ignore_missing_file: true + register: container_cli - - name: Set uc_container_cli and container_name for the Undercloud - set_fact: - uc_container_cli: "{{ container_cli.value|default('podman', true) }}" - container_name: "neutron_ovs_agent" + - name: Set uc_container_cli and container_name for the Undercloud + set_fact: + uc_container_cli: "{{ container_cli.value|default('podman', true) }}" + container_name: "neutron_ovs_agent" - name: Run neutron-sanity-check command: > @@ -35,7 +35,7 @@ become: true register: nsc_return ignore_errors: true - changed_when: False + changed_when: false - name: Detect errors set_fact: diff --git a/roles/no-op-firewall-nova-driver/molecule/default/playbook.yml b/roles/no-op-firewall-nova-driver/molecule/default/playbook.yml index 8f3b81706..77beec6ed 100644 --- a/roles/no-op-firewall-nova-driver/molecule/default/playbook.yml +++ b/roles/no-op-firewall-nova-driver/molecule/default/playbook.yml @@ -17,7 +17,7 @@ - name: Converge hosts: all - gather_facts: no + gather_facts: false vars: nova_conf_path: "/nova.conf" @@ -48,7 +48,7 @@ section: DEFAULT option: firewall_driver value: CHANGEME - backup: yes + backup: true - include_role: name: no-op-firewall-nova-driver diff --git a/roles/no-op-firewall-nova-driver/vars/main.yml b/roles/no-op-firewall-nova-driver/vars/main.yml index 960361ae4..e86bfaa37 100644 --- a/roles/no-op-firewall-nova-driver/vars/main.yml +++ b/roles/no-op-firewall-nova-driver/vars/main.yml @@ -5,4 +5,4 @@ metadata: When using Neutron, the `firewall_driver` option in Nova must be set to `NoopFirewallDriver`. 
groups: - - post-deployment + - post-deployment diff --git a/roles/node-health/tasks/main.yml b/roles/node-health/tasks/main.yml index ee27468b0..fc3855b67 100644 --- a/roles/node-health/tasks/main.yml +++ b/roles/node-health/tasks/main.yml @@ -6,7 +6,7 @@ - name: Ping all overcloud nodes icmp_ping: - host: "{{ item }}" + host: "{{ item }}" with_items: "{{ oc_ips.results | map(attribute='ansible_facts.ansible_host') | list }}" ignore_errors: true register: ping_results diff --git a/roles/nova-event-callback/tasks/main.yml b/roles/nova-event-callback/tasks/main.yml index d2924cd8a..557d390cf 100644 --- a/roles/nova-event-callback/tasks/main.yml +++ b/roles/nova-event-callback/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: Get VIF Plugging setting values from nova.conf - become: True + become: true validations_read_ini: path: "{{ nova_config_file }}" section: DEFAULT @@ -21,14 +21,14 @@ with_items: "{{ nova_config_result.results }}" - name: Get auth_url value from hiera - become: True + become: true command: hiera -c /etc/puppet/hiera.yaml neutron::server::notifications::auth_url - ignore_errors: True - changed_when: False + ignore_errors: true + changed_when: false register: auth_url - name: Get auth_url value from neutron.conf - become: True + become: true validations_read_ini: path: "{{ neutron_config_file }}" section: nova @@ -45,7 +45,7 @@ failed_when: "neutron_auth_url_result.value != auth_url.stdout" - name: Get Notify Nova settings values from neutron.conf - become: True + become: true validations_read_ini: path: "{{ neutron_config_file }}" section: DEFAULT @@ -63,7 +63,7 @@ with_items: "{{ neutron_notify_nova_result.results }}" - name: Get Tenant Name setting value from neutron.conf - become: True + become: true validations_read_ini: path: "{{ neutron_config_file }}" section: nova diff --git a/roles/nova-status/molecule/default/playbook.yml b/roles/nova-status/molecule/default/playbook.yml index e02a6e24f..a9c1ca31a 100644 --- 
a/roles/nova-status/molecule/default/playbook.yml +++ b/roles/nova-status/molecule/default/playbook.yml @@ -17,7 +17,7 @@ - name: Converge hosts: all - gather_facts: no + gather_facts: false tasks: - name: working detection diff --git a/roles/nova-status/molecule/default/prepare.yml b/roles/nova-status/molecule/default/prepare.yml index f683a7ab9..e6cddc634 100644 --- a/roles/nova-status/molecule/default/prepare.yml +++ b/roles/nova-status/molecule/default/prepare.yml @@ -17,7 +17,7 @@ - name: Prepare hosts: all - gather_facts: no + gather_facts: false tasks: - name: Populate successful podman CLI diff --git a/roles/nova-status/tasks/main.yml b/roles/nova-status/tasks/main.yml index 2b9d0540d..5f26b8173 100644 --- a/roles/nova-status/tasks/main.yml +++ b/roles/nova-status/tasks/main.yml @@ -7,7 +7,7 @@ - name: Check nova upgrade status become: true command: "{{ container_cli }} exec -u root nova_api nova-status upgrade check" - changed_when: False + changed_when: false register: nova_upgrade_check - name: Warn if at least one check encountered an issue diff --git a/roles/ntp/tasks/main.yml b/roles/ntp/tasks/main.yml index ec761ba08..36c67faec 100644 --- a/roles/ntp/tasks/main.yml +++ b/roles/ntp/tasks/main.yml @@ -1,26 +1,26 @@ --- - name: Get if chrony is enabled - become: True + become: true hiera: name: "chrony_enabled" - when: chrony_enabled|bool block: - - name: Populate service facts - service_facts: # needed to make yaml happy + - name: Populate service facts + service_facts: # needed to make yaml happy - - name: Fail if chronyd service is not running - fail: - msg: "Chronyd service is not running" - when: "ansible_facts.services['chronyd.service'].state != 'running'" + - name: Fail if chronyd service is not running + fail: + msg: "Chronyd service is not running" + when: "ansible_facts.services['chronyd.service'].state != 'running'" - - name: Run chronyc - become: True - command: chronyc -a 'burst 4/4' - changed_when: False + - name: Run chronyc + become: 
true + command: chronyc -a 'burst 4/4' + changed_when: false +# ntpstat returns 0 if synchronised and non-zero otherwise: - name: Run ntpstat - # ntpstat returns 0 if synchronised and non-zero otherwise: command: ntpstat - changed_when: False + changed_when: false when: not chrony_enabled|bool diff --git a/roles/ntp/vars/main.yml b/roles/ntp/vars/main.yml index 3c3788142..992ea6f9b 100644 --- a/roles/ntp/vars/main.yml +++ b/roles/ntp/vars/main.yml @@ -7,4 +7,4 @@ metadata: The deployment should configure and run chronyd. This validation verifies that it is indeed running and connected to an NTP server on all nodes. groups: - - post-deployment + - post-deployment diff --git a/roles/openshift-on-openstack/defaults/main.yml b/roles/openshift-on-openstack/defaults/main.yml index 8a81304db..55318dd52 100644 --- a/roles/openshift-on-openstack/defaults/main.yml +++ b/roles/openshift-on-openstack/defaults/main.yml @@ -9,5 +9,5 @@ min_node_ram_testing: 4096 # Minimum ram per node for testing min_node_disk_testing: 40 # Minimum disk per node for testing min_node_ram_prod: 16384 # Minimum ram per node for production min_node_disk_prod: 42 # Minimum disk per node for production -resource_reqs_testing: False -resource_reqs_prod: False +resource_reqs_testing: false +resource_reqs_prod: false diff --git a/roles/openshift-on-openstack/tasks/openshift-hw-requirements.yaml b/roles/openshift-on-openstack/tasks/openshift-hw-requirements.yaml index 1689c683d..9f47b9475 100644 --- a/roles/openshift-on-openstack/tasks/openshift-hw-requirements.yaml +++ b/roles/openshift-on-openstack/tasks/openshift-hw-requirements.yaml @@ -23,7 +23,7 @@ domain: id: default password: "{{ overcloud_admin_password }}" - return_content: yes + return_content: true status_code: 201 register: keystone_result no_log: true @@ -53,7 +53,7 @@ headers: X-Auth-Token: "{{ auth_token }}" Accept: application/vnd.openstack.compute.v2.1+json - return_content: yes + return_content: true follow_redirects: all register: 
flavors_result_testing @@ -64,7 +64,7 @@ headers: X-Auth-Token: "{{ auth_token }}" Accept: application/vnd.openstack.compute.v2.1+json - return_content: yes + return_content: true follow_redirects: all register: flavors_result_prod @@ -89,7 +89,7 @@ headers: X-Auth-Token: "{{ auth_token }}" Accept: application/vnd.openstack.compute.v2.1+json - return_content: yes + return_content: true follow_redirects: all register: hypervisors_result @@ -116,7 +116,7 @@ method: GET headers: X-Auth-Token: "{{ auth_token }}" - return_content: yes + return_content: true follow_redirects: all register: images diff --git a/roles/openshift-on-openstack/tasks/openshift-nw-requirements.yaml b/roles/openshift-on-openstack/tasks/openshift-nw-requirements.yaml index a351a1332..3050589e6 100644 --- a/roles/openshift-on-openstack/tasks/openshift-nw-requirements.yaml +++ b/roles/openshift-on-openstack/tasks/openshift-nw-requirements.yaml @@ -1,7 +1,7 @@ --- - name: Set fact to identify if the overcloud was deployed set_fact: - overcloud_deployed: "{{ groups['overcloud'] is defined }}" + overcloud_deployed: "{{ groups['overcloud'] is defined }}" - name: Warn if no overcloud deployed yet warn: @@ -12,62 +12,62 @@ - when: overcloud_deployed|bool block: - # Get auth token and service catalog from Keystone and extract service urls. - - name: Get token and catalog from Keystone - uri: - url: "{{ overcloud_keystone_url - | urlsplit('scheme') }}://{{ overcloud_keystone_url - | urlsplit('netloc') }}/v3/auth/tokens" - method: POST - body_format: json - body: - auth: - scope: - project: - name: admin - domain: - id: default - identity: - methods: - - password - password: - user: + # Get auth token and service catalog from Keystone and extract service urls. 
+ - name: Get token and catalog from Keystone + uri: + url: "{{ overcloud_keystone_url + | urlsplit('scheme') }}://{{ overcloud_keystone_url + | urlsplit('netloc') }}/v3/auth/tokens" + method: POST + body_format: json + body: + auth: + scope: + project: name: admin domain: id: default - password: "{{ overcloud_admin_password }}" - return_content: yes - status_code: 201 - register: keystone_result - no_log: true - when: overcloud_keystone_url|default('') + identity: + methods: + - password + password: + user: + name: admin + domain: + id: default + password: "{{ overcloud_admin_password }}" + return_content: true + status_code: 201 + register: keystone_result + no_log: true + when: overcloud_keystone_url|default('') - - name: Set auth token - set_fact: token="{{ keystone_result.x_subject_token }}" + - name: Set auth token + set_fact: token="{{ keystone_result.x_subject_token }}" - - name: Get Neutron URL from catalog - set_fact: neutron_url="{{ keystone_result.json.token - | json_query("catalog[?name=='neutron'].endpoints") - | first - | selectattr('interface', 'equalto', 'public') - | map(attribute='url') | first }}" + - name: Get Neutron URL from catalog + set_fact: neutron_url="{{ keystone_result.json.token + | json_query("catalog[?name=='neutron'].endpoints") + | first + | selectattr('interface', 'equalto', 'public') + | map(attribute='url') | first }}" - # Get overcloud networks from Neutron and check if there is - # a network with a common name for external networks. - - name: Get networks from Neutron - uri: - url: "{{ neutron_url }}/v2.0/networks?router:external=true" - method: GET - headers: - X-Auth-Token: "{{ token }}" - return_content: yes - follow_redirects: all - register: networks_result + # Get overcloud networks from Neutron and check if there is + # a network with a common name for external networks. 
+ - name: Get networks from Neutron + uri: + url: "{{ neutron_url }}/v2.0/networks?router:external=true" + method: GET + headers: + X-Auth-Token: "{{ token }}" + return_content: true + follow_redirects: all + register: networks_result - - name: Warn if there are no matching networks - warn: - msg: | - No external network found. It is strongly recommended that you - configure an external Neutron network with a floating IP address - pool. - when: networks_result.json.networks | length == 0 + - name: Warn if there are no matching networks + warn: + msg: | + No external network found. It is strongly recommended that you + configure an external Neutron network with a floating IP address + pool. + when: networks_result.json.networks | length == 0 diff --git a/roles/openstack-endpoints/tasks/main.yml b/roles/openstack-endpoints/tasks/main.yml index 3f3bd4b8d..d59d6a43f 100644 --- a/roles/openstack-endpoints/tasks/main.yml +++ b/roles/openstack-endpoints/tasks/main.yml @@ -1,7 +1,7 @@ --- - name: Set fact to identify if the overcloud was deployed set_fact: - overcloud_deployed: "{{ groups['overcloud'] is defined }}" + overcloud_deployed: "{{ groups['overcloud'] is defined }}" # Check that the Horizon endpoint exists - name: Fail if the HorizonPublic endpoint is not defined @@ -30,7 +30,7 @@ # Check that we can obtain an auth token from horizon - name: Check Keystone - no_log: True + no_log: true uri: url: "{{ overcloud_keystone_url | urlsplit('scheme') }}://{{ overcloud_keystone_url | urlsplit('netloc') }}/v3/auth/tokens" method: POST @@ -46,7 +46,7 @@ domain: name: Default password: "{{ overcloud_admin_password }}" - return_content: yes + return_content: true status_code: 201 register: auth_token when: overcloud_keystone_url|default('') diff --git a/roles/ovs-dpdk-pmd/tasks/main.yml b/roles/ovs-dpdk-pmd/tasks/main.yml index 4317c5f9e..87b522148 100644 --- a/roles/ovs-dpdk-pmd/tasks/main.yml +++ b/roles/ovs-dpdk-pmd/tasks/main.yml @@ -1,12 +1,12 @@ --- - name: Get OVS DPDK 
PMD cores mask value become_method: sudo - become: True + become: true register: pmd_cpu_mask command: ovs-vsctl --no-wait get Open_vSwitch . other_config:pmd-cpu-mask - changed_when: False + changed_when: false - name: Run OVS DPDK PMD cores check - become: True + become: true ovs_dpdk_pmd_cpus_check: pmd_cpu_mask: "{{ pmd_cpu_mask.stdout }}" diff --git a/roles/pacemaker-status/tasks/main.yml b/roles/pacemaker-status/tasks/main.yml index 8f412fb12..27840fb80 100644 --- a/roles/pacemaker-status/tasks/main.yml +++ b/roles/pacemaker-status/tasks/main.yml @@ -1,10 +1,10 @@ --- - name: Check pacemaker service is running - become: True + become: true command: "/usr/bin/systemctl show pacemaker --property ActiveState" register: check_service - changed_when: False - ignore_errors: True + changed_when: false + ignore_errors: true - when: "check_service.stdout == 'ActiveState=active'" block: @@ -12,7 +12,7 @@ become: true command: pcs status xml register: pcs_status - changed_when: False + changed_when: false - name: Check pacemaker status pacemaker: status: "{{ pcs_status.stdout }}" diff --git a/roles/rabbitmq-limits/molecule/default/playbook.yml b/roles/rabbitmq-limits/molecule/default/playbook.yml index 41e928253..4a107517f 100644 --- a/roles/rabbitmq-limits/molecule/default/playbook.yml +++ b/roles/rabbitmq-limits/molecule/default/playbook.yml @@ -17,7 +17,7 @@ - name: Converge hosts: all - gather_facts: no + gather_facts: false tasks: - name: working detection diff --git a/roles/rabbitmq-limits/molecule/default/prepare.yml b/roles/rabbitmq-limits/molecule/default/prepare.yml index 8c782c78a..6e653795d 100644 --- a/roles/rabbitmq-limits/molecule/default/prepare.yml +++ b/roles/rabbitmq-limits/molecule/default/prepare.yml @@ -17,7 +17,7 @@ - name: Prepare hosts: all - gather_facts: no + gather_facts: false tasks: - name: Populate successful podman CLI diff --git a/roles/repos/molecule/default/playbook.yml b/roles/repos/molecule/default/playbook.yml index 
ad373cf0a..37c832ce1 100644 --- a/roles/repos/molecule/default/playbook.yml +++ b/roles/repos/molecule/default/playbook.yml @@ -30,7 +30,7 @@ name: faulty description: really faulty repository baseurl: http://this.repository.do-not.exists/like-not-at-all - enabled: yes + enabled: true - name: execute role include_role: @@ -56,7 +56,7 @@ name: faulty-bis description: faulty repository with working DNS baseurl: http://download.fedoraproject.org/pub/fedora/blah - enabled: yes + enabled: true - name: execute role include_role: diff --git a/roles/repos/tasks/main.yml b/roles/repos/tasks/main.yml index b18c9a86a..b9178f67b 100644 --- a/roles/repos/tasks/main.yml +++ b/roles/repos/tasks/main.yml @@ -1,11 +1,11 @@ --- - name: List repositories - become: True + become: true shell: | {{ ansible_pkg_mgr }} repolist enabled -v 2>&1 || exit 0 args: - warn: no - changed_when: False + warn: false + changed_when: false register: repositories - name: Fail if we detect error in repolist output @@ -16,7 +16,7 @@ repositories.stdout is regex('(cannot|could not|failure)', ignorecase=True) - name: Find repository IDs - changed_when: False + changed_when: false shell: 'echo "{{ repositories.stdout }}" | grep Repo-id | sed "s/Repo-id.*://" | tr -d " "' register: repository_ids @@ -25,5 +25,5 @@ msg: Found unwanted repository {{ item.0 }} enabled when: item.0 == item.1 with_nested: - - [ 'epel/x86_64' ] + - ['epel/x86_64'] - "{{ repository_ids.stdout_lines }}" diff --git a/roles/service-status/meta/main.yml b/roles/service-status/meta/main.yml deleted file mode 100644 index b3d5ccc9f..000000000 --- a/roles/service-status/meta/main.yml +++ /dev/null @@ -1,27 +0,0 @@ -galaxy_info: - author: TripleO Validations Team - company: Red Hat - license: Apache - min_ansible_version: 2.4 - - platforms: - - name: CentOS - versions: - - 7 - - name: RHEL - versions: - - 7 - - categories: - - cloud - - baremetal - - system - galaxy_tags: [] - # List tags for your role here, one per line. 
A tag is a keyword that describes - # and categorizes the role. Users find roles by searching for tags. Be sure to - # remove the '[]' above, if you add tags to this list. - # - # NOTE: A tag is limited to a single word comprised of alphanumeric characters. - # Maximum 20 tags per role. - -dependencies: [] diff --git a/roles/service-status/tasks/main.yaml b/roles/service-status/tasks/main.yaml index 756841495..e1e0e0975 100644 --- a/roles/service-status/tasks/main.yaml +++ b/roles/service-status/tasks/main.yaml @@ -4,7 +4,7 @@ systemctl list-units --failed --plain --no-legend --no-pager | awk '{print $1}' register: systemd_status - changed_when: False + changed_when: false - name: Fails if we find failed units assert: diff --git a/roles/stonith-exists/molecule/default/prepare.yml b/roles/stonith-exists/molecule/default/prepare.yml index c47fc2079..eba0c86ac 100644 --- a/roles/stonith-exists/molecule/default/prepare.yml +++ b/roles/stonith-exists/molecule/default/prepare.yml @@ -17,7 +17,7 @@ - name: Prepare hosts: all - gather_facts: no + gather_facts: false tasks: - name: Populate successful stonith diff --git a/roles/stonith-exists/tasks/main.yml b/roles/stonith-exists/tasks/main.yml index 1c23c4249..4277134ec 100644 --- a/roles/stonith-exists/tasks/main.yml +++ b/roles/stonith-exists/tasks/main.yml @@ -1,13 +1,13 @@ --- - name: Check if we are in HA cluster environment - become: True + become: true register: pcs_cluster_status command: pcs cluster status failed_when: false changed_when: false - name: Get all currently configured stonith devices - become: True + become: true command: "pcs stonith" register: stonith_devices changed_when: false diff --git a/roles/stonith-exists/vars/main.yml b/roles/stonith-exists/vars/main.yml index 61ac01ad8..34a26d153 100644 --- a/roles/stonith-exists/vars/main.yml +++ b/roles/stonith-exists/vars/main.yml @@ -5,6 +5,7 @@ metadata: Verify that stonith devices are configured for your OpenStack Platform HA cluster. 
We don't configure stonith device with TripleO Installer. Because the hardware configuration may be differ in each environment and requires different fence agents. - How to configure fencing please read https://access.redhat.com/documentation/en/red-hat-openstack-platform/8/paged/director-installation-and-usage/86-fencing-the-controller-nodes + How to configure fencing please read + https://access.redhat.com/documentation/en/red-hat-openstack-platform/8/paged/director-installation-and-usage/86-fencing-the-controller-nodes groups: - post-deployment diff --git a/roles/tls-everywhere/tasks/common.yaml b/roles/tls-everywhere/tasks/common.yaml index fca236fb5..1b2ed4d98 100644 --- a/roles/tls-everywhere/tasks/common.yaml +++ b/roles/tls-everywhere/tasks/common.yaml @@ -127,19 +127,19 @@ path: "/etc/ipa/default.conf" section: global key: realm - ignore_missing_file: False + ignore_missing_file: false register: ipa_realm - check_mode: no + check_mode: false - name: Set fact for IdM/FreeIPA host entry set_fact: host_entry: "{{ ansible_fqdn }}@{{ ipa_realm.value }}" - when: ipa_conf_stat.stat.exists + when: ipa_conf_stat.stat.exists - name: Set fact for IdM/FreeIPA host principal set_fact: host_principal: "host/{{ host_entry }}" - when: ipa_conf_stat.stat.exists + when: ipa_conf_stat.stat.exists # Kerberos keytab related tasks - name: Check for kerberos host keytab @@ -182,7 +182,7 @@ changed_when: false become: true when: krb5_keytab_stat.stat.exists - check_mode: no + check_mode: false - name: Set facts for host principals in /etc/krb5.keytab set_fact: diff --git a/roles/tls-everywhere/tasks/overcloud-post-deployment.yaml b/roles/tls-everywhere/tasks/overcloud-post-deployment.yaml index 37dcb70f4..843c8b793 100644 --- a/roles/tls-everywhere/tasks/overcloud-post-deployment.yaml +++ b/roles/tls-everywhere/tasks/overcloud-post-deployment.yaml @@ -4,7 +4,7 @@ become: true hiera: name: "certmonger_user_enabled" - check_mode: no + check_mode: false - name: Set facts for 
certmonger user service not enabled set_fact: @@ -36,7 +36,7 @@ become: true changed_when: false register: all_certnames - check_mode: no + check_mode: false # Get status of all certificates and trim the leading whitespaces - name: Get status of all certificates @@ -47,7 +47,7 @@ loop_control: loop_var: certname register: all_cert_status - check_mode: no + check_mode: false - name: Gather certificates that are not in MONITORING status set_fact: diff --git a/roles/tls-everywhere/tasks/pre-deployment-containerized.yaml b/roles/tls-everywhere/tasks/pre-deployment-containerized.yaml index 51768c4fa..68d6c4b3e 100644 --- a/roles/tls-everywhere/tasks/pre-deployment-containerized.yaml +++ b/roles/tls-everywhere/tasks/pre-deployment-containerized.yaml @@ -3,7 +3,7 @@ - name: Verify that join.conf exists (containzerized) command: "{{ command_prefix }} exec novajoin_server test -e /etc/novajoin/join.conf" register: containerized_join_conf_st - changed_when: False + changed_when: false become: true - name: Fail if join.conf is not present (containerized) @@ -21,9 +21,9 @@ path: "{{ joinconf_location }}" section: DEFAULT key: keytab - ignore_missing_file: True + ignore_missing_file: true register: novajoin_keytab_path - check_mode: no + check_mode: false - name: Get novajoin server port from join.conf become: true @@ -31,9 +31,9 @@ path: "{{ joinconf_location }}" section: DEFAULT key: join_listen_port - ignore_missing_file: True + ignore_missing_file: true register: novajoin_server_port - check_mode: no + check_mode: false - name: Get novajoin server host from join.conf become: true @@ -41,9 +41,9 @@ path: "{{ joinconf_location }}" section: DEFAULT key: join_listen - ignore_missing_file: True + ignore_missing_file: true register: novajoin_server_host - check_mode: no + check_mode: false ### verify that the keytab and principal are usable ### # TODO(alee): We need to move this to a subfile so we can run @@ -91,7 +91,7 @@ command: "{{ command_prefix }} exec novajoin_server 
kdestroy -c {{ item }}" with_items: "{{ temp_krb_caches }}" ignore_errors: false - changed_when: False + changed_when: false become: true when: - containerized_novajoin_krb5_keytab_stat.rc == 0 diff --git a/roles/tls-everywhere/tasks/pre-deployment-non-containerized.yaml b/roles/tls-everywhere/tasks/pre-deployment-non-containerized.yaml index 549083e8b..a1f203542 100644 --- a/roles/tls-everywhere/tasks/pre-deployment-non-containerized.yaml +++ b/roles/tls-everywhere/tasks/pre-deployment-non-containerized.yaml @@ -20,9 +20,9 @@ path: "{{ joinconf_location }}" section: DEFAULT key: keytab - ignore_missing_file: True + ignore_missing_file: true register: novajoin_keytab_path - check_mode: no + check_mode: false - name: Get novajoin server port from join.conf become: true @@ -30,9 +30,9 @@ path: "{{ joinconf_location }}" section: DEFAULT key: join_listen_port - ignore_missing_file: True + ignore_missing_file: true register: novajoin_server_port - check_mode: no + check_mode: false - name: Get novajoin server host from join.conf become: true @@ -40,9 +40,9 @@ path: "{{ joinconf_location }}" section: DEFAULT key: join_listen - ignore_missing_file: True + ignore_missing_file: true register: novajoin_server_host - check_mode: no + check_mode: false ### verify that the keytab and principal are usable ### # TODO(alee): We need to move this to a subfile so we can run @@ -191,4 +191,3 @@ report_status: "{{ service_running_status }}" report_reason: "{{ service_running_reason }}" report_recommendations: "{{ service_running_recommendations }}" - diff --git a/roles/tls-everywhere/tasks/pre-deployment.yaml b/roles/tls-everywhere/tasks/pre-deployment.yaml index e485a7b76..fd18ba9e0 100644 --- a/roles/tls-everywhere/tasks/pre-deployment.yaml +++ b/roles/tls-everywhere/tasks/pre-deployment.yaml @@ -6,7 +6,7 @@ - name: Get the path of tripleo undercloud config file become: true hiera: name="tripleo_undercloud_conf_file" - check_mode: no + check_mode: false - name: Get the Container CLI 
from the undercloud.conf file (stein+) become: true @@ -27,25 +27,25 @@ - not podman_install|bool - not docker_install|bool block: - - name: Determine if Docker is enabled and has containers running - command: docker ps -q - register: docker_ps - become: true - ignore_errors: true + - name: Determine if Docker is enabled and has containers running + command: docker ps -q + register: docker_ps + become: true + ignore_errors: true - - name: Set container facts - set_fact: - docker_install: true - when: not docker_ps.stdout|length == 0 + - name: Set container facts + set_fact: + docker_install: true + when: not docker_ps.stdout|length == 0 - - name: Set container facts - set_fact: - docker_install: false - when: docker_ps.stdout|length == 0 + - name: Set container facts + set_fact: + docker_install: false + when: docker_ps.stdout|length == 0 - - name: Set container facts - set_fact: - podman_install: false + - name: Set container facts + set_fact: + podman_install: false - name: Set podman command prefix set_fact: diff --git a/roles/tls-everywhere/tasks/prep.yaml b/roles/tls-everywhere/tasks/prep.yaml index d2e653e2d..5cdab1149 100644 --- a/roles/tls-everywhere/tasks/prep.yaml +++ b/roles/tls-everywhere/tasks/prep.yaml @@ -3,7 +3,7 @@ become: true hiera: name: "tripleo_undercloud_conf_file" - check_mode: no + check_mode: false - name: Verify that nameservers are set in undercloud.conf become: true @@ -11,9 +11,9 @@ path: "{{ tripleo_undercloud_conf_file }}" section: DEFAULT key: undercloud_nameservers - ignore_missing_file: False + ignore_missing_file: false register: undercloud_nameservers - check_mode: no + check_mode: false - name: Check that nameservers point to IdM/FreeIPA set_fact: @@ -52,7 +52,7 @@ shell: host {{ undercloud_conf_dns_query }} | awk '{print $5}' register: host_from_ip_reg changed_when: false - check_mode: no + check_mode: false - name: Get domain as set in undercloud.conf become: true @@ -60,9 +60,9 @@ path: "{{ tripleo_undercloud_conf_file }}" 
section: DEFAULT key: overcloud_domain_name - ignore_missing_file: False + ignore_missing_file: false register: undercloud_overcloud_domain - check_mode: no + check_mode: false - name: Set facts undercloud.conf domain is not configured correctly set_fact: @@ -96,9 +96,9 @@ path: "{{ tripleo_undercloud_conf_file }}" section: DEFAULT key: enable_novajoin - ignore_missing_file: False + ignore_missing_file: false register: undercloud_enable_novajoin - check_mode: no + check_mode: false - name: Set facts undercloud.conf enable novajoin is disabled set_fact: diff --git a/roles/undercloud-debug/defaults/main.yml b/roles/undercloud-debug/defaults/main.yml index 765049d9f..c2a23cb39 100644 --- a/roles/undercloud-debug/defaults/main.yml +++ b/roles/undercloud-debug/defaults/main.yml @@ -1,6 +1,6 @@ --- -debug_check: True +debug_check: true services_conf_files: - /var/lib/config-data/puppet-generated/nova/etc/nova/nova.conf - /var/lib/config-data/puppet-generated/neutron/etc/neutron/neutron.conf diff --git a/roles/undercloud-debug/molecule/default/playbook.yml b/roles/undercloud-debug/molecule/default/playbook.yml index f87dddf11..3b44fcbb7 100644 --- a/roles/undercloud-debug/molecule/default/playbook.yml +++ b/roles/undercloud-debug/molecule/default/playbook.yml @@ -17,7 +17,7 @@ - name: Converge hosts: all - gather_facts: no + gather_facts: false vars: services_conf_files: @@ -29,13 +29,13 @@ dest: /tmp/debug_true_1.conf content: | [DEFAULT] - debug: True + debug: true - name: Checking good value include_role: name: undercloud-debug vars: - debug_check: False + debug_check: false - name: Should fail due to bad value block: diff --git a/roles/undercloud-debug/tasks/main.yml b/roles/undercloud-debug/tasks/main.yml index 69e8cfcf1..64922b5c8 100644 --- a/roles/undercloud-debug/tasks/main.yml +++ b/roles/undercloud-debug/tasks/main.yml @@ -1,11 +1,11 @@ --- - name: Check the services for debug flag - become: True + become: true validations_read_ini: path: "{{ item }}" section: 
DEFAULT key: debug - ignore_missing_file: True + ignore_missing_file: true register: config_result with_items: "{{ services_conf_files }}" failed_when: "debug_check|bool == config_result.value|bool" diff --git a/roles/undercloud-disk-space/defaults/main.yml b/roles/undercloud-disk-space/defaults/main.yml index 606e060f9..1484cfdb4 100644 --- a/roles/undercloud-disk-space/defaults/main.yml +++ b/roles/undercloud-disk-space/defaults/main.yml @@ -1,9 +1,8 @@ --- - volumes: - - {mount: /var/lib/docker, min_size: 10} + - {mount: /var/lib/docker, min_size: 10} - {mount: /var/lib/config-data, min_size: 3} - - {mount: /var/log, min_size: 3} - - {mount: /usr, min_size: 5} - - {mount: /var, min_size: 20} - - {mount: /, min_size: 25} + - {mount: /var/log, min_size: 3} + - {mount: /usr, min_size: 5} + - {mount: /var, min_size: 20} + - {mount: /, min_size: 25} diff --git a/roles/undercloud-disk-space/tasks/main.yml b/roles/undercloud-disk-space/tasks/main.yml index e04ccef3f..67c989bac 100644 --- a/roles/undercloud-disk-space/tasks/main.yml +++ b/roles/undercloud-disk-space/tasks/main.yml @@ -25,13 +25,13 @@ shell: df -B1 {{ item.mount }} --output=avail | sed 1d register: volume_size with_items: "{{ existing_volumes }}" - changed_when: False + changed_when: false - name: Fail if any of the volumes are too small fail: - msg: > - Minimum free space required for {{ item.item.mount }}: {{ item.item.min_size }}G - - current free space: {{ (item.stdout|int / const_bytes_in_gb|int) |round(1) }}G + msg: > + Minimum free space required for {{ item.item.mount }}: {{ item.item.min_size }}G + - current free space: {{ (item.stdout|int / const_bytes_in_gb|int) |round(1) }}G when: > item.stdout|int / const_bytes_in_gb|int < item.item.min_size|int with_items: "{{ volume_size.results }}" diff --git a/roles/undercloud-heat-purge-deleted/molecule/default/molecule.yml b/roles/undercloud-heat-purge-deleted/molecule/default/molecule.yml index 656d826fd..82496abe9 100644 --- 
a/roles/undercloud-heat-purge-deleted/molecule/default/molecule.yml +++ b/roles/undercloud-heat-purge-deleted/molecule/default/molecule.yml @@ -8,7 +8,7 @@ platforms: - name: centos7 hostname: centos7 image: centos:7 - override_command: True + override_command: true command: python -m SimpleHTTPServer 8787 pkg_extras: python-setuptools python-enum34 python-netaddr epel-release ruby PyYAML easy_install: @@ -20,7 +20,7 @@ platforms: - name: fedora28 hostname: fedora28 image: fedora:28 - override_command: True + override_command: true command: python3 -m http.server 8787 pkg_extras: python*-setuptools python*-enum python*-netaddr ruby PyYAML environment: diff --git a/roles/undercloud-heat-purge-deleted/molecule/default/playbook.yml b/roles/undercloud-heat-purge-deleted/molecule/default/playbook.yml index aa82c166a..c9618ee8f 100644 --- a/roles/undercloud-heat-purge-deleted/molecule/default/playbook.yml +++ b/roles/undercloud-heat-purge-deleted/molecule/default/playbook.yml @@ -17,7 +17,7 @@ - name: Converge hosts: all - gather_facts: no + gather_facts: false tasks: - name: working detection diff --git a/roles/undercloud-heat-purge-deleted/molecule/default/prepare.yml b/roles/undercloud-heat-purge-deleted/molecule/default/prepare.yml index ed46780a8..a9cf46a6c 100644 --- a/roles/undercloud-heat-purge-deleted/molecule/default/prepare.yml +++ b/roles/undercloud-heat-purge-deleted/molecule/default/prepare.yml @@ -17,7 +17,7 @@ - name: Prepare hosts: all - gather_facts: no + gather_facts: false tasks: - name: install hiera diff --git a/roles/undercloud-heat-purge-deleted/tasks/main.yml b/roles/undercloud-heat-purge-deleted/tasks/main.yml index e340c9445..3d71a321a 100644 --- a/roles/undercloud-heat-purge-deleted/tasks/main.yml +++ b/roles/undercloud-heat-purge-deleted/tasks/main.yml @@ -18,7 +18,7 @@ set -o pipefail {{ container_cli.value|default('podman', true) }} exec heat_api_cron crontab -l -u heat |grep -v '^#' register: cron_result - changed_when: False + 
changed_when: false - name: Check heat crontab fail: diff --git a/roles/undercloud-heat-purge-deleted/vars/main.yaml b/roles/undercloud-heat-purge-deleted/vars/main.yaml index b773cb4a1..558c3b0c2 100644 --- a/roles/undercloud-heat-purge-deleted/vars/main.yaml +++ b/roles/undercloud-heat-purge-deleted/vars/main.yaml @@ -6,5 +6,5 @@ metadata: heat database can grow very large. This validation checks that the purge_deleted crontab has been set up. groups: - - pre-upgrade - - pre-deployment + - pre-upgrade + - pre-deployment diff --git a/roles/undercloud-process-count/tasks/main.yml b/roles/undercloud-process-count/tasks/main.yml index 690155e5c..0d691c310 100644 --- a/roles/undercloud-process-count/tasks/main.yml +++ b/roles/undercloud-process-count/tasks/main.yml @@ -16,26 +16,26 @@ - name: Collect the number of running processes per OpenStack service command: "{{ container_cli.value|default('podman', true) }} exec {{ item.container }} pgrep -f -c {{ item.proc }}" become: true - ignore_errors: yes + ignore_errors: true register: "process_count" - changed_when: False + changed_when: false loop: - - {container: "heat_engine", proc: "heat-engine"} - - {container: "ironic_inspector", proc: "ironic-inspector"} - - {container: "ironic_conductor", proc: "ironic-conductor"} - - {container: "nova_api", proc: "nova_api"} - - {container: "nova_scheduler", proc: "nova-scheduler"} - - {container: "nova_conductor", proc: "nova-conductor"} - - {container: "nova_compute", proc: "nova-compute"} - - {container: "glance_api", proc: "glance-api"} - - {container: "swift_proxy", proc: "swift-proxy-server"} - - {container: "swift_object_server", proc: "swift-object-server"} + - {container: "heat_engine", proc: "heat-engine"} + - {container: "ironic_inspector", proc: "ironic-inspector"} + - {container: "ironic_conductor", proc: "ironic-conductor"} + - {container: "nova_api", proc: "nova_api"} + - {container: "nova_scheduler", proc: "nova-scheduler"} + - {container: "nova_conductor", proc: 
"nova-conductor"} + - {container: "nova_compute", proc: "nova-compute"} + - {container: "glance_api", proc: "glance-api"} + - {container: "swift_proxy", proc: "swift-proxy-server"} + - {container: "swift_object_server", proc: "swift-object-server"} - {container: "swift_container_server", proc: "swift-container-server"} - - {container: "zaqar", proc: "zaqar"} - - {container: "zaqar_websocket", proc: "zaqar-server"} - - {container: "mistral_api", proc: "mistral-server"} - - {container: "mistral_engine", proc: "mistral-server"} - - {container: "mistral_executor", proc: "mistral-server"} + - {container: "zaqar", proc: "zaqar"} + - {container: "zaqar_websocket", proc: "zaqar-server"} + - {container: "mistral_api", proc: "mistral-server"} + - {container: "mistral_engine", proc: "mistral-server"} + - {container: "mistral_executor", proc: "mistral-server"} - name: Create warning messages command: echo "There are {{ item.stdout }} {{ item.item }} processes running. Having more than {{ max_process_count }} risks running out of memory." 
diff --git a/roles/undercloud-selinux-mode/tasks/main.yml b/roles/undercloud-selinux-mode/tasks/main.yml index f8cf774b4..1cd8733ef 100644 --- a/roles/undercloud-selinux-mode/tasks/main.yml +++ b/roles/undercloud-selinux-mode/tasks/main.yml @@ -1,9 +1,9 @@ --- -- name: Get current SELinux mode +- name: Get current SELinux mode command: getenforce become: true register: sestatus - changed_when: False + changed_when: false - name: Fail if SELinux is not in Enforced mode (RHEL) fail: diff --git a/roles/undercloud-service-status/tasks/main.yml b/roles/undercloud-service-status/tasks/main.yml index da8704df7..8291e81f4 100644 --- a/roles/undercloud-service-status/tasks/main.yml +++ b/roles/undercloud-service-status/tasks/main.yml @@ -4,7 +4,7 @@ become: true with_items: "{{ undercloud_service_list }}" register: "check_services" - changed_when: False + changed_when: false ignore_errors: true - name: Fail if services were not running diff --git a/roles/undercloud-tokenflush/molecule/default/molecule.yml b/roles/undercloud-tokenflush/molecule/default/molecule.yml index 656d826fd..82496abe9 100644 --- a/roles/undercloud-tokenflush/molecule/default/molecule.yml +++ b/roles/undercloud-tokenflush/molecule/default/molecule.yml @@ -8,7 +8,7 @@ platforms: - name: centos7 hostname: centos7 image: centos:7 - override_command: True + override_command: true command: python -m SimpleHTTPServer 8787 pkg_extras: python-setuptools python-enum34 python-netaddr epel-release ruby PyYAML easy_install: @@ -20,7 +20,7 @@ platforms: - name: fedora28 hostname: fedora28 image: fedora:28 - override_command: True + override_command: true command: python3 -m http.server 8787 pkg_extras: python*-setuptools python*-enum python*-netaddr ruby PyYAML environment: diff --git a/roles/undercloud-tokenflush/molecule/default/playbook.yml b/roles/undercloud-tokenflush/molecule/default/playbook.yml index 04d73133b..ec49612f9 100644 --- a/roles/undercloud-tokenflush/molecule/default/playbook.yml +++ 
b/roles/undercloud-tokenflush/molecule/default/playbook.yml @@ -17,7 +17,7 @@ - name: Converge hosts: all - gather_facts: no + gather_facts: false tasks: - name: working detection diff --git a/roles/undercloud-tokenflush/molecule/default/prepare.yml b/roles/undercloud-tokenflush/molecule/default/prepare.yml index 0fd3fe995..a4d317fba 100644 --- a/roles/undercloud-tokenflush/molecule/default/prepare.yml +++ b/roles/undercloud-tokenflush/molecule/default/prepare.yml @@ -17,7 +17,7 @@ - name: Prepare hosts: all - gather_facts: no + gather_facts: false tasks: - name: install hiera diff --git a/roles/undercloud-tokenflush/tasks/main.yml b/roles/undercloud-tokenflush/tasks/main.yml index 4db7c4580..2dcd54c25 100644 --- a/roles/undercloud-tokenflush/tasks/main.yml +++ b/roles/undercloud-tokenflush/tasks/main.yml @@ -18,7 +18,7 @@ set -o pipefail {{ container_cli.value|default('podman', true) }} exec keystone_cron crontab -l -u keystone |grep -v '^#' register: cron_result - changed_when: False + changed_when: false - name: Check keystone crontab fail: diff --git a/roles/undercloud-tokenflush/vars/main.yaml b/roles/undercloud-tokenflush/vars/main.yaml index 71725c80a..006b66724 100644 --- a/roles/undercloud-tokenflush/vars/main.yaml +++ b/roles/undercloud-tokenflush/vars/main.yaml @@ -6,4 +6,4 @@ metadata: keystone database can grow very large. This validation checks that the keystone token_flush crontab has been set up. 
groups: - - pre-introspection + - pre-introspection diff --git a/roles/validate-selinux/molecule/default/playbook.yml b/roles/validate-selinux/molecule/default/playbook.yml index 1ec3876fe..c08819a59 100644 --- a/roles/validate-selinux/molecule/default/playbook.yml +++ b/roles/validate-selinux/molecule/default/playbook.yml @@ -17,7 +17,7 @@ - name: Converge hosts: all - gather_facts: no + gather_facts: false vars: validate_selinux_working_dir: '/tmp' diff --git a/roles/validate-selinux/molecule/default/prepare.yml b/roles/validate-selinux/molecule/default/prepare.yml index aa045ab9a..6d8f478ad 100644 --- a/roles/validate-selinux/molecule/default/prepare.yml +++ b/roles/validate-selinux/molecule/default/prepare.yml @@ -17,7 +17,7 @@ - name: Prepare hosts: all - gather_facts: no + gather_facts: false tasks: - name: Populate fake clean auditlog @@ -26,6 +26,7 @@ owner: root mode: 0600 group: root + # yamllint disable rule:line-length content: | type=SERVICE_START msg=audit(1575877870.934:286): pid=1 uid=0 auid=4294967295 ses=4294967295 subj=system_u:system_r:init_t:s0 msg='unit=sssd-kcm comm="systemd" exe="/usr/lib/systemd/systemd" hostname=? addr=? terminal=? res=success'UID="root" AUID="unset" type=SERVICE_STOP msg=audit(1575878320.981:287): pid=1 uid=0 auid=4294967295 ses=4294967295 subj=system_u:system_r:init_t:s0 msg='unit=sssd-kcm comm="systemd" exe="/usr/lib/systemd/systemd" hostname=? addr=? terminal=? res=success'UID="root" AUID="unset" @@ -38,12 +39,14 @@ type=SERVICE_START msg=audit(1575878869.915:306): pid=1 uid=0 auid=4294967295 ses=4294967295 subj=system_u:system_r:init_t:s0 msg='unit=fprintd comm="systemd" exe="/usr/lib/systemd/systemd" hostname=? addr=? terminal=? res=success'UID="root" AUID="unset" type=SERVICE_STOP msg=audit(1575878900.615:312): pid=1 uid=0 auid=4294967295 ses=4294967295 subj=system_u:system_r:init_t:s0 msg='unit=fprintd comm="systemd" exe="/usr/lib/systemd/systemd" hostname=? addr=? terminal=? 
res=success'UID="root" AUID="unset" + # yamllint enable rule:line-length - name: Populate unclean auditlog copy: dest: /var/log/audit-unclean.log owner: root mode: 0600 group: root + # yamllint disable rule:line-length content: | type=AVC msg=audit(1575534183.234:4933): avc: denied { write } for pid=11266 comm="iptables" path="pipe:[231496]" dev="pipefs" ino=231496 scontext=system_u:system_r:iptables_t:s0 tcontext=system_u:system_r:certmonger_t:s0 tclass=fifo_file permissive=1 type=AVC msg=audit(1575534183.342:4934): avc: denied { write } for pid=11284 comm="iptables" path="pipe:[231496]" dev="pipefs" ino=231496 scontext=system_u:system_r:iptables_t:s0 tcontext=system_u:system_r:certmonger_t:s0 tclass=fifo_file permissive=1 diff --git a/roles/validate-selinux/vars/main.yml b/roles/validate-selinux/vars/main.yml index d5c33ce10..0a9e924d4 100644 --- a/roles/validate-selinux/vars/main.yml +++ b/roles/validate-selinux/vars/main.yml @@ -19,4 +19,4 @@ # vars, items within this path are considered part of the role and not # intended to be modified. -# All variables within this role should have a prefix of "validate-selinux" \ No newline at end of file +# All variables within this role should have a prefix of "validate-selinux" diff --git a/tox.ini b/tox.ini index d90d8aaf3..ee420cb01 100644 --- a/tox.ini +++ b/tox.ini @@ -84,11 +84,12 @@ deps = -r {toxinidir}/test-requirements.txt -r {toxinidir}/molecule-requirements.txt commands = + python '{toxinidir}/tools/validate-files.py' . {[testenv:ansible-lint]commands} + {[testenv:yamllint]commands} {[testenv:bashate]commands} {[testenv:whitespace]commands} {[testenv:shebangs]commands} -# {[testenv:yamllint]commands} [testenv:releasenotes] deps = -r{toxinidir}/doc/requirements.txt diff --git a/zuul.d/playbooks/pre.yml b/zuul.d/playbooks/pre.yml index 7d63a304d..aa46e5ebd 100644 --- a/zuul.d/playbooks/pre.yml +++ b/zuul.d/playbooks/pre.yml @@ -17,7 +17,7 @@ . 
{{ ansible_user_dir }}/test-python/bin/activate {{ ansible_user_dir }}/{{ zuul.project.src_dir }}/scripts/bindep-install become: true - changed_when: False + changed_when: false - name: Setup test-python pip: diff --git a/zuul.d/playbooks/run.yml b/zuul.d/playbooks/run.yml index a0a04e8d4..07006f41e 100644 --- a/zuul.d/playbooks/run.yml +++ b/zuul.d/playbooks/run.yml @@ -16,4 +16,4 @@ args: chdir: "{{ ansible_user_dir }}/{{ zuul.project.src_dir }}/roles/{{ tripleo_validations_role_name }}" executable: /bin/bash - changed_when: False + changed_when: false