Validate the playbooks metadata structure

This patch adds a custom ansible-lint rule to enforce the structure of
the validations playbooks:

*ValidationHasMetadataRule*: Throws an ansible-lint error if:

- the *hosts* key is empty or not found,
- the *vars* dictionary is missing,
- the *metadata* dict is missing in *vars*,
- the *name*/*description*/*groups* keys are missing or found with a
  wrong data type,
- the validation belongs to one or several groups NOT in the official
  list of groups (groups.yaml).

*YAMLLINT*:

- Enable the yamllint check in the tox linters
- WIP: Fix the detected yamllint errors

Change-Id: If233286aa9f4299f02f13dc34f1e8c05d89df851
Signed-off-by: Gael Chamoulaud <gchamoul@redhat.com>
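For reference, a validation playbook that passes the new rule has the shape below; it mirrors the `info` docstring of the rule itself. The host, role, and group names are illustrative only, since every entry under `groups` must match a top-level key of the groups.yaml file at the root of the tripleo-validations project:

    ---
    - hosts: undercloud
      vars:
        metadata:
          # name and description are mandatory and must be strings
          name: Validation Name
          description: >
            A full description of the validation.
          # groups is mandatory and must be a list; each entry has to
          # exist as a key in groups.yaml (e.g. pre-deployment)
          groups:
            - pre-deployment
      roles:
        - some-validation-role  # hypothetical role name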
parent bf65faa2c3
commit e50e1a067d
.ansible-lint
@@ -2,6 +2,8 @@ exclude_paths:
   - releasenotes/
 parseable: true
 quiet: false
+rulesdir:
+  - .ansible-lint_rules/
 skip_list:
   # Lines should be no longer than 120 chars.
   - '204'
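The new rulesdir entry points ansible-lint at .ansible-lint_rules/, the directory added below, so the custom ValidationHasMetadataRule (id 750) is loaded and enforced alongside the built-in rules on every lint run.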
.ansible-lint_rules/ValidationHasMetadataRule.py (new file, 138 lines)
@@ -0,0 +1,138 @@
import os

import six
import yaml

from ansiblelint import AnsibleLintRule


class ValidationHasMetadataRule(AnsibleLintRule):
    id = '750'
    shortdesc = 'Validation playbook must have mandatory metadata'

    info = """
---
- hosts: localhost
  vars:
    metadata:
      name: Validation Name
      description: >
        A full description of the validation.
      groups:
        - group1
        - group2
        - group3
"""

    description = (
        "The Validation playbook must have mandatory metadata:\n"
        "```{}```".format(info)
    )

    severity = 'HIGH'
    tags = ['metadata']

    no_vars_found = "The validation playbook must contain a 'vars' dictionary"
    no_meta_found = (
        "The validation playbook must contain "
        "a 'metadata' dictionary under vars"
    )
    no_groups_found = \
        "*metadata* should contain a list of groups"

    unknown_groups_found = (
        "Unknown group(s) '{}' found! "
        "The official list of groups is '{}'. "
        "To add a new validation group, please add it in the groups.yaml "
        "file at the root of the tripleo-validations project."
    )

    def get_groups(self):
        """Returns a list of group names supported by
        tripleo-validations by reading the 'groups.yaml'
        file located in the base directory.
        """
        results = []

        grp_file_path = os.path.abspath('groups.yaml')

        with open(grp_file_path, "r") as grps:
            contents = yaml.safe_load(grps)

        for grp_name, grp_desc in sorted(contents.items()):
            results.append(grp_name)

        return results

    # ansible-lint calls matchplay() for each play of a parsed playbook.
    def matchplay(self, file, data):
        results = []
        path = file['path']

        if file['type'] == 'playbook':
            if path.startswith("playbooks/") or \
                    path.find("tripleo-validations/playbooks/") > 0:

                # *hosts* line check
                hosts = data.get('hosts', None)
                if not hosts:
                    return [({
                        path: data
                    }, "No *hosts* key found in the playbook")]

                # *vars* lines check
                vars = data.get('vars', None)
                if not vars:
                    return [({
                        path: data
                    }, self.no_vars_found)]
                else:
                    if not isinstance(vars, dict):
                        return [({path: data}, '*vars* should be a dictionary')]

                # *metadata* lines check
                metadata = data['vars'].get('metadata', None)
                if metadata:
                    if not isinstance(metadata, dict):
                        return [(
                            {path: data},
                            '*metadata* should be a dictionary')]
                else:
                    return [({path: data}, self.no_meta_found)]

                # *metadata>[name|description]* lines check
                for info in ['name', 'description']:
                    if not metadata.get(info, None):
                        results.append((
                            {path: data},
                            '*metadata* should contain a %s key' % info))
                        continue
                    if not isinstance(metadata.get(info),
                                      six.string_types):
                        results.append((
                            {path: data},
                            '*%s* should be a string' % info))

                # *metadata>groups* lines check
                if not metadata.get('groups', None):
                    results.append((
                        {path: data},
                        self.no_groups_found))
                else:
                    if not isinstance(metadata.get('groups'), list):
                        results.append((
                            {path: data},
                            '*groups* should be a list'))
                    else:
                        groups = metadata.get('groups')
                        group_list = self.get_groups()
                        unknown_groups_list = list(
                            set(groups) - set(group_list))
                        if unknown_groups_list:
                            results.append((
                                {path: data},
                                self.unknown_groups_found.format(
                                    unknown_groups_list,
                                    group_list)
                            ))
                            return results

        return results
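Note that get_groups() only inspects the top-level keys of groups.yaml; the values are never read by the rule. Any mapping of group name to description therefore satisfies it. A minimal sketch, with hypothetical entries (the real file ships at the root of the tripleo-validations project):

    ---
    # Only the keys matter to ValidationHasMetadataRule; get_groups()
    # returns them sorted. The entries below are illustrative.
    pre-deployment:
      description: Validations to run before deploying the overcloud.
    post-deployment:
      description: Validations to run after the overcloud is deployed.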
.yamllint (new file, 11 lines)
@@ -0,0 +1,11 @@
---
extends: default

rules:
  line-length:
    # matches hardcoded 160 value from ansible-lint
    max: 160

ignore: |
  zuul.d/*.yaml
  releasenotes/notes/*.yaml
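This configuration extends yamllint's default ruleset, raises the line-length limit to 160 characters so it matches the value ansible-lint hardcodes, and skips the zuul.d and release-note YAML files; the tox linters environment mentioned in the commit message is what is expected to invoke yamllint with it.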
@@ -10,7 +10,7 @@
     fail_without_deps: true
     tripleo_delegate_to: "{{ groups['overcloud'] | default([]) }}"
     packages:
-    - lvm2
+      - lvm2
   tasks:
     - include_role:
         name: ceph

@@ -1,6 +1,6 @@
 ---
 - hosts: undercloud
-  gather_facts: yes
+  gather_facts: true
   vars:
     metadata:
       name: Check if latest version of packages is installed

@@ -7,7 +7,7 @@
       This validation checks the flavors assigned to roles exist and have the
       correct capabilities set.
     groups:
-    - pre-deployment
-    - pre-upgrade
+      - pre-deployment
+      - pre-upgrade
   roles:
     - collect-flavors-and-verify-profiles

@@ -7,7 +7,7 @@
       This validation checks that keystone admin token is disabled on both
       undercloud and overcloud controller after deployment.
     groups:
-    - post-deployment
+      - post-deployment
     keystone_conf_file: "/var/lib/config-data/puppet-generated/keystone/etc/keystone/keystone.conf"
   roles:
     - controller-token

@@ -6,7 +6,7 @@
     description: >
       This will check the ulimits of each controller.
     groups:
-    - post-deployment
+      - post-deployment
     nofiles_min: 1024
     nproc_min: 2048
   roles:

@@ -7,6 +7,6 @@
       This validation checks that the nodes and hypervisor statistics
       add up.
     groups:
-    - pre-deployment
+      - pre-deployment
   roles:
     - default-node-count
@@ -17,12 +17,12 @@
     # will be passed to the Neutron services. The order is important
     # here: the values in later files take precedence.
     configs:
-    - /etc/neutron/neutron.conf
-    - /usr/share/neutron/neutron-dist.conf
-    - /etc/neutron/metadata_agent.ini
-    - /etc/neutron/dhcp_agent.ini
-    - /etc/neutron/fwaas_driver.ini
-    - /etc/neutron/l3_agent.ini
+      - /etc/neutron/neutron.conf
+      - /usr/share/neutron/neutron-dist.conf
+      - /etc/neutron/metadata_agent.ini
+      - /etc/neutron/dhcp_agent.ini
+      - /etc/neutron/fwaas_driver.ini
+      - /etc/neutron/l3_agent.ini
 
   roles:
     - neutron-sanity-check

@@ -7,7 +7,7 @@
       When using Neutron, the `firewall_driver` option in Nova must be set to
       `NoopFirewallDriver`.
     groups:
-    - post-deployment
+      - post-deployment
     nova_conf_path: "/var/lib/config-data/puppet-generated/nova_libvirt/etc/nova/nova.conf"
   roles:
     - no-op-firewall-nova-driver

@@ -9,6 +9,6 @@
       The deployment should configure and run chronyd. This validation verifies
       that it is indeed running and connected to an NTP server on all nodes.
     groups:
-    - post-deployment
+      - post-deployment
   roles:
     - ntp
@@ -12,7 +12,7 @@
       - Are images named centos or rhel available?
       - Are there sufficient compute resources available for a default setup? (1 Master node, 1 Infra node, 2 App nodes)
     groups:
-    - openshift-on-openstack
+      - openshift-on-openstack
     min_total_ram_testing: 16384  # 4 per node
     min_total_vcpus_testing: 4  # 1 per node
     min_total_disk_testing: 93  # Master: 40, others: 17 per node

@@ -23,8 +23,8 @@
     min_node_disk_testing: 40  # Minimum disk per node for testing
     min_node_ram_prod: 16384  # Minimum ram per node for production
     min_node_disk_prod: 42  # Minimum disk per node for production
-    resource_reqs_testing: False
-    resource_reqs_prod: False
+    resource_reqs_testing: false
+    resource_reqs_prod: false
   tasks:
     - include_role:
         name: openshift-on-openstack

@@ -7,7 +7,7 @@
       Checks if an external network has been configured on the overcloud as
       required for an OpenShift deployment on top of OpenStack.
     groups:
-    - openshift-on-openstack
+      - openshift-on-openstack
   tasks:
     - include_role:
         name: openshift-on-openstack

@@ -8,8 +8,8 @@
       This validation gets the PublicVip address from the deployment and
       tries to access Horizon and get a Keystone token.
     groups:
-    - post-deployment
-    - pre-upgrade
-    - post-upgrade
+      - post-deployment
+      - pre-upgrade
+      - post-upgrade
   roles:
     - openstack-endpoints

@@ -1,6 +1,6 @@
 ---
 - hosts: undercloud, overcloud
-  gather_facts: yes
+  gather_facts: true
   vars:
     metadata:
       name: Check correctness of current repositories
@@ -7,7 +7,8 @@
       Verify that stonith devices are configured for your OpenStack Platform HA cluster.
       We don't configure a stonith device with the TripleO installer, because the hardware
       configuration may differ in each environment and require different fence agents.
-      To learn how to configure fencing, please read https://access.redhat.com/documentation/en/red-hat-openstack-platform/8/paged/director-installation-and-usage/86-fencing-the-controller-nodes
+      To learn how to configure fencing, please read
+      https://access.redhat.com/documentation/en/red-hat-openstack-platform/8/paged/director-installation-and-usage/86-fencing-the-controller-nodes
     groups:
       - post-deployment
   roles:

@@ -8,7 +8,7 @@
       and that all certs being tracked by certmonger are in the
       MONITORING state.
     groups:
-    - post-deployment
+      - post-deployment
   tasks:
     - include_role:
         name: tls-everywhere

@@ -7,7 +7,7 @@
       Checks that the undercloud has novajoin set up correctly and
       that we are ready to do the overcloud deploy with tls-everywhere.
     groups:
-    - pre-deployment
+      - pre-deployment
   tasks:
     - include_role:
         name: tls-everywhere

@@ -7,7 +7,7 @@
       Checks that the undercloud is ready to set up novajoin and
       to register to IdM as a client as part of undercloud-install.
     groups:
-    - prep
+      - prep
   tasks:
     - include_role:
         name: tls-everywhere
@@ -1,6 +1,6 @@
 ---
 - hosts: undercloud
-  gather_facts: yes
+  gather_facts: true
   vars:
     metadata:
       name: Verify undercloud fits the CPU core requirements

@@ -11,10 +11,10 @@
     groups:
       - pre-upgrade
     volumes:
-      - {mount: /var/lib/docker, min_size: 10}
+      - {mount: /var/lib/docker, min_size: 10}
       - {mount: /var/lib/config-data, min_size: 3}
-      - {mount: /var, min_size: 16}
-      - {mount: /, min_size: 20}
+      - {mount: /var, min_size: 16}
+      - {mount: /, min_size: 20}
 
   roles:
     - undercloud-disk-space

@@ -12,12 +12,12 @@
       - prep
       - pre-introspection
     volumes:
-      - {mount: /var/lib/docker, min_size: 10}
+      - {mount: /var/lib/docker, min_size: 10}
       - {mount: /var/lib/config-data, min_size: 3}
-      - {mount: /var/log, min_size: 3}
-      - {mount: /usr, min_size: 5}
-      - {mount: /var, min_size: 20}
-      - {mount: /, min_size: 25}
+      - {mount: /var/log, min_size: 3}
+      - {mount: /usr, min_size: 5}
+      - {mount: /var, min_size: 20}
+      - {mount: /, min_size: 25}
 
   roles:
     - undercloud-disk-space

@@ -8,8 +8,8 @@
       heat database can grow very large. This validation checks that
       the purge_deleted crontab has been set up.
     groups:
-    - pre-upgrade
-    - pre-deployment
+      - pre-upgrade
+      - pre-deployment
     cron_check: "heat-manage purge_deleted"
   roles:
     - undercloud-heat-purge-deleted
@@ -17,13 +17,13 @@
     # will be passed to the Neutron services. The order is important
     # here: the values in later files take precedence.
     configs:
-    - /etc/neutron/neutron.conf
-    - /usr/share/neutron/neutron-dist.conf
-    - /etc/neutron/metadata_agent.ini
-    - /etc/neutron/dhcp_agent.ini
-    - /etc/neutron/plugins/ml2/openvswitch_agent.ini
-    - /etc/neutron/fwaas_driver.ini
-    - /etc/neutron/l3_agent.ini
+      - /etc/neutron/neutron.conf
+      - /usr/share/neutron/neutron-dist.conf
+      - /etc/neutron/metadata_agent.ini
+      - /etc/neutron/dhcp_agent.ini
+      - /etc/neutron/plugins/ml2/openvswitch_agent.ini
+      - /etc/neutron/fwaas_driver.ini
+      - /etc/neutron/l3_agent.ini
 
   roles:
     - neutron-sanity-check

@@ -1,6 +1,6 @@
 ---
 - hosts: undercloud
-  gather_facts: yes
+  gather_facts: true
   vars:
     metadata:
       name: Verify the undercloud fits the RAM requirements

@@ -1,6 +1,6 @@
 ---
 - hosts: undercloud
-  gather_facts: yes
+  gather_facts: true
   vars:
     metadata:
       name: Undercloud SELinux Enforcing Mode Check

@@ -8,7 +8,7 @@
       keystone database can grow very large. This validation checks that
       the keystone token_flush crontab has been set up.
     groups:
-    - pre-introspection
+      - pre-introspection
     cron_check: "keystone-manage token_flush"
   roles:
     - undercloud-tokenflush
@@ -2,7 +2,7 @@
 - name: List the available drives
   register: drive_list
   command: "ls /sys/class/block/"
-  changed_when: False
+  changed_when: false
 
 - name: Detect whether the drive uses Advanced Format
   advanced_format: drive={{ item }}

@@ -4,4 +4,3 @@ fail_without_deps: false
 fail_on_ceph_health_err: false
 osd_percentage_min: 0
 ceph_ansible_repo: "centos-ceph-nautilus"
-

@@ -2,9 +2,9 @@
 - name: Check if ceph-ansible is installed
   shell: rpm -q ceph-ansible || true
   args:
-    warn: no
-  changed_when: False
-  ignore_errors: True
+    warn: false
+  changed_when: false
+  ignore_errors: true
   register: ceph_ansible_installed
 
 - name: Warn about missing ceph-ansible

@@ -24,7 +24,7 @@
 - name: Get ceph-ansible repository
   shell: "yum info ceph-ansible | awk '/From repo/ {print $4}'"
   register: repo
-  changed_when: False
+  changed_when: false
 
 - name: Fail if ceph-ansible doesn't belong to the specified repo
   fail:

@@ -32,4 +32,3 @@
   when:
     - (repo.stdout | length == 0 or repo.stdout != "{{ ceph_ansible_repo }}")
     - fail_without_ceph_ansible|default(false)|bool
-
@@ -4,64 +4,65 @@
  shell: hiera -c /etc/puppet/hiera.yaml enabled_services | egrep -sq ceph_mon
  ignore_errors: true
  register: ceph_mon_enabled
  changed_when: false

- when: "ceph_mon_enabled is succeeded"
  block:
    - name: Set container_cli fact from the inventory
      set_fact:
        container_cli: "{{ hostvars[inventory_hostname].container_cli|default('podman') }}"

    - name: Set container filter format
      set_fact:
        container_filter_format: !unsafe "--format '{{ .Names }}'"

    - name: Set ceph_mon_container name
      become: true
      shell: "{{ container_cli }} ps {{ container_filter_format }} | grep ceph-mon"
      register: ceph_mon_container
      changed_when: false

    - name: Set ceph cluster name
      become: true
      shell: find /etc/ceph -name '*.conf' -prune -print -quit | xargs basename -s '.conf'
      register: ceph_cluster_name
      changed_when: false

    - name: Get ceph health
      become: true
      shell: "{{ container_cli }} exec {{ ceph_mon_container.stdout }} ceph --cluster {{ ceph_cluster_name.stdout }} health | awk '{print $1}'"
      register: ceph_health

    - name: Check ceph health
      warn:
        msg: Ceph is in {{ ceph_health.stdout }} state.
      when:
        - ceph_health.stdout != 'HEALTH_OK'
        - not fail_on_ceph_health_err|default(true)|bool

    - name: Fail if ceph health is HEALTH_ERR
      fail:
        msg: Ceph is in {{ ceph_health.stdout }} state.
      when:
        - ceph_health.stdout == 'HEALTH_ERR'
        - fail_on_ceph_health_err|default(true)|bool

    - when:
        - osd_percentage_min|default(0) > 0
      block:
        - name: set jq osd percentage filter
          set_fact:
            jq_osd_percentage_filter: '( (.num_in_osds) / (.num_osds) ) * 100'

        - name: Get OSD stat percentage
          become: true
          shell: >-
            "{{ container_cli }}" exec "{{ ceph_mon_container.stdout }}" ceph
            --cluster "{{ ceph_cluster_name.stdout }}" osd stat -f json | jq '{{ jq_osd_percentage_filter }}'
          register: ceph_osd_in_percentage

        - name: Fail if there is an unacceptable percentage of in OSDs
          fail:
            msg: "Only {{ ceph_osd_in_percentage.stdout|float }}% of OSDs are in, but {{ osd_percentage_min|default(0) }}% are required"
          when:
            - ceph_osd_in_percentage.stdout|float < osd_percentage_min|default(0)
@@ -17,7 +17,7 @@
 
 - name: Prepare
   hosts: all
-  gather_facts: no
+  gather_facts: false
 
   tasks:
     - name: install patch rpm

@@ -17,7 +17,7 @@
 
 - name: Converge
   hosts: all
-  gather_facts: no
+  gather_facts: false
 
   tasks:
     - name: successful check with ctlplane-subnet

@@ -17,7 +17,7 @@
 
 - name: Prepare
   hosts: all
-  gather_facts: no
+  gather_facts: false
 
   tasks:
     - name: install hiera

@@ -5,12 +5,12 @@
     name: "tripleo_undercloud_conf_file"
 
 - name: Get the local_subnet name from the undercloud_conf file
-  become: True
+  become: true
   validations_read_ini:
     path: "{{ tripleo_undercloud_conf_file }}"
     section: DEFAULT
     key: local_subnet
-    ignore_missing_file: True
+    ignore_missing_file: true
   register: local_subnet
 
 - name: Get gateway value from the undercloud.conf file

@@ -19,7 +19,7 @@
     path: "{{ tripleo_undercloud_conf_file }}"
     section: "{% if local_subnet.value %}{{ local_subnet.value }}{% else %}ctlplane-subnet{% endif %}"
     key: gateway
-    ignore_missing_file: True
+    ignore_missing_file: true
   register: gateway
 
 - name: Get local_ip value from the undercloud.conf file

@@ -28,7 +28,7 @@
     path: "{{ tripleo_undercloud_conf_file }}"
     section: DEFAULT
     key: local_ip
-    ignore_missing_file: True
+    ignore_missing_file: true
   register: local_ip
 
 - name: Test network_gateway if different from local_ip
@@ -5,5 +5,5 @@ metadata:
     This validation checks the flavors assigned to roles exist and have the
     correct capabilities set.
   groups:
-  - pre-deployment
-  - pre-upgrade
+    - pre-deployment
+    - pre-upgrade

@@ -8,29 +8,29 @@

- when: "'Undercloud' in group_names"
  block:
    - name: Set container_cli fact from undercloud.conf
      block:
        - name: Get the path of tripleo undercloud config file
          become: true
          hiera:
            name: "tripleo_undercloud_conf_file"

        - name: Get container client from undercloud.conf
          validations_read_ini:
            path: "{{ tripleo_undercloud_conf_file }}"
            section: DEFAULT
            key: container_cli
            ignore_missing_file: true
          register: container_cli

        - name: Set uc_container_cli for the Undercloud
          set_fact:
            uc_container_cli: "{{ container_cli.value|default('podman', true) }}"
          when: uc_container_cli is not defined

- name: Get failed containers for podman
  changed_when: false
  become: true
  command: >
    {% if oc_container_cli is defined %}{{ oc_container_cli }}{% else %}{{ uc_container_cli }}{% endif %}
    {% raw %}
@@ -2,7 +2,7 @@
 - name: gather docker facts
   docker_facts:
     container_filter: status=running
-  become: yes
+  become: true
 
 - name: compare running containers to list
   set_fact:

@@ -25,6 +25,6 @@
     state: started  # Port should be open
     delay: 0  # No wait before first check (sec)
     timeout: 3  # Stop checking after timeout (sec)
-  ignore_errors: yes
+  ignore_errors: true
   loop: "{{ open_ports }}"
   when: ctlplane_ip is defined

@@ -17,7 +17,7 @@
 
 - name: Converge
   hosts: all
-  gather_facts: no
+  gather_facts: false
 
   tasks:
     - name: pass validation

@@ -5,7 +5,7 @@
     path: "{{ keystone_conf_file }}"
     section: DEFAULT
     key: admin_token
-    ignore_missing_file: True
+    ignore_missing_file: true
   register: token_result
 
 - name: Check if token value is disabled.

@@ -5,4 +5,4 @@ metadata:
     This validation checks that keystone admin token is disabled on both
     undercloud and overcloud controller after deployment.
   groups:
-  - post-deployment
+    - post-deployment

@@ -17,7 +17,7 @@
 
 - name: Converge
   hosts: all
-  gather_facts: no
+  gather_facts: false
 
   vars:
     nofiles_min: 102400
@@ -4,7 +4,7 @@
   # NOTE: `ulimit` is a shell builtin so we have to invoke it like this:
   command: sh -c "ulimit -n"
   register: nofilesval
-  changed_when: False
+  changed_when: false
 
 - name: Check nofiles limit
   fail:

@@ -18,7 +18,7 @@
   # NOTE: `ulimit` is a shell builtin so we have to invoke it like this:
   command: sh -c "ulimit -u"
   register: nprocval
-  changed_when: False
+  changed_when: false
 
 - name: Check nproc limit
   fail:

@@ -17,7 +17,7 @@
 
 - name: Converge
   hosts: all
-  gather_facts: no
+  gather_facts: false
 
   tasks:
     - name: prepare directory tree for hiera

@@ -17,7 +17,7 @@
 
 - name: Prepare
   hosts: all
-  gather_facts: no
+  gather_facts: false
 
   tasks:
     - name: install hiera

@@ -10,7 +10,7 @@
     path: "{{ tripleo_undercloud_conf_file }}"
     section: ctlplane-subnet
     key: dhcp_start
-    ignore_missing_file: True
+    ignore_missing_file: true
     default: "192.0.2.5"
   register: dhcp_start
 

@@ -20,7 +20,7 @@
     path: "{{ tripleo_undercloud_conf_file }}"
     section: ctlplane-subnet
     key: dhcp_end
-    ignore_missing_file: True
+    ignore_missing_file: true
     default: "192.0.2.24"
   register: dhcp_end
 

@@ -5,4 +5,4 @@ metadata:
     This validation checks that the nodes and hypervisor statistics
     add up.
   groups:
-  - pre-deployment
+    - pre-deployment
@@ -1,6 +1,6 @@
 ---
 - name: Look up the introspection interface
-  become: True
+  become: true
   validations_read_ini:
     path: "{{ ironic_inspector_conf }}"
     section: iptables

@@ -8,7 +8,7 @@
   register: interface
 
 - name: Look up the introspection interface from the deprecated option
-  become: True
+  become: true
   validations_read_ini:
     path: "{{ ironic_inspector_conf }}"
     section: firewall

@@ -17,4 +17,4 @@
 
 - name: Look for rogue DHCP servers
   script: files/rogue_dhcp.py {{ interface.value or interface_deprecated.value or 'br-ctlplane' }}
-  changed_when: False
+  changed_when: false

@@ -7,7 +7,7 @@
     path: "{{ tripleo_undercloud_conf_file }}"
     section: DEFAULT
     key: local_interface
-    ignore_missing_file: True
+    ignore_missing_file: true
   register: local_interface
 
 - name: Look for DHCP responses

@@ -1,4 +1,4 @@
 ---
 - name: Ensure DNS resolution works
   command: "getent hosts {{ server_to_lookup }}"
-  changed_when: False
+  changed_when: false

@@ -17,7 +17,7 @@
 
 - name: Converge
   hosts: all
-  gather_facts: no
+  gather_facts: false
 
   vars:
     haproxy_config_file: /haproxy.cfg
@@ -2,7 +2,7 @@
 - name: Get the healthcheck services list enabled on node
   shell: >
     systemctl list-unit-files | grep "^tripleo.*healthcheck.*enabled" | awk -F'.' '{print $1}'
-  changed_when: False
+  changed_when: false
   register: healthcheck_services_list
   when: inflight_healthcheck_services | length < 1
 

@@ -23,7 +23,7 @@
   until:
     - systemd_healthcheck_state.status.ExecMainPID != '0'
     - systemd_healthcheck_state.status.ActiveState in ['inactive', 'failed']
-  ignore_errors: True
+  ignore_errors: true
   register: systemd_healthcheck_state
   with_items: "{{ hc_services }}"
 

@@ -8,7 +8,7 @@ platforms:
   - name: centos7
     hostname: centos7
     image: centos:7
-    override_command: True
+    override_command: true
     command: python -m SimpleHTTPServer 8787
     pkg_extras: python-setuptools python-enum34 python-netaddr epel-release ruby PyYAML
     easy_install:

@@ -20,7 +20,7 @@ platforms:
   - name: fedora28
     hostname: fedora28
     image: fedora:28
-    override_command: True
+    override_command: true
     command: python3 -m http.server 8787
     pkg_extras: python*-setuptools python*-enum python*-netaddr ruby PyYAML
     environment:
@@ -17,7 +17,7 @@
 
 - name: Converge
   hosts: all
-  gather_facts: no
+  gather_facts: false
 
   tasks:
     - name: detect wrong port

@@ -39,7 +39,7 @@
   block:
     - name: run validation for 404
       include_role:
-        name: image-serve
+        name: image-serve
   rescue:
     - name: Clear host errors
       meta: clear_host_errors

@@ -17,7 +17,7 @@
 
 - name: Prepare
   hosts: all
-  gather_facts: no
+  gather_facts: false
 
   tasks:
     - name: install hiera

@@ -10,7 +10,7 @@
     path: "{{ tripleo_undercloud_conf_file }}"
     section: DEFAULT
     key: local_ip
-    ignore_missing_file: True
+    ignore_missing_file: true
   register: local_ip
 
 - name: Set container registry host

@@ -1,3 +1,4 @@
+---
 metadata:
   name: Image-serve availability
   description: Verify that image-serve service is ready

@@ -9,7 +9,7 @@
     "{{ container_cli|default('podman', true) }}" exec -u root
     $("{{ container_cli|default('podman', true) }}" ps -q --filter "name=mysql|galera-bundle" | head -1)
     /bin/bash -c 'ulimit -n'
-  changed_when: False
+  changed_when: false
   register: mysqld_open_files_limit
 
 - name: Test the open-files-limit value
@@ -7,24 +7,24 @@

- when: "'Undercloud' in group_names"
  block:
    - name: Get the path of tripleo undercloud config file
      become: true
      hiera:
        name: "tripleo_undercloud_conf_file"

    - name: Get the Container CLI from the undercloud.conf file
      become: true
      validations_read_ini:
        path: "{{ tripleo_undercloud_conf_file }}"
        section: DEFAULT
        key: container_cli
        ignore_missing_file: true
      register: container_cli

    - name: Set uc_container_cli and container_name for the Undercloud
      set_fact:
        uc_container_cli: "{{ container_cli.value|default('podman', true) }}"
        container_name: "neutron_ovs_agent"

- name: Run neutron-sanity-check
  command: >

@@ -35,7 +35,7 @@
   become: true
   register: nsc_return
   ignore_errors: true
-  changed_when: False
+  changed_when: false
 
 - name: Detect errors
   set_fact:

@@ -17,7 +17,7 @@
 
 - name: Converge
   hosts: all
-  gather_facts: no
+  gather_facts: false
 
   vars:
     nova_conf_path: "/nova.conf"

@@ -48,7 +48,7 @@
     section: DEFAULT
     option: firewall_driver
     value: CHANGEME
-    backup: yes
+    backup: true
 
 - include_role:
     name: no-op-firewall-nova-driver
@@ -5,4 +5,4 @@ metadata:
     When using Neutron, the `firewall_driver` option in Nova must be set to
     `NoopFirewallDriver`.
   groups:
-  - post-deployment
+    - post-deployment

@@ -6,7 +6,7 @@
 
 - name: Ping all overcloud nodes
   icmp_ping:
-    host: "{{ item }}"
+    host: "{{ item }}"
   with_items: "{{ oc_ips.results | map(attribute='ansible_facts.ansible_host') | list }}"
   ignore_errors: true
   register: ping_results

@@ -1,6 +1,6 @@
 ---
 - name: Get VIF Plugging setting values from nova.conf
-  become: True
+  become: true
   validations_read_ini:
     path: "{{ nova_config_file }}"
     section: DEFAULT

@@ -21,14 +21,14 @@
   with_items: "{{ nova_config_result.results }}"
 
 - name: Get auth_url value from hiera
-  become: True
+  become: true
   command: hiera -c /etc/puppet/hiera.yaml neutron::server::notifications::auth_url
-  ignore_errors: True
-  changed_when: False
+  ignore_errors: true
+  changed_when: false
   register: auth_url
 
 - name: Get auth_url value from neutron.conf
-  become: True
+  become: true
   validations_read_ini:
     path: "{{ neutron_config_file }}"
     section: nova

@@ -45,7 +45,7 @@
   failed_when: "neutron_auth_url_result.value != auth_url.stdout"
 
 - name: Get Notify Nova settings values from neutron.conf
-  become: True
+  become: true
   validations_read_ini:
     path: "{{ neutron_config_file }}"
     section: DEFAULT

@@ -63,7 +63,7 @@
   with_items: "{{ neutron_notify_nova_result.results }}"
 
 - name: Get Tenant Name setting value from neutron.conf
-  become: True
+  become: true
   validations_read_ini:
     path: "{{ neutron_config_file }}"
     section: nova
@@ -17,7 +17,7 @@
 
 - name: Converge
   hosts: all
-  gather_facts: no
+  gather_facts: false
 
   tasks:
     - name: working detection

@@ -17,7 +17,7 @@
 
 - name: Prepare
   hosts: all
-  gather_facts: no
+  gather_facts: false
 
   tasks:
     - name: Populate successful podman CLI

@@ -7,7 +7,7 @@
 - name: Check nova upgrade status
   become: true
   command: "{{ container_cli }} exec -u root nova_api nova-status upgrade check"
-  changed_when: False
+  changed_when: false
   register: nova_upgrade_check
 
 - name: Warn if at least one check encountered an issue

@@ -1,26 +1,26 @@
---
- name: Get if chrony is enabled
  become: true
  hiera:
    name: "chrony_enabled"

- when: chrony_enabled|bool
  block:
    - name: Populate service facts
      service_facts:  # needed to make yaml happy

    - name: Fail if chronyd service is not running
      fail:
        msg: "Chronyd service is not running"
      when: "ansible_facts.services['chronyd.service'].state != 'running'"

    - name: Run chronyc
      become: true
      command: chronyc -a 'burst 4/4'
      changed_when: false

# ntpstat returns 0 if synchronised and non-zero otherwise:
- name: Run ntpstat
  command: ntpstat
  changed_when: false
  when: not chrony_enabled|bool

@@ -7,4 +7,4 @@ metadata:
     The deployment should configure and run chronyd. This validation verifies
     that it is indeed running and connected to an NTP server on all nodes.
   groups:
-  - post-deployment
+    - post-deployment
@@ -9,5 +9,5 @@ min_node_ram_testing: 4096  # Minimum ram per node for testing
 min_node_disk_testing: 40  # Minimum disk per node for testing
 min_node_ram_prod: 16384  # Minimum ram per node for production
 min_node_disk_prod: 42  # Minimum disk per node for production
-resource_reqs_testing: False
-resource_reqs_prod: False
+resource_reqs_testing: false
+resource_reqs_prod: false

@@ -23,7 +23,7 @@
       domain:
         id: default
       password: "{{ overcloud_admin_password }}"
-    return_content: yes
+    return_content: true
     status_code: 201
   register: keystone_result
   no_log: true

@@ -53,7 +53,7 @@
     headers:
       X-Auth-Token: "{{ auth_token }}"
       Accept: application/vnd.openstack.compute.v2.1+json
-    return_content: yes
+    return_content: true
     follow_redirects: all
   register: flavors_result_testing
 

@@ -64,7 +64,7 @@
     headers:
       X-Auth-Token: "{{ auth_token }}"
       Accept: application/vnd.openstack.compute.v2.1+json
-    return_content: yes
+    return_content: true
     follow_redirects: all
   register: flavors_result_prod
 

@@ -89,7 +89,7 @@
     headers:
       X-Auth-Token: "{{ auth_token }}"
       Accept: application/vnd.openstack.compute.v2.1+json
-    return_content: yes
+    return_content: true
     follow_redirects: all
   register: hypervisors_result
 

@@ -116,7 +116,7 @@
     method: GET
     headers:
       X-Auth-Token: "{{ auth_token }}"
-    return_content: yes
+    return_content: true
     follow_redirects: all
   register: images
 
@@ -1,7 +1,7 @@
 ---
 - name: Set fact to identify if the overcloud was deployed
   set_fact:
-    overcloud_deployed: "{{ groups['overcloud'] is defined }}"
+    overcloud_deployed: "{{ groups['overcloud'] is defined }}"
 
 - name: Warn if no overcloud deployed yet
   warn:

@@ -12,62 +12,62 @@

- when: overcloud_deployed|bool
  block:
    # Get auth token and service catalog from Keystone and extract service urls.
    - name: Get token and catalog from Keystone
      uri:
        url: "{{ overcloud_keystone_url
                 | urlsplit('scheme') }}://{{ overcloud_keystone_url
                 | urlsplit('netloc') }}/v3/auth/tokens"
        method: POST
        body_format: json
        body:
          auth:
            scope:
              project:
                name: admin
                domain:
                  id: default
            identity:
              methods:
                - password
              password:
                user:
                  name: admin
                  domain:
                    id: default
                  password: "{{ overcloud_admin_password }}"
        return_content: true
        status_code: 201
      register: keystone_result
      no_log: true
      when: overcloud_keystone_url|default('')

    - name: Set auth token
      set_fact: token="{{ keystone_result.x_subject_token }}"

    - name: Get Neutron URL from catalog
      set_fact: neutron_url="{{ keystone_result.json.token
                                | json_query("catalog[?name=='neutron'].endpoints")
                                | first
                                | selectattr('interface', 'equalto', 'public')
                                | map(attribute='url') | first }}"

    # Get overcloud networks from Neutron and check if there is
    # a network with a common name for external networks.
    - name: Get networks from Neutron
      uri:
        url: "{{ neutron_url }}/v2.0/networks?router:external=true"
        method: GET
        headers:
          X-Auth-Token: "{{ token }}"
        return_content: true
        follow_redirects: all
      register: networks_result

    - name: Warn if there are no matching networks
      warn:
        msg: |
          No external network found. It is strongly recommended that you
          configure an external Neutron network with a floating IP address
          pool.
      when: networks_result.json.networks | length == 0
@@ -1,7 +1,7 @@
 ---
 - name: Set fact to identify if the overcloud was deployed
   set_fact:
-    overcloud_deployed: "{{ groups['overcloud'] is defined }}"
+    overcloud_deployed: "{{ groups['overcloud'] is defined }}"
 
 # Check that the Horizon endpoint exists
 - name: Fail if the HorizonPublic endpoint is not defined

@@ -30,7 +30,7 @@
 
 # Check that we can obtain an auth token from horizon
 - name: Check Keystone
-  no_log: True
+  no_log: true
   uri:
     url: "{{ overcloud_keystone_url | urlsplit('scheme') }}://{{ overcloud_keystone_url | urlsplit('netloc') }}/v3/auth/tokens"
     method: POST

@@ -46,7 +46,7 @@
       domain:
         name: Default
       password: "{{ overcloud_admin_password }}"
-    return_content: yes
+    return_content: true
     status_code: 201
   register: auth_token
   when: overcloud_keystone_url|default('')
@@ -1,12 +1,12 @@
 ---
 - name: Get OVS DPDK PMD cores mask value
   become_method: sudo
-  become: True
+  become: true
   register: pmd_cpu_mask
   command: ovs-vsctl --no-wait get Open_vSwitch . other_config:pmd-cpu-mask
-  changed_when: False
+  changed_when: false
 
 - name: Run OVS DPDK PMD cores check
-  become: True
+  become: true
   ovs_dpdk_pmd_cpus_check:
     pmd_cpu_mask: "{{ pmd_cpu_mask.stdout }}"

@@ -1,10 +1,10 @@
 ---
 - name: Check pacemaker service is running
-  become: True
+  become: true
   command: "/usr/bin/systemctl show pacemaker --property ActiveState"
   register: check_service
-  changed_when: False
-  ignore_errors: True
+  changed_when: false
+  ignore_errors: true
 
 - when: "check_service.stdout == 'ActiveState=active'"
   block:

@@ -12,7 +12,7 @@
       become: true
       command: pcs status xml
       register: pcs_status
-      changed_when: False
+      changed_when: false
     - name: Check pacemaker status
       pacemaker:
         status: "{{ pcs_status.stdout }}"
@@ -17,7 +17,7 @@
 
 - name: Converge
   hosts: all
-  gather_facts: no
+  gather_facts: false
 
   tasks:
     - name: working detection

@@ -17,7 +17,7 @@
 
 - name: Prepare
   hosts: all
-  gather_facts: no
+  gather_facts: false
 
   tasks:
     - name: Populate successful podman CLI

@@ -30,7 +30,7 @@
         name: faulty
         description: really faulty repository
         baseurl: http://this.repository.do-not.exists/like-not-at-all
-        enabled: yes
+        enabled: true
 
     - name: execute role
       include_role:

@@ -56,7 +56,7 @@
         name: faulty-bis
         description: faulty repository with working DNS
         baseurl: http://download.fedoraproject.org/pub/fedora/blah
-        enabled: yes
+        enabled: true
 
     - name: execute role
       include_role:
@@ -1,11 +1,11 @@
 ---
 - name: List repositories
-  become: True
+  become: true
   shell: |
     {{ ansible_pkg_mgr }} repolist enabled -v 2>&1 || exit 0
   args:
-    warn: no
-  changed_when: False
+    warn: false
+  changed_when: false
   register: repositories
 
 - name: Fail if we detect error in repolist output

@@ -16,7 +16,7 @@
     repositories.stdout is regex('(cannot|could not|failure)', ignorecase=True)
 
 - name: Find repository IDs
-  changed_when: False
+  changed_when: false
   shell: 'echo "{{ repositories.stdout }}" | grep Repo-id | sed "s/Repo-id.*://" | tr -d " "'
   register: repository_ids
 

@@ -25,5 +25,5 @@
     msg: Found unwanted repository {{ item.0 }} enabled
   when: item.0 == item.1
   with_nested:
-    - [ 'epel/x86_64' ]
+    - ['epel/x86_64']
    - "{{ repository_ids.stdout_lines }}"
@@ -1,27 +0,0 @@
-galaxy_info:
-  author: TripleO Validations Team
-  company: Red Hat
-  license: Apache
-  min_ansible_version: 2.4
-
-  platforms:
-    - name: CentOS
-      versions:
-        - 7
-    - name: RHEL
-      versions:
-        - 7
-
-  categories:
-    - cloud
-    - baremetal
-    - system
-galaxy_tags: []
-# List tags for your role here, one per line. A tag is a keyword that describes
-# and categorizes the role. Users find roles by searching for tags. Be sure to
-# remove the '[]' above, if you add tags to this list.
-#
-# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
-# Maximum 20 tags per role.
-
-dependencies: []
@@ -4,7 +4,7 @@
     systemctl list-units --failed --plain --no-legend --no-pager |
     awk '{print $1}'
   register: systemd_status
-  changed_when: False
+  changed_when: false
 
 - name: Fails if we find failed units
   assert:

@@ -17,7 +17,7 @@
 
 - name: Prepare
   hosts: all
-  gather_facts: no
+  gather_facts: false
 
   tasks:
     - name: Populate successful stonith

@@ -1,13 +1,13 @@
 ---
 - name: Check if we are in HA cluster environment
-  become: True
+  become: true
   register: pcs_cluster_status
   command: pcs cluster status
   failed_when: false
   changed_when: false
 
 - name: Get all currently configured stonith devices
-  become: True
+  become: true
   command: "pcs stonith"
   register: stonith_devices
   changed_when: false
@@ -5,6 +5,7 @@ metadata:
     Verify that stonith devices are configured for your OpenStack Platform HA cluster.
     We don't configure a stonith device with the TripleO installer, because the hardware
     configuration may differ in each environment and require different fence agents.
-    To learn how to configure fencing, please read https://access.redhat.com/documentation/en/red-hat-openstack-platform/8/paged/director-installation-and-usage/86-fencing-the-controller-nodes
+    To learn how to configure fencing, please read
+    https://access.redhat.com/documentation/en/red-hat-openstack-platform/8/paged/director-installation-and-usage/86-fencing-the-controller-nodes
   groups:
     - post-deployment

@@ -127,19 +127,19 @@
     path: "/etc/ipa/default.conf"
     section: global
     key: realm
-    ignore_missing_file: False
+    ignore_missing_file: false
   register: ipa_realm
-  check_mode: no
+  check_mode: false
 
 - name: Set fact for IdM/FreeIPA host entry
   set_fact:
     host_entry: "{{ ansible_fqdn }}@{{ ipa_realm.value }}"
-  when: ipa_conf_stat.stat.exists
+  when: ipa_conf_stat.stat.exists
 
 - name: Set fact for IdM/FreeIPA host principal
   set_fact:
     host_principal: "host/{{ host_entry }}"
-  when: ipa_conf_stat.stat.exists
+  when: ipa_conf_stat.stat.exists
 
 # Kerberos keytab related tasks
 - name: Check for kerberos host keytab

@@ -182,7 +182,7 @@
   changed_when: false
   become: true
   when: krb5_keytab_stat.stat.exists
-  check_mode: no
+  check_mode: false
 
 - name: Set facts for host principals in /etc/krb5.keytab
   set_fact:
@@ -4,7 +4,7 @@
   become: true
   hiera:
     name: "certmonger_user_enabled"
-  check_mode: no
+  check_mode: false
 
 - name: Set facts for certmonger user service not enabled
   set_fact:

@@ -36,7 +36,7 @@
   become: true
   changed_when: false
   register: all_certnames
-  check_mode: no
+  check_mode: false
 
 # Get status of all certificates and trim the leading whitespaces
 - name: Get status of all certificates

@@ -47,7 +47,7 @@
   loop_control:
     loop_var: certname
   register: all_cert_status
-  check_mode: no
+  check_mode: false
 
 - name: Gather certificates that are not in MONITORING status
   set_fact:
@@ -3,7 +3,7 @@
 - name: Verify that join.conf exists (containerized)
   command: "{{ command_prefix }} exec novajoin_server test -e /etc/novajoin/join.conf"
   register: containerized_join_conf_st
-  changed_when: False
+  changed_when: false
   become: true
 
 - name: Fail if join.conf is not present (containerized)

@@ -21,9 +21,9 @@
     path: "{{ joinconf_location }}"
     section: DEFAULT
     key: keytab
-    ignore_missing_file: True
+    ignore_missing_file: true
   register: novajoin_keytab_path
-  check_mode: no
+  check_mode: false
 
 - name: Get novajoin server port from join.conf
   become: true

@@ -31,9 +31,9 @@
     path: "{{ joinconf_location }}"
     section: DEFAULT
     key: join_listen_port
-    ignore_missing_file: True
+    ignore_missing_file: true
   register: novajoin_server_port
-  check_mode: no
+  check_mode: false
 
 - name: Get novajoin server host from join.conf
   become: true

@@ -41,9 +41,9 @@
     path: "{{ joinconf_location }}"
     section: DEFAULT
     key: join_listen
-    ignore_missing_file: True
+    ignore_missing_file: true
   register: novajoin_server_host
-  check_mode: no
+  check_mode: false
 
 ### verify that the keytab and principal are usable ###
 # TODO(alee): We need to move this to a subfile so we can run

@@ -91,7 +91,7 @@
   command: "{{ command_prefix }} exec novajoin_server kdestroy -c {{ item }}"
   with_items: "{{ temp_krb_caches }}"
   ignore_errors: false
-  changed_when: False
+  changed_when: false
   become: true
   when:
     - containerized_novajoin_krb5_keytab_stat.rc == 0
@@ -20,9 +20,9 @@
     path: "{{ joinconf_location }}"
     section: DEFAULT
     key: keytab
-    ignore_missing_file: True
+    ignore_missing_file: true
   register: novajoin_keytab_path
-  check_mode: no
+  check_mode: false
 
 - name: Get novajoin server port from join.conf
   become: true

@@ -30,9 +30,9 @@
     path: "{{ joinconf_location }}"
     section: DEFAULT
     key: join_listen_port
-    ignore_missing_file: True
+    ignore_missing_file: true
   register: novajoin_server_port
-  check_mode: no
+  check_mode: false
 
 - name: Get novajoin server host from join.conf
   become: true

@@ -40,9 +40,9 @@
     path: "{{ joinconf_location }}"
     section: DEFAULT
     key: join_listen
-    ignore_missing_file: True
+    ignore_missing_file: true
   register: novajoin_server_host
-  check_mode: no
+  check_mode: false
 
 ### verify that the keytab and principal are usable ###
 # TODO(alee): We need to move this to a subfile so we can run

@@ -191,4 +191,3 @@
     report_status: "{{ service_running_status }}"
     report_reason: "{{ service_running_reason }}"
     report_recommendations: "{{ service_running_recommendations }}"
-
@@ -6,7 +6,7 @@
 - name: Get the path of tripleo undercloud config file
   become: true
   hiera: name="tripleo_undercloud_conf_file"
-  check_mode: no
+  check_mode: false
 
 - name: Get the Container CLI from the undercloud.conf file (stein+)
   become: true

@@ -27,25 +27,25 @@
    - not podman_install|bool
    - not docker_install|bool
  block:
    - name: Determine if Docker is enabled and has containers running
      command: docker ps -q
      register: docker_ps
      become: true
      ignore_errors: true

    - name: Set container facts
      set_fact:
        docker_install: true
      when: not docker_ps.stdout|length == 0

    - name: Set container facts
      set_fact:
        docker_install: false
      when: docker_ps.stdout|length == 0

    - name: Set container facts
      set_fact:
        podman_install: false

- name: Set podman command prefix
  set_fact:
@@ -3,7 +3,7 @@
   become: true
   hiera:
     name: "tripleo_undercloud_conf_file"
-  check_mode: no
+  check_mode: false
 
 - name: Verify that nameservers are set in undercloud.conf
   become: true

@@ -11,9 +11,9 @@
     path: "{{ tripleo_undercloud_conf_file }}"
     section: DEFAULT
     key: undercloud_nameservers
-    ignore_missing_file: False
+    ignore_missing_file: false
   register: undercloud_nameservers
-  check_mode: no
+  check_mode: false
 
 - name: Check that nameservers point to IdM/FreeIPA
   set_fact:

@@ -52,7 +52,7 @@
   shell: host {{ undercloud_conf_dns_query }} | awk '{print $5}'
   register: host_from_ip_reg
   changed_when: false
-  check_mode: no
+  check_mode: false
 
 - name: Get domain as set in undercloud.conf
   become: true

@@ -60,9 +60,9 @@
     path: "{{ tripleo_undercloud_conf_file }}"
     section: DEFAULT
     key: overcloud_domain_name
-    ignore_missing_file: False
+    ignore_missing_file: false
   register: undercloud_overcloud_domain
-  check_mode: no
+  check_mode: false
 
 - name: Set facts undercloud.conf domain is not configured correctly
   set_fact:

@@ -96,9 +96,9 @@
     path: "{{ tripleo_undercloud_conf_file }}"
     section: DEFAULT
     key: enable_novajoin
-    ignore_missing_file: False
+    ignore_missing_file: false
   register: undercloud_enable_novajoin
-  check_mode: no
+  check_mode: false
 
 - name: Set facts undercloud.conf enable novajoin is disabled
   set_fact:
@@ -1,6 +1,6 @@
 ---
 
-debug_check: True
+debug_check: true
 services_conf_files:
   - /var/lib/config-data/puppet-generated/nova/etc/nova/nova.conf
   - /var/lib/config-data/puppet-generated/neutron/etc/neutron/neutron.conf

@@ -17,7 +17,7 @@
 
 - name: Converge
   hosts: all
-  gather_facts: no
+  gather_facts: false
 
   vars:
     services_conf_files:

@@ -29,13 +29,13 @@
         dest: /tmp/debug_true_1.conf
         content: |
           [DEFAULT]
-          debug: True
+          debug: true
 
     - name: Checking good value
       include_role:
         name: undercloud-debug
       vars:
-        debug_check: False
+        debug_check: false
 
     - name: Should fail due to bad value
       block:

@@ -1,11 +1,11 @@
 ---
 - name: Check the services for debug flag
-  become: True
+  become: true
   validations_read_ini:
     path: "{{ item }}"
     section: DEFAULT
     key: debug
-    ignore_missing_file: True
+    ignore_missing_file: true
   register: config_result
   with_items: "{{ services_conf_files }}"
   failed_when: "debug_check|bool == config_result.value|bool"
@@ -1,9 +1,8 @@
 ---
-
 volumes:
-  - {mount: /var/lib/docker, min_size: 10}
+  - {mount: /var/lib/docker, min_size: 10}
   - {mount: /var/lib/config-data, min_size: 3}
-  - {mount: /var/log, min_size: 3}
-  - {mount: /usr, min_size: 5}
-  - {mount: /var, min_size: 20}
-  - {mount: /, min_size: 25}
+  - {mount: /var/log, min_size: 3}
+  - {mount: /usr, min_size: 5}
+  - {mount: /var, min_size: 20}
+  - {mount: /, min_size: 25}

@@ -25,13 +25,13 @@
   shell: df -B1 {{ item.mount }} --output=avail | sed 1d
   register: volume_size
   with_items: "{{ existing_volumes }}"
-  changed_when: False
+  changed_when: false
 
 - name: Fail if any of the volumes are too small
   fail:
-    msg: >
-      Minimum free space required for {{ item.item.mount }}: {{ item.item.min_size }}G
-      - current free space: {{ (item.stdout|int / const_bytes_in_gb|int) |round(1) }}G
+    msg: >
+      Minimum free space required for {{ item.item.mount }}: {{ item.item.min_size }}G
+      - current free space: {{ (item.stdout|int / const_bytes_in_gb|int) |round(1) }}G
   when: >
     item.stdout|int / const_bytes_in_gb|int < item.item.min_size|int
   with_items: "{{ volume_size.results }}"
@@ -8,7 +8,7 @@ platforms:
   - name: centos7
     hostname: centos7
     image: centos:7
-    override_command: True
+    override_command: true
     command: python -m SimpleHTTPServer 8787
     pkg_extras: python-setuptools python-enum34 python-netaddr epel-release ruby PyYAML
     easy_install:

@@ -20,7 +20,7 @@ platforms:
   - name: fedora28
     hostname: fedora28
     image: fedora:28
-    override_command: True
+    override_command: true
     command: python3 -m http.server 8787
     pkg_extras: python*-setuptools python*-enum python*-netaddr ruby PyYAML
     environment:

@@ -17,7 +17,7 @@
 
 - name: Converge
   hosts: all
-  gather_facts: no
+  gather_facts: false
 
   tasks:
     - name: working detection

@@ -17,7 +17,7 @@
 
 - name: Prepare
   hosts: all
-  gather_facts: no
+  gather_facts: false
 
   tasks:
     - name: install hiera

@@ -18,7 +18,7 @@
     set -o pipefail
     {{ container_cli.value|default('podman', true) }} exec heat_api_cron crontab -l -u heat |grep -v '^#'
   register: cron_result
-  changed_when: False
+  changed_when: false
 
 - name: Check heat crontab
   fail:

@@ -6,5 +6,5 @@ metadata:
     heat database can grow very large. This validation checks that
     the purge_deleted crontab has been set up.
   groups:
-  - pre-upgrade
-  - pre-deployment
+    - pre-upgrade
+    - pre-deployment
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user