Use true/false for boolean values

In ansible, usage of true/false for boolean values, instead of yes/no,
is considered a best practice and is enforced by ansible-lint with
the "truthy value should be one of false, true (truthy)" rule.

This change replaces usage of yes/no with true/false to follow that
practice.

Change-Id: I3313278f1ef6cbee0f906aca0a77bde1a3c53784
This commit is contained in:
Takashi Kajinami 2021-10-12 09:32:17 +09:00
parent ea34234ff3
commit 76adfd4202
52 changed files with 141 additions and 141 deletions

View File

@ -64,7 +64,7 @@
copy:
content: "Container startup configs moved to /var/lib/tripleo-config/container-startup-config"
dest: /var/lib/tripleo-config/container-startup-config-readme.txt
force: yes
force: true
mode: '0600'
tags:
- container_startup_configs

View File

@ -62,12 +62,12 @@
include_vars:
file: "{{ playbook_dir }}/service_vip_vars.yaml"
name: service_vip_vars
ignore_errors: yes
ignore_errors: true
- name: Include OVN bridge MAC address variables
include_vars:
file: "{{ playbook_dir }}/ovn_bridge_mac_address_vars.yaml"
name: ovn_bridge_mac_address_vars
ignore_errors: yes
ignore_errors: true
tags:
- always

View File

@ -11,7 +11,7 @@
- container_startup_configs
when:
- ansible_check_mode|bool
check_mode: no
check_mode: false
- name: Create /var/lib/tripleo-config/check-mode directory for check mode
become: true
@ -29,7 +29,7 @@
- container_startup_configs
when:
- ansible_check_mode|bool
check_mode: no
check_mode: false
# Puppet manifest for baremetal host configuration
- name: Write the puppet step_config manifest
@ -38,12 +38,12 @@
copy:
content: "{{ lookup('file', tripleo_role_name + '/step_config.pp', errors='ignore') | default('', True) }}"
dest: /var/lib/tripleo-config/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}puppet_step_config.pp
force: yes
force: true
mode: '0600'
tags:
- host_config
check_mode: no
diff: no
check_mode: false
diff: false
- name: Diff puppet step_config manifest changes for check mode
command:
@ -51,7 +51,7 @@
register: diff_results
tags:
- host_config
check_mode: no
check_mode: false
when:
- ansible_check_mode|bool
- ansible_diff_mode
@ -87,7 +87,7 @@
state: absent
tags:
- container_config
check_mode: no
check_mode: false
when:
- ansible_check_mode|bool
@ -100,7 +100,7 @@
selevel: s0
tags:
- container_config
check_mode: no
check_mode: false
when:
- ansible_check_mode|bool
@ -110,12 +110,12 @@
copy:
content: "{{ lookup('file', tripleo_role_name + '/puppet_config.yaml', errors='ignore') | default([], True) | from_yaml | to_nice_json }}"
dest: /var/lib/container-puppet/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}container-puppet.json
force: yes
force: true
mode: '0600'
tags:
- container_config
check_mode: no
diff: no
check_mode: false
diff: false
- name: Diff container-puppet.json changes for check mode
command:
@ -123,7 +123,7 @@
register: diff_results
tags:
- container_config
check_mode: no
check_mode: false
when:
- ansible_check_mode|bool
- ansible_diff_mode
@ -173,7 +173,7 @@
setype: container_file_t
selevel: s0
recurse: true
check_mode: no
check_mode: false
when:
- ansible_check_mode|bool
- not check_mode_dir.stat.exists
@ -190,7 +190,7 @@
when:
- ansible_check_mode|bool
- not check_mode_dir.stat.exists
check_mode: no
check_mode: false
tags:
- host_config
- container_config

View File

@ -10,7 +10,7 @@
dest: /etc/puppet/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}hieradata/config_step.json
force: true
mode: '0600'
check_mode: no
check_mode: false
tags:
- host_config
@ -34,7 +34,7 @@
no_log: true
tags:
- host_config
check_mode: no
check_mode: false
- name: Wait for puppet host configuration to finish
async_status:

View File

@ -466,7 +466,7 @@ outputs:
strategy: tripleo_free
name: External deployment step 0
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
any_errors_fatal: true
become: false
vars:
step: 0
@ -496,7 +496,7 @@ outputs:
strategy: tripleo_linear
name: Manage SELinux
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
any_errors_fatal: true
tasks:
- name: Set selinux state
become: true
@ -508,7 +508,7 @@ outputs:
strategy: tripleo_linear
name: Generate /etc/hosts
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
any_errors_fatal: true
tasks:
{% raw %}
- name: Configure Hosts Entries
@ -526,7 +526,7 @@ outputs:
strategy: tripleo_linear
name: Common roles for TripleO servers
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
any_errors_fatal: true
# pre_tasks run before any roles in a play, so we use it for the
# named debug task for --start-at-task.
pre_tasks:
@ -550,7 +550,7 @@ outputs:
name: Deploy step tasks for step 0
become: true
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
any_errors_fatal: true
vars:
bootstrap_server_id: BOOTSTRAP_SERVER_ID
step: 0
@ -576,7 +576,7 @@ outputs:
strategy: tripleo_free
name: Server pre network steps
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
any_errors_fatal: true
vars:
deploy_source_host: "DEPLOY_SOURCE_HOST"
tasks:
@ -602,7 +602,7 @@ outputs:
strategy: tripleo_free
name: Server network deployments
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
any_errors_fatal: true
tasks:
{% raw %}
- name: Network Configuration
@ -642,7 +642,7 @@ outputs:
strategy: tripleo_free
name: Server network validation
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
any_errors_fatal: true
tasks:
{% raw %}
- name: Basic Network Validation
@ -665,7 +665,7 @@ outputs:
strategy: tripleo_free
name: Server pre deployment steps
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
any_errors_fatal: true
tasks:
{% raw %}
- import_tasks: hiera_steps_tasks.yaml
@ -690,7 +690,7 @@ outputs:
name: Host prep steps
become: true
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
any_errors_fatal: true
vars:
bootstrap_server_id: BOOTSTRAP_SERVER_ID
deploy_identifier: DEPLOY_IDENTIFIER
@ -735,7 +735,7 @@ outputs:
name: Overcloud container setup tasks
become: true
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
any_errors_fatal: true
vars:
bootstrap_server_id: BOOTSTRAP_SERVER_ID
deploy_identifier: DEPLOY_IDENTIFIER
@ -770,7 +770,7 @@ outputs:
name: Pre Deployment Step Tasks
become: true
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
any_errors_fatal: true
vars:
bootstrap_server_id: BOOTSTRAP_SERVER_ID
deploy_identifier: DEPLOY_IDENTIFIER
@ -806,7 +806,7 @@ outputs:
strategy: tripleo_free
name: External deployment step {{step}}
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
any_errors_fatal: true
become: false
vars:
bootstrap_server_id: BOOTSTRAP_SERVER_ID
@ -839,7 +839,7 @@ outputs:
name: Deploy step tasks for {{step}}
become: true
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
any_errors_fatal: true
vars:
bootstrap_server_id: BOOTSTRAP_SERVER_ID
step: '{{step}}'
@ -919,7 +919,7 @@ outputs:
name: Server Post Deployments
become: true
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
any_errors_fatal: true
tasks:
- name: Server Post Deployments
delegate_to: localhost
@ -942,7 +942,7 @@ outputs:
strategy: tripleo_linear
name: External deployment Post Deploy tasks
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
any_errors_fatal: true
become: false
vars:
bootstrap_server_id: BOOTSTRAP_SERVER_ID
@ -999,7 +999,7 @@ outputs:
become: true
serial: "{% raw %}{{ update_serial | default({% endraw %}{{ role.update_serial | default(1) }}{% raw %})}}{% endraw %}"
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
any_errors_fatal: true
vars:
bootstrap_server_id: BOOTSTRAP_SERVER_ID
deploy_identifier: DEPLOY_IDENTIFIER
@ -1053,7 +1053,7 @@ outputs:
- hosts: DEPLOY_SOURCE_HOST
name: External update step {{step}}
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
any_errors_fatal: true
become: false
vars:
bootstrap_server_id: BOOTSTRAP_SERVER_ID
@ -1081,7 +1081,7 @@ outputs:
- hosts: DEPLOY_SOURCE_HOST
name: External deploy step {{step}}
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
any_errors_fatal: true
become: false
vars:
bootstrap_server_id: BOOTSTRAP_SERVER_ID
@ -1121,7 +1121,7 @@ outputs:
name: Run pre-upgrade rolling tasks
serial: {{ role.deploy_serial | default(1) }}
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
any_errors_fatal: true
tasks:
- include_tasks: pre_upgrade_rolling_steps_tasks.yaml
with_sequence: start=0 end={{pre_upgrade_rolling_steps_max-1}}
@ -1141,7 +1141,7 @@ outputs:
name: Upgrade tasks for step {{step}}
become: true
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
any_errors_fatal: true
vars:
bootstrap_server_id: BOOTSTRAP_SERVER_ID
step: '{{step}}'
@ -1178,7 +1178,7 @@ outputs:
deploy_target_host: "DEPLOY_TARGET_HOST"
- hosts: DEPLOY_TARGET_HOST
strategy: tripleo_free
any_errors_fatal: yes
any_errors_fatal: true
tasks:
- include_tasks: post_upgrade_steps_tasks.yaml
with_sequence: start=0 end={{post_upgrade_steps_max-1}}
@ -1208,7 +1208,7 @@ outputs:
strategy: tripleo_free
name: External upgrade step {{step}}
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
any_errors_fatal: true
become: false
vars:
# Explicit ansible_python_interpreter to allow connecting
@ -1239,7 +1239,7 @@ outputs:
- hosts: DEPLOY_SOURCE_HOST
name: External deploy step {{step}}
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
any_errors_fatal: true
become: false
vars:
# Explicit ansible_python_interpreter to allow connecting

View File

@ -36,7 +36,7 @@
register: diff_results
tags:
- container_config
check_mode: no
check_mode: false
when:
- ansible_check_mode|bool
- ansible_diff_mode

View File

@ -3,9 +3,9 @@
copy:
content: "{{lookup ('vars', 'host_container_puppet_tasks_' ~ step, default=[]) | to_nice_json}}"
dest: "/var/lib/container-puppet/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}container-puppet-tasks{{step}}.json"
force: yes
force: true
mode: '0600'
check_mode: no
check_mode: false
tags:
- container_config_tasks

View File

@ -772,8 +772,8 @@ outputs:
- - name: enable virt_sandbox_use_netlink for healthcheck
seboolean:
name: virt_sandbox_use_netlink
persistent: yes
state: yes
persistent: true
state: true
scale_tasks:
if:
- {get_param: BarbicanPkcs11CryptoLunasaEnabled}

View File

@ -119,5 +119,5 @@ outputs:
- name: enable virt_sandbox_use_netlink for healthcheck
seboolean:
name: virt_sandbox_use_netlink
persistent: yes
state: yes
persistent: true
state: true

View File

@ -200,8 +200,8 @@ outputs:
- name: enable virt_sandbox_use_netlink for healthcheck
seboolean:
name: virt_sandbox_use_netlink
persistent: yes
state: yes
persistent: true
state: true
external_upgrade_tasks:
- when:
- step|int == 1

View File

@ -72,12 +72,12 @@ outputs:
- name: set ceph_nfs upgrade node facts in a single-node environment
set_fact:
ceph_nfs_short_node_names_upgraded: "{{ ceph_nfs_short_node_names }}"
cacheable: no
cacheable: false
when: groups['ceph_nfs'] | length <= 1
- name: set ceph_nfs upgrade node facts from the limit option
set_fact:
ceph_nfs_short_node_names_upgraded: "{{ ceph_nfs_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
cacheable: no
cacheable: false
when:
- groups['ceph_nfs'] | length > 1
- item.split('.')[0] in ansible_limit.split(':')

View File

@ -262,8 +262,8 @@ outputs:
- - name: enable virt_sandbox_use_netlink for healthcheck
seboolean:
name: virt_sandbox_use_netlink
persistent: yes
state: yes
persistent: true
state: true
deploy_steps_tasks:
- name: Clean up when switching cinder-backup from pcmk to active-active
when:

View File

@ -315,13 +315,13 @@ outputs:
set_fact:
cinder_backup_short_node_names_upgraded: "{{ cinder_backup_short_node_names }}"
cinder_backup_node_names_upgraded: "{{ cinder_backup_node_names }}"
cacheable: no
cacheable: false
when: groups['cinder_backup'] | length <= 1
- name: set cinder_backup upgrade node facts from the limit option
set_fact:
cinder_backup_short_node_names_upgraded: "{{ cinder_backup_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
cinder_backup_node_names_upgraded: "{{ cinder_backup_node_names_upgraded|default([]) + [item] }}"
cacheable: no
cacheable: false
when:
- groups['cinder_backup'] | length > 1
- item.split('.')[0] in ansible_limit.split(':')

View File

@ -332,8 +332,8 @@ outputs:
- name: cinder enable the LVM losetup service
systemd:
name: cinder-lvm-losetup
enabled: yes
daemon_reload: yes
enabled: true
daemon_reload: true
cinder_volume_config_settings:
description: Config settings for the cinder-volume container (HA or non-HA)

View File

@ -143,8 +143,8 @@ outputs:
- name: enable virt_sandbox_use_netlink for healthcheck
seboolean:
name: virt_sandbox_use_netlink
persistent: yes
state: yes
persistent: true
state: true
external_upgrade_tasks:
- when:
- step|int == 1

View File

@ -304,13 +304,13 @@ outputs:
set_fact:
cinder_volume_short_node_names_upgraded: "{{ cinder_volume_short_node_names }}"
cinder_volume_node_names_upgraded: "{{ cinder_volume_node_names | default([]) }}"
cacheable: no
cacheable: false
when: groups['cinder_volume'] | length <= 1
- name: set cinder_volume upgrade node facts from the limit option
set_fact:
cinder_volume_short_node_names_upgraded: "{{ cinder_volume_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
cinder_volume_node_names_upgraded: "{{ cinder_volume_node_names_upgraded|default([]) + [item] }}"
cacheable: no
cacheable: false
when:
- groups['cinder_volume'] | length > 1
- item.split('.')[0] in ansible_limit.split(':')

View File

@ -514,13 +514,13 @@ outputs:
set_fact:
mysql_short_node_names_upgraded: "{{ mysql_short_node_names }}"
mysql_node_names_upgraded: "{{ mysql_node_names }}"
cacheable: no
cacheable: false
when: groups['mysql'] | length <= 1
- name: set mysql upgrade node facts from the limit option
set_fact:
mysql_short_node_names_upgraded: "{{ mysql_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
mysql_node_names_upgraded: "{{ mysql_node_names_upgraded|default([]) + [item] }}"
cacheable: no
cacheable: false
when:
- groups['mysql'] | length > 1
- item.split('.')[0] in ansible_limit.split(':')

View File

@ -436,12 +436,12 @@ outputs:
- name: set redis upgrade node facts in a single-node environment
set_fact:
redis_short_node_names_upgraded: "{{ redis_short_node_names }}"
cacheable: no
cacheable: false
when: groups['redis'] | length <= 1
- name: set redis upgrade node facts from the limit option
set_fact:
redis_short_node_names_upgraded: "{{ redis_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
cacheable: no
cacheable: false
when:
- groups['redis'] | length > 1
- item.split('.')[0] in ansible_limit.split(':')

View File

@ -893,14 +893,14 @@ outputs:
command: /usr/bin/rpm -q libvirt-daemon
failed_when: false
register: libvirt_installed
check_mode: no
check_mode: false
- name: make sure libvirt services are disabled and masked
service:
name: "{{ item }}"
state: stopped
enabled: no
masked: yes
daemon_reload: yes
enabled: false
masked: true
daemon_reload: true
with_items:
- libvirtd.service
- virtlogd.socket

View File

@ -57,7 +57,7 @@ outputs:
step_config: ''
host_prep_tasks:
- name: enroll client in ipa and get metadata
become: yes
become: true
vars:
python_interpreter: {get_param: PythonInterpreter}
makehomedir: {get_param: MakeHomeDir}

View File

@ -300,7 +300,7 @@ outputs:
content: |
if $syslogfacility-text == '{{facility}}' and $programname == 'haproxy' then -/var/log/containers/haproxy/haproxy.log
& stop
create: yes
create: true
path: /etc/rsyslog.d/openstack-haproxy.conf
vars:
facility: {get_param: HAProxySyslogFacility}
@ -343,7 +343,7 @@ outputs:
path: /var/log/containers/haproxy
state: directory
setype: var_log_t
recurse: yes
recurse: true
when: step|int == 1
external_upgrade_tasks:
- when:

View File

@ -301,7 +301,7 @@ outputs:
content: |
if $syslogfacility-text == '{{facility}}' and $programname == 'haproxy' then -/var/log/containers/haproxy/haproxy.log
& stop
create: yes
create: true
path: /etc/rsyslog.d/openstack-haproxy.conf
vars:
facility: {get_param: HAProxySyslogFacility}
@ -529,12 +529,12 @@ outputs:
- name: set haproxy upgrade node facts in a single-node environment
set_fact:
haproxy_short_node_names_upgraded: "{{ haproxy_short_node_names }}"
cacheable: no
cacheable: false
when: groups['haproxy'] | length <= 1
- name: set haproxy upgrade node facts from the limit option
set_fact:
haproxy_short_node_names_upgraded: "{{ haproxy_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
cacheable: no
cacheable: false
when:
- groups['haproxy'] | length > 1
- item.split('.')[0] in ansible_limit.split(':')

View File

@ -285,8 +285,8 @@ outputs:
- - name: enable virt_sandbox_use_netlink for healthcheck
seboolean:
name: virt_sandbox_use_netlink
persistent: yes
state: yes
persistent: true
state: true
upgrade_tasks: []
external_upgrade_tasks:
- when:

View File

@ -73,7 +73,7 @@ outputs:
command: /usr/bin/rpm -q docker-distribution
failed_when: false
register: docker_distribution_installed
check_mode: no
check_mode: false
- name: Stop, disable docker-distribution
systemd:
enabled: false

View File

@ -150,7 +150,7 @@ outputs:
ipaclient_otp: "{{ hostvars[outer_item.0]['ipa_host_otp'] }}"
ipaclient_mkhomedir: {get_param: MakeHomeDir}
ipaclient_no_ntp: {get_param: IdMNoNtpSetup}
ipaclient_force: yes
ipaclient_force: true
ipaclient_hostname: "{{ hostvars[outer_item.0]['fqdn_canonical'] }}"
ansible_fqdn: "{{ ipaclient_hostname }}"
ipaclients:

View File

@ -93,8 +93,8 @@ outputs:
- name: allow logrotate to read inside containers
seboolean:
name: logrotate_read_inside_containers
persistent: yes
state: yes
persistent: true
state: true
deploy_steps_tasks:
- name: configure tmpwatch on the host
when: step|int == 2

View File

@ -108,8 +108,8 @@ outputs:
- name: enable virt_sandbox_use_netlink for healthcheck
seboolean:
name: virt_sandbox_use_netlink
persistent: yes
state: yes
persistent: true
state: true
upgrade_tasks: []
external_upgrade_tasks:
- when:

View File

@ -303,13 +303,13 @@ outputs:
set_fact:
manila_share_short_node_names_upgraded: "{{ manila_share_short_node_names }}"
manila_share_node_names_upgraded: "{{ manila_share_node_names }}"
cacheable: no
cacheable: false
when: groups['manila_share'] | length <= 1
- name: set manila_share upgrade node facts from the limit option
set_fact:
manila_share_short_node_names_upgraded: "{{ manila_share_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
manila_share_node_names_upgraded: "{{ manila_share_node_names_upgraded|default([]) + [item] }}"
cacheable: no
cacheable: false
when:
- groups['manila_share'] | length > 1
- item.split('.')[0] in ansible_limit.split(':')

View File

@ -408,8 +408,8 @@ outputs:
- name: enable virt_sandbox_use_netlink for healthcheck
seboolean:
name: virt_sandbox_use_netlink
persistent: yes
state: yes
persistent: true
state: true
- name: set conditions
set_fact:
dnsmasq_wrapper_enabled: {get_param: NeutronEnableDnsmasqDockerWrapper}

View File

@ -344,8 +344,8 @@ outputs:
- name: enable virt_sandbox_use_netlink for healthcheck
seboolean:
name: virt_sandbox_use_netlink
persistent: yes
state: yes
persistent: true
state: true
- name: set conditions
set_fact:
keepalived_wrapper_enabled: {get_param: NeutronEnableKeepalivedWrapper}

View File

@ -190,6 +190,6 @@ outputs:
- name: enable virt_sandbox_use_netlink for healthcheck
seboolean:
name: virt_sandbox_use_netlink
persistent: yes
state: yes
persistent: true
state: true
upgrade_tasks: []

View File

@ -394,22 +394,22 @@ outputs:
copy:
content: {get_file: ./neutron-cleanup}
dest: '/usr/libexec/neutron-cleanup'
force: yes
force: true
mode: '0755'
- name: Copy in cleanup service
copy:
content: {get_file: ./neutron-cleanup.service}
dest: '/usr/lib/systemd/system/neutron-cleanup.service'
force: yes
force: true
- name: Enabling the cleanup service
service:
name: neutron-cleanup
enabled: yes
enabled: true
- name: enable virt_sandbox_use_netlink for healthcheck
seboolean:
name: virt_sandbox_use_netlink
persistent: yes
state: yes
persistent: true
state: true
update_tasks:
# puppetlabs-firewall manages security rules via Puppet but make the rules
# consistent by default. Since Neutron also creates some rules, we don't

View File

@ -192,8 +192,8 @@ outputs:
- - name: enable virt_sandbox_use_netlink for healthcheck
seboolean:
name: virt_sandbox_use_netlink
persistent: yes
state: yes
persistent: true
state: true
- if:
- derive_pci_whitelist_enabled
- - name: "creating directory"

View File

@ -73,7 +73,7 @@ outputs:
- name: Warn if no discovery host available
fail:
msg: 'No hosts available to run nova cell_v2 host discovery.'
ignore_errors: yes
ignore_errors: true
when:
- nova_cellv2_discovery_delegate_host is not defined
- name: Discovering nova hosts

View File

@ -1543,7 +1543,7 @@ outputs:
service:
name: irqbalance.service
state: stopped
enabled: no
enabled: false
deploy_steps_tasks:
- name: validate nova-compute container state
containers.podman.podman_container_info:

View File

@ -223,8 +223,8 @@ outputs:
- - name: enable virt_sandbox_use_netlink for healthcheck
seboolean:
name: virt_sandbox_use_netlink
persistent: yes
state: yes
persistent: true
state: true
external_upgrade_tasks:
- when: step|int == 1
block: &nova_online_db_migration

View File

@ -211,8 +211,8 @@ outputs:
- name: enable virt_sandbox_use_netlink for healthcheck
seboolean:
name: virt_sandbox_use_netlink
persistent: yes
state: yes
persistent: true
state: true
external_post_deploy_tasks: {get_attr: [NovaComputeCommon, nova_compute_common_deploy_steps_tasks]}
external_upgrade_tasks:
- when:

View File

@ -935,14 +935,14 @@ outputs:
command: /usr/bin/rpm -q libvirt-daemon
failed_when: false
register: libvirt_installed
check_mode: no
check_mode: false
- name: make sure libvirt services are disabled and masked
service:
name: "{{ item }}"
state: stopped
enabled: no
masked: yes
daemon_reload: yes
enabled: false
masked: true
daemon_reload: true
with_items:
- libvirtd.service
- virtlogd.socket

View File

@ -303,8 +303,8 @@ outputs:
- - name: enable virt_sandbox_use_netlink for healthcheck
seboolean:
name: virt_sandbox_use_netlink
persistent: yes
state: yes
persistent: true
state: true
external_upgrade_tasks:
- when:
- step|int == 1

View File

@ -159,8 +159,8 @@ outputs:
- name: enable virt_sandbox_use_netlink for healthcheck
seboolean:
name: virt_sandbox_use_netlink
persistent: yes
state: yes
persistent: true
state: true
update_tasks: {get_attr: [OctaviaBase, role_data, update_tasks]}
upgrade_tasks: {get_attr: [OctaviaBase, role_data, upgrade_tasks]}
external_upgrade_tasks:

View File

@ -389,23 +389,23 @@ outputs:
- name: enable virt_sandbox_use_netlink for healthcheck
seboolean:
name: virt_sandbox_use_netlink
persistent: yes
state: yes
persistent: true
state: true
- name: Copy in cleanup script
copy:
content: {get_file: ../neutron/neutron-cleanup}
dest: '/usr/libexec/neutron-cleanup'
force: yes
force: true
mode: '0755'
- name: Copy in cleanup service
copy:
content: {get_file: ../neutron/neutron-cleanup.service}
dest: '/usr/lib/systemd/system/neutron-cleanup.service'
force: yes
force: true
- name: Enabling the cleanup service
service:
name: neutron-cleanup
enabled: yes
enabled: true
external_deploy_tasks:
- when:
- step|int == 0

View File

@ -287,7 +287,7 @@ outputs:
loop_control:
loop_var: ovn_container
- name: Set connection # FIXME workaround until RHBZ #1952038 is fixed
become: yes
become: true
shell: |
podman exec ovn_cluster_north_db_server bash -c "ovn-nbctl -p /etc/pki/tls/private/ovn_dbs.key -c /etc/pki/tls/certs/ovn_dbs.crt -C /etc/ipa/ca.crt set-connection pssl:{{ tripleo_ovn_cluster_nb_db_port }}"
podman exec ovn_cluster_south_db_server bash -c "ovn-sbctl -p /etc/pki/tls/private/ovn_dbs.key -c /etc/pki/tls/certs/ovn_dbs.crt -C /etc/ipa/ca.crt set-connection pssl:{{ tripleo_ovn_cluster_sb_db_port }}"

View File

@ -413,12 +413,12 @@ outputs:
- name: set ovn_dbs upgrade node facts in a single-node environment
set_fact:
ovn_dbs_short_node_names_upgraded: "{{ ovn_dbs_short_node_names }}"
cacheable: no
cacheable: false
when: groups['ovn_dbs'] | length <= 1
- name: set ovn_dbs upgrade node facts from the limit option
set_fact:
ovn_dbs_short_node_names_upgraded: "{{ ovn_dbs_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
cacheable: no
cacheable: false
when:
- groups['ovn_dbs'] | length > 1
- item.split('.')[0] in ansible_limit.split(':')

View File

@ -226,12 +226,12 @@ outputs:
- name: set pacemaker upgrade node facts in a single-node environment
set_fact:
pacemaker_short_node_names_upgraded: "{{ pacemaker_short_node_names }}"
cacheable: no
cacheable: false
when: groups['pacemaker'] | length <= 1
- name: set pacemaker upgrade node facts from the limit option
set_fact:
pacemaker_short_node_names_upgraded: "{{ pacemaker_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
cacheable: no
cacheable: false
when:
- groups['pacemaker'] | length > 1
- item.split('.')[0] in ansible_limit.split(':')
@ -260,7 +260,7 @@ outputs:
| selectattr('key', 'in', pacemaker_short_node_names_upgraded)
| map(attribute='value')
| list }}"
cacheable: no
cacheable: false
- name: add the pacemaker short name to hiera data for the upgrade.
include_role:

View File

@ -119,12 +119,12 @@ outputs:
- name: set pacemaker upgrade remote node facts in a single-node environment
set_fact:
pacemaker_remote_short_node_names_upgraded: "{{ pacemaker_remote_short_node_names }}"
cacheable: no
cacheable: false
when: groups['pacemaker_remote'] | length <= 1
- name: set pacemaker remote upgrade node facts from the limit option
set_fact:
pacemaker_remote_short_node_names_upgraded: "{{ pacemaker_remote_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
cacheable: no
cacheable: false
when:
- groups['pacemaker_remote'] | length > 1
- item.split('.')[0] in ansible_limit.split(':')
@ -145,7 +145,7 @@ outputs:
| selectattr('key', 'in', pacemaker_remote_short_node_names_upgraded)
| map(attribute='value')
| list }}"
cacheable: no
cacheable: false
- name: add the pacemaker remote short name to hiera data for the upgrade.
include_role:
name: tripleo_upgrade_hiera

View File

@ -385,13 +385,13 @@ outputs:
set_fact:
oslo_messaging_notify_short_node_names_upgraded: "{{ oslo_messaging_notify_short_node_names }}"
oslo_messaging_notify_node_names_upgraded: "{{ oslo_messaging_notify_node_names }}"
cacheable: no
cacheable: false
when: groups['oslo_messaging_notify'] | length <= 1
- name: set oslo_messaging_notify upgrade node facts from the limit option
set_fact:
oslo_messaging_notify_short_node_names_upgraded: "{{ oslo_messaging_notify_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
oslo_messaging_notify_node_names_upgraded: "{{ oslo_messaging_notify_node_names_upgraded|default([]) + [item] }}"
cacheable: no
cacheable: false
when:
- groups['oslo_messaging_notify'] | length > 1
- item.split('.')[0] in ansible_limit.split(':')

View File

@ -390,12 +390,12 @@ outputs:
- name: set rabbitmq upgrade node facts in a single-node environment
set_fact:
rabbitmq_short_node_names_upgraded: "{{ rabbitmq_short_node_names }}"
cacheable: no
cacheable: false
when: groups['rabbitmq'] | length <= 1
- name: set rabbitmq upgrade node facts from the limit option
set_fact:
rabbitmq_short_node_names_upgraded: "{{ rabbitmq_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
cacheable: no
cacheable: false
when:
- groups['rabbitmq'] | length > 1
- item.split('.')[0] in ansible_limit.split(':')

View File

@ -390,13 +390,13 @@ outputs:
set_fact:
oslo_messaging_rpc_short_node_names_upgraded: "{{ oslo_messaging_rpc_short_node_names }}"
oslo_messaging_rpc_node_names_upgraded: "{{ oslo_messaging_rpc_node_names }}"
cacheable: no
cacheable: false
when: groups['oslo_messaging_rpc'] | length <= 1
- name: set oslo_messaging_rpc upgrade node facts from the limit option
set_fact:
oslo_messaging_rpc_short_node_names_upgraded: "{{ oslo_messaging_rpc_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
oslo_messaging_rpc_node_names_upgraded: "{{ oslo_messaging_rpc_node_names_upgraded|default([]) + [item] }}"
cacheable: no
cacheable: false
when:
- groups['oslo_messaging_rpc'] | length > 1
- item.split('.')[0] in ansible_limit.split(':')

View File

@ -113,7 +113,7 @@ outputs:
service:
name: ntpd
state: stopped
enabled: no
enabled: false
when:
- ntp_service_check.rc is defined
- ntp_service_check.rc == 0

View File

@ -305,7 +305,7 @@ outputs:
- name: Always ensure the openvswitch service is enabled and running after upgrades
service:
name: openvswitch
enabled: yes
enabled: true
state: started
when:
- step|int == 2
@ -398,7 +398,7 @@ outputs:
- name: Always ensure the openvswitch service is enabled and running after upgrades
service:
name: openvswitch
enabled: yes
enabled: true
state: started
when:
- step|int == 2
@ -421,6 +421,6 @@ outputs:
when: step|int == 3
service:
name: openvswitch
enabled: yes
enabled: true
state: started
ignore_errors: true

View File

@ -93,7 +93,7 @@ outputs:
- name: Always ensure the openvswitch service is enabled and running after upgrades
service:
name: openvswitch
enabled: yes
enabled: true
state: started
when:
- step|int == 2

View File

@ -250,7 +250,7 @@ parameters:
have their networks configured. This is a role based parameter.
default: False
{{role.name}}AnyErrorsFatal:
default: yes
default: true
type: string
{{role.name}}MaxFailPercentage:
default: 0