Merge "Use true/false for boolean values"
commit debf84c5df
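The change itself is mechanical: every Ansible boolean written as yes/no is respelled true/false. Ansible treats the two spellings identically, so behaviour does not change; true/false is simply the form preferred by YAML 1.2 and by yamllint's truthy rule. A minimal sketch of the pattern, using an illustrative task that is not taken from this repository:

    # Illustrative example only; the task name, path and tag are hypothetical.
    - name: Write an example file
      copy:
        content: "example"
        dest: /tmp/example.txt
        force: true         # previously spelled "force: yes"
        mode: '0600'
      check_mode: false     # previously spelled "check_mode: no"
      tags:
        - example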
@@ -64,7 +64,7 @@
 copy:
 content: "Container startup configs moved to /var/lib/tripleo-config/container-startup-config"
 dest: /var/lib/tripleo-config/container-startup-config-readme.txt
-force: yes
+force: true
 mode: '0600'
 tags:
 - container_startup_configs
@@ -62,12 +62,12 @@
 include_vars:
 file: "{{ playbook_dir }}/service_vip_vars.yaml"
 name: service_vip_vars
-ignore_errors: yes
+ignore_errors: true
 - name: Include OVN bridge MAC address variables
 include_vars:
 file: "{{ playbook_dir }}/ovn_bridge_mac_address_vars.yaml"
 name: ovn_bridge_mac_address_vars
-ignore_errors: yes
+ignore_errors: true
 tags:
 - always

@@ -11,7 +11,7 @@
 - container_startup_configs
 when:
 - ansible_check_mode|bool
-check_mode: no
+check_mode: false

 - name: Create /var/lib/tripleo-config/check-mode directory for check mode
 become: true
@@ -29,7 +29,7 @@
 - container_startup_configs
 when:
 - ansible_check_mode|bool
-check_mode: no
+check_mode: false

 # Puppet manifest for baremetal host configuration
 - name: Write the puppet step_config manifest
@@ -38,12 +38,12 @@
 copy:
 content: "{{ lookup('file', tripleo_role_name + '/step_config.pp', errors='ignore') | default('', True) }}"
 dest: /var/lib/tripleo-config/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}puppet_step_config.pp
-force: yes
+force: true
 mode: '0600'
 tags:
 - host_config
-check_mode: no
-diff: no
+check_mode: false
+diff: false

 - name: Diff puppet step_config manifest changes for check mode
 command:
@@ -51,7 +51,7 @@
 register: diff_results
 tags:
 - host_config
-check_mode: no
+check_mode: false
 when:
 - ansible_check_mode|bool
 - ansible_diff_mode
@@ -87,7 +87,7 @@
 state: absent
 tags:
 - container_config
-check_mode: no
+check_mode: false
 when:
 - ansible_check_mode|bool

@@ -100,7 +100,7 @@
 selevel: s0
 tags:
 - container_config
-check_mode: no
+check_mode: false
 when:
 - ansible_check_mode|bool

@@ -110,12 +110,12 @@
 copy:
 content: "{{ lookup('file', tripleo_role_name + '/puppet_config.yaml', errors='ignore') | default([], True) | from_yaml | to_nice_json }}"
 dest: /var/lib/container-puppet/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}container-puppet.json
-force: yes
+force: true
 mode: '0600'
 tags:
 - container_config
-check_mode: no
-diff: no
+check_mode: false
+diff: false

 - name: Diff container-puppet.json changes for check mode
 command:
@@ -123,7 +123,7 @@
 register: diff_results
 tags:
 - container_config
-check_mode: no
+check_mode: false
 when:
 - ansible_check_mode|bool
 - ansible_diff_mode
@@ -173,7 +173,7 @@
 setype: container_file_t
 selevel: s0
 recurse: true
-check_mode: no
+check_mode: false
 when:
 - ansible_check_mode|bool
 - not check_mode_dir.stat.exists
@@ -190,7 +190,7 @@
 when:
 - ansible_check_mode|bool
 - not check_mode_dir.stat.exists
-check_mode: no
+check_mode: false
 tags:
 - host_config
 - container_config
@@ -10,7 +10,7 @@
 dest: /etc/puppet/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}hieradata/config_step.json
 force: true
 mode: '0600'
-check_mode: no
+check_mode: false
 tags:
 - host_config

@@ -34,7 +34,7 @@
 no_log: true
 tags:
 - host_config
-check_mode: no
+check_mode: false

 - name: Wait for puppet host configuration to finish
 async_status:
@@ -462,7 +462,7 @@ outputs:
 strategy: tripleo_free
 name: External deployment step 0
 gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
-any_errors_fatal: yes
+any_errors_fatal: true
 become: false
 vars:
 step: 0
@@ -492,7 +492,7 @@ outputs:
 strategy: tripleo_linear
 name: Manage SELinux
 gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
-any_errors_fatal: yes
+any_errors_fatal: true
 tasks:
 - name: Set selinux state
 become: true
@@ -504,7 +504,7 @@ outputs:
 strategy: tripleo_linear
 name: Generate /etc/hosts
 gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
-any_errors_fatal: yes
+any_errors_fatal: true
 tasks:
 {% raw %}
 - name: Configure Hosts Entries
@@ -522,7 +522,7 @@ outputs:
 strategy: tripleo_linear
 name: Common roles for TripleO servers
 gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
-any_errors_fatal: yes
+any_errors_fatal: true
 # pre_tasks run before any roles in a play, so we use it for the
 # named debug task for --start-at-task.
 pre_tasks:
@@ -546,7 +546,7 @@ outputs:
 name: Deploy step tasks for step 0
 become: true
 gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
-any_errors_fatal: yes
+any_errors_fatal: true
 vars:
 bootstrap_server_id: BOOTSTRAP_SERVER_ID
 step: 0
@@ -572,7 +572,7 @@ outputs:
 strategy: tripleo_free
 name: Server pre network steps
 gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
-any_errors_fatal: yes
+any_errors_fatal: true
 vars:
 deploy_source_host: "DEPLOY_SOURCE_HOST"
 tasks:
@@ -598,7 +598,7 @@ outputs:
 strategy: tripleo_free
 name: Server network deployments
 gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
-any_errors_fatal: yes
+any_errors_fatal: true
 tasks:
 {% raw %}
 - name: Network Configuration
@@ -638,7 +638,7 @@ outputs:
 strategy: tripleo_free
 name: Server network validation
 gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
-any_errors_fatal: yes
+any_errors_fatal: true
 tasks:
 {% raw %}
 - name: Basic Network Validation
@@ -661,7 +661,7 @@ outputs:
 strategy: tripleo_free
 name: Server pre deployment steps
 gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
-any_errors_fatal: yes
+any_errors_fatal: true
 tasks:
 {% raw %}
 - import_tasks: hiera_steps_tasks.yaml
@@ -686,7 +686,7 @@ outputs:
 name: Host prep steps
 become: true
 gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
-any_errors_fatal: yes
+any_errors_fatal: true
 vars:
 bootstrap_server_id: BOOTSTRAP_SERVER_ID
 deploy_identifier: DEPLOY_IDENTIFIER
@@ -731,7 +731,7 @@ outputs:
 name: Overcloud container setup tasks
 become: true
 gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
-any_errors_fatal: yes
+any_errors_fatal: true
 vars:
 bootstrap_server_id: BOOTSTRAP_SERVER_ID
 deploy_identifier: DEPLOY_IDENTIFIER
@@ -766,7 +766,7 @@ outputs:
 name: Pre Deployment Step Tasks
 become: true
 gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
-any_errors_fatal: yes
+any_errors_fatal: true
 vars:
 bootstrap_server_id: BOOTSTRAP_SERVER_ID
 deploy_identifier: DEPLOY_IDENTIFIER
@@ -802,7 +802,7 @@ outputs:
 strategy: tripleo_free
 name: External deployment step {{step}}
 gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
-any_errors_fatal: yes
+any_errors_fatal: true
 become: false
 vars:
 bootstrap_server_id: BOOTSTRAP_SERVER_ID
@@ -835,7 +835,7 @@ outputs:
 name: Deploy step tasks for {{step}}
 become: true
 gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
-any_errors_fatal: yes
+any_errors_fatal: true
 vars:
 bootstrap_server_id: BOOTSTRAP_SERVER_ID
 step: '{{step}}'
@@ -915,7 +915,7 @@ outputs:
 name: Server Post Deployments
 become: true
 gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
-any_errors_fatal: yes
+any_errors_fatal: true
 tasks:
 - name: Server Post Deployments
 delegate_to: localhost
@@ -938,7 +938,7 @@ outputs:
 strategy: tripleo_linear
 name: External deployment Post Deploy tasks
 gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
-any_errors_fatal: yes
+any_errors_fatal: true
 become: false
 vars:
 bootstrap_server_id: BOOTSTRAP_SERVER_ID
@@ -995,7 +995,7 @@ outputs:
 become: true
 serial: "{% raw %}{{ update_serial | default({% endraw %}{{ role.update_serial | default(1) }}{% raw %})}}{% endraw %}"
 gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
-any_errors_fatal: yes
+any_errors_fatal: true
 vars:
 bootstrap_server_id: BOOTSTRAP_SERVER_ID
 deploy_identifier: DEPLOY_IDENTIFIER
@@ -1049,7 +1049,7 @@ outputs:
 - hosts: DEPLOY_SOURCE_HOST
 name: External update step {{step}}
 gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
-any_errors_fatal: yes
+any_errors_fatal: true
 become: false
 vars:
 bootstrap_server_id: BOOTSTRAP_SERVER_ID
@@ -1077,7 +1077,7 @@ outputs:
 - hosts: DEPLOY_SOURCE_HOST
 name: External deploy step {{step}}
 gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
-any_errors_fatal: yes
+any_errors_fatal: true
 become: false
 vars:
 bootstrap_server_id: BOOTSTRAP_SERVER_ID
@@ -1117,7 +1117,7 @@ outputs:
 name: Run pre-upgrade rolling tasks
 serial: {{ role.deploy_serial | default(1) }}
 gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
-any_errors_fatal: yes
+any_errors_fatal: true
 tasks:
 - include_tasks: pre_upgrade_rolling_steps_tasks.yaml
 with_sequence: start=0 end={{pre_upgrade_rolling_steps_max-1}}
@@ -1137,7 +1137,7 @@ outputs:
 name: Upgrade tasks for step {{step}}
 become: true
 gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
-any_errors_fatal: yes
+any_errors_fatal: true
 vars:
 bootstrap_server_id: BOOTSTRAP_SERVER_ID
 step: '{{step}}'
@@ -1174,7 +1174,7 @@ outputs:
 deploy_target_host: "DEPLOY_TARGET_HOST"
 - hosts: DEPLOY_TARGET_HOST
 strategy: tripleo_free
-any_errors_fatal: yes
+any_errors_fatal: true
 tasks:
 - include_tasks: post_upgrade_steps_tasks.yaml
 with_sequence: start=0 end={{post_upgrade_steps_max-1}}
@@ -1204,7 +1204,7 @@ outputs:
 strategy: tripleo_free
 name: External upgrade step {{step}}
 gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
-any_errors_fatal: yes
+any_errors_fatal: true
 become: false
 vars:
 # Explicit ansible_python_interpreter to allow connecting
@@ -1235,7 +1235,7 @@ outputs:
 - hosts: DEPLOY_SOURCE_HOST
 name: External deploy step {{step}}
 gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
-any_errors_fatal: yes
+any_errors_fatal: true
 become: false
 vars:
 # Explicit ansible_python_interpreter to allow connecting
@@ -36,7 +36,7 @@
 register: diff_results
 tags:
 - container_config
-check_mode: no
+check_mode: false
 when:
 - ansible_check_mode|bool
 - ansible_diff_mode
@@ -3,9 +3,9 @@
 copy:
 content: "{{lookup ('vars', 'host_container_puppet_tasks_' ~ step, default=[]) | to_nice_json}}"
 dest: "/var/lib/container-puppet/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}container-puppet-tasks{{step}}.json"
-force: yes
+force: true
 mode: '0600'
-check_mode: no
+check_mode: false
 tags:
 - container_config_tasks

@@ -772,8 +772,8 @@ outputs:
 - - name: enable virt_sandbox_use_netlink for healthcheck
 seboolean:
 name: virt_sandbox_use_netlink
-persistent: yes
-state: yes
+persistent: true
+state: true
 scale_tasks:
 if:
 - {get_param: BarbicanPkcs11CryptoLunasaEnabled}
@@ -119,5 +119,5 @@ outputs:
 - name: enable virt_sandbox_use_netlink for healthcheck
 seboolean:
 name: virt_sandbox_use_netlink
-persistent: yes
-state: yes
+persistent: true
+state: true
@@ -200,8 +200,8 @@ outputs:
 - name: enable virt_sandbox_use_netlink for healthcheck
 seboolean:
 name: virt_sandbox_use_netlink
-persistent: yes
-state: yes
+persistent: true
+state: true
 external_upgrade_tasks:
 - when:
 - step|int == 1
@@ -72,12 +72,12 @@ outputs:
 - name: set ceph_nfs upgrade node facts in a single-node environment
 set_fact:
 ceph_nfs_short_node_names_upgraded: "{{ ceph_nfs_short_node_names }}"
-cacheable: no
+cacheable: false
 when: groups['ceph_nfs'] | length <= 1
 - name: set ceph_nfs upgrade node facts from the limit option
 set_fact:
 ceph_nfs_short_node_names_upgraded: "{{ ceph_nfs_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
-cacheable: no
+cacheable: false
 when:
 - groups['ceph_nfs'] | length > 1
 - item.split('.')[0] in ansible_limit.split(':')
@@ -262,8 +262,8 @@ outputs:
 - - name: enable virt_sandbox_use_netlink for healthcheck
 seboolean:
 name: virt_sandbox_use_netlink
-persistent: yes
-state: yes
+persistent: true
+state: true
 deploy_steps_tasks:
 - name: Clean up when switching cinder-backup from pcmk to active-active
 when:
@@ -315,13 +315,13 @@ outputs:
 set_fact:
 cinder_backup_short_node_names_upgraded: "{{ cinder_backup_short_node_names }}"
 cinder_backup_node_names_upgraded: "{{ cinder_backup_node_names }}"
-cacheable: no
+cacheable: false
 when: groups['cinder_backup'] | length <= 1
 - name: set cinder_backup upgrade node facts from the limit option
 set_fact:
 cinder_backup_short_node_names_upgraded: "{{ cinder_backup_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
 cinder_backup_node_names_upgraded: "{{ cinder_backup_node_names_upgraded|default([]) + [item] }}"
-cacheable: no
+cacheable: false
 when:
 - groups['cinder_backup'] | length > 1
 - item.split('.')[0] in ansible_limit.split(':')
@@ -332,8 +332,8 @@ outputs:
 - name: cinder enable the LVM losetup service
 systemd:
 name: cinder-lvm-losetup
-enabled: yes
-daemon_reload: yes
+enabled: true
+daemon_reload: true

 cinder_volume_config_settings:
 description: Config settings for the cinder-volume container (HA or non-HA)
@@ -143,8 +143,8 @@ outputs:
 - name: enable virt_sandbox_use_netlink for healthcheck
 seboolean:
 name: virt_sandbox_use_netlink
-persistent: yes
-state: yes
+persistent: true
+state: true
 external_upgrade_tasks:
 - when:
 - step|int == 1
@@ -304,13 +304,13 @@ outputs:
 set_fact:
 cinder_volume_short_node_names_upgraded: "{{ cinder_volume_short_node_names }}"
 cinder_volume_node_names_upgraded: "{{ cinder_volume_node_names | default([]) }}"
-cacheable: no
+cacheable: false
 when: groups['cinder_volume'] | length <= 1
 - name: set cinder_volume upgrade node facts from the limit option
 set_fact:
 cinder_volume_short_node_names_upgraded: "{{ cinder_volume_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
 cinder_volume_node_names_upgraded: "{{ cinder_volume_node_names_upgraded|default([]) + [item] }}"
-cacheable: no
+cacheable: false
 when:
 - groups['cinder_volume'] | length > 1
 - item.split('.')[0] in ansible_limit.split(':')
@@ -514,13 +514,13 @@ outputs:
 set_fact:
 mysql_short_node_names_upgraded: "{{ mysql_short_node_names }}"
 mysql_node_names_upgraded: "{{ mysql_node_names }}"
-cacheable: no
+cacheable: false
 when: groups['mysql'] | length <= 1
 - name: set mysql upgrade node facts from the limit option
 set_fact:
 mysql_short_node_names_upgraded: "{{ mysql_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
 mysql_node_names_upgraded: "{{ mysql_node_names_upgraded|default([]) + [item] }}"
-cacheable: no
+cacheable: false
 when:
 - groups['mysql'] | length > 1
 - item.split('.')[0] in ansible_limit.split(':')
@@ -436,12 +436,12 @@ outputs:
 - name: set redis upgrade node facts in a single-node environment
 set_fact:
 redis_short_node_names_upgraded: "{{ redis_short_node_names }}"
-cacheable: no
+cacheable: false
 when: groups['redis'] | length <= 1
 - name: set redis upgrade node facts from the limit option
 set_fact:
 redis_short_node_names_upgraded: "{{ redis_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
-cacheable: no
+cacheable: false
 when:
 - groups['redis'] | length > 1
 - item.split('.')[0] in ansible_limit.split(':')
@@ -895,14 +895,14 @@ outputs:
 command: /usr/bin/rpm -q libvirt-daemon
 failed_when: false
 register: libvirt_installed
-check_mode: no
+check_mode: false
 - name: make sure libvirt services are disabled and masked
 service:
 name: "{{ item }}"
 state: stopped
-enabled: no
-masked: yes
-daemon_reload: yes
+enabled: false
+masked: true
+daemon_reload: true
 with_items:
 - libvirtd.service
 - virtlogd.socket
@@ -57,7 +57,7 @@ outputs:
 step_config: ''
 host_prep_tasks:
 - name: enroll client in ipa and get metadata
-become: yes
+become: true
 vars:
 python_interpreter: {get_param: PythonInterpreter}
 makehomedir: {get_param: MakeHomeDir}
@@ -300,7 +300,7 @@ outputs:
 content: |
 if $syslogfacility-text == '{{facility}}' and $programname == 'haproxy' then -/var/log/containers/haproxy/haproxy.log
 & stop
-create: yes
+create: true
 path: /etc/rsyslog.d/openstack-haproxy.conf
 vars:
 facility: {get_param: HAProxySyslogFacility}
@@ -343,7 +343,7 @@ outputs:
 path: /var/log/containers/haproxy
 state: directory
 setype: var_log_t
-recurse: yes
+recurse: true
 when: step|int == 1
 external_upgrade_tasks:
 - when:
@@ -301,7 +301,7 @@ outputs:
 content: |
 if $syslogfacility-text == '{{facility}}' and $programname == 'haproxy' then -/var/log/containers/haproxy/haproxy.log
 & stop
-create: yes
+create: true
 path: /etc/rsyslog.d/openstack-haproxy.conf
 vars:
 facility: {get_param: HAProxySyslogFacility}
@@ -529,12 +529,12 @@ outputs:
 - name: set haproxy upgrade node facts in a single-node environment
 set_fact:
 haproxy_short_node_names_upgraded: "{{ haproxy_short_node_names }}"
-cacheable: no
+cacheable: false
 when: groups['haproxy'] | length <= 1
 - name: set haproxy upgrade node facts from the limit option
 set_fact:
 haproxy_short_node_names_upgraded: "{{ haproxy_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
-cacheable: no
+cacheable: false
 when:
 - groups['haproxy'] | length > 1
 - item.split('.')[0] in ansible_limit.split(':')
@@ -285,8 +285,8 @@ outputs:
 - - name: enable virt_sandbox_use_netlink for healthcheck
 seboolean:
 name: virt_sandbox_use_netlink
-persistent: yes
-state: yes
+persistent: true
+state: true
 upgrade_tasks: []
 external_upgrade_tasks:
 - when:
@@ -73,7 +73,7 @@ outputs:
 command: /usr/bin/rpm -q docker-distribution
 failed_when: false
 register: docker_distribution_installed
-check_mode: no
+check_mode: false
 - name: Stop, disable docker-distribution
 systemd:
 enabled: false
@@ -150,7 +150,7 @@ outputs:
 ipaclient_otp: "{{ hostvars[outer_item.0]['ipa_host_otp'] }}"
 ipaclient_mkhomedir: {get_param: MakeHomeDir}
 ipaclient_no_ntp: {get_param: IdMNoNtpSetup}
-ipaclient_force: yes
+ipaclient_force: true
 ipaclient_hostname: "{{ hostvars[outer_item.0]['fqdn_canonical'] }}"
 ansible_fqdn: "{{ ipaclient_hostname }}"
 ipaclients:
@@ -93,8 +93,8 @@ outputs:
 - name: allow logrotate to read inside containers
 seboolean:
 name: logrotate_read_inside_containers
-persistent: yes
-state: yes
+persistent: true
+state: true
 deploy_steps_tasks:
 - name: configure tmpwatch on the host
 when: step|int == 2
@@ -108,8 +108,8 @@ outputs:
 - name: enable virt_sandbox_use_netlink for healthcheck
 seboolean:
 name: virt_sandbox_use_netlink
-persistent: yes
-state: yes
+persistent: true
+state: true
 upgrade_tasks: []
 external_upgrade_tasks:
 - when:
@@ -303,13 +303,13 @@ outputs:
 set_fact:
 manila_share_short_node_names_upgraded: "{{ manila_share_short_node_names }}"
 manila_share_node_names_upgraded: "{{ manila_share_node_names }}"
-cacheable: no
+cacheable: false
 when: groups['manila_share'] | length <= 1
 - name: set manila_share upgrade node facts from the limit option
 set_fact:
 manila_share_short_node_names_upgraded: "{{ manila_share_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
 manila_share_node_names_upgraded: "{{ manila_share_node_names_upgraded|default([]) + [item] }}"
-cacheable: no
+cacheable: false
 when:
 - groups['manila_share'] | length > 1
 - item.split('.')[0] in ansible_limit.split(':')
@@ -408,8 +408,8 @@ outputs:
 - name: enable virt_sandbox_use_netlink for healthcheck
 seboolean:
 name: virt_sandbox_use_netlink
-persistent: yes
-state: yes
+persistent: true
+state: true
 - name: set conditions
 set_fact:
 dnsmasq_wrapper_enabled: {get_param: NeutronEnableDnsmasqDockerWrapper}
@@ -344,8 +344,8 @@ outputs:
 - name: enable virt_sandbox_use_netlink for healthcheck
 seboolean:
 name: virt_sandbox_use_netlink
-persistent: yes
-state: yes
+persistent: true
+state: true
 - name: set conditions
 set_fact:
 keepalived_wrapper_enabled: {get_param: NeutronEnableKeepalivedWrapper}
@@ -190,6 +190,6 @@ outputs:
 - name: enable virt_sandbox_use_netlink for healthcheck
 seboolean:
 name: virt_sandbox_use_netlink
-persistent: yes
-state: yes
+persistent: true
+state: true
 upgrade_tasks: []
@@ -394,22 +394,22 @@ outputs:
 copy:
 content: {get_file: ./neutron-cleanup}
 dest: '/usr/libexec/neutron-cleanup'
-force: yes
+force: true
 mode: '0755'
 - name: Copy in cleanup service
 copy:
 content: {get_file: ./neutron-cleanup.service}
 dest: '/usr/lib/systemd/system/neutron-cleanup.service'
-force: yes
+force: true
 - name: Enabling the cleanup service
 service:
 name: neutron-cleanup
-enabled: yes
+enabled: true
 - name: enable virt_sandbox_use_netlink for healthcheck
 seboolean:
 name: virt_sandbox_use_netlink
-persistent: yes
-state: yes
+persistent: true
+state: true
 update_tasks:
 # puppetlabs-firewall manages security rules via Puppet but make the rules
 # consistent by default. Since Neutron also creates some rules, we don't
@@ -192,8 +192,8 @@ outputs:
 - - name: enable virt_sandbox_use_netlink for healthcheck
 seboolean:
 name: virt_sandbox_use_netlink
-persistent: yes
-state: yes
+persistent: true
+state: true
 - if:
 - derive_pci_whitelist_enabled
 - - name: "creating directory"
@@ -73,7 +73,7 @@ outputs:
 - name: Warn if no discovery host available
 fail:
 msg: 'No hosts available to run nova cell_v2 host discovery.'
-ignore_errors: yes
+ignore_errors: true
 when:
 - nova_cellv2_discovery_delegate_host is not defined
 - name: Discovering nova hosts
@@ -1543,7 +1543,7 @@ outputs:
 service:
 name: irqbalance.service
 state: stopped
-enabled: no
+enabled: false
 deploy_steps_tasks:
 - name: validate nova-compute container state
 containers.podman.podman_container_info:
@@ -223,8 +223,8 @@ outputs:
 - - name: enable virt_sandbox_use_netlink for healthcheck
 seboolean:
 name: virt_sandbox_use_netlink
-persistent: yes
-state: yes
+persistent: true
+state: true
 external_upgrade_tasks:
 - when: step|int == 1
 block: &nova_online_db_migration
@@ -211,8 +211,8 @@ outputs:
 - name: enable virt_sandbox_use_netlink for healthcheck
 seboolean:
 name: virt_sandbox_use_netlink
-persistent: yes
-state: yes
+persistent: true
+state: true
 external_post_deploy_tasks: {get_attr: [NovaComputeCommon, nova_compute_common_deploy_steps_tasks]}
 external_upgrade_tasks:
 - when:
@@ -934,14 +934,14 @@ outputs:
 command: /usr/bin/rpm -q libvirt-daemon
 failed_when: false
 register: libvirt_installed
-check_mode: no
+check_mode: false
 - name: make sure libvirt services are disabled and masked
 service:
 name: "{{ item }}"
 state: stopped
-enabled: no
-masked: yes
-daemon_reload: yes
+enabled: false
+masked: true
+daemon_reload: true
 with_items:
 - libvirtd.service
 - virtlogd.socket
@@ -303,8 +303,8 @@ outputs:
 - - name: enable virt_sandbox_use_netlink for healthcheck
 seboolean:
 name: virt_sandbox_use_netlink
-persistent: yes
-state: yes
+persistent: true
+state: true
 external_upgrade_tasks:
 - when:
 - step|int == 1
@@ -159,8 +159,8 @@ outputs:
 - name: enable virt_sandbox_use_netlink for healthcheck
 seboolean:
 name: virt_sandbox_use_netlink
-persistent: yes
-state: yes
+persistent: true
+state: true
 update_tasks: {get_attr: [OctaviaBase, role_data, update_tasks]}
 upgrade_tasks: {get_attr: [OctaviaBase, role_data, upgrade_tasks]}
 external_upgrade_tasks:
@@ -389,23 +389,23 @@ outputs:
 - name: enable virt_sandbox_use_netlink for healthcheck
 seboolean:
 name: virt_sandbox_use_netlink
-persistent: yes
-state: yes
+persistent: true
+state: true
 - name: Copy in cleanup script
 copy:
 content: {get_file: ../neutron/neutron-cleanup}
 dest: '/usr/libexec/neutron-cleanup'
-force: yes
+force: true
 mode: '0755'
 - name: Copy in cleanup service
 copy:
 content: {get_file: ../neutron/neutron-cleanup.service}
 dest: '/usr/lib/systemd/system/neutron-cleanup.service'
-force: yes
+force: true
 - name: Enabling the cleanup service
 service:
 name: neutron-cleanup
-enabled: yes
+enabled: true
 external_deploy_tasks:
 - when:
 - step|int == 0
@@ -287,7 +287,7 @@ outputs:
 loop_control:
 loop_var: ovn_container
 - name: Set connection # FIXME workaround until RHBZ #1952038 is fixed
-become: yes
+become: true
 shell: |
 podman exec ovn_cluster_north_db_server bash -c "ovn-nbctl -p /etc/pki/tls/private/ovn_dbs.key -c /etc/pki/tls/certs/ovn_dbs.crt -C /etc/ipa/ca.crt set-connection pssl:{{ tripleo_ovn_cluster_nb_db_port }}"
 podman exec ovn_cluster_south_db_server bash -c "ovn-sbctl -p /etc/pki/tls/private/ovn_dbs.key -c /etc/pki/tls/certs/ovn_dbs.crt -C /etc/ipa/ca.crt set-connection pssl:{{ tripleo_ovn_cluster_sb_db_port }}"
@@ -413,12 +413,12 @@ outputs:
 - name: set ovn_dbs upgrade node facts in a single-node environment
 set_fact:
 ovn_dbs_short_node_names_upgraded: "{{ ovn_dbs_short_node_names }}"
-cacheable: no
+cacheable: false
 when: groups['ovn_dbs'] | length <= 1
 - name: set ovn_dbs upgrade node facts from the limit option
 set_fact:
 ovn_dbs_short_node_names_upgraded: "{{ ovn_dbs_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
-cacheable: no
+cacheable: false
 when:
 - groups['ovn_dbs'] | length > 1
 - item.split('.')[0] in ansible_limit.split(':')
@@ -226,12 +226,12 @@ outputs:
 - name: set pacemaker upgrade node facts in a single-node environment
 set_fact:
 pacemaker_short_node_names_upgraded: "{{ pacemaker_short_node_names }}"
-cacheable: no
+cacheable: false
 when: groups['pacemaker'] | length <= 1
 - name: set pacemaker upgrade node facts from the limit option
 set_fact:
 pacemaker_short_node_names_upgraded: "{{ pacemaker_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
-cacheable: no
+cacheable: false
 when:
 - groups['pacemaker'] | length > 1
 - item.split('.')[0] in ansible_limit.split(':')
@@ -260,7 +260,7 @@ outputs:
 | selectattr('key', 'in', pacemaker_short_node_names_upgraded)
 | map(attribute='value')
 | list }}"
-cacheable: no
+cacheable: false

 - name: add the pacemaker short name to hiera data for the upgrade.
 include_role:
@@ -119,12 +119,12 @@ outputs:
 - name: set pacemaker upgrade remote node facts in a single-node environment
 set_fact:
 pacemaker_remote_short_node_names_upgraded: "{{ pacemaker_remote_short_node_names }}"
-cacheable: no
+cacheable: false
 when: groups['pacemaker_remote'] | length <= 1
 - name: set pacemaker remote upgrade node facts from the limit option
 set_fact:
 pacemaker_remote_short_node_names_upgraded: "{{ pacemaker_remote_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
-cacheable: no
+cacheable: false
 when:
 - groups['pacemaker_remote'] | length > 1
 - item.split('.')[0] in ansible_limit.split(':')
@@ -145,7 +145,7 @@ outputs:
 | selectattr('key', 'in', pacemaker_remote_short_node_names_upgraded)
 | map(attribute='value')
 | list }}"
-cacheable: no
+cacheable: false
 - name: add the pacemaker remote short name to hiera data for the upgrade.
 include_role:
 name: tripleo_upgrade_hiera
@@ -385,13 +385,13 @@ outputs:
 set_fact:
 oslo_messaging_notify_short_node_names_upgraded: "{{ oslo_messaging_notify_short_node_names }}"
 oslo_messaging_notify_node_names_upgraded: "{{ oslo_messaging_notify_node_names }}"
-cacheable: no
+cacheable: false
 when: groups['oslo_messaging_notify'] | length <= 1
 - name: set oslo_messaging_notify upgrade node facts from the limit option
 set_fact:
 oslo_messaging_notify_short_node_names_upgraded: "{{ oslo_messaging_notify_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
 oslo_messaging_notify_node_names_upgraded: "{{ oslo_messaging_notify_node_names_upgraded|default([]) + [item] }}"
-cacheable: no
+cacheable: false
 when:
 - groups['oslo_messaging_notify'] | length > 1
 - item.split('.')[0] in ansible_limit.split(':')
@@ -390,12 +390,12 @@ outputs:
 - name: set rabbitmq upgrade node facts in a single-node environment
 set_fact:
 rabbitmq_short_node_names_upgraded: "{{ rabbitmq_short_node_names }}"
-cacheable: no
+cacheable: false
 when: groups['rabbitmq'] | length <= 1
 - name: set rabbitmq upgrade node facts from the limit option
 set_fact:
 rabbitmq_short_node_names_upgraded: "{{ rabbitmq_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
-cacheable: no
+cacheable: false
 when:
 - groups['rabbitmq'] | length > 1
 - item.split('.')[0] in ansible_limit.split(':')
@@ -390,13 +390,13 @@ outputs:
 set_fact:
 oslo_messaging_rpc_short_node_names_upgraded: "{{ oslo_messaging_rpc_short_node_names }}"
 oslo_messaging_rpc_node_names_upgraded: "{{ oslo_messaging_rpc_node_names }}"
-cacheable: no
+cacheable: false
 when: groups['oslo_messaging_rpc'] | length <= 1
 - name: set oslo_messaging_rpc upgrade node facts from the limit option
 set_fact:
 oslo_messaging_rpc_short_node_names_upgraded: "{{ oslo_messaging_rpc_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}"
 oslo_messaging_rpc_node_names_upgraded: "{{ oslo_messaging_rpc_node_names_upgraded|default([]) + [item] }}"
-cacheable: no
+cacheable: false
 when:
 - groups['oslo_messaging_rpc'] | length > 1
 - item.split('.')[0] in ansible_limit.split(':')
@@ -113,7 +113,7 @@ outputs:
 service:
 name: ntpd
 state: stopped
-enabled: no
+enabled: false
 when:
 - ntp_service_check.rc is defined
 - ntp_service_check.rc == 0
@@ -305,7 +305,7 @@ outputs:
 - name: Always ensure the openvswitch service is enabled and running after upgrades
 service:
 name: openvswitch
-enabled: yes
+enabled: true
 state: started
 when:
 - step|int == 2
@@ -398,7 +398,7 @@ outputs:
 - name: Always ensure the openvswitch service is enabled and running after upgrades
 service:
 name: openvswitch
-enabled: yes
+enabled: true
 state: started
 when:
 - step|int == 2
@@ -421,6 +421,6 @@ outputs:
 when: step|int == 3
 service:
 name: openvswitch
-enabled: yes
+enabled: true
 state: started
 ignore_errors: true
@@ -93,7 +93,7 @@ outputs:
 - name: Always ensure the openvswitch service is enabled and running after upgrades
 service:
 name: openvswitch
-enabled: yes
+enabled: true
 state: started
 when:
 - step|int == 2
@@ -250,7 +250,7 @@ parameters:
 have their networks configured. This is a role based parameter.
 default: False
 {{role.name}}AnyErrorsFatal:
-default: yes
+default: true
 type: string
 {{role.name}}MaxFailPercentage:
 default: 0