Updating Jinja filters to conform to Ansible 2.5+

Since Ansible 2.5, the use of Jinja tests as filters has been deprecated.

I've run the script provided by the Ansible team to 'fix' the Jinja
filters so that they conform to the newer test syntax.

This fixes the deprecation warnings.
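
For reference, the conversion rewrites filter-style invocations of Jinja
tests into test expressions. The snippet below is illustrative, showing
representative patterns from this change rather than any single file:

    # Deprecated filter syntax (pre-2.5)
    failed_when: result | failed
    when: not item | skipped
    until: delete_result | success

    # Equivalent test syntax (Ansible 2.5+)
    failed_when: result is failed
    when: item is not skipped
    until: delete_result is successful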

Change-Id: I775c849c944f82bdfc779c8c530346e7ebedbd2a
Raimund Hook 2019-06-28 16:34:24 +01:00
parent 67a4d50288
commit 6df6277096
15 changed files with 20 additions and 20 deletions

View File

@@ -24,7 +24,7 @@
     delegate_to: "{{ controller_host }}"
     register: result
     failed_when:
-      - result | failed
+      - result is failed
       # Some BMCs complain if the node is already powered off.
       - "'Command not supported in present state' not in result.stderr"
     vars:

View File

@@ -67,7 +67,7 @@
     delegate_to: "{{ delegate_host }}"
     register: arp_result
     failed_when:
-      - arp_result | failed
+      - arp_result is failed
       - "'No ARP entry for ' ~ idrac_default_ip not in arp_result.stdout"
   # Ansible's until keyword seems to not work nicely with failed_when, causing

View File

@@ -17,7 +17,7 @@
         - iscsid.service
       register: result
       failed_when:
-        - result|failed
+        - result is failed
         # If a service is not installed, the ansible service module will fail
         # with this error message.
         - '"Could not find the requested service" not in result.msg'

View File

@@ -44,7 +44,7 @@
       become: True
       register: nm_result
       failed_when:
-        - nm_result | failed
+        - nm_result is failed
         # Ugh, Ansible's service module doesn't handle uninstalled services.
         - "'Could not find the requested service' not in nm_result.msg"

View File

@@ -51,7 +51,7 @@
   tasks:
     - name: Set a fact about whether the configuration changed
       set_fact:
-        bios_or_raid_change: "{{ drac_result | changed }}"
+        bios_or_raid_change: "{{ drac_result is changed }}"
 
 - name: Ensure that overcloud BIOS and RAID volumes are configured
   hosts: overcloud_with_bmcs_of_type_idrac

View File

@@ -81,7 +81,7 @@
         -m command
         -a "openstack baremetal node undeploy {% raw %}{{ inventory_hostname }}{% endraw %}"'
       register: delete_result
-      until: delete_result | success or 'is locked by host' in delete_result.stdout
+      until: delete_result is successful or 'is locked by host' in delete_result.stdout
       retries: "{{ ironic_retries }}"
       delay: "{{ ironic_retry_interval }}"
       when: initial_provision_state != 'available'

View File

@@ -75,7 +75,7 @@
         -m command
         -a "openstack baremetal node manage {% raw %}{{ inventory_hostname }}{% endraw %}"'
       register: manage_result
-      until: manage_result | success or 'is locked by host' in manage_result.stdout
+      until: manage_result is successful or 'is locked by host' in manage_result.stdout
       retries: "{{ ironic_retries }}"
       delay: "{{ ironic_retry_interval }}"
       when: initial_provision_state != 'manageable'
@@ -100,7 +100,7 @@
         -m command
         -a "openstack baremetal node inspect {% raw %}{{ inventory_hostname }}{% endraw %}"'
       register: provide_result
-      until: provide_result | success or 'is locked by host' in provide_result.stdout
+      until: provide_result is successful or 'is locked by host' in provide_result.stdout
       retries: "{{ ironic_retries }}"
       delay: "{{ ironic_retry_interval }}"
       delegate_to: "{{ seed_host }}"

View File

@@ -84,7 +84,7 @@
         -m command
         -a "openstack baremetal node manage {% raw %}{{ inventory_hostname }}{% endraw %}"'
       register: manage_result
-      until: manage_result | success or 'is locked by host' in manage_result.stdout
+      until: manage_result is successful or 'is locked by host' in manage_result.stdout
       retries: "{{ ironic_retries }}"
       delay: "{{ ironic_retry_interval }}"
       when: initial_provision_state == 'enroll'
@@ -109,7 +109,7 @@
         -m command
         -a "openstack baremetal node provide {% raw %}{{ inventory_hostname }}{% endraw %}"'
       register: provide_result
-      until: provide_result | success or 'is locked by host' in provide_result.stdout
+      until: provide_result is successful or 'is locked by host' in provide_result.stdout
       retries: "{{ ironic_retries }}"
       delay: "{{ ironic_retry_interval }}"
       when: initial_provision_state in ['enroll', 'manageable']

View File

@@ -29,7 +29,7 @@
         sudo shutdown -r now "Applying SELinux changes"
       register: reboot_result
       failed_when:
-        - reboot_result | failed
+        - reboot_result is failed
         - "'closed by remote host' not in reboot_result.stderr"
       when: not is_local | bool
@@ -51,4 +51,4 @@
         when: not is_local | bool
       when:
        - disable_selinux_do_reboot | bool
-        - selinux_result | changed
+        - selinux_result is changed

View File

@@ -23,7 +23,7 @@
   command: docker volume rm {{ volume }}
   with_items: "{{ volume_result.results }}"
   when:
-    - not item | skipped
+    - item is not skipped
     - item.rc == 0
   vars:
     volume: "{{ item.item.1.split(':')[0] }}"

View File

@@ -42,14 +42,14 @@
       register: cp_sockets
       run_once: True
      when:
-        - group_result|changed
+        - group_result is changed
 
     - name: Drop all persistent SSH connections to activate the new group membership
       local_action:
         module: shell ssh -O stop None -o ControlPath={{ item.path }}
       with_items: "{{ cp_sockets.files }}"
       run_once: True
-      when: not cp_sockets|skipped
+      when: cp_sockets is not skipped
 
     - name: Ensure Docker daemon is started
       service:

View File

@@ -23,7 +23,7 @@
   command: docker volume rm {{ volume }}
   with_items: "{{ volume_result.results }}"
   when:
-    - not item | skipped
+    - item is not skipped
     - item.rc == 0
   vars:
     volume: "{{ item.item.1.split(':')[0] }}"

View File

@@ -42,7 +42,7 @@
       Ensure that each disk in 'ceph_disks' does not have any partitions.
     with_items: "{{ disk_journal_info.results }}"
     when:
-      - not item | skipped
+      - item is not skipped
       - item.partitions | length > 0
       - not item.partitions.0.name.startswith('KOLLA_CEPH')
     loop_control:
@@ -75,7 +75,7 @@
       state: present
     with_items: "{{ disk_journal_info.results }}"
     when:
-      - not item | skipped
+      - item is not skipped
       - item.partitions | length == 0
     loop_control:
       label: "{{item.item}}"

View File

@@ -23,7 +23,7 @@
   command: docker volume rm {{ volume }}
   with_items: "{{ volume_result.results }}"
   when:
-    - not item | skipped
+    - item is not skipped
     - item.rc == 0
   vars:
     volume: "{{ item.item.1.split(':')[0] }}"

View File

@@ -39,5 +39,5 @@
   with_together:
     - "{{ veth_result.results }}"
     - "{{ peer_result.results }}"
-  when: ctl_result|changed or item[0]|changed or item[1]|changed
+  when: ctl_result is changed or item[0] is changed or item[1] is changed
   become: True