Use community.general ansible collection instead of modules
Replace module calls with community.general ansible collection calls. Change-Id: Ie96b3d35cea61370b1f98d7e060d696c4807c6b7
This commit is contained in:
parent
c144e9e54b
commit
019419463f
|
@@ -1,5 +1,5 @@
|
||||||
- name: Create fcontext entry for container-config-scripts
|
- name: Create fcontext entry for container-config-scripts
|
||||||
sefcontext:
|
community.general.sefcontext:
|
||||||
target: "/var/lib/container-config-scripts(/.*)?"
|
target: "/var/lib/container-config-scripts(/.*)?"
|
||||||
setype: container_file_t
|
setype: container_file_t
|
||||||
state: present
|
state: present
|
||||||
|
|
|
@@ -13,7 +13,7 @@
|
||||||
become: true
|
become: true
|
||||||
register: cloud_init_enabled
|
register: cloud_init_enabled
|
||||||
- name: Wait for cloud-init to finish, if enabled
|
- name: Wait for cloud-init to finish, if enabled
|
||||||
cloud_init_data_facts:
|
community.general.cloud_init_data_facts:
|
||||||
filter: status
|
filter: status
|
||||||
register: res
|
register: res
|
||||||
until: >
|
until: >
|
||||||
|
|
|
@@ -132,7 +132,7 @@ outputs:
|
||||||
description: Common host prep tasks for cinder-volume and cinder-backup services
|
description: Common host prep tasks for cinder-volume and cinder-backup services
|
||||||
value: &cinder_common_host_prep_tasks
|
value: &cinder_common_host_prep_tasks
|
||||||
- name: create fcontext entry for cinder data
|
- name: create fcontext entry for cinder data
|
||||||
sefcontext:
|
community.general.sefcontext:
|
||||||
target: "/var/lib/cinder(/.*)?"
|
target: "/var/lib/cinder(/.*)?"
|
||||||
setype: container_file_t
|
setype: container_file_t
|
||||||
state: present
|
state: present
|
||||||
|
@@ -259,7 +259,7 @@ outputs:
|
||||||
changed_when: _loopback_device.rc == 2
|
changed_when: _loopback_device.rc == 2
|
||||||
failed_when: _loopback_device.rc not in [0,2]
|
failed_when: _loopback_device.rc not in [0,2]
|
||||||
- name: Create LVM volume group
|
- name: Create LVM volume group
|
||||||
lvg:
|
community.general.lvg:
|
||||||
vg: "cinder-volumes"
|
vg: "cinder-volumes"
|
||||||
pvs: "{{ _loopback_device.stdout }}"
|
pvs: "{{ _loopback_device.stdout }}"
|
||||||
state: present
|
state: present
|
||||||
|
|
|
@@ -250,7 +250,7 @@ outputs:
|
||||||
get_attr: [MysqlBase, role_data, deploy_steps_tasks]
|
get_attr: [MysqlBase, role_data, deploy_steps_tasks]
|
||||||
host_prep_tasks:
|
host_prep_tasks:
|
||||||
- name: create fcontext entry for mysql data
|
- name: create fcontext entry for mysql data
|
||||||
sefcontext:
|
community.general.sefcontext:
|
||||||
target: "/var/lib/mysql(/.*)?"
|
target: "/var/lib/mysql(/.*)?"
|
||||||
setype: container_file_t
|
setype: container_file_t
|
||||||
state: present
|
state: present
|
||||||
|
|
|
@@ -236,7 +236,7 @@ outputs:
|
||||||
external_deploy_tasks: {get_attr: [RedisBase, role_data, external_deploy_tasks]}
|
external_deploy_tasks: {get_attr: [RedisBase, role_data, external_deploy_tasks]}
|
||||||
host_prep_tasks:
|
host_prep_tasks:
|
||||||
- name: create fcontext entry for redis data
|
- name: create fcontext entry for redis data
|
||||||
sefcontext:
|
community.general.sefcontext:
|
||||||
target: "/var/run/redis(/.*)?" # conflicts with equivalency rule '/run /var/run' - have to use /var/run here...
|
target: "/var/run/redis(/.*)?" # conflicts with equivalency rule '/run /var/run' - have to use /var/run here...
|
||||||
setype: container_file_t
|
setype: container_file_t
|
||||||
state: present
|
state: present
|
||||||
|
|
|
@@ -47,7 +47,7 @@ outputs:
|
||||||
- 13787
|
- 13787
|
||||||
host_prep_tasks:
|
host_prep_tasks:
|
||||||
- name: authorize httpd to listen on registry ports
|
- name: authorize httpd to listen on registry ports
|
||||||
seport:
|
community.general.seport:
|
||||||
ports:
|
ports:
|
||||||
- '8787'
|
- '8787'
|
||||||
- '13787'
|
- '13787'
|
||||||
|
|
|
@@ -578,7 +578,7 @@ outputs:
|
||||||
modules:
|
modules:
|
||||||
- name: iscsi_tcp
|
- name: iscsi_tcp
|
||||||
- name: create fcontext entry for ironic data
|
- name: create fcontext entry for ironic data
|
||||||
sefcontext:
|
community.general.sefcontext:
|
||||||
target: "/var/lib/ironic(/.*)?"
|
target: "/var/lib/ironic(/.*)?"
|
||||||
setype: container_file_t
|
setype: container_file_t
|
||||||
state: present
|
state: present
|
||||||
|
|
|
@@ -516,7 +516,7 @@ outputs:
|
||||||
KOLLA_CONFIG_STRATEGY: COPY_ALWAYS
|
KOLLA_CONFIG_STRATEGY: COPY_ALWAYS
|
||||||
host_prep_tasks:
|
host_prep_tasks:
|
||||||
- name: create fcontext entry for ironic-inspector data
|
- name: create fcontext entry for ironic-inspector data
|
||||||
sefcontext:
|
community.general.sefcontext:
|
||||||
target: "/var/lib/ironic-inspector/dhcp-hostsdir(/.*)?"
|
target: "/var/lib/ironic-inspector/dhcp-hostsdir(/.*)?"
|
||||||
setype: container_file_t
|
setype: container_file_t
|
||||||
state: present
|
state: present
|
||||||
|
|
|
@@ -153,7 +153,7 @@ outputs:
|
||||||
KOLLA_CONFIG_STRATEGY: COPY_ALWAYS
|
KOLLA_CONFIG_STRATEGY: COPY_ALWAYS
|
||||||
host_prep_tasks:
|
host_prep_tasks:
|
||||||
- name: create fcontext entry for iscsi
|
- name: create fcontext entry for iscsi
|
||||||
sefcontext:
|
community.general.sefcontext:
|
||||||
target: "{{ item.path }}(/.*)?"
|
target: "{{ item.path }}(/.*)?"
|
||||||
setype: "{{ item.setype }}"
|
setype: "{{ item.setype }}"
|
||||||
state: present
|
state: present
|
||||||
|
|
|
@@ -177,7 +177,7 @@ outputs:
|
||||||
environment: {get_attr: [ManilaShareCommon, manila_share_environment]}
|
environment: {get_attr: [ManilaShareCommon, manila_share_environment]}
|
||||||
host_prep_tasks:
|
host_prep_tasks:
|
||||||
- name: create fcontext entry for manila data
|
- name: create fcontext entry for manila data
|
||||||
sefcontext:
|
community.general.sefcontext:
|
||||||
target: "/var/lib/manila(/.*)?"
|
target: "/var/lib/manila(/.*)?"
|
||||||
setype: container_file_t
|
setype: container_file_t
|
||||||
state: present
|
state: present
|
||||||
|
|
|
@@ -139,7 +139,7 @@ outputs:
|
||||||
KOLLA_CONFIG_STRATEGY: COPY_ALWAYS
|
KOLLA_CONFIG_STRATEGY: COPY_ALWAYS
|
||||||
host_prep_tasks:
|
host_prep_tasks:
|
||||||
- name: create fcontext entry for qdrouterd data
|
- name: create fcontext entry for qdrouterd data
|
||||||
sefcontext:
|
community.general.sefcontext:
|
||||||
target: "/var/lib/qdrouterd(/.*)?"
|
target: "/var/lib/qdrouterd(/.*)?"
|
||||||
setype: container_file_t
|
setype: container_file_t
|
||||||
state: present
|
state: present
|
||||||
|
|
|
@@ -229,7 +229,7 @@ outputs:
|
||||||
cluster_recreate: "{{ tripleo_transfer_flag_stat.stat.exists|bool }}"
|
cluster_recreate: "{{ tripleo_transfer_flag_stat.stat.exists|bool }}"
|
||||||
- name: Check pacemaker cluster running before upgrade
|
- name: Check pacemaker cluster running before upgrade
|
||||||
tags: validation
|
tags: validation
|
||||||
pacemaker_cluster: state=online check_and_fail=true
|
community.general.pacemaker_cluster: state=online check_and_fail=true
|
||||||
async: 30
|
async: 30
|
||||||
poll: 4
|
poll: 4
|
||||||
when: not cluster_recreate|bool
|
when: not cluster_recreate|bool
|
||||||
|
@@ -305,13 +305,13 @@ outputs:
|
||||||
when: step|int == 2
|
when: step|int == 2
|
||||||
block:
|
block:
|
||||||
- name: Stop pacemaker cluster
|
- name: Stop pacemaker cluster
|
||||||
pacemaker_cluster: state=offline
|
community.general.pacemaker_cluster: state=offline
|
||||||
when: not cluster_recreate|bool
|
when: not cluster_recreate|bool
|
||||||
- name: upgrade step 4
|
- name: upgrade step 4
|
||||||
when: step|int == 4
|
when: step|int == 4
|
||||||
block:
|
block:
|
||||||
- name: Start pacemaker cluster
|
- name: Start pacemaker cluster
|
||||||
pacemaker_cluster: state=online
|
community.general.pacemaker_cluster: state=online
|
||||||
when: not cluster_recreate|bool
|
when: not cluster_recreate|bool
|
||||||
external_upgrade_tasks:
|
external_upgrade_tasks:
|
||||||
- when:
|
- when:
|
||||||
|
@@ -334,7 +334,7 @@ outputs:
|
||||||
update_tasks:
|
update_tasks:
|
||||||
- name: Check pacemaker cluster running before the minor update
|
- name: Check pacemaker cluster running before the minor update
|
||||||
when: step|int == 0 # TODO(marios) disabling validations?
|
when: step|int == 0 # TODO(marios) disabling validations?
|
||||||
pacemaker_cluster: state=online check_and_fail=true
|
community.general.pacemaker_cluster: state=online check_and_fail=true
|
||||||
async: 30
|
async: 30
|
||||||
poll: 4
|
poll: 4
|
||||||
- name: Move virtual IPs to another node before stopping pacemaker
|
- name: Move virtual IPs to another node before stopping pacemaker
|
||||||
|
@@ -364,10 +364,10 @@ outputs:
|
||||||
command: systemd-cat -t ha-shutdown /var/lib/container-config-scripts/pacemaker_mutex_shutdown.sh --acquire
|
command: systemd-cat -t ha-shutdown /var/lib/container-config-scripts/pacemaker_mutex_shutdown.sh --acquire
|
||||||
- name: Stop pacemaker cluster
|
- name: Stop pacemaker cluster
|
||||||
when: step|int == 1
|
when: step|int == 1
|
||||||
pacemaker_cluster: state=offline
|
community.general.pacemaker_cluster: state=offline
|
||||||
- name: Start pacemaker cluster
|
- name: Start pacemaker cluster
|
||||||
when: step|int == 4
|
when: step|int == 4
|
||||||
pacemaker_cluster: state=online
|
community.general.pacemaker_cluster: state=online
|
||||||
- name: Release the cluster shutdown lock
|
- name: Release the cluster shutdown lock
|
||||||
when: step|int == 4
|
when: step|int == 4
|
||||||
command: systemd-cat -t ha-shutdown /var/lib/container-config-scripts/pacemaker_mutex_shutdown.sh --release
|
command: systemd-cat -t ha-shutdown /var/lib/container-config-scripts/pacemaker_mutex_shutdown.sh --release
|
||||||
|
|
|
@@ -209,7 +209,7 @@ outputs:
|
||||||
path: /usr/sbin/pcs
|
path: /usr/sbin/pcs
|
||||||
register: pcs_stat
|
register: pcs_stat
|
||||||
- name: Stop pacemaker cluster before stopping all docker containers
|
- name: Stop pacemaker cluster before stopping all docker containers
|
||||||
pacemaker_cluster: state=offline
|
community.general.pacemaker_cluster: state=offline
|
||||||
when: pcs_stat.stat.exists
|
when: pcs_stat.stat.exists
|
||||||
- name: Destroy pacemaker cluster
|
- name: Destroy pacemaker cluster
|
||||||
command: /usr/sbin/pcs cluster destroy
|
command: /usr/sbin/pcs cluster destroy
|
||||||
|
|
|
@@ -129,7 +129,7 @@ outputs:
|
||||||
KOLLA_CONFIG_STRATEGY: COPY_ALWAYS
|
KOLLA_CONFIG_STRATEGY: COPY_ALWAYS
|
||||||
host_prep_tasks:
|
host_prep_tasks:
|
||||||
- name: create fcontext entry for qdrouterd data
|
- name: create fcontext entry for qdrouterd data
|
||||||
sefcontext:
|
community.general.sefcontext:
|
||||||
target: "/var/lib/qdrouterd(/.*)?"
|
target: "/var/lib/qdrouterd(/.*)?"
|
||||||
setype: container_file_t
|
setype: container_file_t
|
||||||
state: present
|
state: present
|
||||||
|
|
|
@@ -393,7 +393,7 @@ outputs:
|
||||||
ca: ipa
|
ca: ipa
|
||||||
host_prep_tasks:
|
host_prep_tasks:
|
||||||
- name: create fcontext entry for rabbitmq data
|
- name: create fcontext entry for rabbitmq data
|
||||||
sefcontext:
|
community.general.sefcontext:
|
||||||
target: "/var/lib/rabbitmq(/.*)?"
|
target: "/var/lib/rabbitmq(/.*)?"
|
||||||
setype: container_file_t
|
setype: container_file_t
|
||||||
state: present
|
state: present
|
||||||
|
|
|
@@ -311,7 +311,7 @@ outputs:
|
||||||
ca: ipa
|
ca: ipa
|
||||||
host_prep_tasks:
|
host_prep_tasks:
|
||||||
- name: create fcontext for rabbitmq data
|
- name: create fcontext for rabbitmq data
|
||||||
sefcontext:
|
community.general.sefcontext:
|
||||||
target: "/var/lib/rabbitmq(/.*)?"
|
target: "/var/lib/rabbitmq(/.*)?"
|
||||||
setype: container_file_t
|
setype: container_file_t
|
||||||
state: present
|
state: present
|
||||||
|
|
|
@@ -214,7 +214,7 @@ outputs:
|
||||||
TRIPLEO_DEPLOY_IDENTIFIER: {get_param: DeployIdentifier}
|
TRIPLEO_DEPLOY_IDENTIFIER: {get_param: DeployIdentifier}
|
||||||
host_prep_tasks:
|
host_prep_tasks:
|
||||||
- name: create fcontext for rabbitmq data
|
- name: create fcontext for rabbitmq data
|
||||||
sefcontext:
|
community.general.sefcontext:
|
||||||
target: "/var/lib/rabbitmq(/.*)?"
|
target: "/var/lib/rabbitmq(/.*)?"
|
||||||
setype: container_file_t
|
setype: container_file_t
|
||||||
state: present
|
state: present
|
||||||
|
|
|
@@ -311,7 +311,7 @@ outputs:
|
||||||
ca: ipa
|
ca: ipa
|
||||||
host_prep_tasks:
|
host_prep_tasks:
|
||||||
- name: create fcontext for rabbitmq data
|
- name: create fcontext for rabbitmq data
|
||||||
sefcontext:
|
community.general.sefcontext:
|
||||||
target: "/var/lib/rabbitmq(/.*)?"
|
target: "/var/lib/rabbitmq(/.*)?"
|
||||||
setype: container_file_t
|
setype: container_file_t
|
||||||
state: present
|
state: present
|
||||||
|
|
|
@@ -216,7 +216,7 @@ outputs:
|
||||||
get_attr: [RabbitmqBase, role_data, metadata_settings]
|
get_attr: [RabbitmqBase, role_data, metadata_settings]
|
||||||
host_prep_tasks:
|
host_prep_tasks:
|
||||||
- name: create fcontext for rabbitmq data
|
- name: create fcontext for rabbitmq data
|
||||||
sefcontext:
|
community.general.sefcontext:
|
||||||
target: "/var/lib/rabbitmq(/.*)?"
|
target: "/var/lib/rabbitmq(/.*)?"
|
||||||
setype: container_file_t
|
setype: container_file_t
|
||||||
state: present
|
state: present
|
||||||
|
|
|
@@ -639,7 +639,7 @@ outputs:
|
||||||
swift_raw_disks: "{{ hiera_output.stdout | from_json }}"
|
swift_raw_disks: "{{ hiera_output.stdout | from_json }}"
|
||||||
when: use_node_data_lookup|bool
|
when: use_node_data_lookup|bool
|
||||||
- name: Format SwiftRawDisks
|
- name: Format SwiftRawDisks
|
||||||
filesystem:
|
community.general.filesystem:
|
||||||
fstype: xfs
|
fstype: xfs
|
||||||
dev: "{{ swift_raw_disks[item]['base_dir']|default('/dev') }}/{{ item }}"
|
dev: "{{ swift_raw_disks[item]['base_dir']|default('/dev') }}/{{ item }}"
|
||||||
opts: -f -i size=1024
|
opts: -f -i size=1024
|
||||||
|
|
Loading…
Reference in New Issue