Merge "ansible-lint: Fix fqcn[action/action-core]"

Zuul
2025-12-03 08:01:23 +00:00
committed by Gerrit Code Review
31 changed files with 229 additions and 220 deletions

View File

@@ -3,9 +3,6 @@ use_default_rules: true
skip_list:
# Experimental
- experimental
# Use FQCN
- fqcn[action]
- fqcn[action-core]
# Galaxy changelog and runtime defined
- galaxy[no-changelog]
- galaxy[no-runtime]
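
With fqcn[action] and fqcn[action-core] no longer skipped, ansible-lint enforces fully qualified collection names across the collection: fqcn[action-core] flags short names of modules shipped with ansible-core (file, copy, command, ...), while fqcn[action] flags short names of modules from other collections (ufw, sysctl, git_config, ...). Every change in the files below follows the same shape; a minimal sketch (task name and path are illustrative only, not taken from this repository):

- name: Ensure a directory exists   # before: flagged by fqcn[action-core]
  file:
    path: /etc/example
    state: directory

- name: Ensure a directory exists   # after: fully qualified
  ansible.builtin.file:
    path: /etc/example
    state: directory

After this change, running ansible-lint against the repository should no longer report fqcn[action] or fqcn[action-core] violations.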

View File

@@ -1,5 +1,5 @@
---
- include_tasks: remove-profile.yml
- ansible.builtin.include_tasks: remove-profile.yml
when:
- ansible_facts.distribution == "Ubuntu"
- apparmor_remove_libvirt_profile | bool

View File

@@ -1,18 +1,19 @@
---
- name: Get stat of libvirtd apparmor profile
stat:
ansible.builtin.stat:
path: /etc/apparmor.d/usr.sbin.libvirtd
register: apparmor_libvirtd_profile
- name: Get stat of libvirtd apparmor disable profile
stat:
ansible.builtin.stat:
path: /etc/apparmor.d/disable/usr.sbin.libvirtd
register: apparmor_libvirtd_disable_profile
- name: Remove apparmor profile for libvirt
shell: |
apparmor_parser -v -R /etc/apparmor.d/usr.sbin.libvirtd && \
ln -vsf /etc/apparmor.d/usr.sbin.libvirtd /etc/apparmor.d/disable
ansible.builtin.shell:
cmd: |
apparmor_parser -v -R /etc/apparmor.d/usr.sbin.libvirtd && \
ln -vsf /etc/apparmor.d/usr.sbin.libvirtd /etc/apparmor.d/disable
args:
executable: /bin/bash
become: true
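
When shell: | becomes ansible.builtin.shell here, the multi-line script moves under the explicit cmd: parameter. The module also accepts the free-form spelling, so an equivalent task would read like the sketch below (same commands as above, indentation assumed); the explicit cmd: form is simply what this change standardises on:

- name: Remove apparmor profile for libvirt
  ansible.builtin.shell: |
    apparmor_parser -v -R /etc/apparmor.d/usr.sbin.libvirtd && \
    ln -vsf /etc/apparmor.d/usr.sbin.libvirtd /etc/apparmor.d/disable
  args:
    executable: /bin/bash
  become: true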

View File

@@ -5,33 +5,33 @@
block:
- block:
- name: Ensure apt sources list directory exists
file:
ansible.builtin.file:
path: /etc/apt/sources.list.d
state: directory
recurse: true
- name: Ensure apt keyrings directory exists
file:
ansible.builtin.file:
path: /etc/apt/keyrings
state: directory
recurse: true
- name: Install ceph apt gpg key
get_url:
ansible.builtin.get_url:
url: "{{ ceph_apt_key_file }}"
dest: "/etc/apt/keyrings/ceph.gpg"
mode: "0644"
force: true
- name: Ensure old ceph repository absent
file:
ansible.builtin.file:
path: /etc/apt/sources.list.d/ceph.list
state: absent
# TODO(mmalchuk): replace with ansible.builtin.deb822_repository module
# when all stable releases moves to the ansible-core >= 2.15
- name: Enable ceph apt repository
copy:
ansible.builtin.copy:
dest: /etc/apt/sources.list.d/ceph.sources
content: |
# Ansible managed
@@ -44,7 +44,7 @@
mode: "0644"
- name: Install apt packages
apt:
ansible.builtin.apt:
name: "{{ ceph_apt_package }}"
state: present
update_cache: true
@@ -53,7 +53,7 @@
when: ansible_facts.os_family == 'RedHat'
block:
- name: Enable ceph yum repository
yum_repository:
ansible.builtin.yum_repository:
name: ceph
description: Ceph main Repository
baseurl: "{{ ceph_yum_baseurl }}"
@@ -61,19 +61,19 @@
gpgkey: "{{ ceph_yum_gpgkey }}"
- name: Enable epel yum repository
package:
ansible.builtin.package:
name: "{{ epel_yum_package }}"
state: present
- name: Install ceph rpm gpg key
rpm_key:
ansible.builtin.rpm_key:
state: present
key: "{{ ceph_yum_gpgkey }}"
when:
- ceph_yum_gpgcheck | bool
- name: Install RPM packages
package:
ansible.builtin.package:
name: "{{ ceph_yum_package }}"
state: present
enablerepo: epel
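
The TODO above refers to ansible.builtin.deb822_repository, available from ansible-core 2.15, which manages deb822-style .sources files directly instead of templating them through copy. A rough sketch of what the replacement could look like once the minimum ansible-core version allows it; the uris and suites values here are placeholders, the real ones are whatever the ceph.sources content above renders:

- name: Enable ceph apt repository
  become: true
  ansible.builtin.deb822_repository:
    name: ceph
    types: deb
    uris: "{{ ceph_apt_url }}"          # placeholder variable
    suites: "{{ ansible_facts.distribution_release }}"
    components: main
    signed_by: /etc/apt/keyrings/ceph.gpg
    state: present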

View File

@@ -5,7 +5,7 @@
- name: Set firewall default policy
# noqa ignore-errors
become: true
ufw:
community.general.ufw:
state: disabled
policy: allow
when: ansible_facts.os_family == 'Debian'
@@ -13,7 +13,8 @@
- name: Check if firewalld is installed
# noqa command-instead-of-module
command: rpm -q firewalld
ansible.builtin.command:
cmd: rpm -q firewalld
register: firewalld_check
changed_when: false
failed_when: firewalld_check.rc > 1
@@ -21,7 +22,7 @@
- name: Disable firewalld
become: true
service:
ansible.builtin.service:
name: "{{ item }}"
enabled: false
state: stopped
@@ -31,8 +32,8 @@
- ansible_facts.os_family == 'RedHat'
- firewalld_check.rc == 0
- import_role:
- ansible.builtin.import_role:
name: openstack.kolla.packages
- import_role:
- ansible.builtin.import_role:
name: openstack.kolla.{{ container_engine }}

View File

@@ -1,15 +1,15 @@
---
- import_role:
- ansible.builtin.import_role:
name: openstack.kolla.etc_hosts
- import_tasks: pre-install.yml
- ansible.builtin.import_tasks: pre-install.yml
- import_tasks: install.yml
- ansible.builtin.import_tasks: install.yml
- import_tasks: post-install.yml
- ansible.builtin.import_tasks: post-install.yml
- name: Configure ceph for zun
include_tasks: configure-ceph-for-zun.yml
ansible.builtin.include_tasks: configure-ceph-for-zun.yml
when:
- zun_configure_for_cinder_ceph | bool
- "'zun-compute' in group_names"

View File

@@ -1,49 +1,49 @@
---
- import_role:
- ansible.builtin.import_role:
name: openstack.kolla.kolla_user
when: create_kolla_user | bool
- import_role:
- ansible.builtin.import_role:
name: openstack.kolla.{{ container_engine }}_sdk
- name: Ensure node_config_directory directory exists
file:
become: true
ansible.builtin.file:
path: "{{ node_config_directory }}"
state: directory
owner: "{{ kolla_user if create_kolla_user | bool else omit }}"
group: "{{ kolla_group if create_kolla_user | bool else omit }}"
mode: "0755"
become: true
- import_role:
- ansible.builtin.import_role:
name: openstack.kolla.apparmor_libvirt
- name: Change state of selinux
selinux:
policy: targeted
state: "{{ selinux_state }}"
become: true
when:
- change_selinux | bool
- ansible_facts.os_family == "RedHat"
ansible.posix.selinux:
policy: targeted
state: "{{ selinux_state }}"
- name: Set https proxy for git
git_config:
when: git_https_proxy | length > 0
community.general.git_config:
name: https.proxy
scope: global
value: "{{ git_https_proxy }}"
when: git_https_proxy | length > 0
- name: Set http proxy for git
git_config:
when: git_http_proxy | length > 0
community.general.git_config:
name: http.proxy
scope: global
value: "{{ git_http_proxy }}"
when: git_http_proxy | length > 0
- name: Copying over kolla.target
become: true
template:
ansible.builtin.template:
src: kolla.target.j2
dest: /etc/systemd/system/kolla.target
mode: "0644"
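
The FQCNs introduced by this change that live outside ansible.builtin (community.general.ufw, community.general.git_config, ansible.posix.selinux, ansible.posix.sysctl, ansible.posix.authorized_key) make those two collections explicit dependencies of this one. They would typically be declared in galaxy.yml under dependencies: or pulled in via a requirements file; a minimal sketch, with no version pins shown (add them as the project requires):

---
collections:
  - name: ansible.posix
  - name: community.general

Installing them is then a single step: ansible-galaxy collection install -r requirements.yml.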

View File

@@ -1,7 +1,7 @@
---
- name: Ensure unprivileged users can use ping
become: true
sysctl:
ansible.posix.sysctl:
name: net.ipv4.ping_group_range
value: '0 2147483647'
state: present

View File

@@ -1,21 +1,21 @@
---
- name: Reload docker service file
become: true
systemd:
ansible.builtin.systemd:
name: docker
daemon_reload: true
notify:
- Restart docker
- name: Restart docker
systemd:
become: true
ansible.builtin.systemd:
name: docker
state: "{{ 'reloaded' if docker_systemd_reload | bool else 'restarted' }}"
masked: false
become: true
- name: Restart containerd
service:
become: true
ansible.builtin.service:
name: containerd
state: restarted
become: true

View File

@@ -1,10 +1,10 @@
---
- name: Ensure docker config directory exists
file:
become: true
ansible.builtin.file:
path: /etc/docker
state: directory
mode: "0755"
become: true
- name: Write docker config
become: true
@@ -45,14 +45,15 @@
| combine(docker_config_ulimit_nofile if docker_ulimit_nofile | bool else {})
| combine(docker_config_debug if docker_debug | bool else {})
| combine(docker_custom_config) }}
copy:
ansible.builtin.copy:
content: "{{ docker_config | to_nice_json }}"
dest: /etc/docker/daemon.json
mode: "0644"
notify:
- Restart docker
- meta: flush_handlers
- name: Flush handlers
ansible.builtin.meta: flush_handlers
- name: Get Docker API version
become: true
@@ -74,91 +75,91 @@
}}
- name: Copying over containerd config
become: true
when:
- docker_storage_containerd | bool
- not containerd_configure_for_zun | bool
template:
ansible.builtin.template:
src: "containerd_config.toml.j2"
dest: "/etc/containerd/config.toml"
mode: "0660"
become: true
notify:
- Restart containerd
- name: Remove old docker options file
become: true
file:
path: /etc/systemd/system/docker.service.d/kolla.conf
state: absent
when:
- not docker_configure_for_zun | bool or 'zun-compute' not in group_names
- not docker_http_proxy
- not docker_https_proxy
- not docker_no_proxy
ansible.builtin.file:
path: /etc/systemd/system/docker.service.d/kolla.conf
state: absent
notify:
- Reload docker service file
- name: Ensure docker service directory exists
become: true
file:
when: >
(docker_configure_for_zun | bool and 'zun-compute' in group_names) or
docker_http_proxy | length > 0 or
docker_https_proxy | length > 0 or
docker_no_proxy | length > 0
ansible.builtin.file:
path: /etc/systemd/system/docker.service.d
state: directory
recurse: true
when: >
(docker_configure_for_zun | bool and 'zun-compute' in group_names) or
docker_http_proxy | length > 0 or
docker_https_proxy | length > 0 or
docker_no_proxy | length > 0
- name: Configure docker service
become: true
template:
src: docker_systemd_service.j2
dest: /etc/systemd/system/docker.service.d/kolla.conf
mode: "0644"
when: >
(docker_configure_for_zun | bool and 'zun-compute' in group_names) or
docker_http_proxy | length > 0 or
docker_https_proxy | length > 0 or
docker_no_proxy | length > 0
ansible.builtin.template:
src: docker_systemd_service.j2
dest: /etc/systemd/system/docker.service.d/kolla.conf
mode: "0644"
notify:
- Reload docker service file
- name: Ensure the path for CA file for private registry exists
file:
become: true
when: >
docker_registry is not none and
docker_registry_ca is not none and
not docker_registry_insecure | bool
ansible.builtin.file:
path: "/etc/docker/certs.d/{{ docker_registry }}"
owner: root
group: root
mode: "0700"
state: directory
- name: Ensure the CA file for private registry exists
become: true
when: >
docker_registry is not none and
docker_registry_ca is not none and
not docker_registry_insecure | bool
- name: Ensure the CA file for private registry exists
copy:
ansible.builtin.copy:
src: "{{ docker_registry_ca }}"
dest: "/etc/docker/certs.d/{{ docker_registry }}/ca.crt"
owner: root
group: root
mode: "0600"
become: true
when: >
docker_registry is not none and
docker_registry_ca is not none and
not docker_registry_insecure | bool
notify:
- Restart docker
- name: Flush handlers
meta: flush_handlers
ansible.builtin.meta: flush_handlers
- name: Start and enable docker
systemd:
become: true
ansible.builtin.systemd:
name: docker
state: started
enabled: true
masked: false
become: true
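
In this file the FQCN conversion is combined with hoisting become: and when: above the module keyword, which makes the unified view above hard to follow. Reconstructed from the added lines, a task in the resulting layout reads roughly like this (indentation assumed):

- name: Ensure the path for CA file for private registry exists
  become: true
  when: >
    docker_registry is not none and
    docker_registry_ca is not none and
    not docker_registry_insecure | bool
  ansible.builtin.file:
    path: "/etc/docker/certs.d/{{ docker_registry }}"
    owner: root
    group: root
    mode: "0700"
    state: directory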

View File

@@ -1,45 +1,45 @@
---
- name: Ensuring CNI config directory exist
file:
become: true
ansible.builtin.file:
path: "{{ cni_config_dir }}"
state: "directory"
mode: "0770"
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
become: true
- name: Copying CNI config file
template:
become: true
ansible.builtin.template:
src: "10-zun-cni.conf.j2"
dest: "{{ cni_config_dir }}/10-zun-cni.conf"
mode: "0660"
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
become: true
notify:
- Restart containerd
- name: Ensuring CNI bin directory exist
file:
become: true
ansible.builtin.file:
path: "{{ cni_bin_dir }}"
state: "directory"
mode: "0770"
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
become: true
- name: Copy zun-cni script
template:
become: true
ansible.builtin.template:
src: "zun-cni.j2"
dest: "{{ cni_bin_dir }}/zun-cni"
mode: "0775"
become: true
- name: Copying over containerd config
template:
become: true
ansible.builtin.template:
src: "containerd_config.toml.j2"
dest: "/etc/containerd/config.toml"
mode: "0660"
become: true
notify:
- Restart containerd

View File

@@ -1,13 +1,14 @@
---
- include_tasks: "repo-{{ ansible_facts.os_family }}.yml"
- ansible.builtin.include_tasks: "repo-{{ ansible_facts.os_family }}.yml"
when: enable_docker_repo | bool
# Upgrading docker engine may cause containers to stop. Take a snapshot of the
# running containers prior to a potential upgrade of Docker.
- name: Check which containers are running
command: docker ps -f 'status=running' -q
become: true
ansible.builtin.command:
cmd: docker ps -f 'status=running' -q
# If Docker is not installed this command may exit non-zero.
failed_when: false
changed_when: false
@@ -18,12 +19,12 @@
- when: ansible_facts.os_family == 'Debian'
block:
- name: Check if docker systemd unit exists
stat:
ansible.builtin.stat:
path: /etc/systemd/system/docker.service
register: docker_unit_file
- name: Mask the docker systemd unit on Debian/Ubuntu
file:
ansible.builtin.file:
src: /dev/null
dest: /etc/systemd/system/docker.service
owner: root
@@ -33,18 +34,19 @@
when: not docker_unit_file.stat.exists
- name: Install packages
package:
become: true
ansible.builtin.package:
name: "{{ docker_packages | select | list }}"
cache_valid_time: "{{ apt_cache_valid_time if ansible_facts.os_family == 'Debian' else omit }}"
update_cache: true
state: present
become: true
register: docker_install_result
# If any packages were updated, and any containers were running, wait for the
# daemon to come up and start all previously running containers.
- when:
- become: true
when:
- docker_install_result is changed
- running_containers.rc == 0
- running_containers.stdout != ''
@@ -52,16 +54,15 @@
# At some point (at least on CentOS 7) Docker CE stopped starting
# automatically after an upgrade from legacy docker . Start it manually.
- name: Start docker
systemd:
ansible.builtin.systemd:
name: docker
state: started
enabled: true
masked: false
become: true
- name: Wait for Docker to start
command: docker info
become: true
ansible.builtin.command:
cmd: docker info
changed_when: false
register: result
until: result is success
@@ -70,13 +71,13 @@
- name: Ensure containers are running after Docker upgrade
# noqa no-changed-when
command: "docker start {{ running_containers.stdout }}"
become: true
ansible.builtin.command:
cmd: "docker start {{ running_containers.stdout }}"
changed_when: true
- import_tasks: config.yml
- ansible.builtin.import_tasks: config.yml
- include_tasks: configure-containerd-for-zun.yml
- ansible.builtin.include_tasks: configure-containerd-for-zun.yml
when:
- containerd_configure_for_zun|bool
- "'zun-cni-daemon' in group_names"

View File

@@ -1,2 +1,2 @@
---
- include_tasks: "{{ package_action }}.yml"
- ansible.builtin.include_tasks: "{{ package_action }}.yml"

View File

@@ -1,58 +1,58 @@
---
- name: Install CA certificates and gnupg packages
apt:
become: true
ansible.builtin.apt:
name:
- ca-certificates
- gnupg
cache_valid_time: "{{ apt_cache_valid_time }}"
update_cache: true
state: present
become: true
- name: Ensure apt sources list directory exists
file:
become: true
ansible.builtin.file:
path: /etc/apt/sources.list.d
state: directory
recurse: true
become: true
- name: Ensure apt keyrings directory exists
file:
ansible.builtin.file:
path: /etc/apt/keyrings
state: directory
recurse: true
become: true
- name: Install docker apt gpg key
get_url:
become: true
ansible.builtin.get_url:
url: "{{ docker_apt_url }}/{{ docker_apt_key_file }}"
dest: "/etc/apt/keyrings/docker.asc"
mode: "0644"
force: true
become: true
environment: "{{ docker_apt_key_env }}"
- name: Install docker apt pin
copy:
become: true
ansible.builtin.copy:
dest: "/etc/apt/preferences.d/docker"
content: |
Package: {{ docker_apt_package }}
Pin: version {{ docker_apt_package_pin }}
Pin-Priority: 1000
mode: "0644"
become: true
when: docker_apt_package_pin | length > 0
- name: Ensure old docker repository absent
file:
become: true
ansible.builtin.file:
path: /etc/apt/sources.list.d/docker.list
state: absent
become: true
# TODO(mmalchuk): replace with ansible.builtin.deb822_repository module
# when all stable releases moves to the ansible-core >= 2.15
- name: Enable docker apt repository
copy:
become: true
ansible.builtin.copy:
dest: /etc/apt/sources.list.d/docker.sources
content: |
# Ansible managed
@@ -63,9 +63,8 @@
Components: stable
Signed-by: /etc/apt/keyrings/docker.asc
mode: "0644"
become: true
- name: Update the apt cache
apt:
update_cache: true
become: true
ansible.builtin.apt:
update_cache: true

View File

@@ -1,13 +1,14 @@
---
- name: Ensure yum repos directory exists
file:
become: true
ansible.builtin.file:
path: /etc/yum.repos.d/
state: directory
recurse: true
become: true
- name: Enable docker yum repository
yum_repository:
become: true
ansible.builtin.yum_repository:
name: docker
description: Docker main Repository
baseurl: "{{ docker_yum_baseurl }}"
@@ -17,11 +18,10 @@
# modular package in CentOS 8 see:
# https://bugzilla.redhat.com/show_bug.cgi?id=1734081
module_hotfixes: true
become: true
- name: Install docker rpm gpg key
rpm_key:
become: true
ansible.builtin.rpm_key:
state: present
key: "{{ docker_yum_gpgkey }}"
become: true
when: docker_yum_gpgcheck | bool

View File

@@ -1,27 +1,29 @@
---
- name: Check for leftover containers
command: docker ps -q
become: true
ansible.builtin.command:
cmd: docker ps -q
changed_when: false
failed_when: false
register: containers
- name: Check for leftover volumes
command: docker volume ls -q
become: true
ansible.builtin.command:
cmd: docker volume ls -q
changed_when: false
failed_when: false
register: volumes
- name: Fail if there are any containers
assert:
ansible.builtin.assert:
that: (containers.stdout_lines | length) == 0
fail_msg: |-
There are still some containers left over!
Remove them before uninstalling container engine!
- name: Fail if there are any volumes
assert:
ansible.builtin.assert:
that: (volumes.stdout_lines | length) == 0
fail_msg: |-
There are still some volumes left over!
@@ -29,13 +31,13 @@
- name: Stop docker service
become: true
systemd:
ansible.builtin.systemd:
name: docker
state: stopped
enabled: false
- name: Uninstall docker packages
package:
ansible.builtin.package:
name: "{{ docker_packages | select | list }}"
autoremove: true
state: absent
@@ -43,13 +45,13 @@
- name: Remove docker group
become: true
group:
ansible.builtin.group:
name: docker
state: absent
- name: Cleanup CNI config directory
become: true
file:
ansible.builtin.file:
path: "{{ cni_config_dir }}"
state: absent
@@ -58,11 +60,11 @@
# qemu-kvm processes running that prevent the removal
- name: Cleanup docker files
become: true
file:
ansible.builtin.file:
path: "{{ item }}"
state: absent
loop: "{{ docker_paths }}"
rescue:
- name: Unable to remove all files
debug:
ansible.builtin.debug:
var: ansible_failed_result

View File

@@ -1,47 +1,44 @@
---
- name: Configure osbpo apt repository
become: true
when:
- ansible_facts.distribution == 'Debian'
- docker_sdk_python_externally_managed | default(false)
- virtualenv is none
block:
- name: Ensure apt sources list directory exists
file:
ansible.builtin.file:
path: /etc/apt/sources.list.d
state: directory
recurse: true
become: true
- name: Ensure apt keyrings directory exists
file:
ansible.builtin.file:
path: /etc/apt/keyrings
state: directory
recurse: true
become: true
- name: Install osbpo apt gpg key
template:
ansible.builtin.template:
src: osbpo_pubkey.gpg.j2
dest: /etc/apt/keyrings/osbpo.asc
mode: "0644"
become: true
- name: Enable osbpo apt repository
apt_repository:
ansible.builtin.apt_repository:
repo: "{{ docker_sdk_osbpo_apt_repo }}"
filename: osbpo
become: true
- name: Install packages
package:
become: true
ansible.builtin.package:
name: "{{ docker_sdk_packages | select | list }}"
cache_valid_time: "{{ apt_cache_valid_time if ansible_facts.os_family == 'Debian' else omit }}"
update_cache: "{{ True if ansible_facts.os_family == 'Debian' else omit }}"
state: present
become: true
- name: Check if virtualenv is a directory
stat:
ansible.builtin.stat:
path: "{{ virtualenv }}"
register: virtualenv_stat
when: virtualenv is not none and virtualenv | length > 0
@@ -49,9 +46,10 @@
# NOTE(kevko) Packaging needs to be installed before ansible's pip module is used
# This is in case user created venv manually
- name: Check if packaging is already installed
command: "{{ virtualenv }}/bin/pip show packaging"
become: true
become_user: "{{ docker_sdk_virtualenv_owner }}"
ansible.builtin.command:
cmd: "{{ virtualenv }}/bin/pip show packaging"
register: packaging_installed
failed_when: false
changed_when: false
@@ -63,9 +61,10 @@
- virtualenv_stat.stat.isdir
- name: Install packaging into virtualenv
command: "{{ virtualenv }}/bin/python -m pip install packaging"
become: true
become_user: "{{ docker_sdk_virtualenv_owner }}"
ansible.builtin.command:
cmd: "{{ virtualenv }}/bin/python -m pip install packaging"
check_mode: false
changed_when: packaging_installed.rc != 0
when:
@@ -76,7 +75,9 @@
- packaging_installed.rc != 0
- name: Install latest pip and packaging in the virtualenv
pip:
become: true
become_user: "{{ docker_sdk_virtualenv_owner }}"
ansible.builtin.pip:
# NOTE(hrw) pip 19.3 is first version complaining about being run with Python 2
name:
- "pip>19.3"
@@ -84,18 +85,16 @@
virtualenv: "{{ virtualenv }}"
virtualenv_site_packages: "{{ virtualenv_site_packages }}"
virtualenv_command: "{{ ansible_facts.python.executable }} -m venv"
become: true
become_user: "{{ docker_sdk_virtualenv_owner }}"
when: virtualenv is not none
- name: Install docker SDK for python using pip
pip:
become: true
become_user: "{{ virtualenv is none | ternary(omit, docker_sdk_virtualenv_owner) }}"
ansible.builtin.pip:
name: "{{ docker_sdk_core_pip_packages + docker_sdk_additional_pip_packages }}"
executable: "{{ (virtualenv is none) | ternary(ansible_facts.python.executable | regex_replace('python(\\d+(\\.\\d+)?)$', 'pip\\1'), omit) }}"
extra_args: "{% if docker_sdk_upper_constraints_file %}-c {{ docker_sdk_upper_constraints_file }}{% endif %}"
virtualenv: "{{ virtualenv is none | ternary(omit, virtualenv) }}"
virtualenv_site_packages: "{{ virtualenv is none | ternary(omit, virtualenv_site_packages) }}"
virtualenv_command: "{{ virtualenv is none | ternary(omit, ansible_facts.python.executable ~ ' -m venv') }}"
become: true
become_user: "{{ virtualenv is none | ternary(omit, docker_sdk_virtualenv_owner) }}"
when: not (docker_sdk_python_externally_managed | default(false) and virtualenv is none)
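
The pip task above covers both the system-wide and the virtualenv case with a single definition: when virtualenv is none, the virtualenv* parameters resolve to omit and executable is derived from the discovered interpreter path (the regex_replace turns e.g. .../python3.11 into .../pip3.11); otherwise executable is omitted and the virtualenv parameters take effect. A reduced sketch of the same ternary/omit switch (package name and pip path are placeholders):

- name: Install a package system-wide or into a virtualenv
  ansible.builtin.pip:
    name: docker
    executable: "{{ (virtualenv is none) | ternary('/usr/bin/pip3', omit) }}"
    virtualenv: "{{ virtualenv is none | ternary(omit, virtualenv) }}"
    virtualenv_command: "{{ virtualenv is none | ternary(omit, 'python3 -m venv') }}"
  become: true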

View File

@@ -14,8 +14,8 @@
register: python_externally_managed
- name: Set docker_sdk_python_externally_managed fact
set_fact:
ansible.builtin.set_fact:
docker_sdk_python_externally_managed: true
when: python_externally_managed.stat.exists
- include_tasks: "{{ package_action }}.yml"
- ansible.builtin.include_tasks: "{{ package_action }}.yml"

View File

@@ -1,6 +1,6 @@
---
- name: Uninstall docker SDK for python using pip
pip:
ansible.builtin.pip:
name: "{{ docker_sdk_core_pip_packages }}"
executable: "{{ (virtualenv is none) | ternary(ansible_facts.python.executable | regex_replace('python(\\d+(\\.\\d+)?)$', 'pip\\1'), omit) }}"
virtualenv: "{{ virtualenv is none | ternary(omit, virtualenv) }}"

View File

@@ -1,13 +1,13 @@
---
- name: Ensure localhost in /etc/hosts
lineinfile:
become: true
ansible.builtin.lineinfile:
dest: /etc/hosts
regexp: "^127.0.0.1.*"
line: "127.0.0.1 localhost"
create: true
mode: "0644"
state: present
become: true
# NOTE(mgoddard): Ubuntu may include a line in /etc/hosts that makes the local
# hostname and fqdn point to 127.0.1.1. This can break
@@ -16,14 +16,15 @@
# see https://bugs.launchpad.net/kolla-ansible/+bug/1837699
# and https://bugs.launchpad.net/kolla-ansible/+bug/1862739
- name: Ensure hostname does not point to 127.0.1.1 in /etc/hosts
lineinfile:
become: true
ansible.builtin.lineinfile:
dest: /etc/hosts
regexp: "^127.0.1.1\\b.*\\s{{ ansible_facts.hostname }}\\b"
state: absent
become: true
- name: Generate /etc/hosts for all of the nodes
blockinfile:
become: true
ansible.builtin.blockinfile:
dest: /etc/hosts
marker: "# {mark} ANSIBLE GENERATED HOSTS"
block: |
@@ -43,7 +44,6 @@
{{ 'api' | kolla_address(host, override_var="etc_hosts_api_address") }} {{ hostnames | unique | join(' ') }}
{% endif %}
{% endfor %}
become: true
when:
# Skip hosts in the bifrost group that do not have a valid api_interface.
- inventory_hostname not in groups['bifrost'] or
@@ -57,12 +57,12 @@
become: true
block:
- name: Check whether /etc/cloud/cloud.cfg exists
stat:
ansible.builtin.stat:
path: /etc/cloud/cloud.cfg
register: cloud_init
- name: Disable cloud-init manage_etc_hosts
copy:
ansible.builtin.copy:
content: "manage_etc_hosts: false"
dest: /etc/cloud/cloud.cfg.d/99-kolla.cfg
mode: "0660"

View File

@@ -1,4 +1,4 @@
---
- name: Include etc-hosts.yml
include_tasks: etc-hosts.yml
ansible.builtin.include_tasks: etc-hosts.yml
when: customize_etc_hosts | bool

View File

@@ -1,16 +1,17 @@
---
- name: Ensure groups are present
group:
become: true
ansible.builtin.group:
name: "{{ item }}"
state: present
become: true
loop:
- docker
- sudo
- "{{ kolla_group }}"
- name: Create kolla user
user:
become: true
ansible.builtin.user:
name: "{{ kolla_user }}"
state: present
group: "{{ kolla_group }}"
@@ -18,21 +19,20 @@
- docker
- sudo
append: true
become: true
- name: Add public key to kolla user authorized keys
authorized_key:
become: true
ansible.posix.authorized_key:
user: "{{ kolla_user }}"
key: "{{ kolla_ssh_key.public_key }}"
become: true
- name: Grant kolla user passwordless sudo
lineinfile:
become: true
ansible.builtin.lineinfile:
dest: /etc/sudoers.d/kolla-ansible-users
state: present
create: true
mode: '0640'
regexp: '^{{ kolla_user }}'
line: '{{ kolla_user }} ALL=(ALL) NOPASSWD: ALL'
become: true
when: create_kolla_user_sudoers | bool

View File

@@ -1,22 +1,22 @@
---
- name: Install packages
become: true
vars:
pkg_installs: >-
{{ ubuntu_pkg_install if ansible_facts.os_family == 'Debian'
else redhat_pkg_install }}
package:
ansible.builtin.package:
name: "{{ pkg_installs | select | list }}"
cache_valid_time: "{{ apt_cache_valid_time if ansible_facts.os_family == 'Debian' else omit }}"
update_cache: "{{ True if ansible_facts.os_family == 'Debian' else omit }}"
state: present
become: true
- name: Remove packages
become: true
vars:
pkg_removals: >-
{{ ubuntu_pkg_removals if ansible_facts.os_family == 'Debian'
else redhat_pkg_removals }}
package:
ansible.builtin.package:
name: "{{ pkg_removals | select | list }}"
state: absent
become: true

View File

@@ -1,5 +1,6 @@
---
- name: Restart systemd-tmpfiles
become: true
command: systemd-tmpfiles --create
ansible.builtin.command:
cmd: systemd-tmpfiles --create
changed_when: true

View File

@@ -1,6 +1,6 @@
---
- name: Ensure podman config directory exists
file:
ansible.builtin.file:
path: /etc/containers/{{ item }}
state: directory
mode: "0755"
@@ -17,7 +17,7 @@
[[registry]]
location = "{{ podman_registry }}"
insecure = {{ podman_registry_insecure | bool | lower }}
copy:
ansible.builtin.copy:
content: "{{ registry }}"
dest: /etc/containers/registries.conf.d/registries.conf
mode: "0644"
@@ -30,7 +30,7 @@
[[registry.mirror]]
prefix = docker.io
location = "{{ podman_registry_mirror }}"
copy:
ansible.builtin.copy:
content: "{{ registry_mirror }}"
dest: /etc/containers/registries.conf.d/registry-mirror.conf
mode: "0644"
@@ -46,14 +46,14 @@
{% if podman_runtime_directory is not none %}
runroot = {{ podman_runtime_directory }}
{% endif %}
copy:
ansible.builtin.copy:
content: "{{ config }}"
dest: /etc/containers/storage.conf.d/storage.conf
mode: "0644"
when: podman_storage_driver is not none or podman_runtime_directory is not none
- name: Ensure the path for CA file for podman registry exists
file:
ansible.builtin.file:
path: "/etc/containers/certs.d/{{ podman_registry }}"
owner: root
group: root
@@ -66,14 +66,14 @@
- not podman_registry_insecure | bool
- name: Write kolla.conf to containers.conf.d
template:
ansible.builtin.template:
src: "kolla.conf.j2"
dest: "/etc/containers/containers.conf.d/kolla.conf"
mode: "0664"
become: true
- name: Ensure the CA file for private registry exists
copy:
ansible.builtin.copy:
src: "{{ private_registry_ca }}"
dest: "/etc/containers/certs.d/{{ private_registry }}/ca.crt"
owner: root
@@ -87,7 +87,7 @@
- name: Copying over /run subdirectories conf
become: true
template:
ansible.builtin.template:
src: kolla-directories.conf.j2
dest: /etc/tmpfiles.d/kolla.conf
mode: "0644"

View File

@@ -2,20 +2,21 @@
# Upgrading podman engine may cause containers to stop. Take a snapshot of the
# running containers prior to a potential upgrade of Podman.
- name: Check which containers are running
command: podman ps -f 'status=running' -q
become: true
ansible.builtin.command:
cmd: podman ps -f 'status=running' -q
# If Podman is not installed this command may exit non-zero.
failed_when: false
changed_when: false
register: running_containers
- name: Install packages
package:
become: true
ansible.builtin.package:
name: "{{ podman_packages | select | list }}"
cache_valid_time: "{{ apt_cache_valid_time if ansible_facts.os_family == 'Debian' else omit }}"
update_cache: true
state: present
become: true
register: podman_install_result
# If any packages were updated, and any containers were running, wait for the
@@ -23,7 +24,7 @@
- name: Start podman.socket
become: true
service:
ansible.builtin.service:
name: podman.socket
state: started
enabled: true
@@ -34,8 +35,9 @@
- running_containers.stdout != ''
block:
- name: Wait for Podman to start
command: podman info
become: true
ansible.builtin.command:
cmd: podman info
changed_when: false
register: result
until: result is success
@@ -43,8 +45,9 @@
delay: 10
- name: Ensure containers are running after Podman upgrade
command: "podman start {{ running_containers.stdout }}"
ansible.builtin.command:
cmd: "podman start {{ running_containers.stdout }}"
become: true
changed_when: true
- import_tasks: config.yml
- ansible.builtin.import_tasks: config.yml

View File

@@ -1,5 +1,5 @@
---
- include_tasks: "{{ package_action }}.yml"
- ansible.builtin.include_tasks: "{{ package_action }}.yml"
- name: Flush handlers
meta: flush_handlers
ansible.builtin.meta: flush_handlers

View File

@@ -1,27 +1,29 @@
---
- name: Check for leftover containers
command: podman ps -q
ansible.builtin.command:
cmd: podman ps -q
become: true
changed_when: false
failed_when: false
register: containers
- name: Check for leftover volumes
command: podman volume ls -q
ansible.builtin.command:
cmd: podman volume ls -q
become: true
changed_when: false
failed_when: false
register: volumes
- name: Fail if there are any containers
assert:
ansible.builtin.assert:
that: (containers.stdout_lines | length) == 0
fail_msg: |-
There are still some containers left over!
Remove them before uninstalling container engine!
- name: Fail if there are any volumes
assert:
ansible.builtin.assert:
that: (volumes.stdout_lines | length) == 0
fail_msg: |-
There are still some volumes left over!
@@ -29,7 +31,7 @@
- name: Uninstall podman packages
become: true
package:
ansible.builtin.package:
name: "{{ podman_packages | select | list }}"
autoclean: true
state: absent
@@ -39,11 +41,11 @@
# qemu-kvm processes running that prevent the removal
- name: Cleanup docker files
become: true
file:
ansible.builtin.file:
path: "{{ item }}"
state: absent
loop: "{{ podman_paths }}"
rescue:
- name: Unable to remove all files
debug:
ansible.builtin.debug:
var: ansible_failed_result

View File

@@ -7,32 +7,32 @@
- virtualenv is none
block:
- name: Ensure apt sources list directory exists
file:
ansible.builtin.file:
path: /etc/apt/sources.list.d
state: directory
recurse: true
- name: Ensure apt keyrings directory exists
file:
ansible.builtin.file:
path: /etc/apt/keyrings
state: directory
recurse: true
- name: Install osbpo apt gpg key
template:
ansible.builtin.template:
src: osbpo_pubkey.gpg.j2
dest: /etc/apt/keyrings/osbpo.asc
mode: "0644"
- name: Ensure old osbpo apt repository absent
file:
ansible.builtin.file:
path: /etc/apt/sources.list.d/osbpo.list
state: absent
# TODO(mmalchuk): replace with ansible.builtin.deb822_repository module
# when all stable releases moves to the ansible-core >= 2.15
- name: Enable osbpo apt repository
copy:
ansible.builtin.copy:
dest: /etc/apt/sources.list.d/docker.sources
content: |
# Ansible managed
@@ -45,11 +45,11 @@
mode: "0644"
- name: Update the apt cache
apt:
ansible.builtin.apt:
update_cache: true
- name: Install packages
package:
ansible.builtin.package:
name: "{{ podman_sdk_packages | select | list }}"
cache_valid_time: "{{ apt_cache_valid_time if ansible_facts.os_family == 'Debian' else omit }}"
update_cache: "{{ true if ansible_facts.os_family == 'Debian' else omit }}"
@@ -57,7 +57,7 @@
become: true
- name: Check if virtualenv is a directory
stat:
ansible.builtin.stat:
path: "{{ virtualenv }}"
register: virtualenv_stat
when: virtualenv is not none and virtualenv | length > 0
@@ -65,7 +65,8 @@
# NOTE(kevko) Packaging needs to be installed before ansible's pip module is used
# This is in case user created venv manually
- name: Check if packaging is already installed
command: "{{ virtualenv }}/bin/pip show packaging"
ansible.builtin.command:
cmd: "{{ virtualenv }}/bin/pip show packaging"
become: true
become_user: "{{ podman_sdk_virtualenv_owner }}"
register: packaging_installed
@@ -79,7 +80,8 @@
- virtualenv_stat.stat.isdir
- name: Install packaging into virtualenv
command: "{{ virtualenv }}/bin/python -m pip install packaging"
ansible.builtin.command:
cmd: "{{ virtualenv }}/bin/python -m pip install packaging"
become: true
become_user: "{{ podman_sdk_virtualenv_owner }}"
check_mode: false
@@ -92,7 +94,7 @@
- packaging_installed.rc != 0
- name: Install latest pip and packaging in the virtualenv
pip:
ansible.builtin.pip:
# NOTE(hrw) pip 19.3 is first version complaining about being run with Python 2
name:
- "pip>19.3"
@@ -105,7 +107,7 @@
when: virtualenv is not none
- name: Install podman SDK for python
pip:
ansible.builtin.pip:
name: "{{ podman_sdk_core_pip_packages + podman_sdk_additional_pip_packages }}"
executable: "{{ (virtualenv is none) | ternary(ansible_facts.python.executable | regex_replace('python(\\d+(\\.\\d+)?)$', 'pip\\1'), omit) }}"
extra_args: "{% if podman_sdk_upper_constraints_file %}-c {{ podman_sdk_upper_constraints_file }}{% endif %}"

View File

@@ -14,8 +14,8 @@
register: python_externally_managed
- name: Set podman_sdk_python_externally_managed fact
set_fact:
ansible.builtin.set_fact:
podman_sdk_python_externally_managed: true
when: python_externally_managed.stat.exists
- include_tasks: "{{ package_action }}.yml"
- ansible.builtin.include_tasks: "{{ package_action }}.yml"

View File

@@ -1,6 +1,6 @@
---
- name: Uninstall podman SDK for python
pip:
ansible.builtin.pip:
name: "{{ podman_sdk_core_pip_packages }}"
executable: "{{ (virtualenv is none) | ternary(ansible_facts.python.executable | regex_replace('python(\\d+(\\.\\d+)?)$', 'pip\\1'), omit) }}"
virtualenv: "{{ virtualenv is none | ternary(omit, virtualenv) }}"