Remove support for CentOS 7 and Python 2

* Always use Python 3
* Drop code paths for CentOS 7
* Drop support for Yum
* Remove support for host NTP daemon, always use chrony
* Switch references from 'yum_install_epel' to 'dnf_install_epel'
* Remove overcloud host image workaround for tagged VLAN admin network
* Remove the kayobe.utils.yum_install function, which is unused

Change-Id: I368f6edafed9779658798fc342116b4c1b3ffd48
Story: 2006574
Task: 39481
Author: Mark Goddard
Date: 2020-04-17 20:02:10 +01:00
Parent: 8fb3020827
Commit: b9d76f6ef5
86 changed files with 137 additions and 978 deletions
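
For operators carrying local Kayobe configuration, the practical impact is a pair of small overrides: the EPEL toggle moves from the removed 'yum_install_epel' variable to its DNF equivalent, and host NTP settings give way to the Kolla Ansible chrony container, which is now enabled by default. A minimal sketch of the updated overrides, assuming the standard ${KAYOBE_CONFIG_PATH} layout referenced in the documentation changes below; file paths and example values are illustrative:

    # ${KAYOBE_CONFIG_PATH}/dnf.yml (replaces the former yum.yml)
    # yum_install_epel: false     # old name, no longer read
    dnf_install_epel: false       # new name introduced by this change

    # ${KAYOBE_CONFIG_PATH}/kolla.yml
    # The chrony container now defaults to "yes"; set this only to opt out.
    kolla_enable_chrony: false

Any ntp_service_enabled or ntp_config_* overrides in ${KAYOBE_CONFIG_PATH}/ntp.yml are no longer read and can be dropped.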

View File

@ -14,7 +14,7 @@
roles:
- role: stackhpc.os_openstacksdk
os_openstacksdk_venv: "{{ venv }}"
os_openstacksdk_install_epel: "{{ yum_install_epel }}"
os_openstacksdk_install_epel: "{{ dnf_install_epel }}"
os_openstacksdk_state: latest
os_openstacksdk_upper_constraints_file: "{{ pip_upper_constraints_file }}"

View File

@ -14,7 +14,7 @@
roles:
- role: stackhpc.os_openstacksdk
os_openstacksdk_venv: "{{ venv }}"
os_openstacksdk_install_epel: "{{ yum_install_epel }}"
os_openstacksdk_install_epel: "{{ dnf_install_epel }}"
os_openstacksdk_state: latest
os_openstacksdk_upper_constraints_file: "{{ pip_upper_constraints_file }}"

View File

@ -14,7 +14,7 @@
roles:
- role: stackhpc.os_openstacksdk
os_openstacksdk_venv: "{{ venv }}"
os_openstacksdk_install_epel: "{{ yum_install_epel }}"
os_openstacksdk_install_epel: "{{ dnf_install_epel }}"
os_openstacksdk_state: latest
os_openstacksdk_upper_constraints_file: "{{ pip_upper_constraints_file }}"

View File

@ -40,15 +40,6 @@
{{ query('inventory_hostnames', console_compute_node_limit |
default('baremetal-compute') ) | unique }}
# NOTE(mgoddard): This task may be removed when CentOS 7 is no longer
# supported.
- name: Gather facts for localhost
setup:
gather_subset: min
delegate_to: localhost
delegate_facts: true
when: not hostvars.localhost.module_setup | default(false)
- name: Reserve TCP ports for ironic serial consoles
include_role:
name: console-allocation

View File

@ -13,4 +13,3 @@
- dnf-automatic
when:
- ansible_os_family == 'RedHat'
- ansible_distribution_major_version | int >= 8

View File

@ -4,7 +4,7 @@
hosts: controllers[0]
roles:
- role: stackhpc.os-networks
os_openstacksdk_install_epel: "{{ yum_install_epel }}"
os_openstacksdk_install_epel: "{{ dnf_install_epel }}"
os_openstacksdk_state: latest
os_openstacksdk_upper_constraints_file: "{{ pip_upper_constraints_file }}"
os_networks_venv: "{{ virtualenv_path }}/openstacksdk"

View File

@ -13,6 +13,7 @@ ipa_build_source_url: "https://opendev.org/openstack/ironic-python-agent"
# Version of IPA source repository. Default is {{ openstack_branch }}.
ipa_build_source_version: "{{ openstack_branch }}"
# TODO(mgoddard): Use CentOS 8.
# List of default Diskimage Builder (DIB) elements to use when building IPA
# images.
ipa_build_dib_elements_default:

View File

@ -349,11 +349,7 @@ kolla_enable_barbican: "no"
kolla_enable_blazar: "no"
kolla_enable_central_logging: "no"
kolla_enable_ceilometer: "no"
# The chrony container is disabled by default on CentOS 7 because we enable an
# NTP daemon on the host. On CentOS 8 the chrony container is enabled by
# default because the NTP daemon is not supported. Setting this to true will
# disable NTP on the host.
kolla_enable_chrony: "{{ ansible_os_family != 'RedHat' or ansible_distribution_major_version | int >= 8 }}"
kolla_enable_chrony: "yes"
kolla_enable_cinder: "no"
kolla_enable_cloudkitty: "no"
kolla_enable_collectd: "no"

View File

@ -6,41 +6,3 @@
# Name of the local timezone.
timezone: "{{ ansible_date_time.tz }}"
###############################################################################
# Network Time Protocol (NTP).
# Whether to enable the NTP daemon on the host. On CentOS 7 the default is true
# unless 'kolla_enable_chrony' has been set to true on overcloud hosts. On
# CentOS 8 the host NTP daemon is not supported, and kolla_enable_chrony is set
# to true by default.
ntp_service_enabled: >-
{{ ansible_os_family == 'RedHat' and
ansible_distribution_major_version | int == 7 and
('overcloud' not in group_names or not kolla_enable_chrony | bool) }}
ntp_package_state: "{{ 'present' if ntp_service_enabled | bool else 'absent' }}"
ntp_service_state: "{{ 'started' if ntp_service_enabled | bool else 'stopped' }}"
# List of names of NTP servers.
#ntp_config_server:
# List of NTP restrictions to add to ntp.conf.
#ntp_config_restrict:
# List of addresses for NTP daemon to listen on.
#ntp_config_listen:
# Other NTP configuration options.
#ntp_config_filegen:
#ntp_config_statistics:
#ntp_config_crypto:
#ntp_config_includefile:
#ntp_config_keys:
#ntp_config_trustedkey:
#ntp_config_requestkey:
#ntp_config_controlkey:
#ntp_config_broadcast:
#ntp_config_broadcastclient:
#ntp_config_multicastclient:
#ntp_config_tinker_panic_enabled:

View File

@ -29,11 +29,3 @@ disable_cloud_init: False
# employed here is to remove this bogus entry from the image using
# virt-customize, if it exists. See https://bugs.centos.org/view.php?id=14369.
overcloud_host_image_workaround_resolv_enabled: True
# Workaround a CentOS 7.5 bug: cloud-init 0.7.9-24 does not correctly set
# an IP address for VLAN subinterfaces configured with the Openstack metadata
# format/Config drive. # See, https://bugs.centos.org/view.php?id=14964.
overcloud_host_image_workaround_cloud_init_enabled: False
# cloud-init repository for overcloud_host_image_workaround_cloud_init_enabled
overcloud_host_image_workaround_cloud_init_repo: https://stackhpc.github.io/cloud-init-repo/

View File

@ -8,7 +8,7 @@
host_package_update_security: false
tasks:
- name: Update host packages
yum:
dnf:
name: "{{ host_package_update_packages }}"
security: "{{ host_package_update_security | bool }}"
state: latest

View File

@ -1,22 +1,4 @@
---
# NOTE(mgoddard): We use delegate_to rather than specify localhost in the
# hosts list since this playbook is typically called with a limit that does
# not include localhost. This play may be removed when CentOS 7 is no longer
# supported.
- name: Gather facts for localhost
hosts: seed-hypervisor:seed:overcloud
tags:
- ip-allocation
gather_facts: no
tasks:
- name: Gather facts for localhost
setup:
gather_subset: min
delegate_to: localhost
delegate_facts: true
run_once: true
when: not hostvars.localhost.module_setup | default(false)
- name: Ensure IP addresses are allocated
hosts: seed-hypervisor:seed:overcloud
tags:

View File

@ -24,7 +24,7 @@
- name: Ensure the Python virtualenv package is installed
package:
name: python{{ ansible_python.version.major }}-virtualenv
name: python3-virtualenv
state: present
become: True
@ -49,27 +49,16 @@
mode: 0700
become: True
- name: Ensure pip is installed
easy_install:
name: pip
virtualenv: "{{ virtualenv }}"
virtualenv_site_packages: True
when:
- ansible_os_family == 'RedHat'
- ansible_distribution_major_version | int == 7
- name: Ensure kayobe virtualenv has the latest version of pip installed
pip:
name: pip
state: latest
virtualenv: "{{ virtualenv }}"
# Site packages are required for using the yum and selinux python
# modules, which are not available via PyPI.
# Site packages are required for using the dnf module, which is not
# available via PyPI.
virtualenv_site_packages: True
virtualenv_python: "python{{ ansible_python.version.major }}.{{ ansible_python.version.minor }}"
virtualenv_python: "python3.{{ ansible_python.version.minor }}"
# NOTE(mgoddard): SELinux python bindings available on PyPI only work
# with Python 3 on CentOS 8.
- name: Ensure kayobe virtualenv has SELinux bindings installed
pip:
name: selinux
@ -77,7 +66,6 @@
virtualenv: "{{ virtualenv }}"
when:
- ansible_os_family == 'RedHat'
- ansible_distribution_major_version | int >= 8
vars:
# Use the system python interpreter since the virtualenv might not
# exist.
@ -98,18 +86,10 @@
- name: Ensure Python setuptools and pip packages are installed
vars:
packages:
- python{{ ansible_python.version.major }}-setuptools
- "{% if ansible_distribution_major_version | int >= 8 %}python3-pip{% endif %}"
- python3-setuptools
- python3-pip
package:
name: "{{ packages | select }}"
state: present
become: True
- name: Ensure pip is installed
easy_install:
name: pip
become: True
when:
- ansible_os_family == 'RedHat'
- ansible_distribution_major_version | int == 7
when: virtualenv is not defined

View File

@ -286,7 +286,7 @@
- import_role:
name: kolla-ansible
vars:
kolla_ansible_install_epel: "{{ yum_install_epel }}"
kolla_ansible_install_epel: "{{ dnf_install_epel }}"
kolla_external_fqdn_cert: "{{ kolla_config_path }}/certificates/haproxy.pem"
kolla_internal_fqdn_cert: "{{ kolla_config_path }}/certificates/haproxy-internal.pem"
kolla_ansible_passwords_path: "{{ kayobe_config_path }}/kolla/passwords.yml"
@ -302,7 +302,5 @@
kolla_inspector_dhcp_pool_end: "{{ inspection_net_name | net_inspection_allocation_pool_end }}"
kolla_inspector_default_gateway: "{{ inspection_net_name | net_inspection_gateway or inspection_net_name | net_gateway }}"
kolla_inspector_extra_kernel_options: "{{ inspector_extra_kernel_options }}"
# While kayobe has its own support for installing an NTP daemon, the
# kolla-ansible baremetal role does a one-time sync which is useful.
kolla_enable_host_ntp: "{{ ntp_service_enabled }}"
kolla_enable_host_ntp: false
docker_daemon_mtu: "{{ public_net_name | net_mtu | default }}"

View File

@ -5,6 +5,6 @@
- kolla-build
roles:
- role: kolla
kolla_install_epel: "{{ yum_install_epel }}"
kolla_install_epel: "{{ dnf_install_epel }}"
- role: kolla-build
kolla_build_extra_config_path: "{{ kayobe_config_path }}/kolla/kolla-build.conf"

View File

@ -25,7 +25,7 @@
- name: Ensure the Python virtualenv package is installed
package:
name: python{{ ansible_python.version.major }}-virtualenv
name: python3-virtualenv
state: present
become: True
@ -34,10 +34,10 @@
name: pip
state: latest
virtualenv: "{{ kolla_ansible_target_venv }}"
# Site packages are required for using the yum and selinux python
# modules, which are not available via PyPI.
# Site packages are required for using the dnf python module, which
# is not available via PyPI.
virtualenv_site_packages: True
virtualenv_python: "python{{ ansible_python.version.major }}.{{ ansible_python.version.minor }}"
virtualenv_python: "python3.{{ ansible_python.version.minor }}"
become: True
- name: Ensure kolla-ansible virtualenv has docker SDK for python installed
@ -48,8 +48,6 @@
extra_args: "{% if kolla_upper_constraints_file %}-c {{ kolla_upper_constraints_file }}{% endif %}"
become: True
# NOTE(mgoddard): SELinux python bindings available on PyPI only work
# with Python 3 on CentOS 8.
- name: Ensure kolla-ansible virtualenv has SELinux bindings installed
pip:
name: selinux
@ -58,7 +56,6 @@
become: True
when:
- ansible_os_family == 'RedHat'
- ansible_distribution_major_version | int >= 8
- name: Ensure kolla-ansible virtualenv has correct ownership
file:

View File

@ -32,7 +32,7 @@
roles:
- role: stackhpc.os-openstackclient
os_openstackclient_venv: "{{ venv }}"
os_openstackclient_install_epel: "{{ yum_install_epel }}"
os_openstackclient_install_epel: "{{ dnf_install_epel }}"
os_openstackclient_state: latest
os_openstackclient_upper_constraints_file: "{{ pip_upper_constraints_file }}"
when: kolla_enable_monasca | bool

View File

@ -1,66 +0,0 @@
---
# Workaround a CentOS 7.5 bug: cloud-init 0.7.9-24 does not correctly set
# an IP address for VLAN subinterfaces configured with the Openstack metadata
# format/Config drive. # See, https://bugs.centos.org/view.php?id=14964.
- name: Ensure the overcloud host image uses an updated version of cloud-init
hosts: seed
tags:
- overcloud-host-image-workaround
vars:
custom_repo_tmp_path: /tmp/cloud-init-upstream.repo
tasks:
- block:
- name: Ensure libguestfs-tools is installed
command: >
docker exec bifrost_deploy
bash -c '
ansible localhost
--connection local
--become
-m yum
-a "name=libguestfs-tools state=present"'
- name: Template cloud-init-repo
copy:
content: |
[cloudinit]
name=StackHPC cloud-init
baseurl={{ overcloud_host_image_workaround_cloud_init_repo }}
gpgcheck=0
enabled=1
dest: "{{ custom_repo_tmp_path }}"
- name: Copy cloud init repo into docker container
command: docker cp {{ custom_repo_tmp_path }} bifrost_deploy:{{ custom_repo_tmp_path }}
- name: Clean up template on seed
file:
path: "{{ custom_repo_tmp_path }}"
state: absent
- name: Install custom repo
command: >
docker exec bifrost_deploy
bash -c '
export LIBGUESTFS_BACKEND=direct &&
ansible localhost
--connection local
--become
-m command
-a "virt-customize -a /httpboot/deployment_image.qcow2 --upload {{ custom_repo_tmp_path }}:/etc/yum.repos.d/"'
- name: Clean up tmp file in docker container
command: >
docker exec bifrost_deploy
bash -c '
ansible localhost
--connection local
--become
-m file
-a "path=\"{{ custom_repo_tmp_path }}\" state=absent"'
- name: upgrade cloud init
command: >
docker exec bifrost_deploy
bash -c '
export LIBGUESTFS_BACKEND=direct &&
ansible localhost
--connection local
--become
-m command
-a "virt-customize -a /httpboot/deployment_image.qcow2 --install cloud-init"'
when: overcloud_host_image_workaround_cloud_init_enabled | bool

View File

@ -19,7 +19,7 @@
ansible localhost
--connection local
--become
-m yum
-m dnf
-a "name=libguestfs-tools state=present"'
- name: Ensure the overcloud host image has bogus name server entries removed

View File

@ -123,7 +123,7 @@
roles:
- role: ironic-inspector-rules
os_openstacksdk_install_epel: "{{ yum_install_epel }}"
os_openstacksdk_install_epel: "{{ dnf_install_epel }}"
os_openstacksdk_state: latest
ironic_inspector_venv: "{{ virtualenv_path }}/openstacksdk"
ironic_inspector_upper_constraints_file: "{{ pip_upper_constraints_file }}"

View File

@ -35,7 +35,7 @@
name: stackhpc.os-openstackclient
vars:
os_openstackclient_venv: "{{ venv }}"
os_openstackclient_install_epel: "{{ yum_install_epel }}"
os_openstackclient_install_epel: "{{ dnf_install_epel }}"
os_openstackclient_state: latest
os_openstackclient_upper_constraints_file: "{{ pip_upper_constraints_file }}"
@ -57,7 +57,7 @@
roles:
- role: ironic-inspector-rules
os_openstacksdk_install_epel: "{{ yum_install_epel }}"
os_openstacksdk_install_epel: "{{ dnf_install_epel }}"
os_openstacksdk_state: latest
ironic_inspector_venv: "{{ venv }}"
ironic_inspector_upper_constraints_file: "{{ pip_upper_constraints_file }}"

View File

@ -99,7 +99,7 @@
- item.src != item.dest
roles:
- role: ipa-images
os_openstacksdk_install_epel: "{{ yum_install_epel }}"
os_openstacksdk_install_epel: "{{ dnf_install_epel }}"
os_openstacksdk_state: latest
ipa_images_venv: "{{ virtualenv_path }}/openstacksdk"
ipa_images_upper_constraints_file: "{{ pip_upper_constraints_file }}"

View File

@ -61,7 +61,7 @@
roles:
- role: stackhpc.os-networks
os_openstacksdk_install_epel: "{{ yum_install_epel }}"
os_openstacksdk_install_epel: "{{ dnf_install_epel }}"
os_openstacksdk_state: latest
os_openstacksdk_upper_constraints_file: "{{ pip_upper_constraints_file }}"
os_networks_venv: "{{ venv }}"

View File

@ -1,24 +1,4 @@
---
- block:
- name: Include OS family-specific variables
include_vars: "{{ hostvars.localhost.ansible_os_family }}.yml"
# Note: Currently we install these using the system package manager rather than
# pip to a virtualenv. This is because Yum is required elsewhere and cannot
# easily be installed in a virtualenv.
- name: Ensure package dependencies are installed
local_action:
module: package
name: "{{ item }}"
state: present
use: "{{ console_allocation_package_manager }}"
become: True
with_items: "{{ console_allocation_package_dependencies }}"
run_once: True
when:
- hostvars.localhost.ansible_os_family == 'RedHat'
- hostvars.localhost.ansible_distribution_major_version | int == 7
- name: Validate allocation pool start
vars:
port: "{{ console_allocation_pool_start | int(default=-1) }}"
@ -53,12 +33,8 @@
- name: Ensure Ironic serial console ports are allocated
vars:
# NOTE(mgoddard): Use the Python interpreter used to run ansible-playbook,
# since this has Python dependencies available to it (PyYAML). On CentOS 7
# we use the system Python to ensure that we can import SELinux bindings.
ansible_python_interpreter: >-
{{ '/usr/libexec/platform-python'
if hostvars.localhost.ansible_os_family == 'RedHat' and hostvars.localhost.ansible_distribution_major_version | int == 7
else ansible_playbook_python }}
# since this has Python dependencies available to it (PyYAML).
ansible_python_interpreter: "{{ ansible_playbook_python }}"
local_action:
module: console_allocation
allocation_file: "{{ console_allocation_filename }}"

View File

@ -1,7 +0,0 @@
---
# Package manager to use.
console_allocation_package_manager: yum
# List of packages to install.
console_allocation_package_dependencies:
- PyYAML

View File

@ -1,7 +1,7 @@
---
- name: Ensure required packages are installed
package:
name: "{% if ansible_distribution_major_version | int == 7 %}libselinux-python{% else %}python3-libselinux{% endif %}"
name: python3-libselinux
state: present
become: True

View File

@ -1,33 +1,9 @@
---
- block:
- name: Include OS family-specific variables
include_vars: "{{ hostvars.localhost.ansible_os_family }}.yml"
# Note: Currently we install these using the system package manager rather than
# pip to a virtualenv. This is because Yum is required elsewhere and cannot
# easily be installed in a virtualenv.
- name: Ensure package dependencies are installed
local_action:
module: package
name: "{{ item }}"
state: present
use: "{{ ip_allocation_package_manager }}"
become: True
with_items: "{{ ip_allocation_package_dependencies }}"
run_once: True
when:
- hostvars.localhost.ansible_os_family == 'RedHat'
- hostvars.localhost.ansible_distribution_major_version | int == 7
- name: Ensure IP addresses are allocated
vars:
# NOTE(mgoddard): Use the Python interpreter used to run ansible-playbook,
# since this has Python dependencies available to it (PyYAML). On CentOS 7
# we use the system Python to ensure that we can import SELinux bindings.
ansible_python_interpreter: >-
{{ '/usr/libexec/platform-python'
if hostvars.localhost.ansible_os_family == 'RedHat' and hostvars.localhost.ansible_distribution_major_version | int == 7
else ansible_playbook_python }}
# since this has Python dependencies available to it (PyYAML).
ansible_python_interpreter: "{{ ansible_playbook_python }}"
local_action:
module: ip_allocation
allocation_file: "{{ ip_allocation_filename }}"

View File

@ -1,8 +0,0 @@
---
# Package manager to use.
ip_allocation_package_manager: yum
# List of packages to install.
ip_allocation_package_dependencies:
- python-netaddr
- PyYAML

View File

@ -16,12 +16,7 @@ kolla_ansible_source_version:
kolla_ansible_venv: "{{ ansible_env['PWD'] }}/kolla-venv"
# Python interpreter to use to create Kolla Ansible virtualenv.
# FIXME(mgoddard): Use ansible_python when Kayobe supports Python 3.
kolla_ansible_venv_python: "python{{ kolla_ansible_venv_python_major_version }}"
# Major version of Python interpreter used in Kolla Ansible virtualenv.
# FIXME(mgoddard): Use ansible_python when Kayobe supports Python 3.
kolla_ansible_venv_python_major_version: 3
kolla_ansible_venv_python: python3
# Extra requirements to install inside the kolla-ansible virtualenv.
kolla_ansible_venv_extra_requirements: []

View File

@ -99,12 +99,8 @@
- name: Ensure the Kolla passwords file exists
vars:
# NOTE(mgoddard): Use the Python interpreter used to run ansible-playbook,
# since this has Python dependencies available to it (PyYAML). On CentOS 7
# we use the system Python to ensure that we can import SELinux bindings.
ansible_python_interpreter: >-
{{ '/usr/libexec/platform-python'
if ansible_os_family == 'RedHat' and ansible_distribution_major_version | int == 7
else ansible_playbook_python }}
# since this has Python dependencies available to it (PyYAML).
ansible_python_interpreter: "{{ ansible_playbook_python }}"
kolla_passwords:
src: "{{ kolla_ansible_passwords_path }}"
dest: "{{ kolla_ansible_passwords_path }}"

View File

@ -58,7 +58,6 @@
when: stat_result.stat.exists
when:
- kolla_ansible_venv is not none
- kolla_ansible_venv_python_major_version | int == 3
- name: Ensure the latest version of pip is installed
pip:
@ -83,31 +82,10 @@
# releases from breaking tested code. Changes to this limit should be
# tested.
- ansible>=2.8,<2.10,!=2.8.9,!=2.9.8
- >-
{%- if ansible_os_family == 'RedHat' and ansible_distribution_major_version | int >= 8 %}
selinux
{% endif -%}
- selinux
pip:
name: "{{ (kolla_ansible_packages + kolla_ansible_venv_extra_requirements) | select | list }}"
state: latest
extra_args: "{% if kolla_upper_constraints_file %}-c {{ kolla_upper_constraints_file }}{% endif %}"
virtualenv: "{{ kolla_ansible_venv }}"
virtualenv_python: "{{ kolla_ansible_venv_python }}"
# This is a workaround for the lack of a python package for libselinux-python
# on PyPI. Without using --system-site-packages to create the virtualenv, it
# seems difficult to ensure the selinux python module is available. It is a
# dependency for Ansible when selinux is enabled. On CentOS 8, we can install
# the selinux package from PyPI, however when using Python 3 on CentOS 7 this
# does not work.
- name: Ensure selinux Python package is linked into the virtualenv
file:
src: "/usr/lib64/python2.7/site-packages/selinux"
dest: "{{ kolla_ansible_venv }}/lib/python2.7/site-packages/selinux"
state: link
when:
- ansible_os_family == 'RedHat'
- ansible_distribution_major_version | int == 7
- ansible_selinux != False
- ansible_selinux.status != 'disabled'
- kolla_ansible_venv_python_major_version | int == 2

View File

@ -4,15 +4,6 @@
# Components define groups of services, e.g. nova or ironic.
# Services define single containers, e.g. nova-compute or ironic-api.
{% if ansible_os_family == 'RedHat' and ansible_distribution_major_version | int == 7 and kolla_ansible_venv_python_major_version | int == 3 %}
# TODO(mgoddard): Remove when CentOS 7 is no longer supported.
# Force the use of python2 for localhost. This is necessary for delegate_to:
# localhost, which will otherwise use the playbook python interpreter
# (python3). On CentOS 7, that fails due to a lack of python3 bindings for
# SELinux. https://bugs.centos.org/view.php?id=16389
localhost ansible_python_interpreter=/usr/bin/python2
{% endif %}
{% for group in kolla_overcloud_top_level_groups %}
# Top level {{ group }} group.
[{{ group }}]

View File

@ -5,7 +5,6 @@ kolla_ansible_package_dependencies:
- libffi-dev
- libssl-dev
- patch
- "python{% if kolla_ansible_venv_python_major_version | int == 3 %}3{% endif %}-dev"
- "python{% if kolla_ansible_venv_python_major_version | int == 3 %}3{% endif %}-pip"
- "python{% if kolla_ansible_venv_python_major_version | int == 3 %}3-venv{% else %}-virtualenv{% endif %}"
- "{% if kolla_ansible_venv_python_major_version | int == 2 %}python-yaml{% endif %}"
- python3-dev
- python3-pip
- python3-venv

View File

@ -5,7 +5,5 @@ kolla_ansible_package_dependencies:
- libffi-devel
- openssl-devel
- patch
- "python{% if kolla_ansible_venv_python_major_version | int == 3 %}3{% endif %}-devel"
- "python{% if kolla_ansible_venv_python_major_version | int == 3 %}3{% endif %}-pip"
- "{% if kolla_ansible_venv_python_major_version | int == 2 %}python-virtualenv{% endif %}"
- "{% if kolla_ansible_venv_python_major_version | int == 2 %}PyYAML{% endif %}"
- python3-devel
- python3-pip

View File

@ -2,8 +2,7 @@
FROM {{ item.image }}
RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get upgrade -y && apt-get install -y python sudo bash ca-certificates && apt-get clean; \
elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python sudo python-devel python2-dnf bash && dnf clean all; \
elif [ $(command -v yum) ]; then yum makecache fast && yum update -y && yum install -y python sudo yum-plugin-ovl bash && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \
elif [ $(command -v zypper) ]; then zypper refresh && zypper update -y && zypper install -y python sudo bash python-xml && zypper clean -a; \
elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo bash ca-certificates; fi
RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get upgrade -y && apt-get install -y python3 sudo bash ca-certificates && apt-get clean; \
elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python3 sudo python3-devel python3-dnf bash && dnf clean all; \
elif [ $(command -v zypper) ]; then zypper refresh && zypper update -y && zypper install -y python3 sudo bash python3-xml && zypper clean -a; \
elif [ $(command -v apk) ]; then apk update && apk add --no-cache python3 sudo bash ca-certificates; fi

View File

@ -3,7 +3,6 @@
hosts: localhost
connection: local
gather_facts: false
no_log: "{{ not lookup('env', 'MOLECULE_DEBUG') | bool }}"
vars:
molecule_file: "{{ lookup('env', 'MOLECULE_FILE') }}"
molecule_ephemeral_directory: "{{ lookup('env', 'MOLECULE_EPHEMERAL_DIRECTORY') }}"

View File

@ -6,8 +6,8 @@ driver:
lint:
name: yamllint
platforms:
- name: centos-7
image: centos:7
- name: centos-8
image: centos:8
provisioner:
name: ansible
lint:

View File

@ -2,8 +2,7 @@
FROM {{ item.image }}
RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get upgrade -y && apt-get install -y python sudo bash ca-certificates && apt-get clean; \
elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python sudo python-devel python2-dnf bash && dnf clean all; \
elif [ $(command -v yum) ]; then yum makecache fast && yum update -y && yum install -y python sudo yum-plugin-ovl bash && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \
elif [ $(command -v zypper) ]; then zypper refresh && zypper update -y && zypper install -y python sudo bash python-xml && zypper clean -a; \
elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo bash ca-certificates; fi
RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get upgrade -y && apt-get install -y python3 sudo bash ca-certificates && apt-get clean; \
elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python3 sudo python3-devel python3-dnf bash && dnf clean all; \
elif [ $(command -v zypper) ]; then zypper refresh && zypper update -y && zypper install -y python3 sudo bash python3-xml && zypper clean -a; \
elif [ $(command -v apk) ]; then apk update && apk add --no-cache python3 sudo bash ca-certificates; fi

View File

@ -6,8 +6,8 @@ driver:
lint:
name: yamllint
platforms:
- name: centos-7
image: centos:7
- name: centos-8
image: centos:8
provisioner:
name: ansible
inventory:

View File

@ -14,9 +14,9 @@
- gcc
- libffi-devel
- openssl-devel
- python{{ ansible_python.version.major }}-devel
- python{{ ansible_python.version.major }}-pip
- python{{ ansible_python.version.major }}-virtualenv
- python3-devel
- python3-pip
- python3-virtualenv
state: present
become: True
@ -50,7 +50,7 @@
name: "{{ item.name }}"
state: latest
virtualenv: "{{ kolla_venv }}"
virtualenv_python: "python{{ ansible_python.version.major }}.{{ ansible_python.version.minor }}"
virtualenv_python: "python3.{{ ansible_python.version.minor }}"
with_items:
- { name: pip }

View File

@ -22,9 +22,7 @@ ONBOOT={{ item.onboot }}
{% if item.peer_bridge is defined %}
BRIDGE={{ item.peer_bridge }}
{% endif %}
{% if ansible_distribution_major_version | int >= 7 %}
NM_CONTROLLED=no
{% endif %}
{% if item.peer_mtu is defined and item.peer_mtu %}
MTU={{ item.peer_mtu }}
{% endif %}

View File

@ -22,9 +22,7 @@ ONBOOT={{ item.onboot }}
{% if item.bridge is defined %}
BRIDGE={{ item.bridge }}
{% endif %}
{% if ansible_distribution_major_version | int >= 7 %}
NM_CONTROLLED=no
{% endif %}
{% if item.mtu is defined and item.mtu %}
MTU={{ item.mtu }}
{% endif %}

View File

@ -1,4 +0,0 @@
---
yum_cron_enabled: false
yum_cron_update_cmd: 'security'

View File

@ -1,7 +0,0 @@
---
- name: Restart yum-cron
service:
name: yum-cron
state: restarted
become: True

View File

@ -1,36 +0,0 @@
---
- name: Yum | Install yum-cron (CentOS)
yum:
name: yum-cron
state: present
when: yum_cron_enabled
become: True
- name: Replace | Enable update applying for yum-cron (CentOS)
replace:
dest: /etc/yum/yum-cron.conf
regexp: "^apply_updates = no"
replace: "apply_updates = yes"
when: yum_cron_enabled
notify:
- Restart yum-cron
become: True
- name: Replace | Enable update applying for yum-cron (CentOS)
replace:
dest: /etc/yum/yum-cron.conf
regexp: "^update_cmd = .*$"
replace: "update_cmd = {{ yum_cron_update_cmd }}"
when: yum_cron_enabled
notify:
- Restart yum-cron
become: True
- name: Service | Enable yum-cron (CentOS)
service:
name: yum-cron
state: started
enabled: yes
when: yum_cron_enabled
become: True

View File

@ -1,27 +0,0 @@
---
# Yum configuration. Dict mapping Yum config option names to their values.
# yum_config:
# proxy: http://proxy.example.com
yum_config: {}
# Whether or not to use a local Yum mirror.
yum_use_local_mirror: false
# Mirror FQDN for Yum repos.
yum_centos_mirror_host: 'mirror.centos.org'
# Mirror directory for Yum CentOS repos.
yum_centos_mirror_directory: 'centos'
# Mirror FQDN for Yum EPEL repos.
yum_epel_mirror_host: 'download.fedoraproject.org'
# Mirror directory for Yum EPEL repos.
yum_epel_mirror_directory: 'pub/epel'
# A dict of custom repositories.
# You can see params on
# http://docs.ansible.com/ansible/latest/modules/yum_repository_module.html.
# For example:
# yum_custom_repos:
# reponame:
# baseurl: http://repo
# file: myrepo
# gpgkey: http://gpgkey
# gpgcheck: yes
yum_custom_repos: {}

View File

@ -1,28 +0,0 @@
---
- name: Install custom repositories
yum_repository:
name: "{{ item.key }}"
description: "{% if 'description' in item.value %}{{ item.value.description }}{% else %}{{ item.key }} repository{% endif %}"
baseurl: "{{ item.value.baseurl }}"
file: "{{ item.value.file | default(omit)}}"
gpgkey: "{{ item.value.gpgkey | default(omit)}}"
gpgcheck: "{{ item.value.gpgcheck | default(omit)}}"
cost: "{{ item.value.cost | default(omit)}}"
enabled: "{{ item.value.enabled | default(omit)}}"
gpgcakey: "{{ item.value.gpgcakey | default(omit)}}"
metadata_expire: "{{ item.value.metadata_expire | default(omit)}}"
mirrorlist: "{{ item.value.mirrorlist | default(omit)}}"
mirrorlist_expire: "{{ item.value.mirrorlist_expire | default(omit)}}"
priority: "{{ item.value.priority | default(omit)}}"
proxy: "{{ item.value.proxy | default(omit)}}"
proxy_password: "{{ item.value.proxy_password | default(omit)}}"
proxy_username: "{{ item.value.proxy_username | default(omit)}}"
repo_gpgcheck: "{{ item.value.repo_gpgcheck | default(omit)}}"
sslverify: "{{ item.value.sslverify | default(omit)}}"
with_dict: "{{ yum_custom_repos }}"
register: register_yum_command
retries: 3
delay: 10
until: register_yum_command is success
become: true

View File

@ -1,43 +0,0 @@
---
- name: Replace | Disable YUM fastestmirror plugin (CentOS)
replace:
dest: /etc/yum/pluginconf.d/fastestmirror.conf
regexp: "enabled=1"
replace: "enabled=0"
become: True
- name: Template | Copy CentOS repo templates (CentOS)
template:
src: CentOS-Base.repo.j2
dest: /etc/yum.repos.d/CentOS-Base.repo
owner: root
group: root
mode: 0664
become: True
- name: Yum | Update cache (CentOS)
yum:
name: '*'
update_cache: yes
become: True
- name: Yum | Install epel-release (CentOS)
yum:
name: epel-release
state: present
become: True
- name: Template | Copy EPEL repo templates (CentOS)
template:
src: epel.repo.j2
dest: /etc/yum.repos.d/epel.repo
owner: root
group: root
mode: 0664
become: True
- name: Yum | Update cache (CentOS)
yum:
name: '*'
update_cache: yes
become: True

View File

@ -1,16 +0,0 @@
---
- block:
- name: Ensure yum.conf configuration exists
ini_file:
path: /etc/yum.conf
section: "main"
option: "{{ item.key }}"
value: "{{ item.value }}"
loop: "{{ query('dict', yum_config) }}"
become: true
- include_tasks: local-mirror.yml
when: yum_use_local_mirror | bool
- include_tasks: custom_repo.yml
when: ansible_os_family == 'RedHat'

View File

@ -1,43 +0,0 @@
# CentOS-Base.repo
#
# The mirror system uses the connecting IP address of the client and the
# update status of each mirror to pick mirrors that are updated to and
# geographically close to the client. You should use this for CentOS updates
# unless you are manually picking other mirrors.
#
# If the mirrorlist= does not work for you, as a fall back you can try the
# remarked out baseurl= line instead.
#
#
[base]
name=CentOS-$releasever - Base
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os&infra=$infra
baseurl=http://{{ yum_centos_mirror_host }}/{{ yum_centos_mirror_directory }}/$releasever/os/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
#released updates
[updates]
name=CentOS-$releasever - Updates
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates&infra=$infra
baseurl=http://{{ yum_centos_mirror_host }}/{{ yum_centos_mirror_directory }}/$releasever/updates/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
#additional packages that may be useful
[extras]
name=CentOS-$releasever - Extras
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=extras&infra=$infra
baseurl=http://{{ yum_centos_mirror_host }}/{{ yum_centos_mirror_directory }}/$releasever/extras/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
#additional packages that extend functionality of existing packages
[centosplus]
name=CentOS-$releasever - Plus
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=centosplus&infra=$infra
baseurl=http://{{ yum_centos_mirror_host }}/{{ yum_centos_mirror_directory }}/$releasever/centosplus/$basearch/
gpgcheck=1
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7

View File

@ -1,26 +0,0 @@
[epel]
name=Extra Packages for Enterprise Linux 7 - $basearch
baseurl=http://{{ yum_epel_mirror_host }}/{{ yum_epel_mirror_directory }}/7/$basearch
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch
failovermethod=priority
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
[epel-debuginfo]
name=Extra Packages for Enterprise Linux 7 - $basearch - Debug
baseurl=http://{{ yum_epel_mirror_host }}/{{ yum_epel_mirror_directory }}/7/$basearch/debug
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-7&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
gpgcheck=1
[epel-source]
name=Extra Packages for Enterprise Linux 7 - $basearch - Source
baseurl=http://{{ yum_epel_mirror_host }}/{{ yum_epel_mirror_directory }}/7/SRPMS
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-7&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
gpgcheck=1

View File

@ -5,7 +5,7 @@
- introspection-rules
roles:
- role: ironic-inspector-rules
os_openstacksdk_install_epel: "{{ yum_install_epel }}"
os_openstacksdk_install_epel: "{{ dnf_install_epel }}"
os_openstacksdk_state: latest
ironic_inspector_venv: "{{ virtualenv_path }}/openstacksdk"
ironic_inspector_upper_constraints_file: "{{ pip_upper_constraints_file }}"

View File

@ -20,7 +20,6 @@
become: True
when:
- ansible_os_family == 'RedHat'
- ansible_distribution_major_version | int >= 8
- name: Ensure the image cache directory exists
file:

View File

@ -1,10 +1,8 @@
---
- name: Ensure NTP is installed and configured
- name: Ensure timezone is configured
hosts: seed-hypervisor:seed:overcloud
tags:
- ntp
- timezone
roles:
- role: yatesr.timezone
become: True
- role: stackhpc.ntp
become: True

View File

@ -1,16 +0,0 @@
---
- name: Ensure Yum repos are configured
hosts: seed-hypervisor:seed:overcloud
tags:
- yum
tasks:
- block:
- import_role:
name: yum
- import_role:
name: yum-cron
tags:
- yum-cron
when:
- ansible_os_family == 'RedHat'
- ansible_distribution_major_version | int == 7

View File

@ -126,23 +126,13 @@ function is_yum {
fi
}
function python_version {
# Echo python major version.
if is_dnf; then
echo 3
elif is_yum; then
echo 2
else
echo 3
fi
}
function install_dependencies {
echo "Installing package dependencies for kayobe"
if is_dnf; then
sudo dnf -y install gcc git vim python3-pyyaml python3-virtualenv libffi-devel
elif is_yum; then
sudo yum -y install gcc git vim python2-virtualenv libffi-devel
echo "CentOS 7 is no longer supported"
exit 1
else
sudo apt install -y python-dev python3-virtualenv gcc git libffi-dev
fi
@ -160,7 +150,7 @@ function install_venv {
fi
if [[ ! -f "${venv_path}/bin/activate" ]]; then
echo "Creating virtual environment in ${venv_path}"
virtualenv -p python$(python_version) "${venv_path}"
virtualenv -p python3 "${venv_path}"
# NOTE: Virtualenv's activate and deactivate scripts reference an
# unbound variable.
set +u
@ -186,7 +176,7 @@ function install_kayobe_dev_venv {
function upgrade_kayobe_venv {
echo "Upgrading kayobe virtual environment in ${KAYOBE_VENV_PATH}"
virtualenv -p python$(python_version) "${KAYOBE_VENV_PATH}"
virtualenv -p python3 "${KAYOBE_VENV_PATH}"
# NOTE: Virtualenv's activate and deactivate scripts reference an
# unbound variable.
set +u

View File

@ -9,7 +9,7 @@ node_types:
volumes:
# There is a minimum disk space capacity requirement of 4GiB when using Ironic Python Agent:
# https://github.com/openstack/ironic-python-agent/blob/master/ironic_python_agent/utils.py#L290
# The CentOS7 cloud image seems to fill a 4GiB disk, so allow 6.
# The CentOS8 cloud image seems to fill a 4GiB disk, so allow 6.
- capacity: 6GiB
physical_networks:
- physnet1

View File

@ -12,9 +12,9 @@ Package Repositories
If using custom package repositories, it may be necessary to update these prior
to running a package update. To do this, update the configuration in
``${KAYOBE_CONFIG_PATH}/yum.yml`` and run the following command::
``${KAYOBE_CONFIG_PATH}/dnf.yml`` and run the following command::
(kayobe) $ kayobe overcloud host configure --tags yum --kolla-tags none
(kayobe) $ kayobe overcloud host configure --tags dnf --kolla-tags none
Package Update
--------------

View File

@ -23,9 +23,9 @@ Package Repositories
If using custom package repositories, it may be necessary to update these prior
to running a package update. To do this, update the configuration in
``${KAYOBE_CONFIG_PATH}/yum.yml`` and run the following command::
``${KAYOBE_CONFIG_PATH}/dnf.yml`` and run the following command::
(kayobe) $ kayobe seed host configure --tags yum --kolla-tags none
(kayobe) $ kayobe seed host configure --tags dnf --kolla-tags none
Package Update
--------------

View File

@ -45,7 +45,7 @@ configure how this image is built. Consult the
:diskimage-builder-doc:`Diskimage-builder documentation <>` for further
information on building disk images.
The default configuration builds a CentOS 7 whole disk (partitioned) image with
The default configuration builds a CentOS 8 whole disk (partitioned) image with
SELinux disabled and a serial console enabled. `Cloud-init
<https://cloudinit.readthedocs.io/en/latest/>`__ is used to process the
configuration drive built by Bifrost, rather than the Bifrost default of
@ -129,15 +129,6 @@ In DIB, we achieve this by setting the ``FS_TYPE`` environment variable to
kolla_bifrost_dib_env_vars_extra:
FS_TYPE: "xfs"
In order to build the image, we also require the ``xfsprogs`` package
to be installed in the Bifrost container. As of the Ussuri release of Bifrost
this will be done automatically, but until then, the following workaround
can be made on the seed host:
.. code-block:: console
docker exec bifrost_deploy yum -y install xfsprogs
Example: Configuring a development user account
-----------------------------------------------

View File

@ -202,84 +202,8 @@ added to the Kayobe configuration.
ssh_key:
- "{{ lookup('file', kayobe_config_path ~ '/ssh-keys/id_rsa_bob.pub') }}"
Package Repositories (CentOS 7)
===============================
*tags:*
| ``yum``
Kayobe supports configuration of package repositories via Yum, via variables in
``${KAYOBE_CONFIG_PATH}/yum.yml``.
Configuration of yum.conf
-------------------------
Global configuration of Yum is stored in ``/etc/yum.conf``, and options can be
set via the ``yum_config`` variable. Options are added to the ``[main]``
section of the file. For example, to configure Yum to use a proxy server:
.. code-block:: yaml
:caption: ``yum.yml``
yum_config:
proxy: https://proxy.example.com
CentOS and EPEL Mirrors
-----------------------
CentOS and EPEL mirrors can be enabled by setting ``yum_use_local_mirror`` to
``true``. CentOS repository mirrors are configured via the following
variables:
* ``yum_centos_mirror_host`` (default ``mirror.centos.org``) is the mirror
hostname.
* ``yum_centos_mirror_directory`` (default ``centos``) is a directory on the
mirror in which repositories may be accessed.
EPEL repository mirrors are configured via the following variables:
* ``yum_epel_mirror_host`` (default ``download.fedoraproject.org``) is the
mirror hostname.
* ``yum_epel_mirror_directory`` (default ``pub/epel``) is a directory on the
mirror in which repositories may be accessed.
For example, to configure CentOS and EPEL mirrors at mirror.example.com:
.. code-block:: yaml
:caption: ``yum.yml``
yum_use_local_mirror: true
yum_centos_mirror_host: mirror.example.com
yum_epel_mirror_host: mirror.example.com
Custom Yum Repositories
-----------------------
It is also possible to configure a list of custom Yum repositories via the
``yum_custom_repos`` variable. The format is a dict/map, with repository names
mapping to a dict/map of arguments to pass to the Ansible ``yum_repository``
module.
For example, the following configuration defines a single Yum repository called
``widgets``.
.. code-block:: yaml
:caption: ``yum.yml``
yum_custom_repos:
widgets:
baseurl: http://example.com/repo
file: widgets
gpgkey: http://example.com/gpgkey
gpgcheck: yes
Disabling EPEL
--------------
It is possible to disable the EPEL Yum repository by setting
``yum_install_epel`` to ``false``.
Package Repositories (CentOS 8)
===============================
Package Repositories
====================
*tags:*
| ``dnf``
@ -470,57 +394,20 @@ timezone. For example:
NTP
===
*tags:*
| ``ntp``
.. note::
Since the Ussuri release, Kayobe no longer supports configuration of an NTP
daemon on the host, since the ``ntp`` package is no longer available in CentOS
8.
CentOS 8 does not support configuring an NTP daemon. Use :ref:`chrony
<configuration-hosts-chrony>` instead.
Kolla Ansible can deploy a chrony container, and from the Ussuri release chrony
is enabled by default.
Network Time Protocol (NTP) may be configured via variables in
``${KAYOBE_CONFIG_PATH}/ntp.yml``. The list of NTP servers is
configured via ``ntp_config_server``, and by default the ``pool.ntp.org``
servers are used. A list of restrictions may be added via
``ntp_config_restrict``, and a list of interfaces to listen on via
``ntp_config_listen``. Other options and their default values may be found in
the `stackhpc.ntp <https://galaxy.ansible.com/stackhpc/ntp>`__ Ansible role.
.. code-block:: yaml
:caption: ``ntp.yml``
ntp_config_server:
- 1.ubuntu.pool.ntp.org
- 2.ubuntu.pool.ntp.org
ntp_config_restrict:
- '-4 default kod notrap nomodify nopeer noquery'
ntp_config_listen:
- eth0
The NTP service may be disabled as follows:
.. code-block:: yaml
:caption: ``ntp.yml``
ntp_service_enabled: false
.. _configuration-hosts-chrony:
Chrony
------
Kolla Ansible can deploy a chrony container. This is disabled by default in
Kayobe on CentOS 7 to avoid conflicting with the NTP daemon on the host. On
CentOS 8 Chrony is enabled by default.
To use the containerised chrony daemon and disable the host NTP daemon on
CentOS 7, set the following in ``${KAYOBE_CONFIG_PATH}/kolla.yml``:
To disable the containerised chrony daemon, set the following in
``${KAYOBE_CONFIG_PATH}/kolla.yml``:
.. code-block:: yaml
kolla_enable_chrony: true
kolla_enable_chrony: false
.. _configuration-hosts-mdadm:

View File

@ -564,11 +564,6 @@ consideration:
lookup the interface for the cloud-init network configuration that occurs
during bifrost provisioning of the overcloud.
* If the admin network is configured as a tagged VLAN, you must configure Kayobe
to upgrade cloud-init. This is a temporary workaround for a bug in the current
version of cloud-init shipped with CentOS 7.5. Please see :ref:`workaround-cloud-init`
for more details.
Overcloud Provisioning Network
------------------------------

View File

@ -161,42 +161,6 @@ argument.
Information on configuration of Kolla for building container images is
available :ref:`here <configuration-kolla>`.
.. _workaround-cloud-init:
Workaround VLAN cloud-init issue
--------------------------------
If you wish to configure the overcloud hosts to use a tagged VLAN for the admin
network interface, you must set
``overcloud_host_image_workaround_cloud_init_enabled``
to True in ``${KAYOBE_CONFIG_PATH}/etc/kayobe/overcloud.yml``::
overcloud_host_image_workaround_cloud_init_enabled: True
prior to deploying the containerised services with::
(kayobe) $ kayobe seed service deploy
Kayobe will then patch the overcloud host image to include a more recent
version of cloud-init. This is to workaround a bug in the version of
cloud-init currently shipped with CentOS 7.5 (0.7.9-24 at the time of writing),
which doesn't set the IP address of VLAN subinterfaces. See:
https://bugs.centos.org/view.php?id=14964.
The default repository used to obtain the package is currently hosted on github
in the `cloud-init-repo <https://github.com/stackhpc/cloud-init-repo>`_
repository. You can override this by setting ``overcloud_host_image_workaround_cloud_init_repo``
in ``${KAYOBE_CONFIG_PATH}/etc/kayobe/overcloud.yml``::
overcloud_host_image_workaround_cloud_init_repo: https://stackhpc.github.io/cloud-init-repo/
The source code used to build the updated package can be obtained from
the `cloud-init-repo-source <https://github.com/stackhpc/cloud-init-repo-source>`_
repository.
As this is not an offical package, there may be latent bugs when using
functionality the kayobe developers have not used themselves.
Deploying Containerised Services
--------------------------------

View File

@ -260,7 +260,7 @@ The machines and networking created by Tenks can be cleaned up via
Seed Hypervisor
===============
The seed hypervisor development environment is supported for CentOS 7. The
The seed hypervisor development environment is supported for CentOS 8. The
system must be either bare metal, or a VM on a system with nested
virtualisation enabled.

View File

@ -16,19 +16,15 @@ running kayobe's tests.
* Ubuntu/Debian::
sudo apt-get install build-essential python-dev libssl-dev python-pip git
sudo apt-get install build-essential python3-dev libssl-dev python3-pip git
* Fedora 21/RHEL7/CentOS7::
* Fedora or CentOS/RHEL 8::
sudo yum install python-devel openssl-devel python-pip git gcc
* Fedora 22 or higher::
sudo dnf install python-devel openssl-devel python-pip git gcc
sudo dnf install python3-devel openssl-devel python3-pip git gcc
* OpenSUSE/SLE 12::
sudo zypper install python-devel python-pip libopenssl-devel git
sudo zypper install python3-devel python3-pip libopenssl-devel git
Python Prerequisites
--------------------

View File

@ -15,7 +15,7 @@ Prerequisites
Currently Kayobe supports the following Operating Systems on the Ansible
control host:
- CentOS 7.6
- CentOS 8
- Ubuntu 16.04
To avoid conflicts with python packages installed by the system package manager
@ -26,18 +26,18 @@ some of kayobe's python dependencies.
On CentOS::
$ yum install -y python-devel python-virtualenv gcc libffi-devel
$ dnf install -y python3-devel python3-virtualenv gcc libffi-devel
On Ubuntu::
$ apt install -y python-dev python-virtualenv gcc libffi-dev
$ apt install -y python3-dev python3-virtualenv gcc libffi-dev
If installing Kayobe from source, then Git is required for cloning and working
with the source code repository.
On CentOS::
$ yum install -y git
$ dnf install -y git
On Ubuntu::

View File

@ -22,7 +22,7 @@ OpenStack using Kolla, Ansible and Kayobe. The guide makes use of
baremetal environment running on a single hypervisor.
To complete the walkthrough you will require a baremetal or VM hypervisor
running CentOS 7 with at least 32GB RAM & 40GB disk space.
running CentOS 8 with at least 32GB RAM & 80GB disk space.
Preparing the deployment can take some time - where possible it is
beneficial to snapshot the hypervisor. We advise making a snapshot after
creating the initial 'seed' VM as this will make additional deployments

View File

@ -185,10 +185,6 @@
#kolla_enable_cadf_notifications:
#kolla_enable_ceilometer:
#kolla_enable_central_logging:
# The chrony container is disabled by default on CentOS 7 because we enable an
# NTP daemon on the host. On CentOS 8 the chrony container is enabled by
# default because the NTP daemon is not supported. Setting this to true will
# disable NTP on the host.
#kolla_enable_chrony:
#kolla_enable_cinder:
#kolla_enable_cinder_backend_hnas_iscsi:

View File

@ -10,34 +10,9 @@
###############################################################################
# Network Time Protocol (NTP).
# Whether to enable the NTP daemon on the host. On CentOS 7 the default is true
# unless 'kolla_enable_chrony' has been set to true on overcloud hosts. On
# CentOS 8 the host NTP daemon is not supported, and kolla_enable_chrony is set
# to true by default.
#ntp_service_enabled:
# List of names of NTP servers.
#ntp_config_server:
# List of NTP restrictions to add to ntp.conf.
#ntp_config_restrict:
# List of addresses for NTP daemon to listen on.
#ntp_config_listen:
# Other NTP configuration options.
#ntp_config_filegen:
#ntp_config_statistics:
#ntp_config_crypto:
#ntp_config_includefile:
#ntp_config_keys:
#ntp_config_trustedkey:
#ntp_config_requestkey:
#ntp_config_controlkey:
#ntp_config_broadcast:
#ntp_config_broadcastclient:
#ntp_config_multicastclient:
#ntp_config_tinker_panic_enabled:
# Support for running an NTP daemon on the host is no longer available.
# Instead the Kolla Ansible 'chrony' container is deployed by default. Set
# 'kolla_enable_chrony' to 'false' in kolla.yml to disable it.
###############################################################################
# Dummy variable to allow Ansible to accept this file.

View File

@ -27,14 +27,6 @@
# virt-customize, if it exists. See https://bugs.centos.org/view.php?id=14369.
#overcloud_host_image_workaround_resolv_enabled:
# Workaround a CentOS 7.5 bug: cloud-init 0.7.9-24 does not correctly set
# an IP address for VLAN subinterfaces configured with the Openstack metadata
# format/Config drive. # See, https://bugs.centos.org/view.php?id=14964.
#overcloud_host_image_workaround_cloud_init_enabled:
# cloud-init repository for overcloud_host_image_workaround_cloud_init_enabled
#overcloud_host_image_workaround_cloud_init_repo:
###############################################################################
# Dummy variable to allow Ansible to accept this file.
workaround_ansible_issue_8743: yes

View File

@ -281,6 +281,8 @@ def prune_galaxy_roles(parsed_args):
"""
LOG.info("Removing unnecessary galaxy roles from kayobe")
roles_to_remove = [
'resmo.ntp',
'stackhpc.ntp',
'stackhpc.os-shade',
]
LOG.debug("Removing roles: %s", ",".join(roles_to_remove))

View File

@ -410,10 +410,10 @@ class SeedHypervisorHostConfigure(KollaAnsibleMixin, KayobeAnsibleMixin,
* Optionally, wipe unmounted disk partitions (--wipe-disks).
* Configure user accounts, group associations, and authorised SSH keys.
* Configure a PyPI mirror.
* Configure Yum repos.
* Configure package repos.
* Configure the host's network interfaces.
* Set sysctl parameters.
* Configure NTP.
* Configure timezone.
* Optionally, configure software RAID arrays.
* Optionally, configure encryption.
* Configure LVM volumes.
@ -452,7 +452,7 @@ class SeedHypervisorHostConfigure(KollaAnsibleMixin, KayobeAnsibleMixin,
if parsed_args.wipe_disks:
playbooks += _build_playbook_list("wipe-disks")
playbooks += _build_playbook_list(
"users", "yum", "dnf", "dev-tools", "network", "sysctl", "ntp",
"users", "dnf", "dev-tools", "network", "sysctl", "timezone",
"mdadm", "luks", "lvm", "seed-hypervisor-libvirt-host")
self.run_kayobe_playbooks(parsed_args, playbooks,
limit="seed-hypervisor")
@ -564,13 +564,13 @@ class SeedHostConfigure(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
* Optionally, wipe unmounted disk partitions (--wipe-disks).
* Configure user accounts, group associations, and authorised SSH keys.
* Configure a PyPI mirror.
* Configure Yum repos.
* Configure package repos.
* Disable SELinux.
* Configure the host's network interfaces.
* Set sysctl parameters.
* Configure IP routing and source NAT.
* Disable bootstrap interface configuration.
* Configure NTP.
* Configure timezone.
* Optionally, configure software RAID arrays.
* Optionally, configure encryption.
* Configure LVM volumes.
@ -603,10 +603,10 @@ class SeedHostConfigure(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
if parsed_args.wipe_disks:
playbooks += _build_playbook_list("wipe-disks")
playbooks += _build_playbook_list(
"users", "yum", "dnf", "dev-tools", "disable-selinux", "network",
"sysctl", "ip-routing", "snat", "disable-glean", "ntp", "mdadm",
"luks", "lvm", "docker-devicemapper", "kolla-ansible-user",
"kolla-pip", "kolla-target-venv")
"users", "dnf", "dev-tools", "disable-selinux", "network",
"sysctl", "ip-routing", "snat", "disable-glean", "timezone",
"mdadm", "luks", "lvm", "docker-devicemapper",
"kolla-ansible-user", "kolla-pip", "kolla-target-venv")
self.run_kayobe_playbooks(parsed_args, playbooks, limit="seed")
self.generate_kolla_ansible_config(parsed_args, service_config=False)
@ -694,7 +694,6 @@ class SeedServiceDeploy(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
* Deploys the bifrost container using kolla-ansible.
* Builds disk images for the overcloud hosts using Diskimage Builder (DIB).
* Performs a workaround in the overcloud host image to fix resolv.conf.
* Performs a workaround in the overcloud host image to update cloud-init
* Configures ironic inspector introspection rules in the bifrost inspector
service.
* When enabled, configures a Bare Metal Provisioning (BMP) environment for
@ -709,7 +708,6 @@ class SeedServiceDeploy(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
self.run_kolla_ansible_seed(parsed_args, "deploy-bifrost")
playbooks = _build_playbook_list(
"overcloud-host-image-workaround-resolv",
"overcloud-host-image-workaround-cloud-init",
"seed-introspection-rules",
"dell-switch-bmp")
self.run_kayobe_playbooks(parsed_args, playbooks)
@ -725,7 +723,6 @@ class SeedServiceUpgrade(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
* Deploys the bifrost container using kolla-ansible.
* Builds disk images for the overcloud hosts using Diskimage Builder (DIB).
* Performs a workaround in the overcloud host image to fix resolv.conf.
* Performs a workaround in the overcloud host image to update cloud-init
* Configures ironic inspector introspection rules in the bifrost inspector
service.
* When enabled, configures a Bare Metal Provisioning (BMP) environment for
@ -743,7 +740,6 @@ class SeedServiceUpgrade(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
self.run_kolla_ansible_seed(parsed_args, "upgrade-bifrost")
playbooks = _build_playbook_list(
"overcloud-host-image-workaround-resolv",
"overcloud-host-image-workaround-cloud-init",
"seed-introspection-rules",
"dell-switch-bmp")
self.run_kayobe_playbooks(parsed_args, playbooks)
@ -931,12 +927,12 @@ class OvercloudHostConfigure(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
* Optionally, wipe unmounted disk partitions (--wipe-disks).
* Configure user accounts, group associations, and authorised SSH keys.
* Configure a PyPI mirror.
* Configure Yum repos.
* Configure package repos.
* Disable SELinux.
* Configure the host's network interfaces.
* Set sysctl parameters.
* Disable bootstrap interface configuration.
* Configure NTP.
* Configure timezone.
* Optionally, configure software RAID arrays.
* Optionally, configure encryption.
* Configure LVM volumes.
@ -968,10 +964,10 @@ class OvercloudHostConfigure(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
if parsed_args.wipe_disks:
playbooks += _build_playbook_list("wipe-disks")
playbooks += _build_playbook_list(
"users", "yum", "dnf", "dev-tools", "disable-selinux", "network",
"sysctl", "disable-glean", "disable-cloud-init", "ntp", "mdadm",
"luks", "lvm", "docker-devicemapper", "kolla-ansible-user",
"kolla-pip", "kolla-target-venv")
"users", "dnf", "dev-tools", "disable-selinux", "network",
"sysctl", "disable-glean", "disable-cloud-init", "timezone",
"mdadm", "luks", "lvm", "docker-devicemapper",
"kolla-ansible-user", "kolla-pip", "kolla-target-venv")
self.run_kayobe_playbooks(parsed_args, playbooks, limit="overcloud")
self.generate_kolla_ansible_config(parsed_args, service_config=False)

View File

@ -321,12 +321,11 @@ class TestCase(unittest.TestCase):
utils.get_data_files_path(
"ansible", "kayobe-target-venv.yml"),
utils.get_data_files_path("ansible", "users.yml"),
utils.get_data_files_path("ansible", "yum.yml"),
utils.get_data_files_path("ansible", "dnf.yml"),
utils.get_data_files_path("ansible", "dev-tools.yml"),
utils.get_data_files_path("ansible", "network.yml"),
utils.get_data_files_path("ansible", "sysctl.yml"),
utils.get_data_files_path("ansible", "ntp.yml"),
utils.get_data_files_path("ansible", "timezone.yml"),
utils.get_data_files_path("ansible", "mdadm.yml"),
utils.get_data_files_path("ansible", "luks.yml"),
utils.get_data_files_path("ansible", "lvm.yml"),
@ -490,7 +489,6 @@ class TestCase(unittest.TestCase):
utils.get_data_files_path(
"ansible", "kayobe-target-venv.yml"),
utils.get_data_files_path("ansible", "users.yml"),
utils.get_data_files_path("ansible", "yum.yml"),
utils.get_data_files_path("ansible", "dnf.yml"),
utils.get_data_files_path("ansible", "dev-tools.yml"),
utils.get_data_files_path(
@ -500,7 +498,7 @@ class TestCase(unittest.TestCase):
utils.get_data_files_path("ansible", "ip-routing.yml"),
utils.get_data_files_path("ansible", "snat.yml"),
utils.get_data_files_path("ansible", "disable-glean.yml"),
utils.get_data_files_path("ansible", "ntp.yml"),
utils.get_data_files_path("ansible", "timezone.yml"),
utils.get_data_files_path("ansible", "mdadm.yml"),
utils.get_data_files_path("ansible", "luks.yml"),
utils.get_data_files_path("ansible", "lvm.yml"),
@ -798,8 +796,6 @@ class TestCase(unittest.TestCase):
[
utils.get_data_files_path(
"ansible", "overcloud-host-image-workaround-resolv.yml"), # noqa
utils.get_data_files_path(
"ansible", "overcloud-host-image-workaround-cloud-init.yml"), # noqa
utils.get_data_files_path(
"ansible", "seed-introspection-rules.yml"),
utils.get_data_files_path(
@ -854,9 +850,6 @@ class TestCase(unittest.TestCase):
utils.get_data_files_path(
"ansible",
"overcloud-host-image-workaround-resolv.yml"),
utils.get_data_files_path(
"ansible",
"overcloud-host-image-workaround-cloud-init.yml"),
utils.get_data_files_path(
"ansible",
"seed-introspection-rules.yml"),
@ -1006,7 +999,6 @@ class TestCase(unittest.TestCase):
utils.get_data_files_path(
"ansible", "kayobe-target-venv.yml"),
utils.get_data_files_path("ansible", "users.yml"),
utils.get_data_files_path("ansible", "yum.yml"),
utils.get_data_files_path("ansible", "dnf.yml"),
utils.get_data_files_path("ansible", "dev-tools.yml"),
utils.get_data_files_path(
@ -1016,7 +1008,7 @@ class TestCase(unittest.TestCase):
utils.get_data_files_path("ansible", "disable-glean.yml"),
utils.get_data_files_path(
"ansible", "disable-cloud-init.yml"),
utils.get_data_files_path("ansible", "ntp.yml"),
utils.get_data_files_path("ansible", "timezone.yml"),
utils.get_data_files_path("ansible", "mdadm.yml"),
utils.get_data_files_path("ansible", "luks.yml"),
utils.get_data_files_path("ansible", "lvm.yml"),

View File

@ -522,6 +522,8 @@ class TestCase(unittest.TestCase):
        ansible.prune_galaxy_roles(parsed_args)
        expected_roles = [
            'resmo.ntp',
            'stackhpc.ntp',
            'stackhpc.os-shade',
        ]
        mock_remove.assert_called_once_with(expected_roles,
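For context, ``prune_galaxy_roles`` itself is not shown in this diff. The test adds ``stackhpc.ntp`` to the roles expected to be pruned, which matches its removal from the role requirements later in this change. A minimal sketch of what the function presumably does; the removal helper name and the roles path argument are assumptions, not Kayobe API:

def prune_galaxy_roles(parsed_args):
    """Remove Ansible Galaxy roles that Kayobe no longer uses."""
    roles_to_remove = [
        'resmo.ntp',
        'stackhpc.ntp',
        'stackhpc.os-shade',
    ]
    # remove_galaxy_roles and the roles path argument are hypothetical; the
    # test above only checks that some removal helper receives this list.
    utils.remove_galaxy_roles(roles_to_remove, parsed_args.galaxy_roles_path)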

View File

@ -24,18 +24,6 @@ from kayobe import utils
class TestCase(unittest.TestCase):
    @mock.patch.object(utils, "run_command")
    def test_yum_install(self, mock_run):
        utils.yum_install(["package1", "package2"])
        mock_run.assert_called_once_with(["sudo", "yum", "-y", "install",
                                          "package1", "package2"])

    @mock.patch.object(utils, "run_command")
    def test_yum_install_failure(self, mock_run):
        mock_run.side_effect = subprocess.CalledProcessError(1, "command")
        self.assertRaises(SystemExit,
                          utils.yum_install, ["package1", "package2"])

    @mock.patch.object(utils, "run_command")
    def test_galaxy_install(self, mock_run):
        utils.galaxy_install("/path/to/role/file", "/path/to/roles")
@ -135,7 +123,7 @@ key2: value2
        self.assertEqual(expected, utils.escape_jinja(value))

    def test_detect_install_prefix(self):
        path = "/tmp/test/local/lib/python2.7/dist-packages"
        path = "/tmp/test/local/lib/python3.6/dist-packages"
        expected = os.path.normpath("/tmp/test/local/")
        result = utils._detect_install_prefix(path)
        self.assertEqual(expected, os.path.normpath(result))
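``_detect_install_prefix`` is exercised here but not shown. A minimal sketch of the behaviour the test expects, assuming the prefix is everything before the ``lib`` component of a module path:

import os


def _detect_install_prefix(path):
    """Return the installation prefix for a path under lib/pythonX.Y/."""
    # "/tmp/test/local/lib/python3.6/dist-packages" -> "/tmp/test/local".
    components = os.path.normpath(path).split(os.sep)
    lib_index = components.index("lib")
    return os.sep.join(components[:lib_index])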

View File

@ -70,18 +70,6 @@ def _get_base_path():
    return os.path.join(os.path.realpath(__file__), "..")


def yum_install(packages):
    """Install a list of packages via Yum."""
    cmd = ["sudo", "yum", "-y", "install"]
    cmd += packages
    try:
        run_command(cmd)
    except subprocess.CalledProcessError as e:
        print("Failed to install packages %s via Yum: returncode %d" %
              (", ".join(packages), e.returncode))
        sys.exit(e.returncode)


def galaxy_install(role_file, roles_path, force=False):
    """Install Ansible roles via Ansible Galaxy."""
    cmd = ["ansible-galaxy", "install"]

View File

@ -87,28 +87,6 @@ docker_storage_driver: devicemapper
timezone: Pacific/Honolulu
{% if ansible_os_family == 'RedHat' %}
{% if ansible_distribution_major_version | int == 7 %}
# Use a local Yum mirror.
yum_use_local_mirror: true
# Mirror FQDN for Yum repos.
yum_centos_mirror_host: "{{ zuul_site_mirror_fqdn }}"
# Mirror directory for Yum CentOS repos.
yum_centos_mirror_directory: 'centos'
# Mirror FQDN for Yum EPEL repos.
yum_epel_mirror_host: "{{ zuul_site_mirror_fqdn }}"
# Mirror directory for Yum EPEL repos.
yum_epel_mirror_directory: 'epel'
# Configure a custom Yum repository.
yum_custom_repos:
  td-agent:
    baseurl: http://packages.treasuredata.com/3/redhat/$releasever/$basearch
    gpgkey: https://packages.treasuredata.com/GPG-KEY-td-agent
    gpgcheck: yes
# Don't install EPEL repositories.
yum_install_epel: false
# Enable yum-cron.
yum_cron_enabled: true
{% else %}
# Use a local DNF mirror.
dnf_use_local_mirror: true
# Mirror FQDN for DNF repos.
@ -130,4 +108,3 @@ dnf_install_epel: false
# Enable DNF Automatic.
dnf_automatic_enabled: true
{% endif %}
{% endif %}

View File

@ -10,11 +10,6 @@ import distro
import pytest
def _is_yum():
    info = distro.linux_distribution()
    return info[0] == 'CentOS Linux' and info[1].startswith('7')


def _is_dnf():
    info = distro.linux_distribution()
    return info[0] == 'CentOS Linux' and info[1].startswith('8')
@ -130,14 +125,6 @@ def test_timezone(host):
assert "Pacific/Honolulu" in status
@pytest.mark.parametrize('repo', ["base", "extras", "updates", "epel"])
@pytest.mark.skipif(not _is_yum(), reason="Yum only supported on CentOS 7")
def test_yum_local_package_mirrors(host, repo):
assert os.getenv('SITE_MIRROR_FQDN')
info = host.check_output("yum repoinfo %s", repo)
assert os.getenv('SITE_MIRROR_FQDN') in info
@pytest.mark.parametrize('repo', ["AppStream", "BaseOS", "Extras", "epel",
"epel-modular"])
@pytest.mark.skipif(not _is_dnf(), reason="DNF only supported on CentOS 8")
@ -152,13 +139,6 @@ def test_dnf_local_package_mirrors(host, repo):
    assert os.getenv('SITE_MIRROR_FQDN') in info


@pytest.mark.skipif(not _is_yum(), reason="YUM only supported on CentOS 7")
def test_yum_custom_package_repository_is_available(host):
    with host.sudo():
        host.check_output("yum -y install td-agent")
    assert host.package("td-agent").is_installed


@pytest.mark.skipif(not _is_dnf(), reason="DNF only supported on CentOS 8")
def test_dnf_custom_package_repository_is_available(host):
    with host.sudo():
@ -166,13 +146,6 @@ def test_dnf_custom_package_repository_is_available(host):
assert host.package("td-agent").is_installed
@pytest.mark.skipif(not _is_yum(), reason="YUM only supported on CentOS 7")
def test_yum_cron(host):
assert host.package("yum-cron").is_installed
assert host.service("yum-cron").is_enabled
assert host.service("yum-cron").is_running
@pytest.mark.skipif(not _is_dnf(), reason="DNF only supported on CentOS 8")
def test_dnf_automatic(host):
assert host.package("dnf-automatic").is_installed

View File

@ -1,9 +0,0 @@
---
fixes:
- |
Fixes an issue where chronyd would be enabled as a systemd service in
addition to ntpd. This causes issues in deployments where the NTP servers
have been customized, as chronyd would win the race on startup, but its
configuration file would not have been configured by Kayobe. `See story
2005272 <https://storyboard.openstack.org/#!/story/2005272>`_ for more
details.

View File

@ -1,8 +1,9 @@
---
upgrade:
- |
Support for configuring an NTP daemon on the seed and overcloud hosts is no
longer present for CentOS 8, as appropriate packages are not available.
Instead, Kolla Ansible is configured to deploy the ``chrony`` container on
overcloud hosts by default. This may be disabled by setting
``kolla_enable_chrony`` to ``false``.
Support for configuring an NTP daemon on the seed, seed hypervisor and
overcloud hosts is no longer present, as appropriate packages are not
available for CentOS 8. Instead, Kolla Ansible is configured to deploy the
``chrony`` container on overcloud hosts by default. This may be disabled by
setting ``kolla_enable_chrony`` to ``false``. There is no support for
running a ``chrony`` container on the seed or seed hypervisor hosts.

View File

@ -0,0 +1,8 @@
---
upgrade:
- |
Removes the workaround for using a tagged VLAN as the admin network
interface on overcloud hosts. This was necessary for CentOS 7.5, which
shipped an old version of ``cloud-init``. The variables
``overcloud_host_image_workaround_cloud_init_enabled`` and
``overcloud_host_image_workaround_cloud_init_repo`` have been removed.

View File

@ -1,7 +0,0 @@
---
fixes:
- |
Fixes an issue where host configuration would fail if
``ntp_service_enabled`` is set to ``false`` or ``kolla_enable_chrony`` is
set to ``true``. See `story 2007384
<https://storyboard.openstack.org/#!/story/2007384>`__ for details.

View File

@ -11,8 +11,6 @@
- src: mrlesmithjr.mdadm
  # There are no versioned releases of this role.
  version: 5be3ee7d330aa17317897bd104dc87ff0df11915
- src: stackhpc.ntp
  version: 2.0.0
- src: singleplatform-eng.users
  version: v1.2.5
- src: stackhpc.dell-powerconnect-switch

View File

@ -10,12 +10,12 @@
# NOTE(mgoddard): The CentOS image used in CI has epel-release installed,
# but the configure-mirrors role used by Zuul disables epel. Since we
# install epel-release and expect epel to be enabled, enable it here.
- name: Ensure yum-utils is installed
yum:
name: yum-utils
state: installed
- name: Ensure dnf-plugins-core is installed
package:
name: dnf-plugins-core
state: present
- name: Enable the EPEL yum repository
command: yum-config-manager --enable epel
- name: Enable the EPEL repository
command: dnf config-manager --enable epel
when: ansible_os_family == 'RedHat'
become: true

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
"""
Sphinx documentation style checker.

View File

@ -1,10 +1,4 @@
---
- nodeset:
    name: kayobe-centos
    nodes:
      - name: primary
        label: centos-7

- nodeset:
    name: kayobe-centos8
    nodes: