Add container engine option to scripts

This patch adds a way to choose the container engine inside the tool and
test scripts. This is in preparation for the Podman introduction, but it
still leaves Docker as the default container engine.

Signed-off-by: Martin Hiner <m.hiner@partner.samsung.com>
Change-Id: I395d2bdb0dfb4b325b6ad197c8893c8a0f768324
This commit is contained in:
Martin Hiner 2022-11-21 18:38:57 +01:00
parent 92ddbdfbc1
commit 53e8b80ed3
27 changed files with 237 additions and 99 deletions

View File

@ -1,3 +1,3 @@
--- ---
- name: Destroying all Kolla containers and volumes - name: Destroying all Kolla containers and volumes
script: ../tools/cleanup-containers script: ../tools/cleanup-containers "{{ kolla_container_engine }}"

View File

@ -1,5 +1,5 @@
--- ---
- name: Removing Kolla images - name: Removing Kolla images
script: ../tools/cleanup-images --all script: ../tools/cleanup-images --all -e "{{ kolla_container_engine }}"
when: when:
- destroy_include_images | bool - destroy_include_images | bool

View File

@ -1,3 +1,5 @@
--- ---
- name: Ensure the docker service is running - name: Ensure the docker service is running
environment:
CONTAINER_ENGINE: "{{ kolla_container_engine }}"
script: ../tools/validate-docker-execute.sh script: ../tools/validate-docker-execute.sh

View File

@ -1,3 +1,5 @@
--- ---
- name: Ensure the docker service is running - name: Ensure the docker service is running
environment:
CONTAINER_ENGINE: "{{ kolla_container_engine }}"
script: ../tools/validate-docker-execute.sh script: ../tools/validate-docker-execute.sh

View File

@ -7,7 +7,22 @@ set -o errexit
export PYTHONUNBUFFERED=1 export PYTHONUNBUFFERED=1
check_failure() { check_podman_failures() {
failed_containers=$(sudo podman ps -a --format "{{.Names}}" \
--filter status=created \
--filter status=paused \
--filter status=exited \
--filter status=unknown)
}
check_podman_unhealthies() {
unhealthy_containers=$(sudo podman ps -a --format "{{.Names}}" \
--filter health=unhealthy)
}
check_docker_failures() {
# All docker container's status are created, restarting, running, removing, # All docker container's status are created, restarting, running, removing,
# paused, exited and dead. Containers without running status are treated as # paused, exited and dead. Containers without running status are treated as
# failure. removing is added in docker 1.13, just ignore it now. # failure. removing is added in docker 1.13, just ignore it now.
@ -19,9 +34,26 @@ check_failure() {
--filter status=paused \ --filter status=paused \
--filter status=exited \ --filter status=exited \
--filter status=dead) --filter status=dead)
}
check_docker_unhealthies() {
unhealthy_containers=$(sudo docker ps -a --format "{{.Names}}" \ unhealthy_containers=$(sudo docker ps -a --format "{{.Names}}" \
--filter health=unhealthy) --filter health=unhealthy)
}
check_failure() {
if [ "$CONTAINER_ENGINE" = "docker" ]; then
check_docker_failures
check_docker_unhealthies
elif [ "$CONTAINER_ENGINE" = "podman" ]; then
check_podman_failures
check_podman_unhealthies
else
echo "Invalid container engine: ${CONTAINER_ENGINE}"
exit 1
fi
if [[ -n "$unhealthy_containers" ]]; then if [[ -n "$unhealthy_containers" ]]; then
exit 1; exit 1;

View File

@ -24,7 +24,7 @@ function deploy_tenks_logged {
# Script installed onto the host to fool tenks into using the containerised # Script installed onto the host to fool tenks into using the containerised
# Open vSwitch rather than installing its own. # Open vSwitch rather than installing its own.
sudo docker exec openvswitch_vswitchd ovs-vsctl "\$@" sudo ${CONTAINER_ENGINE} exec openvswitch_vswitchd ovs-vsctl "\$@"
EOF EOF
sudo chmod 755 /usr/bin/ovs-vsctl sudo chmod 755 /usr/bin/ovs-vsctl
@ -57,7 +57,7 @@ EOF
function deploy_tenks { function deploy_tenks {
echo "Configuring virtual bare metal via Tenks" echo "Configuring virtual bare metal via Tenks"
deploy_tenks_logged > /tmp/logs/ansible/deploy-tenks 2>&1 deploy_tenks_logged $1 > /tmp/logs/ansible/deploy-tenks 2>&1
result=$? result=$?
if [[ $result != 0 ]]; then if [[ $result != 0 ]]; then
echo "Deploying tenks failed. See ansible/deploy-tenks for details" echo "Deploying tenks failed. See ansible/deploy-tenks for details"
@ -67,4 +67,4 @@ function deploy_tenks {
return $result return $result
} }
deploy_tenks deploy_tenks $1

View File

@ -5,12 +5,25 @@ set +o errexit
copy_logs() { copy_logs() {
LOG_DIR=${LOG_DIR:-/tmp/logs} LOG_DIR=${LOG_DIR:-/tmp/logs}
cp -rnL /var/lib/docker/volumes/kolla_logs/_data/* ${LOG_DIR}/kolla/ if [ "$CONTAINER_ENGINE" = "docker" ]; then
VOLUMES_DIR="/var/lib/docker/volumes"
elif [ "$CONTAINER_ENGINE" = "podman" ]; then
VOLUMES_DIR="/var/lib/containers/storage/volumes"
else
echo "Invalid container engine: ${CONTAINER_ENGINE}"
exit 1
fi
cp -rnL ${VOLUMES_DIR}/kolla_logs/_data/* ${LOG_DIR}/kolla/
cp -rnL /etc/kolla/* ${LOG_DIR}/kolla_configs/ cp -rnL /etc/kolla/* ${LOG_DIR}/kolla_configs/
# Don't save the IPA images. # Don't save the IPA images.
rm ${LOG_DIR}/kolla_configs/config/ironic/ironic-agent.{kernel,initramfs} rm ${LOG_DIR}/kolla_configs/config/ironic/ironic-agent.{kernel,initramfs}
mkdir ${LOG_DIR}/system_configs/ mkdir ${LOG_DIR}/system_configs/
cp -rL /etc/{hostname,hosts,host.conf,resolv.conf,nsswitch.conf,docker,systemd} ${LOG_DIR}/system_configs/ cp -rL /etc/{hostname,hosts,host.conf,resolv.conf,nsswitch.conf,systemd} ${LOG_DIR}/system_configs/
# copy docker configs if used
if [ "$CONTAINER_ENGINE" = "docker" ]; then
cp -rL /etc/docker/ ${LOG_DIR}/system_configs/
fi
# Remove /var/log/kolla link to not double the data uploaded # Remove /var/log/kolla link to not double the data uploaded
unlink /var/log/kolla unlink /var/log/kolla
cp -rvnL /var/log/* ${LOG_DIR}/system_logs/ cp -rvnL /var/log/* ${LOG_DIR}/system_logs/
@ -18,11 +31,15 @@ copy_logs() {
if [[ -x "$(command -v journalctl)" ]]; then if [[ -x "$(command -v journalctl)" ]]; then
journalctl --no-pager > ${LOG_DIR}/system_logs/syslog.txt journalctl --no-pager > ${LOG_DIR}/system_logs/syslog.txt
journalctl --no-pager -u docker.service > ${LOG_DIR}/system_logs/docker.log journalctl --no-pager -u ${CONTAINER_ENGINE}.service > ${LOG_DIR}/system_logs/${CONTAINER_ENGINE}.log
if [ "$CONTAINER_ENGINE" = "docker" ]; then
journalctl --no-pager -u containerd.service > ${LOG_DIR}/system_logs/containerd.log journalctl --no-pager -u containerd.service > ${LOG_DIR}/system_logs/containerd.log
fi
else else
if [ "$CONTAINER_ENGINE" = "docker" ]; then
cp /var/log/upstart/docker.log ${LOG_DIR}/system_logs/docker.log cp /var/log/upstart/docker.log ${LOG_DIR}/system_logs/docker.log
fi fi
fi
cp -r /etc/sudoers.d ${LOG_DIR}/system_logs/ cp -r /etc/sudoers.d ${LOG_DIR}/system_logs/
cp /etc/sudoers ${LOG_DIR}/system_logs/sudoers.txt cp /etc/sudoers ${LOG_DIR}/system_logs/sudoers.txt
@ -81,8 +98,12 @@ copy_logs() {
# final memory usage and process list # final memory usage and process list
ps -eo user,pid,ppid,lwp,%cpu,%mem,size,rss,cmd > ${LOG_DIR}/system_logs/ps.txt ps -eo user,pid,ppid,lwp,%cpu,%mem,size,rss,cmd > ${LOG_DIR}/system_logs/ps.txt
# docker related information # container engine related information
(docker info && docker images && docker ps -a && docker network ls && docker inspect $(docker ps -aq)) > ${LOG_DIR}/system_logs/docker-info.txt (${CONTAINER_ENGINE} info &&
${CONTAINER_ENGINE} images &&
${CONTAINER_ENGINE} ps -a &&
${CONTAINER_ENGINE} network ls &&
${CONTAINER_ENGINE} inspect $(${CONTAINER_ENGINE} ps -aq)) > ${LOG_DIR}/system_logs/${CONTAINER_ENGINE}-info.txt
# save dbus services # save dbus services
dbus-send --system --print-reply --dest=org.freedesktop.DBus /org/freedesktop/DBus org.freedesktop.DBus.ListNames > ${LOG_DIR}/system_logs/dbus-services.txt dbus-send --system --print-reply --dest=org.freedesktop.DBus /org/freedesktop/DBus org.freedesktop.DBus.ListNames > ${LOG_DIR}/system_logs/dbus-services.txt
@ -98,28 +119,28 @@ copy_logs() {
fi fi
# bifrost related logs # bifrost related logs
if [[ $(docker ps --filter name=bifrost_deploy --format "{{.Names}}") ]]; then if [[ $(${CONTAINER_ENGINE} ps --filter name=bifrost_deploy --format "{{.Names}}") ]]; then
for service in dnsmasq ironic ironic-api ironic-conductor ironic-inspector mariadb nginx; do for service in dnsmasq ironic ironic-api ironic-conductor ironic-inspector mariadb nginx; do
mkdir -p ${LOG_DIR}/kolla/$service mkdir -p ${LOG_DIR}/kolla/$service
docker exec bifrost_deploy systemctl status $service > ${LOG_DIR}/kolla/$service/systemd-status-$service.txt ${CONTAINER_ENGINE} exec bifrost_deploy systemctl status $service > ${LOG_DIR}/kolla/$service/systemd-status-$service.txt
done done
docker exec bifrost_deploy journalctl -u mariadb > ${LOG_DIR}/kolla/mariadb/mariadb.txt ${CONTAINER_ENGINE} exec bifrost_deploy journalctl -u mariadb > ${LOG_DIR}/kolla/mariadb/mariadb.txt
fi fi
# haproxy related logs # haproxy related logs
if [[ $(docker ps --filter name=haproxy --format "{{.Names}}") ]]; then if [[ $(${CONTAINER_ENGINE} ps --filter name=haproxy --format "{{.Names}}") ]]; then
mkdir -p ${LOG_DIR}/kolla/haproxy mkdir -p ${LOG_DIR}/kolla/haproxy
docker exec haproxy bash -c 'echo show stat | socat stdio /var/lib/kolla/haproxy/haproxy.sock' > ${LOG_DIR}/kolla/haproxy/stats.txt ${CONTAINER_ENGINE} exec haproxy bash -c 'echo show stat | socat stdio /var/lib/kolla/haproxy/haproxy.sock' > ${LOG_DIR}/kolla/haproxy/stats.txt
fi fi
# FIXME: remove # FIXME: remove
if [[ $(docker ps -a --filter name=ironic_inspector --format "{{.Names}}") ]]; then if [[ $(${CONTAINER_ENGINE} ps -a --filter name=ironic_inspector --format "{{.Names}}") ]]; then
mkdir -p ${LOG_DIR}/kolla/ironic-inspector mkdir -p ${LOG_DIR}/kolla/ironic-inspector
ls -lR /var/lib/docker/volumes/ironic_inspector_dhcp_hosts > ${LOG_DIR}/kolla/ironic-inspector/var-lib-ls.txt ls -lR ${VOLUMES_DIR}/ironic_inspector_dhcp_hosts > ${LOG_DIR}/kolla/ironic-inspector/var-lib-ls.txt
fi fi
for container in $(docker ps -a --format "{{.Names}}"); do for container in $(${CONTAINER_ENGINE} ps -a --format "{{.Names}}"); do
docker logs --timestamps --tail all ${container} &> ${LOG_DIR}/docker_logs/${container}.txt ${CONTAINER_ENGINE} logs --timestamps --tail all ${container} &> ${LOG_DIR}/container_logs/${container}.txt
done done
# Rename files to .txt; this is so that when displayed via # Rename files to .txt; this is so that when displayed via
@ -128,7 +149,7 @@ copy_logs() {
# download it, etc. # download it, etc.
# Rename all .log files to .txt files # Rename all .log files to .txt files
for f in $(find ${LOG_DIR}/{system_logs,kolla,docker_logs} -name "*.log"); do for f in $(find ${LOG_DIR}/{system_logs,kolla,${CONTAINER_ENGINE}_logs} -name "*.log"); do
mv $f ${f/.log/.txt} mv $f ${f/.log/.txt}
done done

View File

@ -10,7 +10,7 @@ function init_swift_logged {
# the order is important due to port incrementation # the order is important due to port incrementation
for ring in object account container; do for ring in object account container; do
# create the *.builder files # create the *.builder files
sudo docker run \ sudo ${CONTAINER_ENGINE} run \
--rm \ --rm \
-v /etc/kolla/config/swift/:/etc/kolla/config/swift/ \ -v /etc/kolla/config/swift/:/etc/kolla/config/swift/ \
$KOLLA_SWIFT_BASE_IMAGE \ $KOLLA_SWIFT_BASE_IMAGE \
@ -19,7 +19,7 @@ function init_swift_logged {
# add nodes to them # add nodes to them
for node in ${STORAGE_NODES[@]}; do for node in ${STORAGE_NODES[@]}; do
sudo docker run \ sudo ${CONTAINER_ENGINE} run \
--rm \ --rm \
-v /etc/kolla/config/swift/:/etc/kolla/config/swift/ \ -v /etc/kolla/config/swift/:/etc/kolla/config/swift/ \
$KOLLA_SWIFT_BASE_IMAGE \ $KOLLA_SWIFT_BASE_IMAGE \
@ -28,7 +28,7 @@ function init_swift_logged {
done done
# create the *.ring.gz files # create the *.ring.gz files
sudo docker run \ sudo ${CONTAINER_ENGINE} run \
--rm \ --rm \
-v /etc/kolla/config/swift/:/etc/kolla/config/swift/ \ -v /etc/kolla/config/swift/:/etc/kolla/config/swift/ \
$KOLLA_SWIFT_BASE_IMAGE \ $KOLLA_SWIFT_BASE_IMAGE \
@ -36,7 +36,7 @@ function init_swift_logged {
/etc/kolla/config/swift/$ring.builder rebalance /etc/kolla/config/swift/$ring.builder rebalance
# display contents for debugging # display contents for debugging
sudo docker run \ sudo ${CONTAINER_ENGINE} run \
--rm \ --rm \
-v /etc/kolla/config/swift/:/etc/kolla/config/swift/ \ -v /etc/kolla/config/swift/:/etc/kolla/config/swift/ \
$KOLLA_SWIFT_BASE_IMAGE \ $KOLLA_SWIFT_BASE_IMAGE \

View File

@ -69,6 +69,7 @@
environment: environment:
LOG_DIR: "{{ logs_dir }}/pre" LOG_DIR: "{{ logs_dir }}/pre"
KOLLA_INTERNAL_VIP_ADDRESS: "{{ kolla_internal_vip_address }}" KOLLA_INTERNAL_VIP_ADDRESS: "{{ kolla_internal_vip_address }}"
CONTAINER_ENGINE: "{{ container_engine }}"
script: get_logs.sh script: get_logs.sh
register: get_logs_result register: get_logs_result
become: true become: true
@ -84,7 +85,7 @@
state: "directory" state: "directory"
mode: 0777 mode: 0777
with_items: with_items:
- "docker_logs" - "container_logs"
- "kolla_configs" - "kolla_configs"
- "system_logs" - "system_logs"
- "kolla" - "kolla"

View File

@ -132,6 +132,7 @@
path: "/etc/docker" path: "/etc/docker"
state: directory state: directory
become: true become: true
when: container_engine == 'docker'
- name: Ensure configuration directories exist - name: Ensure configuration directories exist
file: file:
@ -321,6 +322,7 @@
SCENARIO: "{{ scenario }}" SCENARIO: "{{ scenario }}"
UPPER_CONSTRAINTS: "{{ upper_constraints_file }}" UPPER_CONSTRAINTS: "{{ upper_constraints_file }}"
KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}" KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}"
CONTAINER_ENGINE: "{{ container_engine }}"
- name: Run init-swift.sh script - name: Run init-swift.sh script
script: script:
@ -333,6 +335,7 @@
STORAGE_NODES: "{{ groups['all'] | map('extract', hostvars, STORAGE_NODES: "{{ groups['all'] | map('extract', hostvars,
['ansible_'+api_interface_name, 'ipv4', 'address']) ['ansible_'+api_interface_name, 'ipv4', 'address'])
| join(' ') }}" | join(' ') }}"
CONTAINER_ENGINE: "{{ container_engine }}"
when: scenario == 'swift' when: scenario == 'swift'
# At this point we have generated all necessary configuration, and are # At this point we have generated all necessary configuration, and are
@ -439,6 +442,7 @@
when: scenario == 'zun' when: scenario == 'zun'
environment: environment:
BASE_DISTRO: "{{ base_distro }}" BASE_DISTRO: "{{ base_distro }}"
CONTAINER_ENGINE: "{{ container_engine }}"
- name: Run test-swift.sh script - name: Run test-swift.sh script
script: script:
@ -466,6 +470,7 @@
# by default to ~/tenks-venv # by default to ~/tenks-venv
TENKS_VENV_PATH: "{{ ansible_env.HOME }}/kolla-ansible-tenks-venv" TENKS_VENV_PATH: "{{ ansible_env.HOME }}/kolla-ansible-tenks-venv"
TENKS_SRC_PATH: "{{ ansible_env.HOME }}/src/opendev.org/openstack/tenks" TENKS_SRC_PATH: "{{ ansible_env.HOME }}/src/opendev.org/openstack/tenks"
CONTAINER_ENGINE: "{{ container_engine }}"
- name: Run test-ironic.sh script - name: Run test-ironic.sh script
script: script:
@ -498,6 +503,8 @@
executable: /bin/bash executable: /bin/bash
chdir: "{{ kolla_ansible_src_dir }}" chdir: "{{ kolla_ansible_src_dir }}"
when: scenario == "masakari" when: scenario == "masakari"
environment:
CONTAINER_ENGINE: "{{ container_engine }}"
- name: Run test-ovn.sh script - name: Run test-ovn.sh script
script: script:
@ -505,6 +512,8 @@
executable: /bin/bash executable: /bin/bash
chdir: "{{ kolla_ansible_src_dir }}" chdir: "{{ kolla_ansible_src_dir }}"
when: scenario == "ovn" when: scenario == "ovn"
environment:
CONTAINER_ENGINE: "{{ container_engine }}"
- name: Run test-mariadb.sh script - name: Run test-mariadb.sh script
script: script:
@ -514,6 +523,7 @@
when: scenario == "mariadb" when: scenario == "mariadb"
environment: environment:
KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}" KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}"
CONTAINER_ENGINE: "{{ container_engine }}"
- name: Run test-prometheus-opensearch.sh script - name: Run test-prometheus-opensearch.sh script
script: script:
@ -545,6 +555,8 @@
cmd: tests/check-failure.sh cmd: tests/check-failure.sh
executable: /bin/bash executable: /bin/bash
chdir: "{{ kolla_ansible_src_dir }}" chdir: "{{ kolla_ansible_src_dir }}"
environment:
CONTAINER_ENGINE: "{{ container_engine }}"
- name: Run pre-upgrade check-config.sh script - name: Run pre-upgrade check-config.sh script
shell: shell:
@ -735,6 +747,8 @@
cmd: tests/test-bifrost.sh cmd: tests/test-bifrost.sh
executable: /bin/bash executable: /bin/bash
chdir: "{{ kolla_ansible_src_dir }}" chdir: "{{ kolla_ansible_src_dir }}"
environment:
CONTAINER_ENGINE: "{{ container_engine }}"
- name: Run upgrade-bifrost.sh script - name: Run upgrade-bifrost.sh script
shell: shell:
@ -756,6 +770,8 @@
cmd: tests/check-failure.sh cmd: tests/check-failure.sh
executable: /bin/bash executable: /bin/bash
chdir: "{{ kolla_ansible_src_dir }}" chdir: "{{ kolla_ansible_src_dir }}"
environment:
CONTAINER_ENGINE: "{{ container_engine }}"
- name: Run check-config.sh script - name: Run check-config.sh script
shell: shell:
@ -794,6 +810,8 @@
cmd: tests/check-failure.sh cmd: tests/check-failure.sh
executable: /bin/bash executable: /bin/bash
chdir: "{{ kolla_ansible_src_dir }}" chdir: "{{ kolla_ansible_src_dir }}"
environment:
CONTAINER_ENGINE: "{{ container_engine }}"
- name: Run check-config.sh script - name: Run check-config.sh script
shell: shell:

View File

@ -109,7 +109,7 @@ EOF
mkdir -p /tmp/logs/build mkdir -p /tmp/logs/build
sudo docker run -d --net=host -e REGISTRY_HTTP_ADDR=0.0.0.0:4000 --restart=always -v /opt/kolla_registry/:/var/lib/registry --name registry registry:2 sudo $CONTAINER_ENGINE run -d --net=host -e REGISTRY_HTTP_ADDR=0.0.0.0:4000 --restart=always -v /opt/kolla_registry/:/var/lib/registry --name registry registry:2
python3 -m venv ~/kolla-venv python3 -m venv ~/kolla-venv
. ~/kolla-venv/bin/activate . ~/kolla-venv/bin/activate
@ -121,8 +121,8 @@ EOF
# NOTE(yoctozepto): due to debian buster we push after images are built # NOTE(yoctozepto): due to debian buster we push after images are built
# see https://github.com/docker/for-linux/issues/711 # see https://github.com/docker/for-linux/issues/711
if [[ "debian" == $BASE_DISTRO ]]; then if [[ "debian" == $BASE_DISTRO ]]; then
for img in $(sudo docker image ls --format '{{ .Repository }}:{{ .Tag }}' | grep lokolla/); do for img in $(sudo ${CONTAINER_ENGINE} image ls --format '{{ .Repository }}:{{ .Tag }}' | grep lokolla/); do
sudo docker push $img; sudo $CONTAINER_ENGINE push $img;
done done
fi fi

View File

@ -13,6 +13,7 @@ kolla_base_distro: "{{ base_distro }}"
# Zed dropped install_type so we have it only on upgrades # Zed dropped install_type so we have it only on upgrades
network_interface: "{{ api_interface_name }}" network_interface: "{{ api_interface_name }}"
network_address_family: "{{ address_family }}" network_address_family: "{{ address_family }}"
kolla_container_engine: "{{ container_engine }}"
docker_restart_policy: "no" docker_restart_policy: "no"
docker_custom_config: docker_custom_config:
debug: true debug: true

View File

@ -8,11 +8,13 @@ export PYTHONUNBUFFERED=1
function test_bifrost { function test_bifrost {
container_engine="${1:-docker}"
# TODO(mgoddard): More testing, deploy bare metal nodes. # TODO(mgoddard): More testing, deploy bare metal nodes.
# TODO(mgoddard): Use openstackclient when clouds.yaml works. See # TODO(mgoddard): Use openstackclient when clouds.yaml works. See
# https://bugs.launchpad.net/bifrost/+bug/1754070. # https://bugs.launchpad.net/bifrost/+bug/1754070.
attempts=0 attempts=0
while [[ $(sudo docker exec bifrost_deploy bash -c "OS_CLOUD=bifrost openstack baremetal driver list -f value" | wc -l) -eq 0 ]]; do while [[ $(sudo ${container_engine} exec bifrost_deploy bash -c "OS_CLOUD=bifrost openstack baremetal driver list -f value" | wc -l) -eq 0 ]]; do
attempts=$((attempts + 1)) attempts=$((attempts + 1))
if [[ $attempts -gt 6 ]]; then if [[ $attempts -gt 6 ]]; then
echo "Timed out waiting for ironic conductor to become active" echo "Timed out waiting for ironic conductor to become active"
@ -20,10 +22,10 @@ function test_bifrost {
fi fi
sleep 10 sleep 10
done done
sudo docker exec bifrost_deploy bash -c "OS_CLOUD=bifrost openstack baremetal node list" sudo $container_engine exec bifrost_deploy bash -c "OS_CLOUD=bifrost openstack baremetal node list"
sudo docker exec bifrost_deploy bash -c "OS_CLOUD=bifrost openstack baremetal node create --driver ipmi --name test-node" sudo $container_engine exec bifrost_deploy bash -c "OS_CLOUD=bifrost openstack baremetal node create --driver ipmi --name test-node"
sudo docker exec bifrost_deploy bash -c "OS_CLOUD=bifrost openstack baremetal node delete test-node" sudo $container_engine exec bifrost_deploy bash -c "OS_CLOUD=bifrost openstack baremetal node delete test-node"
} }
test_bifrost test_bifrost $1

View File

@ -12,7 +12,7 @@ export PYTHONUNBUFFERED=1
function mariadb_stop { function mariadb_stop {
echo "Stopping the database cluster" echo "Stopping the database cluster"
kolla-ansible -i ${RAW_INVENTORY} -vvv stop --yes-i-really-really-mean-it --tags mariadb --skip-tags common kolla-ansible -i ${RAW_INVENTORY} -vvv stop --yes-i-really-really-mean-it --tags mariadb --skip-tags common
if [[ $(sudo docker ps -q | grep mariadb | wc -l) -ne 0 ]]; then if [[ $(sudo ${container_engine} ps -q | grep mariadb | wc -l) -ne 0 ]]; then
echo "Failed to stop MariaDB cluster" echo "Failed to stop MariaDB cluster"
return 1 return 1
fi fi
@ -48,4 +48,5 @@ function test_mariadb {
return $result return $result
} }
container_engine="${1:-docker}"
test_mariadb test_mariadb

View File

@ -8,6 +8,7 @@ set -o pipefail
export PYTHONUNBUFFERED=1 export PYTHONUNBUFFERED=1
function test_hacluster_logged { function test_hacluster_logged {
container_engine="${1:-docker}"
local cluster_failure local cluster_failure
cluster_failure=0 cluster_failure=0
@ -17,17 +18,17 @@ function test_hacluster_logged {
# var setting from the container which would cause these commands to log up # var setting from the container which would cause these commands to log up
# to 'trace' (likely a pacemaker bug) # to 'trace' (likely a pacemaker bug)
if ! sudo docker exec hacluster_pacemaker cibadmin -VVVVVV --query --local; then if ! sudo ${container_engine} exec hacluster_pacemaker cibadmin -VVVVVV --query --local; then
cluster_failure=1 cluster_failure=1
fi fi
local mon_output local mon_output
if ! mon_output=$(sudo docker exec -e PCMK_debug=no hacluster_pacemaker crm_mon -VVVVV --one-shot); then if ! mon_output=$(sudo ${container_engine} exec -e PCMK_debug=no hacluster_pacemaker crm_mon -VVVVV --one-shot); then
cluster_failure=1 cluster_failure=1
fi fi
if ! sudo docker exec -e PCMK_debug=no hacluster_pacemaker crm_verify -VVVVV --live-check; then if ! sudo ${container_engine} exec -e PCMK_debug=no hacluster_pacemaker crm_verify -VVVVV --live-check; then
cluster_failure=1 cluster_failure=1
fi fi
@ -81,7 +82,7 @@ function test_masakari_logged {
function test_masakari { function test_masakari {
echo "Testing Masakari" echo "Testing Masakari"
test_hacluster_logged > /tmp/logs/ansible/test-hacluster 2>&1 test_hacluster_logged $1 > /tmp/logs/ansible/test-hacluster 2>&1
test_masakari_logged > /tmp/logs/ansible/test-masakari 2>&1 test_masakari_logged > /tmp/logs/ansible/test-masakari 2>&1
result=$? result=$?
if [[ $result != 0 ]]; then if [[ $result != 0 ]]; then
@ -92,4 +93,4 @@ function test_masakari {
return $result return $result
} }
test_masakari test_masakari $1

View File

@ -15,18 +15,18 @@ function test_ovn {
# List OVN NB/SB entries # List OVN NB/SB entries
echo "OVN NB DB entries:" echo "OVN NB DB entries:"
sudo docker exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" show sudo ${container_engine} exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" show
echo "OVN SB DB entries:" echo "OVN SB DB entries:"
sudo docker exec ovn_northd ovn-sbctl --db "$ovn_sb_connection" show sudo ${container_engine} exec ovn_northd ovn-sbctl --db "$ovn_sb_connection" show
# Test OVSDB cluster state # Test OVSDB cluster state
if [[ $BASE_DISTRO =~ ^(debian|ubuntu)$ ]]; then if [[ $BASE_DISTRO =~ ^(debian|ubuntu)$ ]]; then
OVNNB_STATUS=$(sudo docker exec ovn_nb_db ovs-appctl -t /var/run/openvswitch/ovnnb_db.ctl cluster/status OVN_Northbound) OVNNB_STATUS=$(sudo ${container_engine} exec ovn_nb_db ovs-appctl -t /var/run/openvswitch/ovnnb_db.ctl cluster/status OVN_Northbound)
OVNSB_STATUS=$(sudo docker exec ovn_sb_db ovs-appctl -t /var/run/openvswitch/ovnsb_db.ctl cluster/status OVN_Southbound) OVNSB_STATUS=$(sudo ${container_engine} exec ovn_sb_db ovs-appctl -t /var/run/openvswitch/ovnsb_db.ctl cluster/status OVN_Southbound)
else else
OVNNB_STATUS=$(sudo docker exec ovn_nb_db ovs-appctl -t /var/run/ovn/ovnnb_db.ctl cluster/status OVN_Northbound) OVNNB_STATUS=$(sudo ${container_engine} exec ovn_nb_db ovs-appctl -t /var/run/ovn/ovnnb_db.ctl cluster/status OVN_Northbound)
OVNSB_STATUS=$(sudo docker exec ovn_sb_db ovs-appctl -t /var/run/ovn/ovnsb_db.ctl cluster/status OVN_Southbound) OVNSB_STATUS=$(sudo ${container_engine} exec ovn_sb_db ovs-appctl -t /var/run/ovn/ovnsb_db.ctl cluster/status OVN_Southbound)
fi fi
if [[ $(grep -o "at tcp:" <<< ${OVNNB_STATUS} | wc -l) != "3" ]]; then if [[ $(grep -o "at tcp:" <<< ${OVNNB_STATUS} | wc -l) != "3" ]]; then
@ -84,9 +84,9 @@ function test_octavia {
openstack floating ip set $lb_fip --port $lb_port_id openstack floating ip set $lb_fip --port $lb_port_id
echo "OVN NB entries for LB:" echo "OVN NB entries for LB:"
sudo docker exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" list load_balancer sudo ${container_engine} exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" list load_balancer
echo "OVN NB entries for NAT:" echo "OVN NB entries for NAT:"
sudo docker exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" list nat sudo ${container_engine} exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" list nat
echo "Attempt to access the load balanced HTTP server." echo "Attempt to access the load balanced HTTP server."
attempts=12 attempts=12
@ -133,5 +133,5 @@ function test_ovn_setup {
} }
container_engine=${1:-docker}
test_ovn_setup test_ovn_setup

View File

@ -9,12 +9,14 @@ function test_zun_logged {
. /etc/kolla/admin-openrc.sh . /etc/kolla/admin-openrc.sh
. ~/openstackclient-venv/bin/activate . ~/openstackclient-venv/bin/activate
container_engine="${1:-docker}"
echo "TESTING: Zun" echo "TESTING: Zun"
openstack appcontainer service list openstack appcontainer service list
openstack appcontainer host list openstack appcontainer host list
openstack subnet set --no-dhcp demo-subnet openstack subnet set --no-dhcp demo-subnet
sudo docker pull alpine sudo ${container_engine} pull alpine
sudo docker save alpine | openstack image create alpine --public --container-format docker --disk-format raw sudo ${container_engine} save alpine | openstack image create alpine --public --container-format docker --disk-format raw
openstack appcontainer run --net network=demo-net --name test alpine sleep 1000 openstack appcontainer run --net network=demo-net --name test alpine sleep 1000
attempt=1 attempt=1
while [[ $(openstack appcontainer show test -f value -c status) != "Running" ]]; do while [[ $(openstack appcontainer show test -f value -c status) != "Running" ]]; do
@ -160,7 +162,7 @@ function test_zun {
if [[ -f $log_file ]]; then if [[ -f $log_file ]]; then
log_file=${log_file}-upgrade log_file=${log_file}-upgrade
fi fi
test_zun_logged > $log_file 2>&1 test_zun_logged $1 > $log_file 2>&1
result=$? result=$?
if [[ $result != 0 ]]; then if [[ $result != 0 ]]; then
echo "Testing Zun failed. See ansible/test-zun for details" echo "Testing Zun failed. See ansible/test-zun for details"
@ -170,4 +172,4 @@ function test_zun {
return $result return $result
} }
test_zun test_zun $1

View File

@ -1,32 +1,43 @@
#!/usr/bin/env bash #!/usr/bin/env bash
containers_running=$(sudo docker ps --filter "label=kolla_version" --format "{{.Names}}") # default to docker if not specified
engine="${1:-docker}"
shift 1
if ! [[ "$engine" =~ ^(docker|podman)$ ]]; then
echo "Invalid container engine: ${engine}"
exit 1
fi
echo "Using container engine: $engine"
containers_running=$(sudo $engine ps --filter "label=kolla_version" --format "{{.Names}}")
QEMU_PIDS=$(pgrep -l qemu | awk '!/qemu-ga/ && !/qemu-img/ {print $1}') QEMU_PIDS=$(pgrep -l qemu | awk '!/qemu-ga/ && !/qemu-img/ {print $1}')
if [[ "${containers_running}" =~ "nova_libvirt" ]] && [[ $QEMU_PIDS ]] && [[ $(ps --no-headers wwwup $QEMU_PIDS | grep --invert-match '\-xen\-domid 0') ]]; then if [[ "${containers_running}" =~ "nova_libvirt" ]] && [[ $QEMU_PIDS ]] && [[ $(ps --no-headers wwwup $QEMU_PIDS | grep --invert-match '\-xen\-domid 0') ]]; then
echo "Some qemu processes were detected." echo "Some qemu processes were detected."
echo "Docker will not be able to stop the nova_libvirt container with those running." echo "Container engine ($engine) will not be able to stop the nova_libvirt container with those running."
echo "Please clean them up before rerunning this script." echo "Please clean them up before rerunning this script."
exit 1 exit 1
fi fi
if [ -n "$1" ]; then if [ -n "$1" ]; then
containers_to_kill=$(sudo docker ps --filter "label=kolla_version" --format "{{.Names}}" -a | grep -E "$1" | awk '{print $1}') containers_to_kill=$(sudo $engine ps --filter "label=kolla_version" --format "{{.Names}}" -a | grep -E "$1" | awk '{print $1}')
volumes_to_remove=$(sudo docker inspect -f '{{range .Mounts}} {{printf "%s\n" .Name }}{{end}}' ${containers_to_kill} | \ volumes_to_remove=$(sudo $engine inspect -f '{{range .Mounts}} {{printf "%s\n" .Name }}{{end}}' ${containers_to_kill} | \
egrep -v '(^\s*$)' | sort | uniq) egrep -v '(^\s*$)' | sort | uniq)
else else
containers_to_kill=$(sudo docker ps --filter "label=kolla_version" --format "{{.Names}}" -a) containers_to_kill=$(sudo $engine ps --filter "label=kolla_version" --format "{{.Names}}" -a)
volumes_to_remove=$(sudo docker inspect -f '{{range .Mounts}} {{printf "%s\n" .Name }}{{end}}' ${containers_to_kill} | \ volumes_to_remove=$(sudo $engine inspect -f '{{range .Mounts}} {{printf "%s\n" .Name }}{{end}}' ${containers_to_kill} | \
egrep -v '(^\s*$)' | sort | uniq) egrep -v '(^\s*$)' | sort | uniq)
fi fi
if [[ "${containers_to_kill}" =~ "openvswitch_vswitchd" ]] && [[ "${containers_running}" =~ "neutron_openvswitch_agent" ]]; then if [[ "${containers_to_kill}" =~ "openvswitch_vswitchd" ]] && [[ "${containers_running}" =~ "neutron_openvswitch_agent" ]]; then
echo "Removing ovs bridge..." echo "Removing ovs bridge..."
(sudo docker exec -u root neutron_openvswitch_agent neutron-ovs-cleanup \ (sudo $engine exec -u root neutron_openvswitch_agent neutron-ovs-cleanup \
--config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini \ --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini \
--ovs_all_ports) > /dev/null --ovs_all_ports) > /dev/null
(sudo docker exec -it openvswitch_vswitchd bash -c 'for br in `ovs-vsctl list-br`;do ovs-vsctl --if-exists del-br $br;done') > /dev/null (sudo $engine exec -it openvswitch_vswitchd bash -c 'for br in `ovs-vsctl list-br`;do ovs-vsctl --if-exists del-br $br;done') > /dev/null
fi fi
echo "Stopping containers..." echo "Stopping containers..."
@ -35,15 +46,15 @@ sudo systemctl stop kolla-${container}-container.service
done done
echo "Removing containers..." echo "Removing containers..."
(sudo docker rm -v -f ${containers_to_kill} 2>&1) > /dev/null (sudo $engine rm -v -f ${containers_to_kill} 2>&1) > /dev/null
echo "Disconnecting containers from docker host network" echo "Disconnecting containers from $engine host network"
for container in ${containers_to_kill}; do for container in ${containers_to_kill}; do
(sudo docker network disconnect -f host $container 2>&1) > /dev/null (sudo $engine network disconnect -f host $container 2>&1) > /dev/null
done done
echo "Removing volumes..." echo "Removing volumes..."
(sudo docker volume rm ${volumes_to_remove} 2>&1) > /dev/null (sudo $engine volume rm ${volumes_to_remove} 2>&1) > /dev/null
echo "Removing link of kolla_log volume..." echo "Removing link of kolla_log volume..."
(sudo rm -f /var/log/kolla 2>&1) > /dev/null (sudo rm -f /var/log/kolla 2>&1) > /dev/null

View File

@ -1,7 +1,9 @@
#!/usr/bin/env bash #!/usr/bin/env bash
container_engine="docker"
# Move to top level directory # Move to top level directory
REAL_PATH=$(python -c "import os;print os.path.realpath('$0')") REAL_PATH=$(python3 -c "import os;print(os.path.realpath('$0'))")
cd "$(dirname "$REAL_PATH")/.." cd "$(dirname "$REAL_PATH")/.."
function process_cmd { function process_cmd {
@ -28,34 +30,50 @@ Options:
--help, -h Show this usage information --help, -h Show this usage information
--image, -i <image> Delete selected images --image, -i <image> Delete selected images
--image-version <image_version> Set Kolla image version --image-version <image_version> Set Kolla image version
--engine, -e <container_engine> Container engine to be used
EOF EOF
} }
SHORT_OPTS="ahi:" SHORT_OPTS="ahi:e:"
LONG_OPTS="all,dangling,help,image:,image-version:" LONG_OPTS="all,dangling,help,image:,image-version:,engine:"
ARGS=$(getopt -o "${SHORT_OPTS}" -l "${LONG_OPTS}" --name "$0" -- "$@") || { usage >&2; exit 2; } ARGS=$(getopt -o "${SHORT_OPTS}" -l "${LONG_OPTS}" --name "$0" -- "$@") || { usage >&2; exit 2; }
eval set -- "$ARGS"
for arg do
shift
if [ "$arg" = "-e" ] || [ "$arg" = "--engine" ]; then
container_engine="$1"
continue
elif [ "$arg" = "$container_engine" ]; then
continue
fi
eval set -- "$@" "$arg"
done
# catch empty arguments
if [ "$ARGS" = " --" ]; then
eval set -- "$ARGS"
fi
case "$1" in case "$1" in
(--all|-a) (--all|-a)
KOLLA_IMAGES="$(sudo docker images -a --filter "label=kolla_version" --format "{{.ID}}")" KOLLA_IMAGES="$(sudo ${container_engine} images -a --filter "label=kolla_version" --format "{{.ID}}")"
shift shift
;; ;;
(--dangling) (--dangling)
KOLLA_IMAGES="$(sudo docker images -a --filter dangling=true --format "{{.ID}}")" KOLLA_IMAGES="$(sudo ${container_engine} images -a --filter dangling=true --format "{{.ID}}")"
shift shift
;; ;;
(--image|-i) (--image|-i)
KOLLA_IMAGES="$(sudo docker images -a --filter "label=kolla_version" --format "{{.Repository}}\t{{.ID}}" | grep -E "$2" | awk '{print $2}')" KOLLA_IMAGES="$(sudo ${container_engine} images -a --filter "label=kolla_version" --format "{{.Repository}}\t{{.ID}}" | grep -E "$2" | awk '{print $2}')"
shift 2 shift 2
;; ;;
(--image-version) (--image-version)
KOLLA_IMAGES="$(sudo docker images -a --filter "label=kolla_version=${2}" --format "{{.ID}}")" KOLLA_IMAGES="$(sudo ${container_engine} images -a --filter "label=kolla_version=${2}" --format "{{.ID}}")"
shift 2 shift 2
;; ;;
@ -73,5 +91,5 @@ case "$1" in
esac esac
CMD="sudo docker rmi -f $@ $KOLLA_IMAGES" CMD="sudo ${container_engine} rmi -f $@ -- $KOLLA_IMAGES"
process_cmd process_cmd

View File

@ -1,13 +1,15 @@
#!/usr/bin/env bash #!/usr/bin/env bash
container_engine="${1:-docker}"
echo "##### System Identification #####" echo "##### System Identification #####"
egrep -w 'PRETTY_NAME|VERSION_ID' /etc/os-release egrep -w 'PRETTY_NAME|VERSION_ID' /etc/os-release
echo "##### Docker Version #####" echo "##### ${container_engine^} Version #####"
docker --version $container_engine --version
echo "##### Docker Info #####" echo "##### ${container_engine^} Info #####"
docker info $container_engine info
echo "##### Ansible Version #####" echo "##### Ansible Version #####"
ansible --version ansible --version
@ -19,11 +21,11 @@ echo "##### Globals.yml file #####"
egrep -w 'kolla_base_distro|openstack_release' /etc/kolla/globals.yml egrep -w 'kolla_base_distro|openstack_release' /etc/kolla/globals.yml
cat /etc/kolla/globals.yml | grep ^enable_ cat /etc/kolla/globals.yml | grep ^enable_
echo "##### Docker Images #####" echo "##### ${container_engine^} Images #####"
docker images -a --filter "label=kolla_version" --filter "dangling=false" --format "{{.ID}} - {{.Repository}}:{{.Tag}} - {{.CreatedSince}}" $container_engine images -a --filter "label=kolla_version" --filter "dangling=false" --format "{{.ID}} - {{.Repository}}:{{.Tag}} - {{.CreatedSince}}"
echo "##### All Docker Containers #####" echo "##### All ${container_engine^} Containers #####"
docker ps -a $container_engine ps -a
echo "##### Ip Link Show #####" echo "##### Ip Link Show #####"
ip link show ip link show

View File

@ -0,0 +1,17 @@
[Unit]
Description=Manage libvirt guests in kolla safely
After=systemd-machined.service
After=virt-guest-shutdown.target
Requires=virt-guest-shutdown.target
[Install]
WantedBy=multi-user.target
[Service]
Type=oneshot
RemainAfterExit=yes
TimeoutStopSec=610
ExecStart=-/usr/bin/podman exec nova_libvirt /bin/rm -f /var/lib/libvirt/libvirt-guests
ExecStart=-/usr/bin/podman start nova_compute
ExecStop=/usr/bin/podman stop nova_compute
ExecStop=/usr/bin/podman exec nova_libvirt /bin/sh -x /usr/libexec/libvirt-guests.sh shutdown

View File

@ -2,6 +2,7 @@
ANSIBLE=`find ansible -type f -exec cat {} \; | wc -l` ANSIBLE=`find ansible -type f -exec cat {} \; | wc -l`
DOCKER=`find docker -type f -exec cat {} \; | wc -l` DOCKER=`find docker -type f -exec cat {} \; | wc -l`
PODMAN=`find podman -type f -exec cat {} \; | wc -l`
DOC=`find doc -type f -exec cat {} \; | wc -l` DOC=`find doc -type f -exec cat {} \; | wc -l`
TESTS=`find tests -type f -exec cat {} \; | wc -l` TESTS=`find tests -type f -exec cat {} \; | wc -l`
BUILD=`find kolla -type f -exec cat {} \; | wc -l` BUILD=`find kolla -type f -exec cat {} \; | wc -l`
@ -12,7 +13,7 @@ ETC=`find etc -type f -exec cat {} \; | wc -l`
TOOLS=`find tools -type f -exec cat {} \; | wc -l` TOOLS=`find tools -type f -exec cat {} \; | wc -l`
VAGRANT=`find contrib/dev/vagrant -type f -exec cat {} \; | wc -l` VAGRANT=`find contrib/dev/vagrant -type f -exec cat {} \; | wc -l`
CORE=$(($ANSIBLE+$DOCKER+$TESTS+$DOCS+$BUILD)) CORE=$(($ANSIBLE+$DOCKER+$PODMAN+$TESTS+$DOCS+$BUILD))
SUPP=$(($DEMOS+$HEAT+$SPECS+$ETC+$TOOLS+$VAGRANT)) SUPP=$(($DEMOS+$HEAT+$SPECS+$ETC+$TOOLS+$VAGRANT))
TOTAL=$(($CORE+$SUPP)) TOTAL=$(($CORE+$SUPP))
@ -23,6 +24,7 @@ echo "Demos $DEMOS"
echo "Doc $DOC" echo "Doc $DOC"
echo "Etc $ETC" echo "Etc $ETC"
echo "Docker $DOCKER" echo "Docker $DOCKER"
echo "Podman $PODMAN"
echo "Specs $SPECS" echo "Specs $SPECS"
echo "Tests $TESTS" echo "Tests $TESTS"
echo "Tools $TOOLS" echo "Tools $TOOLS"

View File

@ -7,7 +7,7 @@
become: true become: true
vars: vars:
systemd_artifacts: systemd_artifacts:
- kolla-libvirt-guests.service - "kolla-libvirt-guests-{{ container_engine }}.service"
- virt-guest-shutdown.target - virt-guest-shutdown.target
tasks: tasks:
- name: Install systemd artifacts - name: Install systemd artifacts

View File

@ -154,7 +154,7 @@ def check_docker_become():
for x in YAML_INCLUDE_PATTERNS]) for x in YAML_INCLUDE_PATTERNS])
excludes = r'|'.join([fnmatch.translate(x) excludes = r'|'.join([fnmatch.translate(x)
for x in YAML_EXCLUDE_PATTERNS]) for x in YAML_EXCLUDE_PATTERNS])
docker_modules = ('kolla_docker', 'kolla_container_facts', 'kolla_toolbox') ce_modules = ('kolla_docker', 'kolla_container_facts', 'kolla_toolbox')
cmd_modules = ('command', 'shell') cmd_modules = ('command', 'shell')
return_code = 0 return_code = 0
roles_path = os.path.join(PROJECT_ROOT, 'ansible', 'roles') roles_path = os.path.join(PROJECT_ROOT, 'ansible', 'roles')
@ -168,24 +168,27 @@ def check_docker_become():
tasks = yaml.safe_load(fp) tasks = yaml.safe_load(fp)
tasks = tasks or [] tasks = tasks or []
for task in tasks: for task in tasks:
for module in docker_modules: for module in ce_modules:
if module in task and not task.get('become'): if module in task and not task.get('become'):
return_code = 1 return_code = 1
LOG.error("Use of %s module without become in " LOG.error("Use of %s module without become in "
"task %s in %s", "task %s in %s",
module, task['name'], fullpath) module, task['name'], fullpath)
for module in cmd_modules: for module in cmd_modules:
docker_without_become = False ce_without_become = False
if (module in task and not task.get('become')): if (module in task and not task.get('become')):
if (isinstance(task[module], str) and if (isinstance(task[module], str) and
(task[module]).startswith('docker')): ((task[module]).startswith('docker') or
docker_without_become = True (task[module]).startswith('podman'))):
ce_without_become = True
if (isinstance(task[module], dict) and if (isinstance(task[module], dict) and
task[module]['cmd'].startswith('docker')): (task[module]['cmd'].startswith('docker') or
docker_without_become = True task[module]['cmd'].startswith('podman'))):
if docker_without_become: ce_without_become = True
if ce_without_become:
return_code = 1 return_code = 1
LOG.error("Use of docker in %s module without " LOG.error("Use of container engine in %s "
"module without "
"become in task %s in %s", "become in task %s in %s",
module, task['name'], fullpath) module, task['name'], fullpath)

View File

@ -1,13 +1,14 @@
#!/usr/bin/env bash #!/usr/bin/env bash
# #
# This script can be used to check user privilege to execute # This script can be used to check user privilege to execute
# docker commands # docker or podman commands depending on CONTAINER_ENGINE
# environment variable
function check_dockerexecute { function check_dockerexecute {
sudo docker ps &>/dev/null sudo $CONTAINER_ENGINE ps &>/dev/null
return_val=$? return_val=$?
if [ $return_val -ne 0 ]; then if [ $return_val -ne 0 ]; then
echo "User $USER can't seem to run Docker commands. Verify product documentation to allow user to execute docker commands" 1>&2 echo "User $USER can't seem to run ${CONTAINER_ENGINE^} commands. Verify product documentation to allow user to execute ${CONTAINER_ENGINE^} commands" 1>&2
exit 1 exit 1
fi fi
} }

View File

@ -28,6 +28,7 @@
- ^zuul\.d/ - ^zuul\.d/
vars: vars:
previous_release: zed previous_release: zed
container_engine: "docker"
scenario: core scenario: core
virt_type: qemu virt_type: qemu
is_upgrade: no is_upgrade: no