Support multi-arch image builds with docker buildx

Docker has experimental support for building multi-arch
container images with the buildx command. Currently buildx
only supports pushing the result to a registry as part of
the build, and the built images do not end up in the local
docker image list. To work around that, push to the
buildset registry and then pull the images back. This is
the inverse of the normal case, where we build, then retag,
then push. The end result should be the same.
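
Roughly, the workaround amounts to the following command
sequence (a sketch only; the registry address and image
name are illustrative, not taken from the role):

    # Build for multiple architectures and push straight to the
    # buildset registry; buildx leaves nothing in the local image list.
    docker buildx build . --platform linux/amd64,linux/arm64 \
        --tag registry.example.org:5000/acme/image:latest --push
    # Pull the image back from the buildset registry ...
    docker pull registry.example.org:5000/acme/image:latest
    # ... and retag it under the plain repository name later tasks expect.
    docker tag registry.example.org:5000/acme/image:latest acme/image:latest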

Change-Id: I6a4c4f9e262add909d2d5c2efa33ec69b9d9364a
Monty Taylor 2020-04-23 09:46:29 -05:00
parent 3a1e660e62
commit 63bd307e63
15 changed files with 267 additions and 62 deletions


@@ -136,4 +136,15 @@ using this role.
A list of labels to attach to the built image, in the form of "key=value".
.. zuul:rolevar:: arch
:type: list
:default: []
A list of architectures to build the image for. When this is set on
any image, all of the images will be built with ``docker buildx``.
Valid values are ``linux/amd64``, ``linux/arm64``, ``linux/riscv64``,
``linux/ppc64le``, ``linux/s390x``, ``linux/386``,
``linux/arm/v7``, ``linux/arm/v6``.
.. _anchors: https://yaml.org/spec/1.2/spec.html#&%20anchor//
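
For example, a job could request a multi-arch build by setting
``arch`` on one of its ``docker_images`` entries (the repository
name here is illustrative)::

   docker_images:
     - context: .
       repository: example/image
       tags:
         - latest
       arch:
         - linux/amd64
         - linux/arm64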


@@ -1,49 +1,20 @@
- name: Check sibling directory
stat:
path: '{{ zuul_work_dir }}/{{ item.context }}/.zuul-siblings'
register: _dot_zuul_siblings
# This should have been cleaned up; multiple builds may specify
# different siblings to include so we need to start fresh.
- name: Check for clean build
assert:
that: not _dot_zuul_siblings.stat.exists
- name: Create sibling source directory
file:
path: '{{ zuul_work_dir }}/{{ item.context }}/.zuul-siblings'
state: directory
mode: 0755
when: item.siblings is defined
# NOTE(ianw): could use recursive copy: with remote_src, but it's
# Ansible 2.8 only. take the simple approach.
- name: Copy sibling source directories
command:
cmd: 'cp --parents -r {{ zj_sibling }} {{ ansible_user_dir }}/{{ zuul_work_dir }}/{{ item.context }}/.zuul-siblings'
chdir: '~/src'
loop: '{{ item.siblings }}'
loop_control:
loop_var: zj_sibling
when: item.siblings is defined
- name: Build a docker image
command: >-
docker build {{ item.path | default('.') }} -f {{ item.dockerfile | default(docker_dockerfile) }}
{% if item.target | default(false) -%}
--target {{ item.target }}
docker build {{ zj_image.path | default('.') }} -f {{ zj_image.dockerfile | default(docker_dockerfile) }}
{% if zj_image.target | default(false) -%}
--target {{ zj_image.target }}
{% endif -%}
{% for build_arg in item.build_args | default([]) -%}
{% for build_arg in zj_image.build_args | default([]) -%}
--build-arg {{ build_arg }}
{% endfor -%}
{% if item.siblings | default(false) -%}
--build-arg "ZUUL_SIBLINGS={{ item.siblings | join(' ') }}"
{% if zj_image.siblings | default(false) -%}
--build-arg "ZUUL_SIBLINGS={{ zj_image.siblings | join(' ') }}"
{% endif -%}
{% for tag in item.tags | default(['latest']) -%}
{% for tag in zj_image.tags | default(['latest']) -%}
{% if zuul.change | default(false) -%}
--tag {{ item.repository }}:change_{{ zuul.change }}_{{ tag }}
--tag {{ zj_image.repository }}:change_{{ zuul.change }}_{{ tag }}
{% endif -%}
--tag {{ item.repository }}:{{ tag }}
--tag {{ zj_image.repository }}:{{ tag }}
{% endfor -%}
{% for label in zj_image.labels | default([]) -%}
--label "{{ label }}"
@@ -51,10 +22,4 @@
--label "org.zuul-ci.change={{ zuul.change }}"
--label "org.zuul-ci.change_url={{ zuul.change_url }}"
args:
chdir: "{{ zuul_work_dir }}/{{ item.context }}"
- name: Cleanup sibling source directory
file:
path: '{{ zuul_work_dir }}/.zuul-siblings'
state: absent
chdir: "{{ zuul_work_dir }}/{{ zj_image.context }}"


@@ -0,0 +1,43 @@
- name: Build a docker image
command: >-
docker buildx build {{ zj_image.path | default('.') }} -f {{ zj_image.dockerfile | default(docker_dockerfile) }}
--platform={{ zj_image.arch | join(',') }}
{% if zj_image.target | default(false) -%}
--target {{ zj_image.target }}
{% endif -%}
{% for build_arg in zj_image.build_args | default([]) -%}
--build-arg {{ build_arg }}
{% endfor -%}
{% if zj_image.siblings | default(false) -%}
--build-arg "ZUUL_SIBLINGS={{ zj_image.siblings | join(' ') }}"
{% endif -%}
{% for tag in zj_image.tags | default(['latest']) -%}
{% if zuul.change | default(false) -%}
--tag {{ buildset_registry_alias }}:{{ buildset_registry.port }}/{{ zj_image.repository }}:change_{{ zuul.change }}_{{ tag }}
{% endif -%}
--tag {{ buildset_registry_alias }}:{{ buildset_registry.port }}/{{ zj_image.repository }}:{{ tag }}
{% endfor -%}
{% for label in zj_image.labels | default([]) -%}
--label "{{ label }}"
{% endfor %}
--label "org.zuul-ci.change={{ zuul.change }}"
--label "org.zuul-ci.change_url={{ zuul.change_url }}"
--push
args:
chdir: "{{ zuul_work_dir }}/{{ zj_image.context }}"
environment:
DOCKER_CLI_EXPERIMENTAL: enabled
- name: Pull images from buildset registry
command: >-
docker pull {{ buildset_registry_alias }}:{{ buildset_registry.port }}/{{ zj_image.repository }}:{{ zj_image_tag }}
loop: "{{ zj_image.tags | default(['latest']) }}"
loop_control:
loop_var: zj_image_tag
- name: Tag image for local registry
command: >-
docker tag {{ buildset_registry_alias }}:{{ buildset_registry.port }}/{{ zj_image.repository }}:{{ zj_image_tag }} {{ zj_image.repository }}:{{ zj_image_tag }}
loop: "{{ zj_image.tags | default(['latest']) }}"
loop_control:
loop_var: zj_image_tag


@@ -5,9 +5,11 @@
buildset_registry: "{{ (lookup('file', zuul.executor.work_root + '/results.json') | from_json)['buildset_registry'] }}"
ignore_errors: true
- name: Build docker images
include_tasks: build.yaml
- name: Set up siblings
include_tasks: siblings.yaml
loop: "{{ docker_images }}"
loop_control:
loop_var: zj_image
# Docker doesn't understand docker push [1234:5678::]:5000/image/path:tag
# so we set up /etc/hosts with a registry alias name to support ipv6 and 4.
@@ -28,10 +30,49 @@
set_fact:
buildset_registry_alias: "{{ buildset_registry.host }}"
when: buildset_registry is defined and not ( buildset_registry.host | ipaddr )
# Push each image.
- name: Push image to buildset registry
when: buildset_registry is defined
include_tasks: push.yaml
loop: "{{ docker_images }}"
loop_control:
loop_var: image
- name: Determine if we need to use buildx or normal build
set_fact:
use_buildx: "{{ docker_images | selectattr('arch', 'defined') | list }}"
- name: Normal docker block
when: not use_buildx
block:
- name: Build docker images
include_tasks: build.yaml
loop: "{{ docker_images }}"
loop_control:
loop_var: zj_image
# Push each image.
- name: Push image to buildset registry
when: buildset_registry is defined
include_tasks: push.yaml
loop: "{{ docker_images }}"
loop_control:
loop_var: zj_image
- name: Buildx block
when: use_buildx
block:
- name: Assert buildset registry is defined for buildx
assert:
that:
- buildset_registry is defined
fail_msg: "Building multi-arch images requires a buildset registry"
- name: Set up buildx builders
include_tasks: setup-buildx.yaml
- name: Build and push each image using buildx.
include_tasks: buildx.yaml
loop: "{{ docker_images }}"
loop_control:
loop_var: zj_image
- name: Cleanup sibling source directory
file:
path: '{{ zuul_work_dir }}/.zuul-siblings'
state: absent


@@ -1,12 +1,12 @@
- name: Tag image for buildset registry
command: >-
docker tag {{ image.repository }}:{{ zj_image_tag }} {{ buildset_registry_alias }}:{{ buildset_registry.port }}/{{ image.repository }}:{{ zj_image_tag }}
loop: "{{ image.tags | default(['latest']) }}"
docker tag {{ zj_image.repository }}:{{ zj_image_tag }} {{ buildset_registry_alias }}:{{ buildset_registry.port }}/{{ zj_image.repository }}:{{ zj_image_tag }}
loop: "{{ zj_image.tags | default(['latest']) }}"
loop_control:
loop_var: zj_image_tag
- name: Push tag to buildset registry
command: >-
docker push {{ buildset_registry_alias }}:{{ buildset_registry.port }}/{{ image.repository }}:{{ zj_image_tag }}
loop: "{{ image.tags | default(['latest']) }}"
docker push {{ buildset_registry_alias }}:{{ buildset_registry.port }}/{{ zj_image.repository }}:{{ zj_image_tag }}
loop: "{{ zj_image.tags | default(['latest']) }}"
loop_control:
loop_var: zj_image_tag


@@ -0,0 +1,56 @@
- name: Write buildkitd.toml file
template:
dest: /tmp/buildkitd.toml
src: buildkitd.toml.j2
- name: Run binfmt container
command: docker run --rm --privileged docker/binfmt:a7996909642ee92942dcd6cff44b9b95f08dad64
environment:
DOCKER_CLI_EXPERIMENTAL: enabled
- name: Create builder
command: docker buildx create --name mybuilder --driver-opt network=host --config /tmp/buildkitd.toml
environment:
DOCKER_CLI_EXPERIMENTAL: enabled
- name: Use builder
command: docker buildx use mybuilder
environment:
DOCKER_CLI_EXPERIMENTAL: enabled
- name: Bootstrap builder
command: docker buildx inspect --bootstrap
environment:
DOCKER_CLI_EXPERIMENTAL: enabled
- name: Copy buildset registry TLS cert into worker container
command: "docker cp {{ ca_dir }}/buildset-registry.crt buildx_buildkit_mybuilder0:/usr/local/share/ca-certificates"
- name: Update CA certs in worker container
command: docker exec buildx_buildkit_mybuilder0 update-ca-certificates
- name: Copy /etc/hosts for editing
command: docker cp buildx_buildkit_mybuilder0:/etc/hosts /tmp/mybuilder-hosts
# Docker buildx has its own /etc/hosts in the builder image.
- name: Configure /etc/hosts for buildset_registry to work around docker not understanding ipv6 addresses
become: yes
lineinfile:
path: /tmp/mybuilder-hosts
state: present
regex: "^{{ buildset_registry.host }}\tzuul-jobs.buildset-registry$"
line: "{{ buildset_registry.host }}\tzuul-jobs.buildset-registry"
insertafter: EOF
when: buildset_registry is defined and buildset_registry.host | ipaddr
- name: Unmount the /etc/hosts mount
command: docker exec buildx_buildkit_mybuilder0 umount /etc/hosts
# NOTE(mordred) This is done in two steps. Even though we've unmounted /etc/hosts
# in the previous step, when we try to copy the file back directly, we get:
# unlinkat /etc/hosts: device or resource busy
- name: Copy modified hosts file back in
command: docker cp /tmp/mybuilder-hosts buildx_buildkit_mybuilder0:/etc/new-hosts
- name: Copy modified hosts file into place
command: docker exec buildx_buildkit_mybuilder0 cp /etc/new-hosts /etc/hosts


@@ -0,0 +1,28 @@
- name: Check sibling directory
stat:
path: '{{ zuul_work_dir }}/{{ zj_image.context }}/.zuul-siblings'
register: _dot_zuul_siblings
# This should have been cleaned up; multiple builds may specify
# different siblings to include so we need to start fresh.
- name: Check for clean build
assert:
that: not _dot_zuul_siblings.stat.exists
- name: Create sibling source directory
file:
path: '{{ zuul_work_dir }}/{{ zj_image.context }}/.zuul-siblings'
state: directory
mode: 0755
when: zj_image.siblings is defined
# NOTE(ianw): could use recursive copy: with remote_src, but it's
# Ansible 2.8 only. take the simple approach.
- name: Copy sibling source directories
command:
cmd: 'cp --parents -r {{ zj_sibling }} {{ ansible_user_dir }}/{{ zuul_work_dir }}/{{ zj_image.context }}/.zuul-siblings'
chdir: '~/src'
loop: '{{ zj_image.siblings }}'
loop_control:
loop_var: zj_sibling
when: zj_image.siblings is defined


@@ -0,0 +1,8 @@
[registry."docker.io"]
mirrors = ["{{ buildset_registry_alias }}:{{ buildset_registry.port }}"]
[registry."quay.io"]
mirrors = ["{{ buildset_registry_alias }}:{{ buildset_registry.port }}/quay.io"]
[registry."gcr.io"]
mirrors = ["{{ buildset_registry_alias }}:{{ buildset_registry.port }}/gcr.io"]


@@ -0,0 +1,2 @@
ca_dir: /etc/pki/ca-trust/source/anchors
ca_command: update-ca-trust


@@ -0,0 +1,2 @@
ca_dir: /usr/local/share/ca-certificates
ca_command: update-ca-certificates


@@ -50,7 +50,7 @@
--publish="1{{ buildset_registry_port }}:5000"
--volume="{{ buildset_registry_root }}/tls:/tls"
--volume="{{ buildset_registry_root }}/conf:/conf"
docker.io/zuul/zuul-registry:latest
docker.io/zuul/zuul-registry:latest zuul-registry -d
# Start a socat tunnel to the buildset registry to work around
# https://github.com/containers/libpod/issues/4311


@@ -1,6 +1,7 @@
FROM docker.io/library/debian:testing
FROM docker.io/upstream/image
ARG ZUUL_SIBLINGS=""
RUN echo "Zuul siblings: ${ZUUL_SIBLINGS}"
RUN cp /test-nonce /test-nonce-is-there
COPY .zuul-siblings/opendev.org/project/fake-sibling/file /target
COPY .zuul-siblings/openstack.org/project/fake-sibling/file /target
CMD echo "Zuul container test"; sleep infinity


@@ -1,2 +1,3 @@
FROM docker.io/library/debian:testing
RUN touch "/test-nonce"
CMD echo "Zuul container test"; sleep infinity


@@ -134,12 +134,20 @@
include_role:
name: "build-{{ (container_command == 'docker') | ternary('docker', 'container') }}-image"
vars:
docker_images:
_normal_docker_images:
- context: test-playbooks/registry/docker-siblings
repository: downstream/image
siblings:
- opendev.org/project/fake-sibling
- openstack.org/project/fake-sibling
_arch_docker_images:
- context: test-playbooks/registry/docker-siblings
repository: downstream/image
siblings:
- opendev.org/project/fake-sibling
- openstack.org/project/fake-sibling
arch: ['linux/amd64', 'linux/arm64']
docker_images: "{{ multiarch | ternary(_arch_docker_images, _normal_docker_images) }}"
container_images: "{{ docker_images }}"
- hosts: executor
name: Test pushing to the intermediate registry
@@ -150,9 +158,14 @@
include_role:
name: push-to-intermediate-registry
vars:
docker_images:
_normal_docker_images:
- context: playbooks/registry/docker
repository: downstream/image
_arch_docker_images:
- context: playbooks/registry/docker
repository: downstream/image
arch: ['linux/amd64', 'linux/arm64']
docker_images: "{{ multiarch | ternary(_arch_docker_images, _normal_docker_images) }}"
container_images: "{{ docker_images }}"
# And finally an external verification step.


@@ -82,6 +82,38 @@
post-run: test-playbooks/registry/test-registry-post.yaml
vars:
container_command: docker
multiarch: false
nodeset:
nodes:
- name: intermediate-registry
label: ubuntu-bionic
- name: executor
label: ubuntu-bionic
- name: builder
label: ubuntu-bionic
- job:
name: zuul-jobs-test-registry-docker-multiarch
description: |
Test the intermediate registry roles with multiarch.
This job tests changes to the intermediate registry roles. It
is not meant to be used directly but rather run on changes to
roles in the zuul-jobs repo.
files:
- roles/pull-from-intermediate-registry/.*
- roles/push-to-intermediate-registry/.*
- roles/ensure-docker/.*
- roles/build-docker-image/.*
- roles/run-buildset-registry/.*
- roles/use-buildset-registry/.*
- test-playbooks/registry/.*
pre-run: test-playbooks/registry/test-registry-pre.yaml
run: test-playbooks/registry/test-registry.yaml
post-run: test-playbooks/registry/test-registry-post.yaml
vars:
container_command: docker
multiarch: true
nodeset:
nodes:
- name: intermediate-registry
@@ -112,6 +144,7 @@
post-run: test-playbooks/registry/test-registry-post.yaml
vars:
container_command: podman
multiarch: false
nodeset:
nodes:
- name: intermediate-registry
@@ -311,6 +344,7 @@
- zuul-jobs-test-ensure-docker-ubuntu-bionic
- zuul-jobs-test-ensure-docker-ubuntu-xenial
- zuul-jobs-test-registry-docker
- zuul-jobs-test-registry-docker-multiarch
- zuul-jobs-test-registry-podman
- zuul-jobs-test-registry-buildset-registry
- zuul-jobs-test-registry-buildset-registry-k8s-docker