image-builder for ubuntu based airship hosts

Builds ephemeral ISO and target host images.

Also replaces isogen (iso-builder) with a single ansible-driven build
tool for both types of airship images: the ephemeral ISO as well as
target QCOW2 images deployed to base nodes.

Change-Id: I6d0368de771869e4e645a03d8a20f470b34602ab
Craig Anderson 2020-07-21 21:18:05 -07:00 committed by Anderson, Craig (ca846m)
parent 3b351b1aa1
commit caa1cffc0a
87 changed files with 2295 additions and 0 deletions

.gitignore

@ -5,3 +5,8 @@ bootstrap_capz/.vscode/launch.json
bootstrap_capz/go.mod
bootstrap_capz/go.sum
bootstrap_capo/capo-ephemeral
# image-builder artifacts to ignore
image-builder/examples/output-metadata.yaml
image-builder/examples/*.iso
image-builder/examples/*.qcow2


@ -34,6 +34,7 @@
- job:
name: airship-images-build
nodeset: airship-images-single-node
timeout: 3600
pre-run: playbooks/airship-images-deploy-docker.yaml
run: playbooks/airship-images-build.yaml
post-run: playbooks/airship-collect-logs.yaml


@ -0,0 +1,124 @@
FROM ubuntu:focal as base-image
LABEL org.opencontainers.image.authors='airship-discuss@lists.airshipit.org, irc://#airshipit@freenode' \
org.opencontainers.image.url='https://airshipit.org' \
org.opencontainers.image.documentation='https://airship-images.readthedocs.org' \
org.opencontainers.image.source='https://opendev.org/airship/images' \
org.opencontainers.image.vendor='The Airship Authors' \
org.opencontainers.image.licenses='Apache-2.0'
SHELL ["bash", "-exc"]
ENV DEBIAN_FRONTEND noninteractive
# Update distro and install ansible
RUN apt-get update ;\
apt-get dist-upgrade -y ;\
apt-get install -y --no-install-recommends \
python3-minimal \
python3-pip \
python3-apt \
python3-setuptools ;\
pip3 install --upgrade wheel ;\
pip3 install --upgrade ansible ;\
rm -rf /var/lib/apt/lists/*
FROM base-image as rootfs-builder
# install requirements for building chroot
RUN apt-get update ;\
apt-get install -y --no-install-recommends \
multistrap \
equivs \
curl \
ca-certificates \
build-essential \
gnupg2 \
dosfstools;\
rm -rf /var/lib/apt/lists/*
COPY assets/playbooks/inventory.yaml /opt/assets/playbooks/inventory.yaml
COPY assets/playbooks/base-chroot.yaml /opt/assets/playbooks/base-chroot.yaml
COPY assets/playbooks/roles/multistrap /opt/assets/playbooks/roles/multistrap
RUN ansible-playbook -i /opt/assets/playbooks/inventory.yaml /opt/assets/playbooks/base-chroot.yaml
COPY assets/playbooks/base-osconfig.yaml /opt/assets/playbooks/base-osconfig.yaml
COPY assets/playbooks/roles/osconfig /opt/assets/playbooks/roles/osconfig
RUN ansible-playbook -i /opt/assets/playbooks/inventory.yaml /opt/assets/playbooks/base-osconfig.yaml --tags "pre_install"
FROM base-image as squashfs-builder
ENV root_chroot /mnt/rootfs
ENV root_image /mnt/image
ENV boot_src="/opt/grub"
RUN apt-get update ;\
apt-get install -y --no-install-recommends \
squashfs-tools \
grub-common \
grub2-common \
grub-pc-bin \
grub-efi-amd64-signed;\
rm -rf /var/lib/apt/lists/*
COPY --from=rootfs-builder ${root_chroot} ${root_chroot}
COPY assets/playbooks/inventory.yaml /opt/assets/playbooks/inventory.yaml
COPY assets/playbooks/base-livecdcontent.yaml /opt/assets/playbooks/base-livecdcontent.yaml
COPY assets/playbooks/roles/livecdcontent /opt/assets/playbooks/roles/livecdcontent
RUN ansible-playbook -i /opt/assets/playbooks/inventory.yaml /opt/assets/playbooks/base-livecdcontent.yaml
FROM base-image as image-builder
ENV boot_src="/opt/grub"
ENV root_image /mnt/image
RUN apt-get update ;\
apt-get install -y --no-install-recommends \
xorriso \
grub-pc-bin \
python3-minimal \
python3-yaml ;\
rm -rf /var/lib/apt/lists/*
COPY --from=squashfs-builder ${root_image} ${root_image}
COPY assets/playbooks/inventory.yaml /opt/assets/playbooks/inventory.yaml
COPY assets/playbooks/iso.yaml /opt/assets/playbooks/iso.yaml
COPY assets/playbooks/roles/iso /opt/assets/playbooks/roles/iso
RUN apt-get update ;\
apt-get install -y --no-install-recommends \
coreutils \
curl \
qemu-utils \
parted \
squashfs-tools \
extlinux \
syslinux-common \
xfsprogs \
vim \
kmod \
efivar \
dosfstools ;\
rm -rf /var/lib/apt/lists/*
COPY assets/playbooks/base-osconfig.yaml /opt/assets/playbooks/base-osconfig.yaml
COPY assets/playbooks/roles/osconfig /opt/assets/playbooks/roles/osconfig
COPY assets/playbooks/qcow.yaml /opt/assets/playbooks/qcow.yaml
COPY assets/playbooks/roles/qcow /opt/assets/playbooks/roles/qcow
RUN curl -L https://github.com/mikefarah/yq/releases/download/2.4.0/yq_linux_amd64 -o /bin/yq \
&& chmod +x /bin/yq
COPY assets/*.sh /usr/bin/local/
COPY assets/*.json /usr/bin/local/
CMD /usr/bin/local/entrypoint.sh

image-builder/Makefile

@ -0,0 +1,104 @@
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
SHELL := /bin/bash
COMMIT ?= $(shell git rev-parse HEAD)
LABEL ?= org.airshipit.build=community
IMAGE_NAME ?= image-builder
IMAGE_REGISTRY ?= quay.io
IMAGE_REPO ?= image-builder
IMAGE_TAG ?= latest
IMAGE_TYPE ?= iso # iso | qcow
PUSH_IMAGE ?= false
DISTRO ?= ubuntu_focal
IMAGE ?= $(IMAGE_REGISTRY)/$(IMAGE_REPO):$(IMAGE_TAG)-${DISTRO}
IMAGE_ALIAS ?= $(IMAGE_REGISTRY)-$(IMAGE_REPO)-$(IMAGE_TAG)-${DISTRO}-${IMAGE_TYPE}
UEFI_BOOT ?=
PROXY ?=
NO_PROXY ?= localhost,127.0.0.1
.PHONY: help build images install_prereqs cut_image run
.ONESHELL:
help: ## This help.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z0-9_-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
# Make target name that zuul expects for each project in this repo
images: build
install_prereqs:
ifneq ($(PROXY), )
export http_proxy=$(PROXY)
export https_proxy=$(PROXY)
export no_proxy=$(NO_PROXY)
export HTTP_PROXY=$(PROXY)
export HTTPS_PROXY=$(PROXY)
export NO_PROXY=$(NO_PROXY)
endif
sudo -E tools/install_prereqs.$(DISTRO)
build:
ifneq ($(PROXY), )
sudo -E ./tools/docker_proxy.sh $(PROXY)
export http_proxy=$(PROXY)
export https_proxy=$(PROXY)
export no_proxy=$(NO_PROXY)
export HTTP_PROXY=$(PROXY)
export HTTPS_PROXY=$(PROXY)
export NO_PROXY=$(NO_PROXY)
sudo -E docker build --tag $(IMAGE) -f Dockerfile.$(DISTRO) . \
--label $(LABEL) \
--label "org.opencontainers.image.revision=$(COMMIT)" \
--label "org.opencontainers.image.created=\
$(shell date --rfc-3339=seconds --utc)" \
--label "org.opencontainers.image.title=$(IMAGE_NAME)" \
--build-arg http_proxy=$(PROXY) \
--build-arg https_proxy=$(PROXY) \
--build-arg HTTP_PROXY=$(PROXY) \
--build-arg HTTPS_PROXY=$(PROXY) \
--build-arg no_proxy=$(NO_PROXY) \
--build-arg NO_PROXY=$(NO_PROXY) \
--build-arg UEFI_BOOT=$(UEFI_BOOT) || exit 1
else
sudo -E docker build --tag $(IMAGE) -f Dockerfile.$(DISTRO) . \
--label $(LABEL) \
--label "org.opencontainers.image.revision=$(COMMIT)" \
--label "org.opencontainers.image.created=\
$(shell date --rfc-3339=seconds --utc)" \
--label "org.opencontainers.image.title=$(IMAGE_NAME)" \
--build-arg UEFI_BOOT=$(UEFI_BOOT) || exit 1
endif
ifeq ($(PUSH_IMAGE), true)
sudo -E docker push $(IMAGE)
endif
cut_image: install_prereqs
ifneq ($(PROXY), )
sudo -E ./tools/docker_proxy.sh $(PROXY)
export http_proxy=$(PROXY)
export https_proxy=$(PROXY)
export no_proxy=$(NO_PROXY)
export HTTP_PROXY=$(PROXY)
export HTTPS_PROXY=$(PROXY)
export NO_PROXY=$(NO_PROXY)
endif
sudo -E tools/cut_image.sh $(IMAGE_TYPE) ./examples $(IMAGE) $(IMAGE_ALIAS) "$(UEFI_BOOT)" "$(PROXY)" "$(NO_PROXY)"
run: ## Run the iso in kvm for testing
virsh start $(IMAGE_ALIAS)
tests:
true

image-builder/README.md

@ -0,0 +1,86 @@
# Overview
Image Builder is a utility used to produce the two types of artifacts needed for an airshipctl deployment: an ISO (for the ephemeral node) and QCOW2s (used by metal3io to deploy all other nodes). This is accomplished through several stages, as follows (the corresponding make targets are sketched after this list):
1. Build a docker image containing the base operating system and basic configuration management
1. Run configuration management again at container runtime, with customized, user-supplied inputs
   - A more accessible layer for user customization that doesn't require rebuilding the container
   - Users may decide for themselves whether a customized docker image build is worthwhile
1. The container run produces the final image artifact (ISO or QCOW2)
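Roughly speaking, these stages map onto the ``build`` and ``cut_image`` make targets, which are described in more detail in the sections below:
```
# Stage 1: build the image-builder container
sudo make build
# Stages 2-3: run the container to produce the final artifact (iso by default)
sudo make IMAGE_TYPE=qcow cut_image
```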
# Airship Image Variations
The ISO is built using the network information defined for the ephemeral node in the supplied airship manifests. Therefore, each airship deployment should have its own ISO created.
The QCOW2s have their networking information driven by cloud-init during metal3io deployment, so it is not contained in the image itself. These QCOW2s would therefore not necessarily be generated for each unique airship deployment, but rather for each unique host profile.
Note that we will refer to the QCOW2s as the “base OS” or “target OS”, rather than “baremetal OS”, since the same process can be used to build QCOW2s for baremetal and for a virtualized environment.
# Building the image-builder container locally
If you do not wish to use the image-builder container published on quay.io, you may build your own locally as follows:
```
sudo apt -y install sudo git make
git clone https://review.opendev.org/airship/images
cd images/image-builder
sudo make IMAGE_REGISTRY=mylocalreg build
```
By default, both the ISO and QCOW share the same base container image. Therefore in most cases it should be sufficient to generate a single container that's reused for all image types and further differentiated in the container runtime phase described in the next section.
# Executing the image-builder container
The following makefile target may be used to execute the image-builder container in order to produce an ISO or QCOW2 output.
```
sudo apt -y install sudo git make
git clone https://review.opendev.org/airship/images
cd images/image-builder
sudo make IMAGE_TYPE=qcow cut_image
```
In the above example, set ``IMAGE_TYPE`` to ``iso`` or ``qcow`` as appropriate. This will be passed into the container to instruct it which type of image to build. Also include an ``IMAGE_REGISTRY`` override if you wish to use a local docker image as described in the previous section.
This makefile target uses the config files provided in the images/image-builder/examples directory. Modify these files as needed to customize your ISO and QCOW2 generation.
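Under the hood, ``cut_image`` mounts the examples directory into the container and points the entrypoint at a builder config file via the ``BUILDER_CONFIG`` environment variable (see assets/functions.sh). A hypothetical direct invocation is sketched below; the exact docker flags and config file name used by ``tools/cut_image.sh`` may differ:
```
# Hypothetical sketch only: --privileged is assumed for loop/nbd access during
# qcow builds, and /config/builder-config is a placeholder config file name.
sudo docker run --rm --privileged \
    -v "$(pwd)/examples:/config" \
    -e BUILDER_CONFIG=/config/builder-config \
    -e IMAGE_TYPE=iso \
    quay.io/image-builder:latest-ubuntu_focal
```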
# Building behind a proxy
Example of building the docker container locally, plus creating the ISO and QCOW2 behind a proxy:
```
sudo apt -y install sudo git make
git clone https://review.opendev.org/airship/images
cd images/image-builder
# Create container
sudo make IMAGE_REGISTRY=mylocalreg PROXY=http://proxy.example.com:8080 build
# Create ephemeral ISO
sudo make IMAGE_REGISTRY=mylocalreg PROXY=http://proxy.example.com:8080 IMAGE_TYPE=iso cut_image
# Create qcow
sudo make IMAGE_REGISTRY=mylocalreg PROXY=http://proxy.example.com:8080 IMAGE_TYPE=qcow cut_image
```
# Division of Configuration Management responsibilities
Configuration management of the base OS is divided into several realms, each with their own focus:
1. Image-builder configuration data, i.e. data baked into the QCOW2 base image (see the example vars sketch after this list). The following should be used to drive this phase:
1. The storage and compute elements of NCv1 host and hardware profiles (kernel boot params, cpu pinning, hugepage settings, disk partitioning, etc), and
1. the NCv1 divingbell apparmor, security limits, file/dir permissions, sysctl, and
1. custom-built kernel modules (e.g. dkms based installations, i40e driver, etc)
1. Necessary components for the nodes bootstrap to k8s cluster, e.g. k8s, CNI, containerd, etc
1. any other operating system setting which would require a reboot or cannot otherwise be accommodated in #2 below
1. cloud-init driven configuration for site-specific data. Examples include:
1. Hostnames, domain names, FQDNs, IP addresses, etc
1. Network configuration data (bonding, MTU settings, VLANs, DNS, NTP, ethtool settings, etc)
1. Certificates, SSH keys, user accounts and/or passwords, etc.
1. HCA (host-config agent) for limited day-2 base-OS management
1. Cron jobs, such as the Roomba cleanup script used in NCv1, or SACT/gstools scripts
1. Possible overlap with configuration-management items in #1 and #2, but for zero-disruption day-2 management (kept to a minimum to reduce design and testing complexity; only essential items, to minimize overhead)
1. Eventually, HCA use may be reduced or phased out if #1 and #2 become streamlined enough, and their impact minimized to the degree that SLAs can be met
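As a concrete illustration of item #1 customization at container runtime, the osconfig role accepts a user-supplied vars override (referenced by ``builder.osconfigVarsFileName`` in the builder config and copied over the role's vars file by the container entrypoint). A minimal sketch, reusing keys from the osconfig role defaults in this change (the values shown are examples only):
```
# Hypothetical osconfig vars override; keys mirror the osconfig role defaults
grub:
  GRUB_TIMEOUT: 5
  GRUB_CMDLINE_LINUX_DEFAULT:
    - name: cgroup_disable
      value: 'hugetlb'
sysctl:
  - name: net.ipv4.ip_forward
    value: '1'
post_install_package_list:
  - kdump-tools
  - rsyslog
```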
# Supported OSes
- Ubuntu 20.04 LTS


@ -0,0 +1,65 @@
#!/bin/bash
set -ex
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" >/dev/null 2>&1 && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
BASEDIR="$( cd -P "$( dirname "$SOURCE" )" >/dev/null 2>&1 && pwd )"
cd "$BASEDIR"
BASEDIR="$(dirname "$(realpath "$0")")"
source "${BASEDIR}/functions.sh"
: "${uefi_boot:=}"
if [[ -n $uefi_boot ]]; then
extra_vars="uefi=$uefi_boot"
fi
export http_proxy
export https_proxy
export HTTP_PROXY
export HTTPS_PROXY
export no_proxy
export NO_PROXY
# Instruct ansible to output the image artifact to the container's host mount
extra_vars="$extra_vars img_output_dir=${VOLUME}"
echo "Begin Ansible plays"
if [[ "${IMAGE_TYPE}" == "iso" ]]; then
_process_input_data_set_vars_iso
# Instruct ansible how to name image output artifact
extra_vars="$extra_vars img_name=${IMG_NAME}"
echo "Executing Step 1"
ansible-playbook -i /opt/assets/playbooks/inventory.yaml /opt/assets/playbooks/iso.yaml --extra-vars "$extra_vars" -vvvv
elif [[ "${IMAGE_TYPE}" == "qcow" ]]; then
_process_input_data_set_vars_qcow
_process_input_data_set_vars_osconfig
# Instruct ansible how to name image output artifact
extra_vars="$extra_vars img_name=${IMG_NAME}"
echo "Executing Step 1: Create qcow2 partitions and filesystems"
ansible-playbook -i /opt/assets/playbooks/inventory.yaml /opt/assets/playbooks/qcow.yaml --extra-vars "$extra_vars" --tags "prep_img" -vvvv
echo "Executing Step 2: Applying changes from base-osconfig playbook"
ansible-playbook -i /opt/assets/playbooks/inventory.yaml /opt/assets/playbooks/base-osconfig.yaml --extra-vars "$extra_vars" -vvvv
echo "Executing Step 3: Close image and write qcow2"
ansible-playbook -i /opt/assets/playbooks/inventory.yaml /opt/assets/playbooks/qcow.yaml --extra-vars "$extra_vars" --tags "close_img" -vvvv
else
echo "\${IMAGE_TYPE} value '${IMAGE_TYPE}' does not match an expected value: [ 'iso', 'qcow' ]"
exit 1
fi
# Write metadata output file containing host path to image and md5sum
_make_metadata "${IMG_NAME}"
echo "All Ansible plays completed successfully"

image-builder/assets/functions.sh

@ -0,0 +1,149 @@
#!/bin/bash
# Defaults
OUTPUT_METADATA_FILE_NAME_DEFAULT='output-metadata.yaml'
ISO_NAME_DEFAULT='ephemeral.iso'
# Common
echo "${BUILDER_CONFIG:?}"
if [ ! -f "${BUILDER_CONFIG}" ]
then
echo "file ${BUILDER_CONFIG} not found"
exit 1
fi
_validate_param(){
PARAM_VAL="$1"
PARAM_NAME="$2"
# Validate that a parameter is defined (default) or that
# it is defined and represents the path of a file or
# directory that is found on the filesystem (VAL_TYPE=file)
VAL_TYPE="$3"
NO_NULL_EXIT="$4"
echo "${PARAM_VAL:?}"
# yq will return the 'null' string if a key is either undefined or defined with no value
if [[ "${PARAM_VAL}" =~ null$ ]]
then
echo "variable ${PARAM_NAME} is not present in ${BUILDER_CONFIG}"
if [[ "${NO_NULL_EXIT}" == 'no_null_exit' ]]; then
echo "Using defaults"
else
exit 1
fi
else
if [[ ${VAL_TYPE} == 'file' ]]; then
if [[ ! -e "${PARAM_VAL}" ]]
then
echo "${PARAM_VAL} not exist"
exit 1
fi
fi
fi
}
IFS=':' read -ra ADDR <<<"$(yq r "${BUILDER_CONFIG}" container.volume)"
HOST_PATH="${ADDR[0]}"
VOLUME="${ADDR[1]}"
_validate_param "${VOLUME}" "container.volume" file
# Read IMAGE_TYPE from the builder config yaml if not supplied as an env var
if [[ -z "${IMAGE_TYPE}" ]]; then
IMAGE_TYPE="$(yq r "${BUILDER_CONFIG}" "builder.imageType")"
# Make iso builds the default for backwards compatibility
if [[ "${IMAGE_TYPE}" == 'null' ]]; then
echo "NOTE: No builder.imageType specified. Assuming 'iso'."
IMAGE_TYPE='iso'
fi
fi
if [[ -z "${OUTPUT_METADATA_FILE_NAME}" ]]; then
OUTPUT_METADATA_FILE_NAME="$(yq r "${BUILDER_CONFIG}" builder.outputMetadataFileName)"
if [[ "${OUTPUT_METADATA_FILE_NAME}" == 'null' ]]; then
echo "NOTE: No builder.outputMetadataFileName specified. Assuming '${OUTPUT_METADATA_FILE_NAME_DEFAULT}'."
OUTPUT_METADATA_FILE_NAME="${OUTPUT_METADATA_FILE_NAME_DEFAULT}"
fi
fi
OUTPUT_FILE_NAME="$(yq r "${BUILDER_CONFIG}" builder.outputFileName)"
_make_metadata(){
IMG_NAME="$1"
OUTPUT_METADATA_FILE_PATH="${VOLUME}/${OUTPUT_METADATA_FILE_NAME}"
# Instruct airshipctl where to locate the output image artifact
echo "bootImagePath: ${HOST_PATH}/${IMG_NAME}" > "${OUTPUT_METADATA_FILE_PATH}"
# Also include the image md5sum
md5sum=$(md5sum "${VOLUME}/${IMG_NAME}" | awk '{print $1}')
echo "md5sum: $md5sum" | tee -a "${OUTPUT_METADATA_FILE_PATH}"
}
_process_input_data_set_vars_osconfig(){
if [[ -z "${OSCONFIG_FILE}" ]]; then
OSCONFIG_FILE="$(yq r "${BUILDER_CONFIG}" builder.osconfigVarsFileName)"
fi
OSCONFIG_FILE="${VOLUME}/${OSCONFIG_FILE}"
_validate_param "${OSCONFIG_FILE}" builder.osconfigVarsFileName file no_null_exit
# Optional user-supplied playbook vars
if [[ -f "${OSCONFIG_FILE}" ]]; then
cp "${OSCONFIG_FILE}" /opt/assets/playbooks/roles/osconfig/vars/main.yaml
fi
}
_process_input_data_set_vars_iso(){
# Required user provided input
if [[ -z "${USER_DATA_FILE}" ]]; then
USER_DATA_FILE="$(yq r "${BUILDER_CONFIG}" builder.userDataFileName)"
fi
USER_DATA_FILE="${VOLUME}/${USER_DATA_FILE}"
_validate_param "${USER_DATA_FILE}" builder.userDataFileName file
# Required user provided input
if [[ -z "${NET_CONFIG_FILE}" ]]; then
NET_CONFIG_FILE="$(yq r "${BUILDER_CONFIG}" builder.networkConfigFileName)"
fi
NET_CONFIG_FILE="${VOLUME}/${NET_CONFIG_FILE}"
_validate_param "${NET_CONFIG_FILE}" builder.networkConfigFileName file
# cloud-init expects net config specifically in json format
NET_CONFIG_JSON_FILE=/tmp/network_data.json
yq r -j "${NET_CONFIG_FILE}" > "${NET_CONFIG_JSON_FILE}"
# Optional user provided input
if [[ ${OUTPUT_FILE_NAME} != null ]]; then
IMG_NAME="${OUTPUT_FILE_NAME}"
else
IMG_NAME="${ISO_NAME_DEFAULT}"
fi
cat << EOF > /opt/assets/playbooks/roles/iso/vars/main.yaml
meta_data_file: ${BASEDIR}/meta_data.json
user_data_file: ${USER_DATA_FILE}
network_data_file: ${NET_CONFIG_JSON_FILE}
EOF
}
_process_input_data_set_vars_qcow(){
IMG_NAME=null
if [[ -z "${QCOW_CONFIG_FILE}" ]]; then
QCOW_CONFIG_FILE="$(yq r "${BUILDER_CONFIG}" builder.qcowVarsFileName)"
fi
QCOW_CONFIG_FILE="${VOLUME}/${QCOW_CONFIG_FILE}"
_validate_param "${QCOW_CONFIG_FILE}" builder.qcowVarsFileName file no_null_exit
# Optional user-supplied playbook vars
if [[ -f "${QCOW_CONFIG_FILE}" ]]; then
cp "${QCOW_CONFIG_FILE}" /opt/assets/playbooks/roles/qcow/vars/main.yaml
# Extract the image output name in the ansible vars file provided
IMG_NAME="$(yq r "${QCOW_CONFIG_FILE}" img_name)"
fi
# Retrieve from playbook defaults if not provided in user input
if [[ "${IMG_NAME}" == 'null' ]]; then
IMG_NAME="$(yq r /opt/assets/playbooks/roles/qcow/defaults/main.yaml img_name)"
fi
# User-supplied image output name in builder-config takes precedence
if [[ ${OUTPUT_FILE_NAME} != null ]]; then
IMG_NAME="${OUTPUT_FILE_NAME}"
else
_validate_param "${IMG_NAME}" img_name
fi
}


@ -0,0 +1 @@
{"hostname": "ephemeral", "name": "ephemeral", "uuid": "83679162-1378-4288-a2d4-70e13ec132aa"}


@ -0,0 +1,4 @@
---
- hosts: localhost
roles:
- multistrap


@ -0,0 +1,4 @@
---
- hosts: localhost
roles:
- livecdcontent


@ -0,0 +1,5 @@
---
- hosts: /mnt/rootfs
gather_facts: false
roles:
- osconfig


@ -0,0 +1,10 @@
all:
hosts:
localhost:
ansible_connection: local
ansible_python_interpreter: /usr/bin/python3
chroots:
hosts:
/mnt/rootfs:
ansible_connection: chroot
ansible_python_interpreter: /usr/bin/python3


@ -0,0 +1,4 @@
---
- hosts: localhost
roles:
- iso


@ -0,0 +1,4 @@
---
- hosts: localhost
roles:
- qcow


@ -0,0 +1,7 @@
img_output_dir: /config
img_name: ephemeral.iso
root_image: /mnt/image
meta_data_file: /config/meta_data.json
user_data_file: /config/user_data
network_data_file: /config/network_data.json


@ -0,0 +1,26 @@
- name: "Cloud Init | creating {{ root_image }}/openstack/latest directory"
file:
path: "{{ root_image }}/openstack/latest"
state: directory
mode: '0755'
#- name: "Cloud Init | Setting cloud-init datasource list"
# copy:
# content: "datasource_list: [ ConfigDrive, None ]"
# dest: "{{ root_image }}/etc/cloud/cloud.cfg.d/95_no_cloud_ds.cfg"
- name: "Cloud Init | seeding meta data"
copy:
src: "{{ meta_data_file }}"
dest: "{{ root_image }}/openstack/latest/meta_data.json"
- name: "Cloud Init | seeding user data"
copy:
src: "{{ user_data_file }}"
dest: "{{ root_image }}/openstack/latest/user_data"
- name: "Cloud Init | seeding network data"
copy:
src: "{{ network_data_file }}"
dest: "{{ root_image }}/openstack/latest/network_data.json"


@ -0,0 +1,29 @@
- name: "ISO | Ensure any old iso image at target location is removed"
file:
state: absent
path: "{{ img_output_dir }}/{{ img_name }}"
- name: "ISO | Ensuring {{ img_output_dir }} directory exists"
file:
path: "{{ img_output_dir }}"
state: directory
mode: '0755'
- name: "ISO | Writing ISO with xorriso"
shell:
cmd: |
xorriso \
-as mkisofs \
-iso-level 3 \
-full-iso9660-filenames \
-volid "config-2" \
--grub2-boot-info \
--grub2-mbr /usr/lib/grub/i386-pc/boot_hybrid.img \
-eltorito-boot boot/grub/bios.img \
-no-emul-boot \
-boot-load-size 4 \
-boot-info-table \
--eltorito-catalog boot/grub/boot.cat \
-output {{ img_output_dir }}/{{ img_name }} \
-graft-points \
{{ root_image }}


@ -0,0 +1,10 @@
- name: "Task | Including any user-defined vars"
include_vars:
file: main.yaml
name: user-vars
- name: "Task | Preparing Cloud-Init data"
include_tasks: cloud-init.yaml
- name: "Task | ISO production"
include_tasks: iso.yaml


@ -0,0 +1 @@
# This file will be overwritten by the container entrypoint with user-provided vars, if any are defined.


@ -0,0 +1,3 @@
root_chroot: /mnt/rootfs
root_image: /mnt/image
boot_src: /opt/grub


@ -0,0 +1,54 @@
- name: ansible copy file locally.
copy:
src: "{{ item }}"
dest: "{{ root_image }}/vmlinuz"
remote_src: yes
with_fileglob: "{{ root_chroot }}/boot/vmlinuz-*"
- name: ansible copy file locally.
copy:
src: "{{ item }}"
dest: "{{ root_image }}/initrd"
remote_src: yes
with_fileglob: "{{ root_chroot }}/boot/initrd.img-*"
- name: "Stamp out a marker file for grub to use when identifying the desired boot volume"
copy:
#TODO: populate this with meaningful content
content: "{{ ansible_date_time.date }}"
dest: "{{ root_image }}/AIRSHIP_EPHEMERAL"
- name: "create directory for boot image assembly"
tempfile:
state: directory
suffix: bootimg
register: bootimg_builddir
- name: "write out grub config"
template:
src: grub-livecd.cfg.j2
dest: "{{ bootimg_builddir.path }}/grub.cfg"
- name: "making standalone grub"
shell:
cmd: |
grub-mkstandalone \
--format=i386-pc \
--output="{{ bootimg_builddir.path }}/core.img" \
--install-modules="linux normal iso9660 biosdisk memdisk search tar ls all_video" \
--modules="linux normal iso9660 biosdisk search" \
--locales="" \
--fonts="" \
boot/grub/grub.cfg="{{ bootimg_builddir.path }}/grub.cfg"
- name: "ensuring directory {{ root_image }}/boot/grub exists"
file:
path: "{{ root_image }}/boot/grub"
state: directory
mode: '0755'
- name: "assembling boot img"
shell:
cmd: cat /usr/lib/grub/i386-pc/cdboot.img {{ bootimg_builddir.path }}/core.img > {{ root_image }}/boot/grub/bios.img


@ -0,0 +1,10 @@
- name: "Including any user-defined vars"
include_vars:
file: main.yaml
name: user-vars
- name: "building squshfs"
include_tasks: squashfs.yaml
- name: "building livecd"
include_tasks: livecd.yaml


@ -0,0 +1,14 @@
- name: "ensuring directory {{ root_image }}/live exists"
file:
path: "{{ root_image }}/live"
state: directory
mode: '0755'
- name: "Building squashfs"
shell:
cmd: |
mksquashfs \
"{{ root_chroot }}" \
"{{ root_image }}/live/filesystem.squashfs" \
-processors {{ ansible_processor_vcpus }} \
-e boot


@ -0,0 +1,11 @@
search --set=root --file /AIRSHIP_EPHEMERAL
insmod all_video
set default="0"
set timeout=1
menuentry "Airship Ephemeral" {
linux /vmlinuz boot=live quiet nomodeset overlay-size=70% systemd.unified_cgroup_hierarchy=0 ds=ConfigDrive
initrd /initrd
}


@ -0,0 +1 @@
# This file will be overwritten by the container entrypoint with user-provided vars, if any are defined.


@ -0,0 +1,94 @@
rootfs_root: /mnt/rootfs
rootfs_arch: amd64
repos:
- register_repo_with_rootfs: true
name: Ubuntu
packages:
- apt-file
- apt-utils
- apt-transport-https
- arptables
- bash-completion
- bc
- bridge-utils
- chrony
- cloud-init
- conntrack
- curl
- dnsutils
- dosfstools
- e2fsprogs
- ebtables
- ethtool
- file
- gettext-base
#- kdump-tools # cannot install until after kernel is available
#- grub2 # cannot install until after boot partition is available
- ifenslave
- isc-dhcp-client
- iproute2
- iptables
- iputils-arping
- iputils-ping
- iputils-tracepath
- ipvsadm
- less
- linux-image-generic # this will be reinstalled later when the boot partition is available
- live-boot
- locales
- locales-all
- lsb-release
- lsof
- man-db
- mbr
- netplan.io
- net-tools
- networkd-dispatcher # required for netplan post-up scripts
- openssh-server
- passwd
- python3
- python3-apt
- socat
- systemd
- systemd-sysv
- strace
- sudo
- tcpdump
- traceroute
- vim
- vlan
- xfsprogs
- xz-utils
source: http://archive.ubuntu.com/ubuntu/
keyring_pkg: ubuntu-keyring
suite: focal
components: main restricted universe
- register_repo_with_rootfs: true
name: Ubuntu-Updates
packages: []
source: http://archive.ubuntu.com/ubuntu/
# NOTE: We comment this out as the package comes from the "focal" suite
# keyring_pkg: ubuntu-keyring
suite: focal-updates
omitdebsrc: "true"
components: main restricted universe
- register_repo_with_rootfs: true
name: Ubuntu-Security
packages: []
source: http://archive.ubuntu.com/ubuntu/
# NOTE: We comment this out as the package comes from the "focal" suite
# keyring_pkg: ubuntu-keyring
suite: focal-security
omitdebsrc: "true"
components: main restricted universe
- register_repo_with_rootfs: true
name: Docker
packages:
- docker-ce
- docker-ce-cli
- containerd.io
source: https://download.docker.com/linux/ubuntu
keyring_url: https://download.docker.com/linux/ubuntu/gpg
suite: focal
omitdebsrc: "true"
components: stable


@ -0,0 +1,30 @@
- when: item.keyring_url is defined
block:
- name: "ensuring directory {{ rootfs_root }}/etc/apt/trusted.gpg.d exists"
file:
path: "{{ rootfs_root }}/etc/apt/trusted.gpg.d"
state: directory
mode: '0755'
- name: "create temporary directory for {{ item.name }}'s key'"
tempfile:
state: directory
suffix: aptkey
register: aptkey_tmpdir
- name: "Download {{ item.keyring_url }} for {{ item.name }} repo"
get_url:
url: "{{ item.keyring_url }}"
dest: "{{ aptkey_tmpdir.path }}/Release.key"
mode: '0440'
- name: "Installing keyring {{ item.name }}"
shell:
cmd: gpg --no-options --no-default-keyring --no-auto-check-trustdb --trustdb-name {{ rootfs_root }}/etc/apt/trusted.gpg --no-keyring --import-options import-export --import --import {{ aptkey_tmpdir.path }}/Release.key > {{ rootfs_root }}/etc/apt/trusted.gpg.d/{{ item.name }}.gpg
- when: item.keyring_pkg is defined
block:
- name: Update the apt cache
apt:
update_cache: yes
- name: "Apt keyring package defined for {{ item.name }} repo, ensuring that this is present on the build host (note that this means you need access to it in the apt sources of the builder)"
apt:
name: "{{ item.keyring_pkg }}"
state: present


@ -0,0 +1,51 @@
- name: "Including any user-defined vars"
include_vars:
file: main.yaml
name: user-vars
- name: "ensuring directory {{ rootfs_root }} exists for rootfs"
file:
path: "{{ rootfs_root }}"
state: directory
mode: '0755'
- name: "create temporary directory for multistrap config"
tempfile:
state: directory
suffix: multistrap
register: multistrap_tempdir
- name: "write out multistrap config"
template:
src: multistrap.conf.j2
dest: "{{ multistrap_tempdir.path }}/multistrap.conf"
validate: multistrap --simulate -f %s
- name: "install required apt keys manually"
include_tasks: apt-key-install.yaml
loop: "{{ repos }}"
- name: "ensuring directory {{ rootfs_root }}/dev exists for chroot"
file:
path: "{{ rootfs_root }}/dev"
state: directory
mode: '0755'
- name: "Setting up devices for chroot"
shell: |
mknod "{{ rootfs_root }}/dev/random" c 1 8
chmod 640 "{{ rootfs_root }}/dev/random"
chown 0:0 "{{ rootfs_root }}/dev/random"
mknod "{{ rootfs_root }}/dev/urandom" c 1 9
chmod 640 "{{ rootfs_root }}/dev/urandom"
chown 0:0 "{{ rootfs_root }}/dev/urandom"
- name: "Running multistrap"
shell:
cmd: "multistrap -f {{ multistrap_tempdir.path }}/multistrap.conf"
- name: "Lock sources.list to prevent conflict and duplicates with multistrap repo list"
shell: |
set -e
if [ -f {{ rootfs_root }}/etc/apt/sources.list ]; then rm {{ rootfs_root }}/etc/apt/sources.list; fi
ln -s /dev/null {{ rootfs_root }}/etc/apt/sources.list


@ -0,0 +1,31 @@
#jinja2: trim_blocks:False
[General]
arch={{ rootfs_arch }}
directory={{ rootfs_root }}
# same as --tidy-up option if set to true
cleanup=true
# same as --no-auth option if set to true
# keyring packages listed in each bootstrap will
# still be installed.
noauth=false
# extract all downloaded archives (default is true)
unpack=true
#omitrequired=true
# enable MultiArch for the specified architectures
# default is empty
#multiarch=allowed
# the order of sections is not important.
# the bootstrap option determines which repository
# is used to calculate the list of Priority: required packages.
# "bootstrap" lists the repos which will be used to create the multistrap itself. Only
# Packages listed in "bootstrap" will be downloaded and unpacked by multistrap.
bootstrap={% set space = joiner(" ") %}{% for repo in repos %}{{ space() }}{{ repo.name }}{% endfor %}
# aptsources is a list of sections to be used for downloading packages
# and lists and placed in the /etc/apt/sources.list.d/multistrap.sources.list
# of the target. Order is not important
aptsources={% set space = joiner(" ") %}{% for repo in repos %}{% if repo.register_repo_with_rootfs == true %}{{ space() }}{{ repo.name }}{% endif %}{% endfor %}
{% for repo in repos %}
[{{ repo.name }}]
{% set newline = joiner("\n") %}{% for key, value in repo.items() %}{% if ( key != 'name' ) and ( key != 'keyring_url' ) %}{{ newline() }}{% if key == 'keyring_pkg' %}keyring{% else %}{{ key }}{% endif %}={% if value %}{% if key == 'packages' %}{{ value|join(' ') }}{% else %}{{ value }}{% endif %}{% endif %}{% endif %}{% endfor %}
{% endfor %}


@ -0,0 +1 @@
# This file will be overwritten by the container entrypoint with user-provided vars, if any are defined.


@ -0,0 +1,113 @@
rootfs_root: /mnt/rootfs
cni_version: v0.8.2
k8s_version: v1.18.6
kernel:
base_pkg: linux-image-generic
headers_pkg: linux-headers-generic
modules:
load:
- name: 8021q
- name: bonding
- name: ip_vs
- name: ip_vs_rr
- name: ip_vs_wrr
- name: ip_vs_sh
- name: br_netfilter
blacklist:
- name: krbd
banners:
login: |
Airship Node \l: \n.\o
Kernel: \s \m \r \v
IP address: \4
motd: |
#!/bin/sh
. /etc/lsb-release
printf "Airship Node, based on: %s (%s %s %s)\n" "$DISTRIB_DESCRIPTION" "$(uname -o)" "$(uname -r)" "$(uname -m)"
kubelet:
# Add only image-builder appropriate kubelet args here.
# Add all others to kubeadmcontrolplane.yaml
extra_systemd_args: []
#- name: reserved-cpus
# value: '0-3'
grub:
GRUB_TIMEOUT: 5
GRUB_CMDLINE_LINUX_DEFAULT:
- name: cgroup_disable
value: 'hugetlb'
limits:
- name: core_dump
domain: '0:'
type: 'hard'
item: 'core'
value: 0
sysctl:
- name: net.ipv4.ip_forward
value: '1'
# Any directories to create on disk can be defined here
directories:
# Full path to file to create
- name: /tmp/testdir
permissions: '0755'
owner: root
group: root
# Any files to write to disk can be defined here
files:
# Full path to file to create
- name: /tmp/testdir/test.sh
file_content: |
#!/bin/bash
echo hello world
permissions: '0755'
owner: root
group: root
systemd:
# Full name, including systemd suffix: sample.service, sample.mount, sample.timer, etc.
- name: sample.service
file_content: |
[Unit]
Description=sample service
After=network.target
[Service]
ExecStart=/bin/sleep infinity
[Install]
WantedBy=multi-user.target
# whether the target image should run this service on boot
enabled: yes
# whether to override existing symlinks (e.g. name collision).
# Use only if you are intending to overwrite an existing systemd unit
force: no
# Note: You are encouraged to build your own image-builder container, where your desired
# package list can be supplied to multistrap during the container build. However, this
# option will allow you to layer additional packages (installed during container runtime,
# instead of during the container build) where a customized container build is not possible
# or not desired.
# This is also needed for a specific subset of packages that fail to install successfully
# with multistrap (e.g., kdump-tools).
post_install_package_list:
- kdump-tools
- apparmor
- dbus
- rsyslog
- logrotate
# Any other adjustments to file or directory permissions, for files that already exist.
file_permissions:
# Full path to file to create
- name: /tmp/testdir/test.sh
permissions: '0700'
owner: root
group: root


@ -0,0 +1,29 @@
- name: "MOTD | Set Login Prompt"
copy:
content: "{{ banners.login }}\n"
dest: "/etc/issue"
owner: root
group: root
mode: '0644'
- name: "Finalise | Reset MOTD"
file:
state: "{{ item }}"
path: "/etc/update-motd.d/"
owner: root
group: root
mode: '0755'
with_items:
- absent
- directory
- name: "Finalise | Remove MOTD News config"
file:
state: "absent"
path: "/etc/default/motd-news"
- name: "MOTD | Set MOTD"
copy:
content: "{{ banners.motd }}"
dest: "/etc/update-motd.d/00-motd"
owner: root
group: root
mode: '0755'


@ -0,0 +1,12 @@
- name: "Cloud-Init | configure network renderer"
copy:
content: |
# prefer to render via netplan instead of /etc/network/interfaces even if ifupdown is present
system_info:
network:
renderers: ['netplan', 'eni', 'sysconfig']
dest: "/etc/cloud/cloud.cfg.d/90-override-renderer.cfg"
- name: "Cloud-Init | Mask ssh.socket allowing cloud-init to configure without failures"
systemd:
masked: yes
name: ssh.socket


@ -0,0 +1,12 @@
- name: "CNI | ensuring directory /opt/cni/bin exists"
file:
path: "/opt/cni/bin"
state: directory
mode: '0755'
- name: "CNI | downloading and installing CNI binaries"
delegate_to: localhost
unarchive:
src: "https://github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-amd64-{{ cni_version }}.tgz"
dest: "{{ rootfs_root }}/opt/cni/bin"
remote_src: yes


@ -0,0 +1,40 @@
- name: "CRI-O | ensuring directory /etc/crio exists"
file:
path: "/etc/crio"
state: directory
mode: '0755'
- name: "CRI-O | Setting up crio"
shell:
cmd: "crio config > /etc/crio/crio.conf"
- name: "CRI-O | configure runc path"
ini_file:
path: /etc/crio/crio.conf
section: "crio.runtime.runtimes.runc"
option: runtime_path
value: "\"/usr/sbin/runc\""
- name: "CRI-O | configure cgroup manager"
ini_file:
path: /etc/crio/crio.conf
section: "crio.runtime"
option: cgroup_manager
value: "\"systemd\""
- name: "CRI-O | configure logs to also output to journald"
ini_file:
path: /etc/crio/crio.conf
section: "crio"
option: log_to_journald
value: "true"
- name: "CRI-O | Disabling systemd unit"
systemd:
enabled: no
name: crio.service
- name: "CRI-O | Ensuring systemd preset directory exists"
file:
path: "/etc/systemd/system-preset"
state: directory
mode: '0755'
- name: "CRI-O | Dont enable kubelet unit by default"
copy:
content: 'disable crio.service'
dest: /etc/systemd/system-preset/00-crio.preset


@ -0,0 +1,8 @@
- name: "File Permissions | Modifying file or directory permissions for {{ item.name }}"
file:
path: "{{ item.name }}"
state: file
mode: "{{ item.permissions }}"
owner: "{{ item.owner }}"
group: "{{ item.group }}"
with_items: "{{ file_permissions }}"


@ -0,0 +1,5 @@
- name: "Finalise | Removing .pyc files"
shell:
cmd: |
find "/usr/" "/var/" \( -name "*.pyc" -o -name "__pycache__" \) -delete


@ -0,0 +1,6 @@
# Settings here will be applied to /boot/grub/grub.cfg when grub is installed
- name: "Grub | Grub config"
template:
src: grub.j2
dest: "/etc/default/grub"
mode: 0644


@ -0,0 +1,11 @@
# airshipctl cloud-init will overwrite with its own /etc/hostname and /etc/hosts fqdn
- name: "hostname and hosts | write out hostname file"
template:
src: hostname.j2
dest: "/etc/hostame"
mode: 0644
- name: "hostname and hosts | write out hosts file"
template:
src: hosts.j2
dest: "/etc/hosts"
mode: 0644


@ -0,0 +1,7 @@
# TODO: Move to airshipctl cloud-init to support customized post-up cmds (ethtool, etc)
#- name: "ifup-hooks | Defining ifup-hooks: routes, ethtool, etc"
# template:
# src: ifup-hooks.j2
# dest: "/etc/networkd-dispatcher/routable.d/50-ifup-hooks"
# mode: 0755
# when: ifup-hooks is defined


@ -0,0 +1,25 @@
- name: "Kubernetes | Download and install binaries for {{ k8s_version }}"
delegate_to: localhost
get_url:
url: "https://storage.googleapis.com/kubernetes-release/release/{{ k8s_version }}/bin/linux/amd64/{{ item }}"
dest: "{{ rootfs_root }}/usr/bin/{{ item }}"
mode: '0755'
with_items:
- kubeadm
- kubelet
- kubectl
- name: "Kubernetes | write out kubelet unit file"
template:
src: kubelet.service.j2
dest: "/etc/systemd/system/kubelet.service"
mode: 0644
- name: "Kubernetes | Ensuring systemd preset directory exists"
file:
path: "/etc/systemd/system-preset"
state: directory
mode: '0755'
- name: "Kubernetes | Dont enable kubelet unit by default"
copy:
content: 'disable kubelet.service'
dest: /etc/systemd/system-preset/00-kubelet.preset


@ -0,0 +1,6 @@
- name: "Limits | Defining security limits"
template:
src: limits.j2
dest: "/etc/security/limits.d/99-{{ item.name }}.conf"
mode: 0644
with_items: "{{ limits }}"


@ -0,0 +1,5 @@
- name: "locale | write out locale config"
template:
src: locale.j2
dest: "/etc/default/locale"
mode: 0644


@ -0,0 +1,43 @@
- name: "Including any user-defined vars"
include_vars:
file: main.yaml
name: user-vars
- block:
- name: "write user-provided files"
include_tasks: write-user-files.yaml
- name: "configure cloud-init"
include_tasks: cloud-init.yaml
- name: "configure modules"
include_tasks: modules.yaml
- name: "configure limits"
include_tasks: limits.yaml
- name: "configure sysctl"
include_tasks: sysctl.yaml
- name: "configure grub"
include_tasks: grub.yaml
- name: "install and configure cni binaries"
include_tasks: cni.yaml
- name: "configure kubernetes"
include_tasks: kubernetes.yaml
- name: "configure locale"
include_tasks: locale.yaml
- name: "configure hostname and hosts"
include_tasks: hostname-hosts.yaml
- name: "configure banners"
include_tasks: banners.yaml
- name: "unattended upgrades"
include_tasks: unattended-upgrades.yaml
- name: "configure base systemd"
include_tasks: systemd.yaml
- name: "configure user-defined systemd"
include_tasks: systemd-user.yaml
- name: "configure file permissions"
include_tasks: file-permissions.yaml
- name: "finalise rootfs"
include_tasks: finalise-rootfs.yaml
tags: pre_install
- block:
- name: "POST-INSTALL | Starting post-install"
include_tasks: post-install.yaml
tags: post_install


@ -0,0 +1,12 @@
- name: "Modules | Defining modules to load"
template:
src: kernelmodules.j2
dest: "/etc/modules-load.d/99-{{ item.name }}.conf"
mode: 0644
with_items: "{{ kernel.modules.load }}"
- name: "Modules | Defining modules to blacklist"
kernel_blacklist:
name: "{{ item.name }}"
state: present
with_items: "{{ kernel.modules.blacklist }}"


@ -0,0 +1,97 @@
- name: "POST-INSTALL | DNS sanity check"
shell:
executable: /bin/bash
cmd: |
set -e
proxy="{{ lookup('env', 'HTTP_PROXY') }}"
# Ensure proxy address is resolvable, if supplied as a domain name
if [[ -n $proxy ]]; then
# Extract proxy server address from url
proxy_address="$(echo "$proxy" | awk -F/ '{print $3}' | awk -F: '{print $1}')"
# If first letter of proxy address is a letter, verify that a DNS lookup is possible
if [[ $proxy_address == [a-zA-Z]* ]]; then
echo "proxy check for '$proxy_address' ..."
nslookup $proxy_address > /dev/null || (
echo "Failed to resolve proxy '$proxy_address' with dns server '$(cat /etc/resolv.conf)'."
echo "Reconfigure DNS setting provided in the 'qcow' playbook to a DNS server that can resolve '$proxy_address'."
exit 1
)
fi
fi
echo "archive.ubuntu.com DNS check ..."
nslookup archive.ubuntu.com || (
echo "DNS lookup failure for archive.ubuntu.com with '$(cat /etc/resolv.conf)'"
exit 1
)
- name: "POST-INSTALL | update source list"
apt:
update_cache: yes
- name: "POST-INSTALL | generate locales"
shell: |
set -e
locale-gen en_US.UTF-8
- name: "POST-INSTALL | Remove incomplete kernel install by multistrap"
shell: |
set -e
apt-get remove -y '^linux-image-.*'
apt-get remove -y '^linux-modules-.*'
- name: "POST-INSTALL | install grub2 and kernel"
apt:
pkg:
- grub2
- grub-efi-amd64-signed
- efivar
- "{{ kernel.base_pkg }}"
- "{{ kernel.headers_pkg }}"
- kmod
- name: "POST-INSTALL | grub-install LEGACY"
shell: |
set -e
grub-install --target=i386-pc --no-uefi-secure-boot --skip-fs-probe --force "{{ lookup('file', '/tmp/nbd') }}"
grub-install --target=i386-pc --no-uefi-secure-boot --skip-fs-probe --force --recheck "{{ lookup('file', '/tmp/nbd') }}"
when: uefi is not defined
- name: "POST-INSTALL | grub-install UEFI"
shell: |
set -e
grub-install --target=x86_64-efi --uefi-secure-boot --skip-fs-probe --force "{{ lookup('file', '/tmp/nbd') }}"
grub-install --target=x86_64-efi --uefi-secure-boot --skip-fs-probe --force --recheck "{{ lookup('file', '/tmp/nbd') }}"
when: uefi is defined
- name: "POST-INSTALL | generate grub cfg file"
shell: |
set -e
update-grub
- name: "POST-INSTALL | install other user-requested packages, and kernel-dependent pkgs and ones that fail to install with multistrap"
apt:
pkg: "{{ post_install_package_list }}"
- name: "POST-INSTALL | write root partition UUID to grub.cfg"
shell: |
set -e
cp -r /usr/lib/grub/* /boot/grub
blkid -s UUID -o value $(df -h | grep /$ | awk "{print \$1}") > /tmp/root_uuid
sed -i "s@root=/dev/nbd[0-9]p[0-9]@root=UUID=$(cat /tmp/root_uuid)@g" /boot/grub/grub.cfg
rm /tmp/root_uuid
- name: "POST-INSTALL | write boot partition UUID to UEFI grub.cfg"
shell: |
set -e
blkid -s UUID -o value $(df -h | grep /boot$ | awk "{print \$1}") > /tmp/boot_uuid
echo "search.fs_uuid $(cat /tmp/boot_uuid) root hd0,gpt2" > /boot/efi/EFI/ubuntu/grub.cfg
echo 'set prefix=(\$root)'\''/grub'\''' >> /boot/efi/EFI/ubuntu/grub.cfg
echo 'configfile \$prefix/grub.cfg' >> /boot/efi/EFI/ubuntu/grub.cfg
rm /tmp/boot_uuid
when: uefi is defined
- name: "POST-INSTALL | cleanup deb cache"
shell: |
set -e
rm /var/cache/apt/archives/*.deb


@ -0,0 +1,6 @@
- sysctl:
name: "{{ item.name }}"
value: "{{ item.value }}"
state: present
reload: no
with_items: "{{ sysctl }}"


@ -0,0 +1,7 @@
# TODO - move to airshipctl cloud-init process, where domain parameter is available
#- name: "systemd-resolved | Conf file for systemd-resolved DNS settings"
# template:
# src: resolved.j2
# dest: "/etc/systemd/resolved.conf"
# mode: 0644
# when: domain is defined


@ -0,0 +1,13 @@
- name: "Systemd | Writing user-provided systemd unit {{ item.name }}"
template:
src: generic-file-writer.j2
dest: "/etc/systemd/system/{{ item.name }}"
with_items: "{{ systemd }}"
- name: "Systemd | Configuring user-provided systemd unit {{ item.name }}"
systemd:
name: "{{ item.name }}"
enabled: "{{ item.enabled }}"
force: "{{ item.force }}"
with_items: "{{ systemd }}"


@ -0,0 +1,19 @@
- name: "Systemd | Remove machine id"
copy:
content: ""
dest: "/etc/machine-id"
- name: "Systemd | Link systemd to /sbin/init"
file:
src: /bin/systemd
dest: /sbin/init
owner: root
group: root
state: link
- name: "Systemd | Enable Systemd Networkd"
systemd:
enabled: yes
name: systemd-networkd.service
- name: "Systemd | Enable Systemd Networkd-dispatcher"
systemd:
enabled: yes
name: networkd-dispatcher.service


@ -0,0 +1,19 @@
- name: "unattended-upgrades | disable apt-daily timer"
file:
path: /etc/systemd/system/timers.target.wants/apt-daily.timer
state: absent
- name: "unattended-upgrades | disable apt-daily-upgrade timer"
file:
path: /etc/systemd/system/timers.target.wants/apt-daily-upgrade.timer
state: absent
- name: "unattended-upgrades | check for apt-daily cron"
stat:
path: /etc/cron.daily/apt-compat
register: stat_result
- name: "unattended-upgrades | disable apt-daily cron"
file:
path: /etc/cron.daily/apt-compat
mode: '0644'
when: stat_result.stat.exists


@ -0,0 +1,17 @@
- name: "User Directories | Creating user-provided directory {{ item.name }}"
file:
path: "{{ item.name }}"
state: directory
mode: "{{ item.permissions }}"
owner: "{{ item.owner }}"
group: "{{ item.group }}"
with_items: "{{ directories }}"
- name: "User Files | Writing user-provided file {{ item.name }}"
template:
src: generic-file-writer.j2
dest: "{{ item.name }}"
mode: "{{ item.permissions }}"
owner: "{{ item.owner }}"
group: "{{ item.group }}"
with_items: "{{ files }}"


@ -0,0 +1 @@
{{ item.file_content }}


@ -0,0 +1,15 @@
# If you change this file, run 'update-grub' afterwards to update
# /boot/grub/grub.cfg.
# For full documentation of the options in this file, see:
# info -f grub -n 'Simple configuration'
GRUB_DEFAULT=0
GRUB_TIMEOUT_STYLE=menu
GRUB_TIMEOUT={{ grub.GRUB_TIMEOUT }}
GRUB_DISTRIBUTOR=`lsb_release -i -s 2> /dev/null || echo Debian`
{% set ns = namespace (content = '') %}
{% for arg in grub.GRUB_CMDLINE_LINUX_DEFAULT %}
{% set ns.content = ns.content + ' ' + arg.name + '=' + arg.value %}
{% endfor %}
GRUB_CMDLINE_LINUX_DEFAULT="{{ ns.content }}"
GRUB_CMDLINE_LINUX=""


@ -0,0 +1 @@
localhost


@ -0,0 +1 @@
127.0.0.1 localhost


@ -0,0 +1,5 @@
#!/bin/bash
{% for cmd in ifup-hooks %}
{{ cmd }}
{% endfor %}


@ -0,0 +1,9 @@
{% if "args" in item %}
{% set content = item.name %}
{% for arg in item.args %}
{% set content = content + ' ' + arg.name + '=' + arg.value %}
{% endfor %}
{{ content }}
{% else %}
{{ item.name }}
{% endif %}


@ -0,0 +1,25 @@
[Unit]
Description=kubelet: The Kubernetes Node Agent
Documentation=https://kubernetes.io/docs/home/
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
EnvironmentFile=-/etc/default/kubelet
{% set ns = namespace (content = '') %}
{% for arg in kubelet.extra_systemd_args %}
{% set ns.content = ns.content + ' --' + arg.name + '=' + arg.value %}
{% endfor %}
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS {{ ns.content }}
CPUAffinity=
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=multi-user.target


@ -0,0 +1 @@
{{ item.domain }} {{ item.type }} {{ item.item }} {{ item.value }}


@ -0,0 +1,4 @@
LANGUAGE=C
LANG=C
LC_ALL=C
LC_TERMINAL=C


@ -0,0 +1,4 @@
# TODO - move to airshipctl cloud-init, where domain parameters etc will be available
#[Resolve]
#Domains={{ domain }}


@ -0,0 +1 @@
# This file will be overwritten by the container entrypoint with user-provided vars, if any are defined.


@ -0,0 +1,46 @@
root_chroot: /mnt/rootfs
nbd_build_dir: /tmp/nbd_build_dir
img_output_dir: /config
img_name: airship-ubuntu.qcow2
dns: 8.8.8.8
qcow_capacity: 5G
partitions:
# Ubuntu default is for a separate vfat partition for efi
- mount: none
mount_order: 99
part_start: 1MiB
part_end: 5MiB
flags:
- bios_grub
- mount: /boot/efi
mount_order: 2
part_start: 5MiB
part_end: 516MiB
flags:
- esp
filesystem:
type: vfat
fstab:
options: "defaults,errors=remount-ro,noatime"
dump: 0
fsck: 1
- mount: /boot
mount_order: 1
part_start: 516MiB
part_end: 1284MiB
filesystem:
type: ext4
fstab:
options: "defaults,errors=remount-ro,noatime"
dump: 0
fsck: 2
- mount: /
mount_order: 0
part_start: 1284MiB
part_end: '100%'
filesystem:
type: ext4
fstab:
options: "defaults,errors=remount-ro,noatime"
dump: 0
fsck: 1


@ -0,0 +1,22 @@
- name: "QCOW | Installing extlinux"
shell: |
mkdir -p "{{ root_chroot }}"/boot/syslinux
extlinux --install "{{ root_chroot }}"/boot/syslinux/ --device /dev/disk/by-partlabel/{{ ( partitions | selectattr('mount', 'equalto', '/boot') | list | first ).mount | hash('md5') }}
- name: "QCOW | Writing out syslinux config"
copy:
content: |
DEFAULT linux
SAY Booting Airship Node
LABEL linux
KERNEL /vmlinuz
APPEND root=/dev/disk/by-partlabel/{{ ( partitions | selectattr('mount', 'equalto', '/') | list | first ).mount | hash('md5') }} initrd=/initrd.img
dest: ""{{ root_chroot }}/boot/syslinux/syslinux.cfg"
- name: "QCOW | Installing kernel and init ramdisk"
shell: |
rm -rf "{{ root_chroot }}"/vmlinuz
cp -f /mnt/image/vmlinuz "{{ root_chroot }}"/boot/
rm -rf /tmp/mnt/initrd.img
cp -f /mnt/image/initrd "{{ root_chroot }}"/boot/initrd.img


@ -0,0 +1,16 @@
- name: "QCOW | copy ansible playbooks to target image"
shell: |
set -e
cp -r /opt/assets "{{ root_chroot }}"/opt
- name: "QCOW | unmount target"
shell: |
set -e
# restore resolv.conf
chroot "{{ root_chroot }}" /bin/bash -c 'rm /etc/resolv.conf; cd /etc; ln -s ../run/systemd/resolve/stub-resolv.conf resolv.conf'
cd "{{ root_chroot }}"
mountpoint dev/pts > /dev/null && umount dev/pts
mountpoint dev > /dev/null && umount dev
mountpoint sys/firmware/efi > /dev/null && umount sys/firmware/efi
mountpoint sys > /dev/null && umount sys
mountpoint proc > /dev/null && umount proc


@ -0,0 +1,30 @@
- name: "QCOW | mount sys LEGACY"
shell: |
set -e
mkdir -p "{{ root_chroot }}"
cd "{{ root_chroot }}"
mountpoint sys > /dev/null || mount -t sysfs /sys sys
# mount an empty dir to efi directory, otherwise grub will try to configure EFI boot for the target image, **iff** the build node was booted with EFI.
if [ -d sys/firmware/efi ]; then mountpoint sys/firmware/efi > /dev/null || mkdir /dummy; mount -o bind /dummy sys/firmware/efi; fi
when: uefi is not defined
- name: "QCOW | mount sys UEFI"
shell: |
set -e
mkdir -p "{{ root_chroot }}"
cd "{{ root_chroot }}"
mountpoint sys > /dev/null || mount -t sysfs /sys sys
# Required for building UEFI targets
ls /sys/firmware/efi > /dev/null || (echo "efivars not present on build system. Build system must be booted into UEFI mode." && exit 1)
mountpoint sys/firmware/efi > /dev/null || mount -o bind /sys/firmware/efi sys/firmware/efi
when: uefi is defined
- name: "QCOW | Mount remaining targets"
shell: |
set -e
cd "{{ root_chroot }}"
mountpoint proc > /dev/null || mount -t proc /proc proc
mountpoint dev > /dev/null || mount -o bind /dev dev
mountpoint dev/pts > /dev/null || mount -t devpts /dev/pts dev/pts
# temporarily override resolv.conf to working dns
chroot "{{ root_chroot }}" /bin/bash -c 'rm /etc/resolv.conf; echo "nameserver {{ dns }}" > /etc/resolv.conf'


@ -0,0 +1,55 @@
- name: "QCOW | Including any user-defined vars"
include_vars:
file: main.yaml
name: user-vars
- block:
- name: "QCOW | Creating and attaching qcow image"
include_tasks:
file: qcow-create-n-attach.yaml
- name: "QCOW | Creating partitions"
include_tasks:
file: partitions-and-filesystems.yaml
with_indexed_items: "{{ partitions }}"
- name: "QCOW | Mounting filesystems"
include_tasks:
file: mount-helper.yaml
with_items: "{{ partitions | sort( case_sensitive=True, attribute='mount_order' ) }}"
vars:
mount_offset: "{{ root_chroot }}"
state: mounted
fstab: /tmp/junkfstab
- name: "QCOW | Writing image content"
include_tasks:
file: writing-image-content.yaml
- name: "QCOW | chroot prep"
include_tasks:
file: chroot-prep.yaml
tags: prep_img
- block:
- name: "QCOW | chroot cleanup"
include_tasks:
file: chroot-cleanup.yaml
- name: "QCOW | Unmounting filesystems"
include_tasks:
file: mount-helper.yaml
with_items: "{{ partitions | sort( reverse=True, case_sensitive=True, attribute='mount' ) }}"
vars:
mount_offset: "{{ root_chroot }}"
state: unmounted
fstab: /tmp/junkfstab
- name: "QCOW | Detaching and compressing QCoW2"
include_tasks:
file: qcow-detach-n-compress.yaml
tags: close_img


@ -0,0 +1,9 @@
- name: "mount-helper | Setting mount state to {{ state }} for /dev/disk/by-partlabel/{{ item.mount | hash('md5') }} at the mountpoint for {{ item.mount }}"
mount:
path: "{{ mount_offset }}{{ item.mount }}"
src: "/dev/disk/by-partlabel/{{ item.mount | hash('md5') }}"
fstype: "{{ item.filesystem.type }}"
opts: "{{ item.filesystem.fstab.options }}"
state: "{{ state }}"
fstab: "{{ fstab }}"
when: item.mount != 'none'


@ -0,0 +1,27 @@
- name: "QCOW | Creating Partitions"
parted:
device: "{{ lookup('file', '/tmp/nbd') }}"
number: "{{ item.0 + 1 }}"
state: present
label: gpt
flags: "{{ item.1.flags | default(omit) }}"
part_start: "{{ item.1.part_start }}"
part_end: "{{ item.1.part_end }}"
name: "{{ item.1.mount | hash('md5') }}"
align: minimal
# For some reason, udev does not honor the partition label for by-partlabel symlinks, so we rename them here
- name: "QCOW | check for symlink"
stat:
path: /dev/disk/by-partlabel/primary
register: symlink
- name: "QCOW | udev symlink rename"
command: mv /dev/disk/by-partlabel/primary /dev/disk/by-partlabel/{{ item.1.mount | hash('md5') }}
when: symlink.stat.exists
- name: "QCOW | Creating Filesystems"
filesystem:
fstype: "{{ item.1.filesystem.type }}"
dev: "/dev/disk/by-partlabel/{{ item.1.mount | hash('md5') }}"
when: item.1.mount != 'none'


@ -0,0 +1,38 @@
- name: "QCOW | Enabling nbd kernel module"
command: modprobe nbd
- name: "QCOW | 3 second pause after loading nbd kernel module"
pause:
seconds: 3
- name: "QCOW | Finding availible NBD device to use"
shell:
executable: /bin/bash
cmd: |
for dev in /sys/class/block/nbd*; do
size="$(cat "$dev"/size)"
device="/dev/nbd${dev: -1}"
if (( size == 0 )) && ! ls ${device}p* >& /dev/null; then
printf "%s" "$device"
exit 0
fi
done
# NOTE: if we have got this far, then we have not been able to find a suitable nbd device to consume.
exit 1
register: role_img_nbd_device
- name: "QCOW | Creating build directory"
file:
state: directory
path: "{{ nbd_build_dir }}"
- name: "QCOW | Creating QCoW2"
command: qemu-img create -f qcow2 {{ nbd_build_dir }}/{{ img_name }} {{ qcow_capacity }}
- name: "QCOW | Connecting QCoW2 to {{ role_img_nbd_device.stdout }}"
command: qemu-nbd --connect={{ role_img_nbd_device.stdout }} {{ nbd_build_dir }}/{{ img_name }}
- name: "QCOW | Store NBD device"
copy:
content: "{{ role_img_nbd_device.stdout }}"
dest: /tmp/nbd


@ -0,0 +1,8 @@
- name: "QCOW | Detaching QCoW from {{ role_img_nbd_device.stdout }}"
shell: |
qemu-nbd -d "{{ lookup('file', '/tmp/nbd') }}"
- name: "QCOW | Compressing QCoW and writing out to {{ img_output_dir }}/{{ img_name }}"
shell: |
qemu-img convert -p -O qcow2 -c {{ nbd_build_dir }}/{{ img_name }} {{ img_output_dir }}/{{ img_name }}

View File

@ -0,0 +1,16 @@
- name: "QCOW | Writing out rootfs from squashfs"
shell: |
unsquashfs -d "{{ root_chroot }}" -f /mnt/image/live/filesystem.squashfs
exit 0
- name: "QCOW | Writing out fstab"
include_tasks: mount-helper.yaml
with_items: "{{ partitions | sort( case_sensitive=True, attribute='mount' ) }}"
vars:
mount_offset: null
state: present
fstab: "{{ root_chroot }}/etc/fstab"
- name: "QCOW | Setting debug password"
shell: |
chroot "{{ root_chroot }}" sh -c "echo \"root:password\" | chpasswd"

View File

@ -0,0 +1 @@
# This file will be overwritten by the container entrypoint with user-provided vars, if any are defined.

View File

@ -0,0 +1 @@
{"hostname": "ephemeral", "name": "ephemeral", "uuid": "83679162-1378-4288-a2d4-70e13ec132aa"}

View File

@ -0,0 +1,6 @@
version: 2
ethernets:
all-en:
match:
name: "en*"
dhcp4: true

View File

@ -0,0 +1,5 @@
#cloud-config
ssh_authorized_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDCAWBkS5iD7ORK59YUjJlPiWnzZXoFPbxlo8kvXjeGVgtUVD/FORZBvztoB9J1xTgE+DEg0dE2DiVrh3WXMWnUUwyaqjIu5Edo++P7xb53T9xRC7TUfc798NLAGk3CD8XvEGbDB7CD6Tvx7HcAco0WpEcPePcTcv89rZGPjal1nY4kGNT/0TWeECm99cXuWFjKm6WiMrir9ZN1yLcX/gjugrHmAGm8kQ/NJVEDRgSPV6jhppp7P/1+yqIUOOOXLx61d8oVG+ADlXEckXoetqHYjbzisxO/wa2KFM7cb5NTVKHFmxwVKX4kJeRL+I/94yLCiG05PidUFsIMzByPBEe/
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9D1m9eMr75japSYMX0Id/af1pyfDM2I1lPSwi2zZwYo8w0b3AyzV3w4iL8PzHCRmxwcm6/w5TfCxEHu7IzTJ4IkN7vIvJEVFPVCJNunuu1ZYahKkFB8g4q6+nsY6rj2ASpQRNrxkUTN2I4GmTRGB3N21uKe1KqbNuaCt5i0KxW0ydcZgAYZFs56qB8ie053VBeMBhhn3LxROKb7g3+NZ6kHkJiOo6p0q7iXiAOh0nvnSGjuSRGllOx/lPe+rdTN+NzuqWSN4sN9WPMjynqSRBMdI0TD7mI2i7uv67s2XpDIORX9dH6IudrLB4Ypz5QX/5Kxyc7Rk16HLSEn42bplj
hostname: airship

View File

@ -0,0 +1,15 @@
builder:
# Optional. Default imageType = iso
imageType: iso
# Required for imageType = iso
userDataFileName: user_data
# Required for imageType = iso
networkConfigFileName: network_data.json
# Optional. Default outputMetadataFileName = output-metadata.yaml
outputMetadataFileName: output-metadata.yaml
# Optional. Default ISO name = ephemeral.iso
outputFileName: ephemeral.iso
container:
volume: /host/path/to/examples:/config
image: port/image-builder:latest-ubuntu_focal
containerRuntime: docker
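One way this config is consumed (a sketch that assumes the file is saved as iso.yaml; cut_image.sh later in this change wraps essentially the same invocation): mount the examples directory at /config and point the container at the config through BUILDER_CONFIG.

sudo docker run -t --rm \
  --volume /host/path/to/examples:/config \
  --env BUILDER_CONFIG=/config/iso.yaml \
  --env IMAGE_TYPE=iso \
  port/image-builder:latest-ubuntu_focal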

View File

@ -0,0 +1,23 @@
{
"links": [
{
"ethernet_mac_address": "52:54:00:6c:99:85",
"id": "ens3",
"type": "phy"
}
],
"networks": [
{
"id": "network0",
"link": "ens3",
"network_id": "99e88329-f20d-4741-9596-25bf07847b16",
"type": "ipv4_dhcp"
}
],
"services": [
{
"address": "8.8.8.8",
"type": "dns"
}
]
}

View File

@ -0,0 +1,140 @@
rootfs_root: /mnt/rootfs
cni_version: v0.8.2
k8s_version: v1.18.6
kernel:
base_pkg: linux-image-generic
headers_pkg: linux-headers-generic
modules:
load:
- name: 8021q
- name: bonding
- name: ip_vs
- name: ip_vs_rr
- name: ip_vs_wrr
- name: ip_vs_sh
- name: br_netfilter
blacklist:
- name: krbd
banners:
login: |
Airship Node \l: \n.\o
Kernel: \s \m \r \v
IP address: \4
motd: |
#!/bin/sh
. /etc/lsb-release
printf "Airship Node, based on: %s (%s %s %s)\n" "$DISTRIB_DESCRIPTION" "$(uname -o)" "$(uname -r)" "$(uname -m)"
limits:
- name: core_dump
domain: '0:'
type: 'hard'
item: 'core'
value: 0
- name: nofile-root-soft
domain: 'root'
type: 'soft'
item: 'nofile'
value: '65536'
- name: nofile-root-hard
domain: 'root'
type: 'hard'
item: 'nofile'
value: '1048576'
- name: nofile-all-soft
domain: '*'
type: 'soft'
item: 'nofile'
value: '65536'
- name: nofile-all-hard
domain: '*'
type: 'hard'
item: 'nofile'
value: '1048576'
grub:
GRUB_TIMEOUT: 5
GRUB_CMDLINE_LINUX_DEFAULT:
- name: console
value: 'ttyS0,115200n8'
- name: console
value: 'tty0'
- name: amd_iommu
value: 'on'
- name: intel_iommu
value: 'on'
- name: iommu
value: 'pt'
- name: cgroup_disable
value: 'hugetlb'
- name: dpdk-socket-mem
value: '4096,4096'
- name: rcu_nocb_poll
value: 'true'
sysctl:
- name: net.nf_conntrack_max
value: '1048576'
- name: kernel.panic
value: '60'
- name: kernel.pid_max
value: '4194303'
- name: kernel.randomize_va_space
value: '2'
- name: net.ipv4.conf.default.arp_accept
value: '1'
- name: net.ipv4.conf.all.arp_accept
value: '1'
- name: net.core.netdev_max_backlog
value: '261144'
- name: net.ipv4.tcp_keepalive_intvl
value: '3'
- name: net.ipv4.tcp_keepalive_time
value: '30'
- name: net.ipv4.tcp_keepalive_probes
value: '8'
- name: net.ipv4.tcp_retries2
value: '5'
- name: net.ipv4.neigh.default.gc_thresh1
value: '4096'
- name: net.ipv4.neigh.default.gc_thresh2
value: '8192'
- name: net.ipv4.neigh.default.gc_thresh3
value: '16384'
- name: net.ipv4.conf.default.rp_filter
value: '0'
- name: net.ipv6.conf.all.accept_ra
value: '0'
- name: net.ipv6.conf.all.disable_ipv6
value: '1'
- name: net.ipv6.conf.default.accept_ra
value: '0'
- name: net.ipv6.conf.default.disable_ipv6
value: '1'
- name: net.ipv6.conf.lo.accept_ra
value: '0'
- name: net.ipv6.conf.lo.disable_ipv6
value: '0'
- name: net.netfilter.nf_conntrack_acct
value: '1'
- name: fs.suid_dumpable
value: '0'
- name: fs.inotify.max_user_watches
value: '1048576'
- name: fs.protected_hardlinks
value: '1'
- name: fs.protected_symlinks
value: '1'
- name: kernel.sysrq
value: '1'
post_install_package_list:
- kdump-tools
- apparmor
- dbus
- rsyslog
- logrotate

View File

@ -0,0 +1,84 @@
root_chroot: /mnt/rootfs
nbd_build_dir: /tmp/nbd_build_dir
dns: 8.8.8.8
qcow_capacity: 412G
partitions:
# Ubuntu's default layout uses a separate vfat partition for EFI
- mount: none
mount_order: 99
part_start: 1MiB
part_end: 5MiB
flags:
- bios_grub
- mount: /boot/efi
mount_order: 2
part_start: 5MiB
part_end: 516MiB
flags:
- esp
filesystem:
type: vfat
fstab:
options: "defaults,errors=remount-ro,noatime"
dump: 0
fsck: 1
- mount: /boot
mount_order: 1
part_start: 516MiB
part_end: 1284MiB
filesystem:
type: ext4
fstab:
options: "defaults,errors=remount-ro,noatime"
dump: 0
fsck: 2
- mount: /
mount_order: 0
part_start: 1284MiB
part_end: 32GiB
filesystem:
type: ext4
fstab:
options: "defaults,errors=remount-ro,noatime"
dump: 0
fsck: 1
- mount: /var
mount_order: 3
part_start: 32GiB
part_end: 232GiB
filesystem:
type: ext4
fstab:
options: "defaults,errors=remount-ro,noatime"
dump: 0
fsck: 1
- mount: /var/crash
mount_order: 4
part_start: 232GiB
part_end: 302GiB
filesystem:
type: ext4
fstab:
options: "defaults,errors=remount-ro,noatime"
dump: 0
fsck: 1
- mount: /var/lib/openstack-helm
mount_order: 5
part_start: 302GiB
part_end: 312GiB
filesystem:
type: ext4
fstab:
options: "defaults,errors=remount-ro,noatime"
dump: 0
fsck: 1
- mount: /var/log
mount_order: 6
part_start: 312GiB
part_end: '100%'
filesystem:
type: ext4
fstab:
options: "defaults,errors=remount-ro,noatime"
dump: 0
fsck: 1

View File

@ -0,0 +1,42 @@
dns: 8.8.8.8
qcow_capacity: 5G
partitions:
# Ubuntu's default layout uses a separate vfat partition for EFI
- mount: none
mount_order: 99
part_start: 1MiB
part_end: 5MiB
flags:
- bios_grub
- mount: /boot/efi
mount_order: 2
part_start: 5MiB
part_end: 516MiB
flags:
- esp
filesystem:
type: vfat
fstab:
options: "defaults,errors=remount-ro,noatime"
dump: 0
fsck: 1
- mount: /boot
mount_order: 1
part_start: 516MiB
part_end: 1284MiB
filesystem:
type: ext4
fstab:
options: "defaults,errors=remount-ro,noatime"
dump: 0
fsck: 2
- mount: /
mount_order: 0
part_start: 1284MiB
part_end: '100%'
filesystem:
type: ext4
fstab:
options: "defaults,errors=remount-ro,noatime"
dump: 0
fsck: 1

View File

@ -0,0 +1,15 @@
builder:
# Required for building 'qcow' over the 'iso' default
imageType: qcow
# Optional for imageType = qcow. Specify to override 'qcow' playbook default vars.
qcowVarsFileName: qcow-control-plane-vars.yaml
# Optional for imageType = qcow. Specify to override 'osconfig' playbook default vars.
osconfigVarsFileName: osconfig-control-plane-vars.yaml
# Optional. Default outputMetadataFileName = output-metadata.yaml
outputMetadataFileName: output-metadata.yaml
# Optional. When omitted, defaults to playbook variable img_name
outputFileName: control-plane.qcow2
container:
volume: /host/path/to/examples:/config
image: port/image-builder:latest-ubuntu_focal
containerRuntime: docker
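Unlike the ISO path, a qcow build needs NBD and device access on the host, so the container runs privileged with /dev, /sys, and the host's kernel modules mounted in. A trimmed sketch assuming this file is saved as qcow.yaml (cut_image.sh below adds the remaining mounts and proxy plumbing):

sudo modprobe nbd
sudo docker run -t --rm --privileged \
  --volume /dev:/dev:rw \
  --volume /sys:/sys:rw \
  --volume /lib/modules:/lib/modules:rw \
  --volume /host/path/to/examples:/config \
  --env BUILDER_CONFIG=/config/qcow.yaml \
  --env IMAGE_TYPE=qcow \
  port/image-builder:latest-ubuntu_focal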

View File

@ -0,0 +1,26 @@
#cloud-config
ssh_authorized_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDCAWBkS5iD7ORK59YUjJlPiWnzZXoFPbxlo8kvXjeGVgtUVD/FORZBvztoB9J1xTgE+DEg0dE2DiVrh3WXMWnUUwyaqjIu5Edo++P7xb53T9xRC7TUfc798NLAGk3CD8XvEGbDB7CD6Tvx7HcAco0WpEcPePcTcv89rZGPjal1nY4kGNT/0TWeECm99cXuWFjKm6WiMrir9ZN1yLcX/gjugrHmAGm8kQ/NJVEDRgSPV6jhppp7P/1+yqIUOOOXLx61d8oVG+ADlXEckXoetqHYjbzisxO/wa2KFM7cb5NTVKHFmxwVKX4kJeRL+I/94yLCiG05PidUFsIMzByPBEe/
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9D1m9eMr75japSYMX0Id/af1pyfDM2I1lPSwi2zZwYo8w0b3AyzV3w4iL8PzHCRmxwcm6/w5TfCxEHu7IzTJ4IkN7vIvJEVFPVCJNunuu1ZYahKkFB8g4q6+nsY6rj2ASpQRNrxkUTN2I4GmTRGB3N21uKe1KqbNuaCt5i0KxW0ydcZgAYZFs56qB8ie053VBeMBhhn3LxROKb7g3+NZ6kHkJiOo6p0q7iXiAOh0nvnSGjuSRGllOx/lPe+rdTN+NzuqWSN4sN9WPMjynqSRBMdI0TD7mI2i7uv67s2XpDIORX9dH6IudrLB4Ypz5QX/5Kxyc7Rk16HLSEn42bplj
hostname: airship
password: password
ssh_pwauth: True
chpasswd:
expire: false
list: |
root:password
ubuntu:password
users:
- default
- name: password
gecos: password
ssh_pwauth: True
ssh_authorized_keys:
- ssh-rsa xyz...
runcmd:
- set -x
- export PATH=$PATH:/usr/sbin:/sbin
- mkdir -p /opt/metal3-dev-env/ironic/html/images /var/lib/ironic-persisted-data-volume

127
image-builder/tools/cut_image.sh Executable file
View File

@ -0,0 +1,127 @@
#!/bin/bash
set -ex
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" >/dev/null 2>&1 && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
BASEDIR="$( cd -P "$( dirname "$SOURCE" )" >/dev/null 2>&1 && pwd )"
# Whether to build an 'iso' or 'qcow'
build_type="${1:-qcow}"
# The host mount to use to exchange data with this container
host_mount_directory="${2:-$BASEDIR/../examples}"
# Docker image to use when launching this container
image="${3:-port/image-builder:latest-ubuntu_focal}"
# Libvirt instance name to use for a new libvirt XML definition that
# will be created to reference the newly created ISO or QCOW2 image.
img_alias="${4:-port-image-builder-latest-ubuntu_focal-$build_type}"
# Whether or not to build the image with UEFI support.
# NOTE: Machines that are not booted with UEFI will be unable to create
# UEFI images.
uefi_boot="$5"
# proxy to use, if applicable
proxy="$6"
# noproxy to use, if applicable
noproxy="$7"
if [ -n "$proxy" ]; then
export http_proxy=$proxy
export https_proxy=$proxy
export HTTP_PROXY=$proxy
export HTTPS_PROXY=$proxy
fi
if [ -n "$noproxy" ]; then
export no_proxy=$noproxy
export NO_PROXY=$noproxy
fi
if [ -n "$uefi_boot" ]; then
uefi_mount='--volume /sys/firmware/efi:/sys/firmware/efi:rw'
fi
workdir="$(realpath ${host_mount_directory})"
if [[ $build_type = iso ]]; then
sudo -E docker run -t --rm \
--volume $workdir:/config \
--env BUILDER_CONFIG=/config/${build_type}.yaml \
--env IMAGE_TYPE="iso" \
--env http_proxy=$proxy \
--env https_proxy=$proxy \
--env HTTP_PROXY=$proxy \
--env HTTPS_PROXY=$proxy \
--env no_proxy=$noproxy \
--env NO_PROXY=$noproxy \
${image}
disk1="--disk path=${workdir}/ephemeral.iso,device=cdrom"
elif [[ $build_type == qcow ]]; then
sudo -E modprobe nbd
sudo -E docker run -t --rm \
--privileged \
--volume /dev:/dev:rw \
--volume /dev/pts:/dev/pts:rw \
--volume /proc:/proc:rw \
--volume /sys:/sys:rw \
--volume /lib/modules:/lib/modules:rw \
--volume $workdir:/config \
${uefi_mount} \
--env BUILDER_CONFIG=/config/${build_type}.yaml \
--env IMAGE_TYPE="qcow" \
--env http_proxy=$proxy \
--env https_proxy=$proxy \
--env HTTP_PROXY=$proxy \
--env HTTPS_PROXY=$proxy \
--env no_proxy=$noproxy \
--env NO_PROXY=$noproxy \
--env uefi_boot=$uefi_boot \
${image}
cloud_init_config_dir='assets/tests/qcow/cloud-init'
sudo -E cloud-localds -v --network-config="${cloud_init_config_dir}/network-config" "${workdir}/airship-ubuntu_config.iso" "${cloud_init_config_dir}/user-data" "${cloud_init_config_dir}/meta-data"
disk1="--disk path=${workdir}/control-plane.qcow2"
disk2="--disk path=${workdir}/airship-ubuntu_config.iso,device=cdrom"
if [ -n "$uefi_boot" ]; then
uefi_boot_arg='--boot uefi'
fi
else
echo Unknown build type: $build_type, exiting.
exit 1
fi
imagePath=$(echo $disk1 | cut -d'=' -f2 | cut -d',' -f1)
echo Image successfully written to $imagePath
sudo -E virsh destroy ${img_alias} 2> /dev/null || true
sudo -E virsh undefine ${img_alias} --nvram 2> /dev/null || true
cpu_type=''
kvm-ok >& /dev/null && cpu_type='--cpu host-passthrough' || true
network='--network network=default,mac=52:54:00:6c:99:85'
if ! sudo -E virsh net-list | grep default | grep active > /dev/null; then
network='--network none'
fi
xml=$(mktemp)
sudo -E virt-install --connect qemu:///system \
--name ${img_alias} \
--memory 1536 \
${network} \
${cpu_type} \
--vcpus 4 \
--import \
${disk1} \
${disk2} \
${virt_type} \
${uefi_boot_arg} \
--noautoconsole \
--graphics vnc,listen=0.0.0.0 \
--print-xml > $xml
sudo -E virsh define $xml
echo Virsh definition accepted
echo Image artifact located at $imagePath
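A usage sketch for the script above; all arguments are positional, an empty string skips an argument, and the libvirt name and proxy values here are hypothetical:

# build a qcow image and define a throwaway test VM from it
./image-builder/tools/cut_image.sh qcow ./image-builder/examples \
    port/image-builder:latest-ubuntu_focal my-image-test "" \
    http://proxy.example.com:8080 "localhost,127.0.0.1"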

View File

@ -0,0 +1,10 @@
#!/bin/bash
if [ ! -f /etc/systemd/system/docker.service.d/http-proxy.conf ]; then
sudo mkdir -p /etc/systemd/system/docker.service.d
sudo sh -c 'echo "[Service]" > /etc/systemd/system/docker.service.d/http-proxy.conf'
sudo sh -c "echo \"Environment=\\\"HTTP_PROXY=http://$1/\\\"\" >> /etc/systemd/system/docker.service.d/http-proxy.conf"
sudo sh -c "echo \"Environment=\\\"HTTPS_PROXY=http://$1/\\\"\" >> /etc/systemd/system/docker.service.d/http-proxy.conf"
sudo systemctl daemon-reload
sudo systemctl restart docker
fi

View File

@ -0,0 +1,15 @@
#!/bin/bash
install_pkg(){
dpkg -l $1 >& /dev/null || sudo -E apt-get -y install $1
}
install_pkg qemu-kvm
install_pkg virtinst
install_pkg libvirt-bin
install_pkg cloud-image-utils
install_pkg ovmf
install_pkg efivar
type docker >& /dev/null || install_pkg docker.io
# required for building UEFI image
sudo -E modprobe efivars