Clean up Dockerfiles and how we build them

This commit makes several cleanups to the Dockerfiles we have:

* git is removed from the images after the Kuryr packages are installed
* jq and wget are removed from the images as they are no longer used
* explicit setuptools installation is no longer required
* raw Kuryr code is removed from the images after it's `pip install`ed
  (see the sketch after this list)
* the unnecessary VOLUME line is removed from the kuryr-cni Dockerfile
* the CNI_CONFIG_DIR and CNI_BIN_DIR build arguments are removed from
  the kuryr-cni Dockerfile as they are not used anywhere. Initially we
  kept them to let the deployer choose where the host's /etc/cni/net.d
  and /opt/cni/bin would be mounted, but at some point a refactoring of
  cni_ds_init stopped depending on them and we simply started to expect
  the mounts at the same paths as on the host. We can continue to do
  that.
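
The pattern the Dockerfiles now share is sketched below. This is a
condensed excerpt, not a complete Dockerfile (the real files split the
yum/dnf installs and the pip install across two RUN layers): build
dependencies and the copied source go in, Kuryr gets `pip install`ed,
and both are dropped again in the same breath.

    COPY . /opt/kuryr-kubernetes
    RUN yum install -y --setopt=tsflags=nodocs gcc python-devel git \
        # build and install Kuryr under upper-constraints
        && pip install -c $UPPER_CONSTRAINTS_FILE /opt/kuryr-kubernetes \
        # roll back the yum transaction that pulled in the build deps
        && yum -y history undo last \
        # the installed package no longer needs the raw source tree
        && rm -rf /opt/kuryr-kubernetes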

The build_cni_daemonset_image script was created back when we had a
multi-stage build of the kuryr-cni image. That is no longer the case and
building the image is as easy as:

    docker build -f cni.Dockerfile .

Given that, this commit removes the script and updates the documentation
to recommend using `docker build` directly.
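
The CNI_DAEMON build argument covers the one option the script used to
expose, so the daemonless variant is just as direct:

    docker build -t kuryr/cni -f cni.Dockerfile --build-arg CNI_DAEMON=False .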

Change-Id: Ib1807344ede11ec6845e5f09c5a87c29a779af03
Michał Dulko 2018-12-14 17:44:34 +01:00
parent fe583c3e6d
commit 8b76509514
9 changed files with 36 additions and 129 deletions

cni.Dockerfile

@@ -5,27 +5,21 @@ ARG UPPER_CONSTRAINTS_FILE="https://git.openstack.org/cgit/openstack/requirement
 ARG OSLO_LOCK_PATH=/var/kuryr-lock
 RUN yum install -y epel-release https://rdoproject.org/repos/rdo-release.rpm \
-    && yum install -y --setopt=tsflags=nodocs python-pip iproute bridge-utils openvswitch sudo jq \
-    && yum install -y --setopt=tsflags=nodocs gcc python-devel git \
-    && pip install -U setuptools
+    && yum install -y --setopt=tsflags=nodocs python-pip iproute bridge-utils openvswitch sudo \
+    && yum install -y --setopt=tsflags=nodocs gcc python-devel git
 COPY . /opt/kuryr-kubernetes
-RUN cd /opt/kuryr-kubernetes \
-    && pip install -c $UPPER_CONSTRAINTS_FILE . \
-    && rm -fr .git \
+RUN pip install -c $UPPER_CONSTRAINTS_FILE /opt/kuryr-kubernetes \
+    && cp /opt/kuryr-kubernetes/cni_ds_init /usr/bin/cni_ds_init \
+    && mkdir -p /etc/kuryr-cni \
+    && cp /opt/kuryr-kubernetes/etc/cni/net.d/* /etc/kuryr-cni \
     && yum -y history undo last \
+    && rm -rf /opt/kuryr-kubernetes \
     && mkdir ${OSLO_LOCK_PATH}
-COPY ./cni_ds_init /usr/bin/cni_ds_init
-ARG CNI_CONFIG_DIR_PATH=/etc/cni/net.d
-ENV CNI_CONFIG_DIR_PATH ${CNI_CONFIG_DIR_PATH}
-ARG CNI_BIN_DIR_PATH=/opt/cni/bin
-ENV CNI_BIN_DIR_PATH ${CNI_BIN_DIR_PATH}
 ARG CNI_DAEMON=True
 ENV CNI_DAEMON ${CNI_DAEMON}
 ENV OSLO_LOCK_PATH=${OSLO_LOCK_PATH}
-VOLUME [ "/sys/fs/cgroup" ]
 ENTRYPOINT [ "cni_ds_init" ]

cni_ds_init

@@ -55,7 +55,7 @@ EOF
     # Copy the script into the designated location
     cp /kuryr-cni "/opt/cni/bin/kuryr-cni"
     chmod +x /opt/cni/bin/kuryr-cni
-    cp /opt/kuryr-kubernetes/etc/cni/net.d/* /etc/cni/net.d
+    cp /etc/kuryr-cni/* /etc/cni/net.d
 }
 cleanup

cni_py3.Dockerfile

@@ -5,26 +5,21 @@ ARG UPPER_CONSTRAINTS_FILE="https://git.openstack.org/cgit/openstack/requirement
 ARG OSLO_LOCK_PATH=/var/kuryr-lock
 RUN dnf update -y \
-    && dnf install -y --setopt=tsflags=nodocs python3-pip iproute bridge-utils openvswitch sudo jq \
+    && dnf install -y --setopt=tsflags=nodocs python3-pip iproute bridge-utils openvswitch sudo \
     && dnf install -y --setopt=tsflags=nodocs gcc python3-devel git
 COPY . /opt/kuryr-kubernetes
-RUN cd /opt/kuryr-kubernetes \
-    && pip3 install -c $UPPER_CONSTRAINTS_FILE . \
-    && rm -fr .git \
+RUN pip3 install -c $UPPER_CONSTRAINTS_FILE /opt/kuryr-kubernetes \
+    && cp /opt/kuryr-kubernetes/cni_ds_init /usr/bin/cni_ds_init \
+    && mkdir -p /etc/kuryr-cni \
+    && cp /opt/kuryr-kubernetes/etc/cni/net.d/* /etc/kuryr-cni \
     && dnf -y history undo last \
+    && rm -rf /opt/kuryr-kubernetes \
     && mkdir ${OSLO_LOCK_PATH}
-COPY ./cni_ds_init /usr/bin/cni_ds_init
-ARG CNI_CONFIG_DIR_PATH=/etc/cni/net.d
-ENV CNI_CONFIG_DIR_PATH ${CNI_CONFIG_DIR_PATH}
-ARG CNI_BIN_DIR_PATH=/opt/cni/bin
-ENV CNI_BIN_DIR_PATH ${CNI_BIN_DIR_PATH}
 ARG CNI_DAEMON=True
 ENV CNI_DAEMON ${CNI_DAEMON}
 ENV OSLO_LOCK_PATH=${OSLO_LOCK_PATH}
-VOLUME [ "/sys/fs/cgroup" ]
 ENTRYPOINT [ "cni_ds_init" ]

controller.Dockerfile

@@ -5,22 +5,19 @@ ARG UPPER_CONSTRAINTS_FILE="https://git.openstack.org/cgit/openstack/requirement
 RUN yum install -y epel-release \
     && yum install -y --setopt=tsflags=nodocs python-pip \
-    && yum install --setopt=tsflags=nodocs --assumeyes inet-tools gcc python-devel wget git \
-    && pip install -U setuptools
+    && yum install -y --setopt=tsflags=nodocs gcc python-devel git
 COPY . /opt/kuryr-kubernetes
-RUN cd /opt/kuryr-kubernetes \
-    && pip install -c $UPPER_CONSTRAINTS_FILE --no-cache-dir . \
-    && rm -fr .git \
+RUN pip install -c $UPPER_CONSTRAINTS_FILE --no-cache-dir /opt/kuryr-kubernetes \
     && yum -y history undo last \
+    && rm -rf /opt/kuryr-kubernetes \
     && groupadd -r kuryr -g 711 \
     && useradd -u 711 -g kuryr \
         -d /opt/kuryr-kubernetes \
         -s /sbin/nologin \
         -c "Kuryr controller user" \
-        kuryr \
-    && chown kuryr:kuryr /opt/kuryr-kubernetes
+        kuryr
 USER kuryr
 CMD ["--config-dir", "/etc/kuryr"]

controller_py3.Dockerfile

@@ -5,21 +5,19 @@ ARG UPPER_CONSTRAINTS_FILE="https://git.openstack.org/cgit/openstack/requirement
 RUN dnf update -y \
     && dnf install -y --setopt=tsflags=nodocs python3-pip \
-    && dnf install -y --setopt=tsflags=nodocs gcc python3-devel wget git
+    && dnf install -y --setopt=tsflags=nodocs gcc python3-devel git
 COPY . /opt/kuryr-kubernetes
-RUN cd /opt/kuryr-kubernetes \
-    && pip3 install -c $UPPER_CONSTRAINTS_FILE --no-cache-dir . \
-    && rm -fr .git \
+RUN pip3 install -c $UPPER_CONSTRAINTS_FILE --no-cache-dir /opt/kuryr-kubernetes \
     && dnf -y history undo last \
+    && rm -rf /opt/kuryr-kubernetes \
     && groupadd -r kuryr -g 711 \
     && useradd -u 711 -g kuryr \
         -d /opt/kuryr-kubernetes \
         -s /sbin/nologin \
         -c "Kuryr controller user" \
-        kuryr \
-    && chown kuryr:kuryr /opt/kuryr-kubernetes
+        kuryr
 USER kuryr
 CMD ["--config-dir", "/etc/kuryr"]

devstack/lib/kuryr_kubernetes

@@ -344,45 +344,36 @@ EOF
 # the local docker registry as kuryr/controller:latest and
 # kuryr/cni:latest respectively
 function build_kuryr_containers() {
-    local cni_buildtool_args
+    local cni_build_args
     local cni_daemon
     local build_dir
     local use_py3
-    local controller_dockerfile
+    local py_suffix
 
-    cni_buildtool_args="--bin-dir ${1} --conf-dir ${2}"
-    cni_daemon=$3
+    cni_daemon=$1
     build_dir="${DEST}/kuryr-kubernetes"
     pushd "$build_dir"
 
     use_py3=$(trueorfalse False KURYR_CONTAINERS_USE_PY3)
     if [[ "$use_py3" == "True" ]]; then
-        cni_buildtool_args="${cni_buildtool_args} --dockerfile cni_py3.Dockerfile"
-        controller_dockerfile="controller_py3.Dockerfile"
-    else
-        controller_dockerfile="controller.Dockerfile"
+        py_suffix="_py3"
     fi
 
     if [[ "$cni_daemon" == "False" ]]; then
-        cni_buildtool_args="${cni_buildtool_args} --no-daemon"
+        cni_build_args="--build-arg CNI_DAEMON=False"
     fi
 
-    if [[ "$CONTAINER_ENGINE" == "crio" ]]; then
-        cni_buildtool_args="${cni_buildtool_args} --podman"
-    fi
-
-    # Build controller image
+    # Build images
     # FIXME(dulek): Until https://github.com/containers/buildah/issues/1206 is
     #               resolved instead of podman we need to use buildah directly,
     #               hence this awful if clause.
     if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
-        sudo buildah bud -t docker.io/kuryr/controller -f "$controller_dockerfile" .
+        sudo buildah bud -t docker.io/kuryr/controller -f controller${py_suffix}.Dockerfile .
+        sudo buildah bud -t docker.io/kuryr/cni -f cni${py_suffix}.Dockerfile .
     else
-        container_runtime build -t kuryr/controller -f "$controller_dockerfile" .
+        container_runtime build -t kuryr/controller -f controller${py_suffix}.Dockerfile .
+        container_runtime build -t kuryr/cni -f cni${py_suffix}.Dockerfile ${cni_build_args} .
     fi
 
-    # Build CNI image
-    "./tools/build_cni_daemonset_image" $cni_buildtool_args
     popd
 }

devstack/plugin.sh

@@ -1032,9 +1032,9 @@ if [[ "$1" == "stack" && "$2" == "extra" ]]; then
     if is_service_enabled kuryr-kubernetes || [[ ${KURYR_FORCE_IMAGE_BUILD} == "True" ]]; then
         if [ "$KURYR_K8S_CONTAINERIZED_DEPLOYMENT" == "True" ]; then
             if is_service_enabled kuryr-daemon; then
-                build_kuryr_containers $CNI_BIN_DIR $CNI_CONF_DIR True
+                build_kuryr_containers True
             else
-                build_kuryr_containers $CNI_BIN_DIR $CNI_CONF_DIR False
+                build_kuryr_containers False
             fi
         fi
     fi

doc/source/installation/containerized.rst

@@ -13,15 +13,11 @@ For creating controller image on local machine: ::
 For creating cni daemonset image on local machine: ::
 
-    $ ./tools/build_cni_daemonset_image
+    $ docker build -t kuryr/cni -f cni.Dockerfile .
 
-You can customize the build by setting some options. In order to list them run: ::
-
-    $ ./tools/build_cni_daemonset_image -h
-
 If you want to run kuryr CNI without the daemon, build the image with: ::
 
-    $ ./tools/build_cni_daemonset_image --no-daemon
+    $ docker build -t kuryr/cni -f cni.Dockerfile --build-arg CNI_DAEMON=False .
 
 Alternatively, you can remove ``imagePullPolicy: Never`` from kuryr-controller
 Deployment and kuryr-cni DaemonSet definitions to use pre-built

tools/build_cni_daemonset_image

@@ -1,64 +0,0 @@
-#!/bin/bash -ex
-
-function print_usage() {
-    set +ex
-    echo "$0" "[options]"
-    if [[ -n "$1" ]]; then
-        echo "Option $1 not found"
-    fi
-    echo "Options -----------------------------"
-    echo "-h/--help        Displays this help message"
-    echo "-f/--dockerfile  Specify the Dockerfile to use for building the CNI container"
-    echo "-b/--bin-dir     Specify the path where to place the CNI executable"
-    echo "-c/--conf-dir    Specify the path where to place the CNI configuration"
-    echo "-t/--tag         Specify string to use as the tag part of the container image name, i.e., kuryr/cni:tag"
-    echo "-D/--no-daemon   Do not run CNI as a daemon"
-    echo "-p/--podman      Use podman instead of docker to build image"
-}
-
-for arg in "$@"; do
-    shift
-    case "$arg" in
-        "--help")       set -- "$@" "-h" ;;
-        "--bin-dir")    set -- "$@" "-b" ;;
-        "--conf-dir")   set -- "$@" "-c" ;;
-        "--dockerfile") set -- "$@" "-f" ;;
-        "--tag")        set -- "$@" "-t" ;;
-        "--no-daemon")  set -- "$@" "-D" ;;
-        "--podman")     set -- "$@" "-p" ;;
-        "--"*)          print_usage "$arg" >&2; exit 1 ;;
-        *)              set -- "$@" "$arg"
-    esac
-done
-
-#Default value
-dockerfile="cni.Dockerfile"
-image_name="kuryr/cni"
-daemonized="True"
-build_args=()
-build_cmd="docker build"
-
-OPTIND=1
-while getopts "hf:b:c:t:Dp" opt; do
-    case "$opt" in
-        "h") print_usage; exit 0 ;;
-        "D") daemonized=False ;;
-        "f") dockerfile=${OPTARG} ;;
-        "b") build_args+=('--build-arg' "CNI_BIN_DIR_PATH=${OPTARG}") ;;
-        "c") build_args+=('--build-arg' "CNI_CONFIG_DIR_PATH=${OPTARG}") ;;
-        # Until https://github.com/containers/buildah/issues/1206 is resolved
-        # we need to use buildah directly.
-        "p") build_cmd="sudo buildah bud" && image_name="docker.io/kuryr/cni" ;;
-        "t") image_name=${image_name}:${OPTARG} ;;
-        "?") print_usage >&2; exit 1 ;;
-    esac
-done
-shift $((OPTIND - 1))
-
-# create cni daemonset image
-${build_cmd} -t "$image_name" \
-    --build-arg "CNI_DAEMON=$daemonized" \
-    "${build_args[@]}" \
-    -f "$dockerfile" .