CRI-O support

This commit adds CRI-O support by changing the binary used to exec into
the CNI plugin container: runc is now tried first, with a fallback to
docker only when runc is not available.

Also adds DevStack support for installing and configuring Kubernetes
with CRI-O.

Implements: blueprint crio-support
Depends-On: Ib049d66058429e499f5d0932c4a749820bec73ff
Depends-On: Ic3c7d355a455298f43e37fb2aceddfd1e7eefaf2
Change-Id: I081edf0dbd4eb57826399c4820376381950080ed
Michał Dulko
2018-11-26 10:52:10 +01:00
parent 7ba64a87da
commit eecd44d335
5 changed files with 156 additions and 73 deletions

View File

@@ -16,20 +16,39 @@ finder="
 import json
 import sys
+
+mode = 'docker' if len(sys.argv) == 1 else sys.argv[1]
+if mode == 'docker':
+    label_key = 'Labels'
+    id_key = 'Id'
+else:
+    label_key = 'annotations'
+    id_key = 'id'
+
 containers=json.load(sys.stdin)
+# Looping over all the containers until we find the right one. We print it.
 for container in containers:
-    if ('Labels' in container and
-            container['Labels'].get('io.kubernetes.pod.name') == '${KURYR_CNI_POD_NAME}' and
-            container['Labels'].get('io.kubernetes.pod.namespace') == '${POD_NAMESPACE}' and
-            container['Labels'].get('io.kubernetes.docker.type') == 'container'):
-        print(container['Id'])
+    if (label_key in container and
+            container[label_key].get('io.kubernetes.pod.name') == '${KURYR_CNI_POD_NAME}' and
+            container[label_key].get('io.kubernetes.pod.namespace') == '${POD_NAMESPACE}' and
+            container[label_key].get('io.kubernetes.container.name') != 'POD'):
+        print(container[id_key])
         break
 "
-# TODO(dulek): We might want to fetch socket path from config.
-CONTAINERID=\`curl --unix-socket /var/run/docker.sock http://v1.24/containers/json 2> /dev/null | python -c "\${finder}"\`
 envs=(\$(env | grep ^CNI_))
+
+if command -v runc > /dev/null; then
+    # We have runc binary, let's see if that works.
+    CONTAINERID=\`runc list -f json 2> /dev/null | python -c "\${finder}" runc\`
+    if [[ ! -z \${CONTAINERID} ]]; then
+        exec runc exec \${envs[@]/#/--env } "\${CONTAINERID}" kuryr-cni --config-file /etc/kuryr/kuryr.conf
+    fi
+fi
+
+# Fall back to using Docker binary.
+# TODO(dulek): We might want to fetch socket path from config.
+CONTAINERID=\`curl --unix-socket /var/run/docker.sock http://v1.24/containers/json 2> /dev/null | python -c "\${finder}" docker\`
+
 docker exec \${envs[@]/#/--env } -i "\${CONTAINERID}" kuryr-cni --config-file /etc/kuryr/kuryr.conf
 EOF
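The finder takes a mode argument because the two runtimes expose pod metadata under different keys: Docker's /containers/json lists labels under "Labels" with IDs under "Id", while "runc list -f json" exposes the Kubernetes annotations under "annotations" with IDs under "id". And since CRI-O sets no io.kubernetes.docker.type label, the sandbox container is now skipped by checking the container name against 'POD' instead. A self-contained sketch of that difference follows; the two JSON documents are assumed representative shapes, not captures from a live host, and the finder here reads the pod identity from environment variables instead of shell interpolation purely to keep the sketch standalone:

    #!/bin/bash
    # Illustration only: sample JSON shapes for the two runtimes.
    export KURYR_CNI_POD_NAME=kuryr-cni-abc12 POD_NAMESPACE=kube-system

    # Docker API style: metadata under 'Labels', ID under 'Id'.
    docker_json='[{"Id": "f00d", "Labels": {"io.kubernetes.pod.name": "kuryr-cni-abc12",
      "io.kubernetes.pod.namespace": "kube-system", "io.kubernetes.container.name": "kuryr-cni"}}]'
    # runc style: metadata under 'annotations', ID under 'id'.
    runc_json='[{"id": "beef", "annotations": {"io.kubernetes.pod.name": "kuryr-cni-abc12",
      "io.kubernetes.pod.namespace": "kube-system", "io.kubernetes.container.name": "kuryr-cni"}}]'

    finder='
    import json, os, sys
    mode = "docker" if len(sys.argv) == 1 else sys.argv[1]
    label_key, id_key = ("Labels", "Id") if mode == "docker" else ("annotations", "id")
    for c in json.load(sys.stdin):
        if (label_key in c and
                c[label_key].get("io.kubernetes.pod.name") == os.environ["KURYR_CNI_POD_NAME"] and
                c[label_key].get("io.kubernetes.pod.namespace") == os.environ["POD_NAMESPACE"] and
                c[label_key].get("io.kubernetes.container.name") != "POD"):
            print(c[id_key])
            break
    '
    echo "$docker_json" | python -c "$finder"        # prints: f00d
    echo "$runc_json"   | python -c "$finder" runc   # prints: beef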

View File

@@ -117,8 +117,8 @@ function get_container {
     fi

     image="${image_name}:${version}"
-    if [ -z "$(docker images -q "$image")" ]; then
-        docker pull "$image"
+    if [ -z "$(container_runtime images -q "$image")" ]; then
+        container_runtime pull "$image"
     fi
 }
@@ -131,16 +131,17 @@ function run_container {
     # Runs a detached container and uses devstack's run process to monitor
     # its logs
     local name
-    local docker_bin
-    docker_bin=$(which docker)

     name="$1"
     shift
     args="$@"

-    $docker_bin create --name $name $args
+    container_runtime create --name $name $args

-    run_process "$name" \
-        "$docker_bin start --attach $name"
+    if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
+        run_process "$name" "$(which podman) start --attach $name" root root
+    else
+        run_process "$name" "$(which docker) start --attach $name"
+    fi
 }

 # stop_container
@@ -151,8 +152,8 @@ function stop_container {
     local name

     name="$1"
-    docker kill "$name"
-    docker rm "$name"
+    container_runtime kill "$name"
+    container_runtime rm "$name"
     stop_process "$name"
 }
@@ -366,12 +367,22 @@ function build_kuryr_containers() {
         cni_buildtool_args="${cni_buildtool_args} --no-daemon"
     fi

+    if [[ "$CONTAINER_ENGINE" == "crio" ]]; then
+        cni_buildtool_args="${cni_buildtool_args} --podman"
+    fi
+
     # Build controller image
-    sudo docker build \
-        -t kuryr/controller -f "$controller_dockerfile" .
+    # FIXME(dulek): Until https://github.com/containers/buildah/issues/1206 is
+    # resolved we need to use buildah directly instead of podman, hence this
+    # awful if clause.
+    if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
+        sudo buildah bud -t docker.io/kuryr/controller -f "$controller_dockerfile" .
+    else
+        container_runtime build -t kuryr/controller -f "$controller_dockerfile" .
+    fi

     # Build CNI image
-    sudo "./tools/build_cni_daemonset_image" $cni_buildtool_args
+    "./tools/build_cni_daemonset_image" $cni_buildtool_args
     popd
 }
@@ -1363,6 +1374,7 @@ function docker_install_ca_certs {
     local registry_hostnames
     local destdir

+    # TODO(dulek): Support for CRI-O.
     registry_hostnames=(${1//,/ })
     for hostname in ${registry_hostnames[@]}; do
         destdir="/etc/docker/certs.d/${hostname}:5000"
@@ -1393,6 +1405,7 @@ function configure_and_run_registry {
     local registry_ip
     local hostnames

+    # TODO(dulek): Support for CRI-O.
     service_cidr=$(openstack --os-cloud devstack-admin \
         --os-region "$REGION_NAME" \
         subnet show "$KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET" \

View File

@@ -11,6 +11,14 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+function container_runtime {
+    if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
+        sudo podman "$@"
+    else
+        docker "$@"
+    fi
+}
+
 function create_kuryr_account {
     if is_service_enabled kuryr-kubernetes; then
         create_service_user "kuryr" "admin"
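With this shim in place the rest of the plugin stays engine-agnostic: the same call site dispatches to either binary based on CONTAINER_ENGINE. A usage sketch (the image name is a made-up example):

    CONTAINER_ENGINE=crio
    container_runtime pull quay.io/example/image   # runs: sudo podman pull quay.io/example/image
    CONTAINER_ENGINE=docker
    container_runtime pull quay.io/example/image   # runs: docker pull quay.io/example/image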
@@ -551,59 +559,71 @@ function run_k8s_api {
         cluster_ip_range="$service_cidr"
     fi

-    run_container kubernetes-api \
-        --net host \
-        --restart on-failure \
-        --volume="${KURYR_HYPERKUBE_DATA_DIR}:/srv/kubernetes:rw" \
-        "${KURYR_HYPERKUBE_IMAGE}:${KURYR_HYPERKUBE_VERSION}" \
-        /hyperkube apiserver \
-        --service-cluster-ip-range="${cluster_ip_range}" \
-        --insecure-bind-address=0.0.0.0 \
-        --insecure-port="${KURYR_K8S_API_PORT}" \
-        --etcd-servers="http://${SERVICE_HOST}:${ETCD_PORT}" \
-        --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota \
-        --client-ca-file=/srv/kubernetes/ca.crt \
-        --basic-auth-file=/srv/kubernetes/basic_auth.csv \
-        --min-request-timeout=300 \
-        --tls-cert-file=/srv/kubernetes/server.cert \
-        --tls-private-key-file=/srv/kubernetes/server.key \
-        --token-auth-file=/srv/kubernetes/known_tokens.csv \
-        --allow-privileged=true \
-        --v=2 \
-        --logtostderr=true
+    local command
+    command=(--net=host
+             --volume=${KURYR_HYPERKUBE_DATA_DIR}:/srv/kubernetes:rw)
+    if [[ ${CONTAINER_ENGINE} == 'docker' ]]; then
+        command+=(--restart=on-failure)
+    fi
+    command+=(${KURYR_HYPERKUBE_IMAGE}:${KURYR_HYPERKUBE_VERSION}
+              /hyperkube apiserver
+              --service-cluster-ip-range=${cluster_ip_range}
+              --insecure-bind-address=0.0.0.0
+              --insecure-port=${KURYR_K8S_API_PORT}
+              --etcd-servers=http://${SERVICE_HOST}:${ETCD_PORT}
+              --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota
+              --client-ca-file=/srv/kubernetes/ca.crt
+              --basic-auth-file=/srv/kubernetes/basic_auth.csv
+              --min-request-timeout=300
+              --tls-cert-file=/srv/kubernetes/server.cert
+              --tls-private-key-file=/srv/kubernetes/server.key
+              --token-auth-file=/srv/kubernetes/known_tokens.csv
+              --allow-privileged=true
+              --v=2
+              --logtostderr=true)
+    run_container kubernetes-api "${command[@]}"
 }

 function run_k8s_controller_manager {
     # Runs Hyperkube's Kubernetes controller manager
     wait_for "Kubernetes API Server" "$KURYR_K8S_API_URL"

-    run_container kubernetes-controller-manager \
-        --net host \
-        --volume="${KURYR_HYPERKUBE_DATA_DIR}:/srv/kubernetes:rw" \
-        --restart on-failure \
-        "${KURYR_HYPERKUBE_IMAGE}:${KURYR_HYPERKUBE_VERSION}" \
-        /hyperkube controller-manager \
-        --master="$KURYR_K8S_API_URL" \
-        --service-account-private-key-file=/srv/kubernetes/server.key \
-        --root-ca-file=/srv/kubernetes/ca.crt \
-        --min-resync-period=3m \
-        --v=2 \
-        --logtostderr=true
+    local command
+    command=(--net=host
+             --volume=${KURYR_HYPERKUBE_DATA_DIR}:/srv/kubernetes:rw)
+    if [[ ${CONTAINER_ENGINE} == 'docker' ]]; then
+        command+=(--restart=on-failure)
+    fi
+    command+=(${KURYR_HYPERKUBE_IMAGE}:${KURYR_HYPERKUBE_VERSION}
+              /hyperkube controller-manager
+              --master=$KURYR_K8S_API_URL
+              --service-account-private-key-file=/srv/kubernetes/server.key
+              --root-ca-file=/srv/kubernetes/ca.crt
+              --min-resync-period=3m
+              --v=2
+              --logtostderr=true)
+    run_container kubernetes-controller-manager "${command[@]}"
 }

 function run_k8s_scheduler {
     # Runs Hyperkube's Kubernetes scheduler
     wait_for "Kubernetes API Server" "$KURYR_K8S_API_URL"

-    run_container kubernetes-scheduler \
-        --net host \
-        --volume="${KURYR_HYPERKUBE_DATA_DIR}:/srv/kubernetes:rw" \
-        --restart on-failure \
-        "${KURYR_HYPERKUBE_IMAGE}:${KURYR_HYPERKUBE_VERSION}" \
-        /hyperkube scheduler \
-        --master="$KURYR_K8S_API_URL" \
-        --v=2 \
-        --logtostderr=true
+    local command
+    command=(--net=host
+             --volume=${KURYR_HYPERKUBE_DATA_DIR}:/srv/kubernetes:rw)
+    if [[ ${CONTAINER_ENGINE} == 'docker' ]]; then
+        command+=(--restart=on-failure)
+    fi
+    command+=(${KURYR_HYPERKUBE_IMAGE}:${KURYR_HYPERKUBE_VERSION}
+              /hyperkube scheduler
+              --master=$KURYR_K8S_API_URL
+              --v=2
+              --logtostderr=true)
+    run_container kubernetes-scheduler "${command[@]}"
 }

 function prepare_kubeconfig {
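Note that --restart=on-failure is now added only under Docker, presumably because podman at the time of this commit did not yet support restart policies; under CRI-O the containers are supervised through DevStack's run_process instead. For the scheduler, the assembled invocation comes out roughly as follows (image, data directory, and API URL are assumed example values):

    # CONTAINER_ENGINE=docker
    docker create --name kubernetes-scheduler --net=host \
        --volume=/opt/stack/data/hyperkube:/srv/kubernetes:rw --restart=on-failure \
        gcr.io/google-containers/hyperkube-amd64:v1.12.3 \
        /hyperkube scheduler --master=http://127.0.0.1:8080 --v=2 --logtostderr=true

    # CONTAINER_ENGINE=crio -- same command minus the restart policy:
    sudo podman create --name kubernetes-scheduler --net=host \
        --volume=/opt/stack/data/hyperkube:/srv/kubernetes:rw \
        gcr.io/google-containers/hyperkube-amd64:v1.12.3 \
        /hyperkube scheduler --master=http://127.0.0.1:8080 --v=2 --logtostderr=true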
@@ -622,16 +642,24 @@ function extract_hyperkube {
     tmp_loopback_cni_path="/tmp/loopback"
     tmp_nsenter_path="/tmp/nsenter"

-    hyperkube_container=$(docker run -d \
+    hyperkube_container=$(container_runtime run -d \
         --net host \
         "${KURYR_HYPERKUBE_IMAGE}:${KURYR_HYPERKUBE_VERSION}" \
         /bin/false)
-    docker cp "${hyperkube_container}:/hyperkube" "$tmp_hyperkube_path"
-    docker cp "${hyperkube_container}:/opt/cni/bin/loopback" \
-        "$tmp_loopback_cni_path"
-    docker cp "${hyperkube_container}:/usr/bin/nsenter" "$tmp_nsenter_path"
+    if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
+        mnt=`container_runtime mount "${hyperkube_container}"`
+        sudo cp "${mnt}/hyperkube" "$tmp_hyperkube_path"
+        sudo cp "${mnt}/opt/cni/bin/loopback" "$tmp_loopback_cni_path"
+        sudo cp "${mnt}/usr/bin/nsenter" "$tmp_nsenter_path"
+        container_runtime umount ${hyperkube_container}
+    else
+        container_runtime cp "${hyperkube_container}:/hyperkube" "$tmp_hyperkube_path"
+        container_runtime cp "${hyperkube_container}:/opt/cni/bin/loopback" \
+            "$tmp_loopback_cni_path"
+        container_runtime cp "${hyperkube_container}:/usr/bin/nsenter" "$tmp_nsenter_path"
+    fi

-    docker rm --force "$hyperkube_container"
+    container_runtime rm --force "$hyperkube_container"
     sudo install -o "$STACK_USER" -m 0555 -D "$tmp_hyperkube_path" \
         "$KURYR_HYPERKUBE_BINARY"
     sudo install -o "$STACK_USER" -m 0555 -D "$tmp_loopback_cni_path" \
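The CRI-O branch avoids "cp" as a subcommand because podman of that era lacked one; instead it mounts the container's root filesystem on the host and copies files out with plain cp. The pattern in isolation (image name assumed):

    ctr=$(sudo podman create gcr.io/google-containers/hyperkube-amd64:v1.12.3 /bin/false)
    mnt=$(sudo podman mount "$ctr")       # prints the container's rootfs path on the host
    sudo cp "${mnt}/hyperkube" /tmp/hyperkube
    sudo podman umount "$ctr"
    sudo podman rm "$ctr"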
@@ -663,16 +691,12 @@ function run_k8s_kubelet {
     # adding Python and all our CNI/binding dependencies.
     local command
     local minor_version
-    local cgroup_driver
-
-    cgroup_driver="$(docker info|awk '/Cgroup/ {print $NF}')"

     sudo mkdir -p "${KURYR_HYPERKUBE_DATA_DIR}/"{kubelet,kubelet.cert}
     command="$KURYR_HYPERKUBE_BINARY kubelet\
         --kubeconfig=${HOME}/.kube/config \
         --allow-privileged=true \
         --v=2 \
-        --cgroup-driver=$cgroup_driver \
         --address=0.0.0.0 \
         --enable-server \
         --network-plugin=cni \
@@ -681,6 +705,22 @@ function run_k8s_kubelet {
         --cert-dir=${KURYR_HYPERKUBE_DATA_DIR}/kubelet.cert \
         --root-dir=${KURYR_HYPERKUBE_DATA_DIR}/kubelet"

+    if [[ ${CONTAINER_ENGINE} == 'docker' ]]; then
+        command+=" --cgroup-driver $(docker info|awk '/Cgroup/ {print $NF}')"
+    elif [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
+        local crio_conf
+        crio_conf=/etc/crio/crio.conf
+
+        command+=" --cgroup-driver=$(iniget ${crio_conf} crio.runtime cgroup_manager)"
+        command+=" --container-runtime=remote --container-runtime-endpoint=unix:///var/run/crio/crio.sock --runtime-request-timeout=10m"
+
+        # We need to reconfigure CRI-O in this case as well.
+        # FIXME(dulek): This should probably go to devstack-plugin-container
+        iniset -sudo ${crio_conf} crio.network network_dir \"${CNI_CONF_DIR}\"
+        iniset -sudo ${crio_conf} crio.network plugin_dir \"${CNI_BIN_DIR}\"
+        sudo systemctl --no-block restart crio.service
+    fi
+
     declare -r min_not_require_kubeconfig_ver="1.10.0"
     if [[ "$KURYR_HYPERKUBE_VERSION" == "$(echo -e "${KURYR_HYPERKUBE_VERSION}\n${min_not_require_kubeconfig_ver}" | sort -V | head -n 1)" ]]; then
         # Version 1.10 did away with that config option
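Under CRI-O the kubelet has to agree with the runtime on the cgroup manager and speak CRI over CRI-O's socket, which is why the script reads cgroup_manager back from crio.conf with DevStack's iniget rather than asking Docker. Assuming a typical crio.conf (values vary per host), the relevant fragment and the flags this branch appends look like:

    # Assumed /etc/crio/crio.conf fragment:
    [crio.runtime]
    cgroup_manager = "systemd"

    [crio.network]
    network_dir = "/etc/cni/net.d/"
    plugin_dir = "/opt/cni/bin/"

    # Flags the crio branch then appends to the kubelet command:
    --cgroup-driver=systemd \
    --container-runtime=remote \
    --container-runtime-endpoint=unix:///var/run/crio/crio.sock \
    --runtime-request-timeout=10m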

View File

@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    Added support for using CRI-O (along with podman and buildah) as the
+    container engine, both for container images and in DevStack.

View File

@@ -13,6 +13,7 @@ function print_usage() {
     echo "-c/--conf-dir Specify the path where to place the CNI configuration"
     echo "-t/--tag Specify string to use as the tag part of the container image name, i.e., kuryr/cni:tag"
     echo "-D/--no-daemon Do not run CNI as a daemon"
+    echo "-p/--podman Use podman instead of docker to build image"
 }

 for arg in "$@"; do
@@ -24,6 +25,7 @@ for arg in "$@"; do
         "--dockerfile") set -- "$@" "-f" ;;
         "--tag") set -- "$@" "-t" ;;
         "--no-daemon") set -- "$@" "-D" ;;
+        "--podman") set -- "$@" "-p" ;;
         "--"*) print_usage "$arg" >&2; exit 1 ;;
         *) set -- "$@" "$arg"
     esac
@@ -34,16 +36,20 @@ dockerfile="cni.Dockerfile"
 image_name="kuryr/cni"
 daemonized="True"
 build_args=()
+build_cmd="docker build"

 OPTIND=1
-while getopts "hf:b:c:t:D" opt; do
+while getopts "hf:b:c:t:Dp" opt; do
     case "$opt" in
         "h") print_usage; exit 0 ;;
         "D") daemonized=False ;;
         "f") dockerfile=${OPTARG} ;;
         "b") build_args+=('--build-arg' "CNI_BIN_DIR_PATH=${OPTARG}") ;;
         "c") build_args+=('--build-arg' "CNI_CONFIG_DIR_PATH=${OPTARG}") ;;
+        # Until https://github.com/containers/buildah/issues/1206 is resolved
+        # we need to use buildah directly.
+        "p") build_cmd="sudo buildah bud" && image_name="docker.io/kuryr/cni" ;;
         "t") image_name=${image_name}:${OPTARG} ;;
         "?") print_usage >&2; exit 1 ;;
     esac
@@ -52,7 +58,7 @@ done
 shift $((OPTIND - 1))

 # create cni daemonset image
-docker build -t "$image_name" \
+${build_cmd} -t "$image_name" \
     --build-arg "CNI_DAEMON=$daemonized" \
     "${build_args[@]}" \
     -f "$dockerfile" .