diff --git a/magnum/api/controllers/v1/cluster_actions.py b/magnum/api/controllers/v1/cluster_actions.py index e4fd7aa5ee..e08266242b 100644 --- a/magnum/api/controllers/v1/cluster_actions.py +++ b/magnum/api/controllers/v1/cluster_actions.py @@ -151,6 +151,7 @@ class ActionsController(base.Controller): else: nodegroup = objects.NodeGroup.get( context, cluster.uuid, cluster_upgrade_req.nodegroup) + pecan.request.rpcapi.cluster_upgrade( cluster, new_cluster_template, diff --git a/magnum/drivers/common/templates/fragments/configure-docker-registry.sh b/magnum/drivers/common/templates/fragments/configure-docker-registry.sh index f3cfcfa5b7..f817368a92 100644 --- a/magnum/drivers/common/templates/fragments/configure-docker-registry.sh +++ b/magnum/drivers/common/templates/fragments/configure-docker-registry.sh @@ -2,11 +2,9 @@ . /etc/sysconfig/heat-params -if [ "$REGISTRY_ENABLED" = "False" ]; then - exit 0 -fi - -cat > /etc/sysconfig/registry-config.yml << EOF +if [ "$REGISTRY_ENABLED" = "True" ]; then + ssh_cmd="ssh -F /srv/magnum/.ssh/config root@localhost" + $ssh_cmd cat > /etc/sysconfig/registry-config.yml << EOF version: 0.1 log: fields: @@ -28,7 +26,7 @@ http: addr: :5000 EOF -cat > /etc/systemd/system/registry.service << EOF + $ssh_cmd cat > /etc/systemd/system/registry.service << EOF [Unit] Description=Docker registry v2 Requires=docker.service @@ -43,3 +41,5 @@ ExecStop=/usr/bin/docker rm -f registry [Install] WantedBy=multi-user.target EOF + +fi \ No newline at end of file diff --git a/magnum/drivers/common/templates/fragments/configure-docker-storage.sh b/magnum/drivers/common/templates/fragments/configure-docker-storage.sh index 87b803825b..b3a4df41c0 100644 --- a/magnum/drivers/common/templates/fragments/configure-docker-storage.sh +++ b/magnum/drivers/common/templates/fragments/configure-docker-storage.sh @@ -2,22 +2,24 @@ . 
/etc/sysconfig/heat-params +ssh_cmd="ssh -F /srv/magnum/.ssh/config root@localhost" + if [ -n "$DOCKER_VOLUME_SIZE" ] && [ "$DOCKER_VOLUME_SIZE" -gt 0 ]; then if [ "$ENABLE_CINDER" == "False" ]; then # FIXME(yuanying): Use ephemeral disk for docker storage # Currently Ironic doesn't support cinder volumes, # so we must use preserved ephemeral disk instead of a cinder volume. - device_path=$(readlink -f /dev/disk/by-label/ephemeral0) + device_path=$($ssh_cmd readlink -f /dev/disk/by-label/ephemeral0) else attempts=60 while [ ${attempts} -gt 0 ]; do - device_name=$(ls /dev/disk/by-id | grep ${DOCKER_VOLUME:0:20}$) + device_name=$($ssh_cmd ls /dev/disk/by-id | grep ${DOCKER_VOLUME:0:20}$) if [ -n "${device_name}" ]; then break fi echo "waiting for disk device" sleep 0.5 - udevadm trigger + $ssh_cmd udevadm trigger let attempts-- done diff --git a/magnum/drivers/common/templates/fragments/configure_docker_storage_driver_atomic.sh b/magnum/drivers/common/templates/fragments/configure_docker_storage_driver_atomic.sh index db46b94df8..ee1aaba5fd 100644 --- a/magnum/drivers/common/templates/fragments/configure_docker_storage_driver_atomic.sh +++ b/magnum/drivers/common/templates/fragments/configure_docker_storage_driver_atomic.sh @@ -4,12 +4,14 @@ # * Remove any existing docker-storage configuration. In case of an # existing configuration, docker-storage-setup will fail. 
# * Remove docker storage graph +ssh_cmd="ssh -F /srv/magnum/.ssh/config root@localhost" + clear_docker_storage () { # stop docker - systemctl stop docker - systemctl disable docker-storage-setup + $ssh_cmd systemctl stop docker + $ssh_cmd systemctl disable docker-storage-setup # clear storage graph - rm -rf /var/lib/docker/* + $ssh_cmd rm -rf /var/lib/docker/* if [ -f /etc/sysconfig/docker-storage ]; then sed -i "/^DOCKER_STORAGE_OPTIONS=/ s/=.*/=/" /etc/sysconfig/docker-storage @@ -21,9 +23,9 @@ configure_storage_driver_generic() { clear_docker_storage if [ -n "$DOCKER_VOLUME_SIZE" ] && [ "$DOCKER_VOLUME_SIZE" -gt 0 ]; then - mkfs.xfs -f ${device_path} + $ssh_cmd mkfs.xfs -f ${device_path} echo "${device_path} /var/lib/docker xfs defaults 0 0" >> /etc/fstab - mount -a + $ssh_cmd mount -a fi echo "DOCKER_STORAGE_OPTIONS=\"--storage-driver $1\"" > /etc/sysconfig/docker-storage @@ -38,8 +40,8 @@ configure_devicemapper () { if [ -n "$DOCKER_VOLUME_SIZE" ] && [ "$DOCKER_VOLUME_SIZE" -gt 0 ]; then - pvcreate -f ${device_path} - vgcreate docker ${device_path} + $ssh_cmd pvcreate -f ${device_path} + $ssh_cmd vgcreate docker ${device_path} echo "VG=docker" >> /etc/sysconfig/docker-storage-setup else @@ -47,5 +49,5 @@ configure_devicemapper () { echo "DATA_SIZE=95%FREE" >> /etc/sysconfig/docker-storage-setup fi - docker-storage-setup + $ssh_cmd docker-storage-setup } diff --git a/magnum/drivers/common/templates/fragments/enable-docker-registry.sh b/magnum/drivers/common/templates/fragments/enable-docker-registry.sh index abc3c473a3..1eb8001b74 100644 --- a/magnum/drivers/common/templates/fragments/enable-docker-registry.sh +++ b/magnum/drivers/common/templates/fragments/enable-docker-registry.sh @@ -6,7 +6,9 @@ if [ "$REGISTRY_ENABLED" = "False" ]; then exit 0 fi +ssh_cmd="ssh -F /srv/magnum/.ssh/config root@localhost" + echo "starting docker registry ..." 
-systemctl daemon-reload -systemctl enable registry -systemctl --no-block start registry +$ssh_cmd systemctl daemon-reload +$ssh_cmd systemctl enable registry +$ssh_cmd systemctl --no-block start registry diff --git a/magnum/drivers/common/templates/kubernetes/fragments/add-proxy.sh b/magnum/drivers/common/templates/kubernetes/fragments/add-proxy.sh index a5198e44dd..06ba69f3c6 100644 --- a/magnum/drivers/common/templates/kubernetes/fragments/add-proxy.sh +++ b/magnum/drivers/common/templates/kubernetes/fragments/add-proxy.sh @@ -1,6 +1,10 @@ #!/bin/sh +set +x . /etc/sysconfig/heat-params +set -x + +ssh_cmd="ssh -F /srv/magnum/.ssh/config root@localhost" DOCKER_HTTP_PROXY_CONF=/etc/systemd/system/docker.service.d/http_proxy.conf @@ -60,6 +64,6 @@ EOF fi if [ "$DOCKER_RESTART" -eq 1 ]; then - systemctl daemon-reload - systemctl --no-block restart docker.service + $ssh_cmd systemctl daemon-reload + $ssh_cmd systemctl --no-block restart docker.service fi diff --git a/magnum/drivers/common/templates/kubernetes/fragments/configure-etcd.sh b/magnum/drivers/common/templates/kubernetes/fragments/configure-etcd.sh index ca485cab48..014b33b537 100644 --- a/magnum/drivers/common/templates/kubernetes/fragments/configure-etcd.sh +++ b/magnum/drivers/common/templates/kubernetes/fragments/configure-etcd.sh @@ -4,6 +4,8 @@ set -x +ssh_cmd="ssh -F /srv/magnum/.ssh/config root@localhost" + if [ ! 
-z "$HTTP_PROXY" ]; then export HTTP_PROXY fi @@ -20,13 +22,13 @@ if [ -n "$ETCD_VOLUME_SIZE" ] && [ "$ETCD_VOLUME_SIZE" -gt 0 ]; then attempts=60 while [ ${attempts} -gt 0 ]; do - device_name=$(ls /dev/disk/by-id | grep ${ETCD_VOLUME:0:20}$) + device_name=$($ssh_cmd ls /dev/disk/by-id | grep ${ETCD_VOLUME:0:20}$) if [ -n "${device_name}" ]; then break fi echo "waiting for disk device" sleep 0.5 - udevadm trigger + $ssh_cmd udevadm trigger let attempts-- done @@ -36,20 +38,20 @@ if [ -n "$ETCD_VOLUME_SIZE" ] && [ "$ETCD_VOLUME_SIZE" -gt 0 ]; then fi device_path=/dev/disk/by-id/${device_name} - fstype=$(blkid -s TYPE -o value ${device_path}) + fstype=$($ssh_cmd blkid -s TYPE -o value ${device_path} || echo "") if [ "${fstype}" != "xfs" ]; then - mkfs.xfs -f ${device_path} + $ssh_cmd mkfs.xfs -f ${device_path} fi - mkdir -p /var/lib/etcd + $ssh_cmd mkdir -p /var/lib/etcd echo "${device_path} /var/lib/etcd xfs defaults 0 0" >> /etc/fstab - mount -a - chown -R etcd.etcd /var/lib/etcd - chmod 755 /var/lib/etcd + $ssh_cmd mount -a + $ssh_cmd chown -R etcd.etcd /var/lib/etcd + $ssh_cmd chmod 755 /var/lib/etcd fi _prefix=${CONTAINER_INFRA_PREFIX:-docker.io/openstackmagnum/} -atomic install \ +$ssh_cmd atomic install \ --system-package no \ --system \ --storage ostree \ diff --git a/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh b/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh index ea5cc8bb79..ae0b11fd19 100644 --- a/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh +++ b/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh @@ -4,6 +4,8 @@ echo "configuring kubernetes (master)" +ssh_cmd="ssh -F /srv/magnum/.ssh/config root@localhost" + if [ ! 
-z "$HTTP_PROXY" ]; then export HTTP_PROXY fi @@ -18,7 +20,7 @@ fi _prefix=${CONTAINER_INFRA_PREFIX:-docker.io/openstackmagnum/} -mkdir -p /opt/cni +$ssh_cmd mkdir -p /opt/cni _addtl_mounts=',{"type":"bind","source":"/opt/cni","destination":"/opt/cni","options":["bind","rw","slave","mode=777"]}' if [ "$NETWORK_DRIVER" = "calico" ]; then @@ -36,11 +38,19 @@ EOF fi fi -atomic install --storage ostree --system --set=ADDTL_MOUNTS=${_addtl_mounts} --system-package=no --name=kubelet ${_prefix}kubernetes-kubelet:${KUBE_TAG} + +mkdir -p /srv/magnum/kubernetes/ +cat > /srv/magnum/kubernetes/install-kubernetes.sh <> ${KUBELET_KUBECONFIG} apiVersion: v1 clusters: @@ -241,9 +251,9 @@ KUBELET_ARGS="${KUBELET_ARGS} --client-ca-file=${CERT_DIR}/ca.crt --tls-cert-fil # specified cgroup driver KUBELET_ARGS="${KUBELET_ARGS} --cgroup-driver=${CGROUP_DRIVER}" -systemctl disable docker -if cat /usr/lib/systemd/system/docker.service | grep 'native.cgroupdriver'; then - cp /usr/lib/systemd/system/docker.service /etc/systemd/system/ +$ssh_cmd systemctl disable docker +if $ssh_cmd cat /usr/lib/systemd/system/docker.service | grep 'native.cgroupdriver'; then + $ssh_cmd cp /usr/lib/systemd/system/docker.service /etc/systemd/system/ sed -i "s/\(native.cgroupdriver=\)\w\+/\1$CGROUP_DRIVER/" \ /etc/systemd/system/docker.service else @@ -253,8 +263,8 @@ EOF fi -systemctl daemon-reload -systemctl enable docker +$ssh_cmd systemctl daemon-reload +$ssh_cmd systemctl enable docker if [ -z "${KUBE_NODE_IP}" ]; then KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4) @@ -267,4 +277,3 @@ sed -i ' /^KUBELET_HOSTNAME=/ s/=.*/=""/ /^KUBELET_ARGS=/ s|=.*|="'"\$(/etc/kubernetes/get_require_kubeconfig.sh) ${KUBELET_ARGS}"'"| ' /etc/kubernetes/kubelet - diff --git a/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh b/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh index 33652090fc..6cc86a8d34 100644 --- 
a/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh +++ b/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh @@ -1,6 +1,10 @@ -#!/bin/sh -x +#!/bin/sh +set +x . /etc/sysconfig/heat-params +set -x + +ssh_cmd="ssh -F /srv/magnum/.ssh/config root@localhost" echo "configuring kubernetes (minion)" @@ -19,11 +23,11 @@ fi _prefix=${CONTAINER_INFRA_PREFIX:-docker.io/openstackmagnum/} _addtl_mounts='' -mkdir -p /opt/cni +$ssh_cmd mkdir -p /opt/cni _addtl_mounts=',{"type":"bind","source":"/opt/cni","destination":"/opt/cni","options":["bind","rw","slave","mode=777"]}' if [ "$NETWORK_DRIVER" = "calico" ]; then - if [ "`systemctl status NetworkManager.service | grep -o "Active: active"`" = "Active: active" ]; then + if [ "$($ssh_cmd systemctl status NetworkManager.service | grep -o "Active: active")" = "Active: active" ]; then CALICO_NM=/etc/NetworkManager/conf.d/calico.conf [ -f ${CALICO_NM} ] || { echo "Writing File: $CALICO_NM" @@ -33,22 +37,26 @@ if [ "$NETWORK_DRIVER" = "calico" ]; then unmanaged-devices=interface-name:cali*;interface-name:tunl* EOF } - systemctl restart NetworkManager + $ssh_cmd systemctl restart NetworkManager fi fi -atomic install --storage ostree --system --system-package=no --set=ADDTL_MOUNTS=${_addtl_mounts} --name=kubelet ${_prefix}kubernetes-kubelet:${KUBE_TAG} +mkdir -p /srv/magnum/kubernetes/ +cat > /srv/magnum/kubernetes/install-kubernetes.sh <> ${KUBELET_KUBECONFIG} apiVersion: v1 clusters: @@ -110,8 +118,8 @@ if [ "$TLS_DISABLED" = "True" ]; then sed -i 's/^.*certificate-authority.*$//' ${KUBELET_KUBECONFIG} fi -chmod 0644 ${KUBELET_KUBECONFIG} -chmod 0644 ${PROXY_KUBECONFIG} +chmod 0640 ${KUBELET_KUBECONFIG} +chmod 0640 ${PROXY_KUBECONFIG} sed -i ' /^KUBE_ALLOW_PRIV=/ s/=.*/="--allow-privileged='"$KUBE_ALLOW_PRIV"'"/ @@ -136,11 +144,6 @@ if [ "$(echo "${CLOUD_PROVIDER_ENABLED}" | tr '[:upper:]' '[:lower:]')" = "true" KUBELET_ARGS="${KUBELET_ARGS} --cloud-provider=external" fi 
-# Workaround for Cinder support (fixed in k8s >= 1.6) -if [ ! -f /usr/bin/udevadm ]; then - ln -s /sbin/udevadm /usr/bin/udevadm -fi - # For using default log-driver, other options should be ignored sed -i 's/\-\-log\-driver\=journald//g' /etc/sysconfig/docker @@ -158,9 +161,9 @@ if [ "$(echo $AUTO_HEALING_ENABLED | tr '[:upper:]' '[:lower:]')" = "true" ]; th KUBELET_ARGS="${KUBELET_ARGS} --node-labels=draino-enabled=true" fi -systemctl disable docker -if cat /usr/lib/systemd/system/docker.service | grep 'native.cgroupdriver'; then - cp /usr/lib/systemd/system/docker.service /etc/systemd/system/ +$ssh_cmd systemctl disable docker +if $ssh_cmd cat /usr/lib/systemd/system/docker.service | grep 'native.cgroupdriver'; then + $ssh_cmd "cp /usr/lib/systemd/system/docker.service /etc/systemd/system/" sed -i "s/\(native.cgroupdriver=\)\w\+/\1$CGROUP_DRIVER/" \ /etc/systemd/system/docker.service else @@ -170,8 +173,8 @@ EOF fi -systemctl daemon-reload -systemctl enable docker +$ssh_cmd systemctl daemon-reload +$ssh_cmd systemctl enable docker cat > /etc/kubernetes/get_require_kubeconfig.sh <> /etc/environment < $CA_CERT # Generate client's private key and csr - openssl genrsa -out "${_KEY}" 4096 + $ssh_cmd openssl genrsa -out "${_KEY}" 4096 chmod 400 "${_KEY}" - openssl req -new -days 1000 \ + $ssh_cmd openssl req -new -days 1000 \ -key "${_KEY}" \ -out "${_CSR}" \ -reqexts req_ext \ @@ -96,8 +101,8 @@ EOF } #Kubelet Certs -INSTANCE_NAME=$(hostname --short | sed 's/\.novalocal//') -HOSTNAME=$(hostname) +INSTANCE_NAME=$(cat /etc/hostname | head -1 | sed 's/\.novalocal//') +HOSTNAME=$(cat /etc/hostname | head -1) cat > ${cert_dir}/kubelet.conf < ${CA_CERT} # Generate server's private key and csr - openssl genrsa -out "${_KEY}" 4096 + $ssh_cmd openssl genrsa -out "${_KEY}" 4096 chmod 400 "${_KEY}" - openssl req -new -days 1000 \ + $ssh_cmd openssl req -new -days 1000 \ -key "${_KEY}" \ -out "${_CSR}" \ -reqexts req_ext \ @@ -147,7 +149,7 @@ extendedKeyUsage = 
clientAuth,serverAuth EOF #Kubelet Certs -INSTANCE_NAME=$(hostname --short | sed 's/\.novalocal//') +INSTANCE_NAME=$(cat /etc/hostname | head -1 | sed 's/\.novalocal//') cat > ${cert_dir}/kubelet.conf < ${cert_dir}/service_account_priv # Common certs and key are created for both etcd and kubernetes services. # Both etcd and kube user should have permission to access the certs and key. -groupadd kube_etcd -usermod -a -G kube_etcd etcd -usermod -a -G kube_etcd kube -chmod 550 "${cert_dir}" -chown -R kube:kube_etcd "${cert_dir}" -chmod 440 $cert_dir/server.key -mkdir -p /etc/etcd/certs -cp ${cert_dir}/* /etc/etcd/certs +$ssh_cmd groupadd kube_etcd +$ssh_cmd usermod -a -G kube_etcd etcd +$ssh_cmd usermod -a -G kube_etcd kube +$ssh_cmd chmod 550 "${cert_dir}" +$ssh_cmd chown -R kube:kube_etcd "${cert_dir}" +$ssh_cmd chmod 440 "$cert_dir/server.key" +$ssh_cmd mkdir -p /etc/etcd/certs +$ssh_cmd cp ${cert_dir}/* /etc/etcd/certs diff --git a/magnum/drivers/common/templates/kubernetes/fragments/start-container-agent.sh b/magnum/drivers/common/templates/kubernetes/fragments/start-container-agent.sh index 5598305823..342fe96b59 100644 --- a/magnum/drivers/common/templates/kubernetes/fragments/start-container-agent.sh +++ b/magnum/drivers/common/templates/kubernetes/fragments/start-container-agent.sh @@ -1,18 +1,23 @@ #!/bin/bash -. /etc/sysconfig/heat-params +set -x +set +u +HTTP_PROXY="$HTTP_PROXY" +HTTPS_PROXY="$HTTPS_PROXY" +NO_PROXY="$NO_PROXY" +CONTAINER_INFRA_PREFIX="$CONTAINER_INFRA_PREFIX" +HEAT_CONTAINER_AGENT_TAG="$HEAT_CONTAINER_AGENT_TAG" -set -uxe -if [ ! -z "$HTTP_PROXY" ]; then +if [ -n "${HTTP_PROXY}" ]; then export HTTP_PROXY fi -if [ ! -z "$HTTPS_PROXY" ]; then +if [ -n "${HTTPS_PROXY}" ]; then export HTTPS_PROXY fi -if [ ! -z "$NO_PROXY" ]; then +if [ -n "${NO_PROXY}" ]; then export NO_PROXY fi @@ -21,7 +26,7 @@ fi # in host mount namespace and apply configuration. 
mkdir -p /srv/magnum/.ssh chmod 700 /srv/magnum/.ssh -ssh-keygen -t rsa -N '' -f /srv/magnum/.ssh/heat_agent_rsa +ssh-keygen -q -t rsa -N '' -f /srv/magnum/.ssh/heat_agent_rsa chmod 400 /srv/magnum/.ssh/heat_agent_rsa chmod 400 /srv/magnum/.ssh/heat_agent_rsa.pub # Add the public to the host authorized_keys file. @@ -41,13 +46,13 @@ sed -i '/^PermitRootLogin/ s/ .*/ without-password/' /etc/ssh/sshd_config systemctl restart sshd -_prefix=${CONTAINER_INFRA_PREFIX:-docker.io/openstackmagnum/} +_prefix="${CONTAINER_INFRA_PREFIX:-docker.io/openstackmagnum/}" atomic install \ --storage ostree \ --system \ --system-package no \ --set REQUESTS_CA_BUNDLE=/etc/pki/tls/certs/ca-bundle.crt \ --name heat-container-agent \ -${_prefix}heat-container-agent:${HEAT_CONTAINER_AGENT_TAG} +"${_prefix}heat-container-agent:${HEAT_CONTAINER_AGENT_TAG}" systemctl start heat-container-agent diff --git a/magnum/drivers/common/templates/kubernetes/fragments/upgrade-kubernetes.sh b/magnum/drivers/common/templates/kubernetes/fragments/upgrade-kubernetes.sh new file mode 100644 index 0000000000..18fd0a9aa9 --- /dev/null +++ b/magnum/drivers/common/templates/kubernetes/fragments/upgrade-kubernetes.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +. 
/etc/sysconfig/heat-params +set -x + +ssh_cmd="ssh -F /srv/magnum/.ssh/config root@localhost" +kubecontrol="/var/lib/containers/atomic/heat-container-agent.0/rootfs/usr/bin/kubectl --kubeconfig /etc/kubernetes/kubelet-config.yaml" +new_kube_tag="$kube_tag_input" + +if [ ${new_kube_tag}!=${KUBE_TAG} ]; then + HOSTNAME_OVERRIDE="$(cat /etc/hostname | head -1 | sed 's/\.novalocal//')" + # If there is only one master and this is the master node, skip the drain, just cordon it + # If there is only one worker and this is the worker node, skip the drain, just cordon it + all_masters=$(${ssh_cmd} ${kubecontrol} get nodes --selector=node-role.kubernetes.io/master= -o name) + all_workers=$(${ssh_cmd} ${kubecontrol} get nodes --selector=node-role.kubernetes.io/master!= -o name) + if [ "node/${HOSTNAME_OVERRIDE}" != "${all_masters}" ] && [ "node/${HOSTNAME_OVERRIDE}" != "${all_workers}" ]; then + ${ssh_cmd} ${kubecontrol} drain ${HOSTNAME_OVERRIDE} --ignore-daemonsets --delete-local-data --force + else + ${ssh_cmd} ${kubecontrol} cordon ${HOSTNAME_OVERRIDE} + fi + + declare -A service_image_mapping + service_image_mapping=( ["kubelet"]="kubernetes-kubelet" ["kube-controller-manager"]="kubernetes-controller-manager" ["kube-scheduler"]="kubernetes-scheduler" ["kube-proxy"]="kubernetes-proxy" ["kube-apiserver"]="kubernetes-apiserver" ) + + SERVICE_LIST=$($ssh_cmd atomic containers list -f container=kube -q --no-trunc) + + for service in ${SERVICE_LIST}; do + ${ssh_cmd} systemctl stop ${service} + done + + for service in ${SERVICE_LIST}; do + ${ssh_cmd} atomic pull --storage ostree "docker.io/openstackmagnum/${service_image_mapping[${service}]}:${new_kube_tag}" + done + + for service in ${SERVICE_LIST}; do + ${ssh_cmd} atomic containers update --rebase docker.io/openstackmagnum/${service_image_mapping[${service}]}:${new_kube_tag} ${service} + done + + for service in ${SERVICE_LIST}; do + systemctl restart ${service} + done + + ${ssh_cmd} 
/var/lib/containers/atomic/heat-container-agent.0/rootfs/usr/bin/kubectl --kubeconfig /etc/kubernetes/kubelet-config.yaml uncordon ${HOSTNAME_OVERRIDE} + + # FIXME(flwang): The KUBE_TAG could be out of date after a successful upgrade + for service in ${SERVICE_LIST}; do + ${ssh_cmd} atomic --assumeyes images "delete docker.io/openstackmagnum/${service_image_mapping[${service}]}:${KUBE_TAG}" + done + + ${ssh_cmd} atomic images prune + +fi diff --git a/magnum/drivers/common/templates/kubernetes/fragments/write-heat-params-master.yaml b/magnum/drivers/common/templates/kubernetes/fragments/write-heat-params-master.sh similarity index 92% rename from magnum/drivers/common/templates/kubernetes/fragments/write-heat-params-master.yaml rename to magnum/drivers/common/templates/kubernetes/fragments/write-heat-params-master.sh index 23a4a076c5..1cb0882988 100644 --- a/magnum/drivers/common/templates/kubernetes/fragments/write-heat-params-master.yaml +++ b/magnum/drivers/common/templates/kubernetes/fragments/write-heat-params-master.sh @@ -1,10 +1,12 @@ -#cloud-config -merge_how: dict(recurse_array)+list(append) -write_files: - - path: /etc/sysconfig/heat-params - owner: "root:root" - permissions: "0600" - content: | +#!/bin/sh + +echo "START: write-heat-params" + +HEAT_PARAMS=/etc/sysconfig/heat-params +[ -f ${HEAT_PARAMS} ] || { + echo "Writing File: $HEAT_PARAMS" + mkdir -p "$(dirname ${HEAT_PARAMS})" + cat > ${HEAT_PARAMS} < ${HEAT_PARAMS} < maximum node count of cluster workers when doing scale up + update_max_batch_size: + type: number + description: > + max batch size when doing rolling upgrade + default: 1 + + resources: ###################################################################### @@ -707,6 +732,9 @@ resources: # well, we could remove this rule here. 
# The PR in ccm is # https://github.com/kubernetes/cloud-provider-openstack/pull/491 + - protocol: tcp + port_range_min: 22 + port_range_max: 22 - protocol: tcp port_range_min: 30000 port_range_max: 32767 @@ -798,6 +826,8 @@ resources: type: OS::Heat::ResourceGroup depends_on: - network + update_policy: + rolling_update: {max_batch_size: {get_param: update_max_batch_size}, pause_time: 30} properties: count: {get_param: number_of_masters} resource_def: @@ -812,7 +842,7 @@ resources: api_public_address: {get_attr: [api_lb, floating_address]} api_private_address: {get_attr: [api_lb, address]} ssh_key_name: {get_param: ssh_key_name} - server_image: {get_param: server_image} + server_image: {get_param: master_image} master_flavor: {get_param: master_flavor} external_network: {get_param: external_network} kube_allow_priv: {get_param: kube_allow_priv} @@ -850,7 +880,7 @@ resources: http_proxy: {get_param: http_proxy} https_proxy: {get_param: https_proxy} no_proxy: {get_param: no_proxy} - kube_tag: {get_param: kube_tag} + kube_tag: {get_param: master_kube_tag} cloud_provider_tag: {get_param: cloud_provider_tag} cloud_provider_enabled: {get_param: cloud_provider_enabled} kube_version: {get_param: kube_version} @@ -968,6 +998,8 @@ resources: type: OS::Heat::ResourceGroup depends_on: - network + update_policy: + rolling_update: {max_batch_size: {get_param: update_max_batch_size}, pause_time: 30} properties: count: {get_param: number_of_minions} removal_policies: [{resource_list: {get_param: minions_to_remove}}] @@ -980,7 +1012,7 @@ resources: - [{ get_param: 'OS::stack_name' }, 'minion', '%index%'] prometheus_monitoring: {get_param: prometheus_monitoring} ssh_key_name: {get_param: ssh_key_name} - server_image: {get_param: server_image} + server_image: {get_param: minion_image} minion_flavor: {get_param: minion_flavor} fixed_network: {get_attr: [network, fixed_network]} fixed_subnet: {get_attr: [network, fixed_subnet]} @@ -1015,7 +1047,7 @@ resources: http_proxy: {get_param: 
http_proxy} https_proxy: {get_param: https_proxy} no_proxy: {get_param: no_proxy} - kube_tag: {get_param: kube_tag} + kube_tag: {get_param: minion_kube_tag} kube_version: {get_param: kube_version} trustee_user_id: {get_param: trustee_user_id} trustee_username: {get_param: trustee_username} diff --git a/magnum/drivers/k8s_fedora_atomic_v1/templates/kubemaster.yaml b/magnum/drivers/k8s_fedora_atomic_v1/templates/kubemaster.yaml index e1d61dccbf..fb48d6b06f 100644 --- a/magnum/drivers/k8s_fedora_atomic_v1/templates/kubemaster.yaml +++ b/magnum/drivers/k8s_fedora_atomic_v1/templates/kubemaster.yaml @@ -504,190 +504,146 @@ resources: # a multipart MIME user-data archive. # - write_heat_params: + agent_config: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: - str_replace: - template: {get_file: ../../common/templates/kubernetes/fragments/write-heat-params-master.yaml} - params: - "$PROMETHEUS_MONITORING": {get_param: prometheus_monitoring} - "$KUBE_API_PUBLIC_ADDRESS": {get_attr: [api_address_switch, public_ip]} - "$KUBE_API_PRIVATE_ADDRESS": {get_attr: [api_address_switch, private_ip]} - "$KUBE_API_PORT": {get_param: kubernetes_port} - "$KUBE_NODE_PUBLIC_IP": {get_attr: [kube_master_floating, floating_ip_address]} - "$KUBE_NODE_IP": {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} - "$KUBE_ALLOW_PRIV": {get_param: kube_allow_priv} - "$ETCD_VOLUME": {get_resource: etcd_volume} - "$ETCD_VOLUME_SIZE": {get_param: etcd_volume_size} - "$DOCKER_VOLUME": {get_resource: docker_volume} - "$DOCKER_VOLUME_SIZE": {get_param: docker_volume_size} - "$DOCKER_STORAGE_DRIVER": {get_param: docker_storage_driver} - "$CGROUP_DRIVER": {get_param: cgroup_driver} - "$NETWORK_DRIVER": {get_param: network_driver} - "$FLANNEL_NETWORK_CIDR": {get_param: flannel_network_cidr} - "$FLANNEL_NETWORK_SUBNETLEN": {get_param: flannel_network_subnetlen} - "$FLANNEL_BACKEND": {get_param: flannel_backend} - "$SYSTEM_PODS_INITIAL_DELAY": {get_param: system_pods_initial_delay} - 
"$SYSTEM_PODS_TIMEOUT": {get_param: system_pods_timeout} - "$PODS_NETWORK_CIDR": {get_param: pods_network_cidr} - "$PORTAL_NETWORK_CIDR": {get_param: portal_network_cidr} - "$ADMISSION_CONTROL_LIST": {get_param: admission_control_list} - "$ETCD_DISCOVERY_URL": {get_param: discovery_url} - "$AUTH_URL": {get_param: auth_url} - "$USERNAME": {get_param: username} - "$PASSWORD": {get_param: password} - "$CLUSTER_SUBNET": {get_param: fixed_subnet} - "$TLS_DISABLED": {get_param: tls_disabled} - "$TRAEFIK_INGRESS_CONTROLLER_TAG": {get_param: traefik_ingress_controller_tag} - "$KUBE_DASHBOARD_ENABLED": {get_param: kube_dashboard_enabled} - "$INFLUX_GRAFANA_DASHBOARD_ENABLED": {get_param: influx_grafana_dashboard_enabled} - "$VERIFY_CA": {get_param: verify_ca} - "$CLUSTER_UUID": {get_param: cluster_uuid} - "$MAGNUM_URL": {get_param: magnum_url} - "$VOLUME_DRIVER": {get_param: volume_driver} - "$REGION_NAME": {get_param: region_name} - "$HTTP_PROXY": {get_param: http_proxy} - "$HTTPS_PROXY": {get_param: https_proxy} - "$NO_PROXY": {get_param: no_proxy} - "$KUBE_TAG": {get_param: kube_tag} - "$CLOUD_PROVIDER_TAG": {get_param: cloud_provider_tag} - "$CLOUD_PROVIDER_ENABLED": {get_param: cloud_provider_enabled} - "$ETCD_TAG": {get_param: etcd_tag} - "$COREDNS_TAG": {get_param: coredns_tag} - "$FLANNEL_TAG": {get_param: flannel_tag} - "$FLANNEL_CNI_TAG": {get_param: flannel_cni_tag} - "$KUBE_VERSION": {get_param: kube_version} - "$KUBE_DASHBOARD_VERSION": {get_param: kube_dashboard_version} - "$TRUSTEE_USER_ID": {get_param: trustee_user_id} - "$TRUSTEE_PASSWORD": {get_param: trustee_password} - "$TRUST_ID": {get_param: trust_id} - "$INSECURE_REGISTRY_URL": {get_param: insecure_registry_url} - "$CONTAINER_INFRA_PREFIX": {get_param: container_infra_prefix} - "$ETCD_LB_VIP": {get_param: etcd_lb_vip} - "$DNS_SERVICE_IP": {get_param: dns_service_ip} - "$DNS_CLUSTER_DOMAIN": {get_param: dns_cluster_domain} - "$CERT_MANAGER_API": {get_param: cert_manager_api} - "$CA_KEY": {get_param: 
ca_key} - "$CALICO_TAG": {get_param: calico_tag} - "$CALICO_CNI_TAG": {get_param: calico_cni_tag} - "$CALICO_KUBE_CONTROLLERS_TAG": {get_param: calico_kube_controllers_tag} - "$CALICO_IPV4POOL": {get_param: calico_ipv4pool} - "$INGRESS_CONTROLLER": {get_param: ingress_controller} - "$INGRESS_CONTROLLER_ROLE": {get_param: ingress_controller_role} - "$OCTAVIA_INGRESS_CONTROLLER_TAG": {get_param: octavia_ingress_controller_tag} - "$KUBELET_OPTIONS": {get_param: kubelet_options} - "$KUBEAPI_OPTIONS": {get_param: kubeapi_options} - "$KUBECONTROLLER_OPTIONS": {get_param: kubecontroller_options} - "$KUBEPROXY_OPTIONS": {get_param: kubeproxy_options} - "$KUBESCHEDULER_OPTIONS": {get_param: kubescheduler_options} - "$OCTAVIA_ENABLED": {get_param: octavia_enabled} - "$KUBE_SERVICE_ACCOUNT_KEY": {get_param: kube_service_account_key} - "$KUBE_SERVICE_ACCOUNT_PRIVATE_KEY": {get_param: kube_service_account_private_key} - "$PROMETHEUS_TAG": {get_param: prometheus_tag} - "$GRAFANA_TAG": {get_param: grafana_tag} - "$HEAT_CONTAINER_AGENT_TAG": {get_param: heat_container_agent_tag} - "$KEYSTONE_AUTH_ENABLED": {get_param: keystone_auth_enabled} - "$K8S_KEYSTONE_AUTH_TAG": {get_param: k8s_keystone_auth_tag} - "$MONITORING_ENABLED": {get_param: monitoring_enabled} - "$PROJECT_ID": {get_param: project_id} - "$EXTERNAL_NETWORK_ID": {get_param: external_network} - "$TILLER_ENABLED": {get_param: tiller_enabled} - "$TILLER_TAG": {get_param: tiller_tag} - "$TILLER_NAMESPACE": {get_param: tiller_namespace} - "$NODE_PROBLEM_DETECTOR_TAG": {get_param: node_problem_detector_tag} - "$NGINX_INGRESS_CONTROLLER_TAG": {get_param: nginx_ingress_controller_tag} - "$AUTO_HEALING_ENABLED": {get_param: auto_healing_enabled} - "$AUTO_SCALING_ENABLED": {get_param: auto_scaling_enabled} - "$DRAINO_TAG": {get_param: draino_tag} - "$AUTOSCALER_TAG": {get_param: autoscaler_tag} - "$MIN_NODE_COUNT": {get_param: min_node_count} - "$MAX_NODE_COUNT": {get_param: max_node_count} + list_join: + - "\n" + - + - 
str_replace: + template: {get_file: ../../common/templates/fragments/atomic-install-openstack-ca.sh} + params: + $OPENSTACK_CA: {get_param: openstack_ca} + - str_replace: + template: {get_file: ../../common/templates/kubernetes/fragments/start-container-agent.sh} + params: + $CONTAINER_INFRA_PREFIX: {get_param: container_infra_prefix} + $HEAT_CONTAINER_AGENT_TAG: {get_param: heat_container_agent_tag} + - get_file: ../../common/templates/kubernetes/fragments/disable-selinux.sh - install_openstack_ca: + master_config: type: OS::Heat::SoftwareConfig properties: - group: ungrouped + group: script config: - str_replace: - params: - $OPENSTACK_CA: {get_param: openstack_ca} - template: {get_file: ../../common/templates/fragments/atomic-install-openstack-ca.sh} + list_join: + - "\n" + - + - str_replace: + template: {get_file: ../../common/templates/kubernetes/fragments/write-heat-params-master.sh} + params: + "$PROMETHEUS_MONITORING": {get_param: prometheus_monitoring} + "$KUBE_API_PUBLIC_ADDRESS": {get_attr: [api_address_switch, public_ip]} + "$KUBE_API_PRIVATE_ADDRESS": {get_attr: [api_address_switch, private_ip]} + "$KUBE_API_PORT": {get_param: kubernetes_port} + "$KUBE_NODE_PUBLIC_IP": {get_attr: [kube_master_floating, floating_ip_address]} + "$KUBE_NODE_IP": {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} + "$KUBE_ALLOW_PRIV": {get_param: kube_allow_priv} + "$ETCD_VOLUME": {get_resource: etcd_volume} + "$ETCD_VOLUME_SIZE": {get_param: etcd_volume_size} + "$DOCKER_VOLUME": {get_resource: docker_volume} + "$DOCKER_VOLUME_SIZE": {get_param: docker_volume_size} + "$DOCKER_STORAGE_DRIVER": {get_param: docker_storage_driver} + "$CGROUP_DRIVER": {get_param: cgroup_driver} + "$NETWORK_DRIVER": {get_param: network_driver} + "$FLANNEL_NETWORK_CIDR": {get_param: flannel_network_cidr} + "$FLANNEL_NETWORK_SUBNETLEN": {get_param: flannel_network_subnetlen} + "$FLANNEL_BACKEND": {get_param: flannel_backend} + "$SYSTEM_PODS_INITIAL_DELAY": {get_param: 
system_pods_initial_delay} + "$SYSTEM_PODS_TIMEOUT": {get_param: system_pods_timeout} + "$PODS_NETWORK_CIDR": {get_param: pods_network_cidr} + "$PORTAL_NETWORK_CIDR": {get_param: portal_network_cidr} + "$ADMISSION_CONTROL_LIST": {get_param: admission_control_list} + "$ETCD_DISCOVERY_URL": {get_param: discovery_url} + "$AUTH_URL": {get_param: auth_url} + "$USERNAME": {get_param: username} + "$PASSWORD": {get_param: password} + "$CLUSTER_SUBNET": {get_param: fixed_subnet} + "$TLS_DISABLED": {get_param: tls_disabled} + "$TRAEFIK_INGRESS_CONTROLLER_TAG": {get_param: traefik_ingress_controller_tag} + "$KUBE_DASHBOARD_ENABLED": {get_param: kube_dashboard_enabled} + "$INFLUX_GRAFANA_DASHBOARD_ENABLED": {get_param: influx_grafana_dashboard_enabled} + "$VERIFY_CA": {get_param: verify_ca} + "$CLUSTER_UUID": {get_param: cluster_uuid} + "$MAGNUM_URL": {get_param: magnum_url} + "$VOLUME_DRIVER": {get_param: volume_driver} + "$REGION_NAME": {get_param: region_name} + "$HTTP_PROXY": {get_param: http_proxy} + "$HTTPS_PROXY": {get_param: https_proxy} + "$NO_PROXY": {get_param: no_proxy} + "$KUBE_TAG": {get_param: kube_tag} + "$CLOUD_PROVIDER_TAG": {get_param: cloud_provider_tag} + "$CLOUD_PROVIDER_ENABLED": {get_param: cloud_provider_enabled} + "$ETCD_TAG": {get_param: etcd_tag} + "$COREDNS_TAG": {get_param: coredns_tag} + "$FLANNEL_TAG": {get_param: flannel_tag} + "$FLANNEL_CNI_TAG": {get_param: flannel_cni_tag} + "$KUBE_VERSION": {get_param: kube_version} + "$KUBE_DASHBOARD_VERSION": {get_param: kube_dashboard_version} + "$TRUSTEE_USER_ID": {get_param: trustee_user_id} + "$TRUSTEE_PASSWORD": {get_param: trustee_password} + "$TRUST_ID": {get_param: trust_id} + "$INSECURE_REGISTRY_URL": {get_param: insecure_registry_url} + "$CONTAINER_INFRA_PREFIX": {get_param: container_infra_prefix} + "$ETCD_LB_VIP": {get_param: etcd_lb_vip} + "$DNS_SERVICE_IP": {get_param: dns_service_ip} + "$DNS_CLUSTER_DOMAIN": {get_param: dns_cluster_domain} + "$CERT_MANAGER_API": {get_param: 
cert_manager_api} + "$CA_KEY": {get_param: ca_key} + "$CALICO_TAG": {get_param: calico_tag} + "$CALICO_CNI_TAG": {get_param: calico_cni_tag} + "$CALICO_KUBE_CONTROLLERS_TAG": {get_param: calico_kube_controllers_tag} + "$CALICO_IPV4POOL": {get_param: calico_ipv4pool} + "$INGRESS_CONTROLLER": {get_param: ingress_controller} + "$INGRESS_CONTROLLER_ROLE": {get_param: ingress_controller_role} + "$OCTAVIA_INGRESS_CONTROLLER_TAG": {get_param: octavia_ingress_controller_tag} + "$KUBELET_OPTIONS": {get_param: kubelet_options} + "$KUBEAPI_OPTIONS": {get_param: kubeapi_options} + "$KUBECONTROLLER_OPTIONS": {get_param: kubecontroller_options} + "$KUBEPROXY_OPTIONS": {get_param: kubeproxy_options} + "$KUBESCHEDULER_OPTIONS": {get_param: kubescheduler_options} + "$OCTAVIA_ENABLED": {get_param: octavia_enabled} + "$KUBE_SERVICE_ACCOUNT_KEY": {get_param: kube_service_account_key} + "$KUBE_SERVICE_ACCOUNT_PRIVATE_KEY": {get_param: kube_service_account_private_key} + "$PROMETHEUS_TAG": {get_param: prometheus_tag} + "$GRAFANA_TAG": {get_param: grafana_tag} + "$HEAT_CONTAINER_AGENT_TAG": {get_param: heat_container_agent_tag} + "$KEYSTONE_AUTH_ENABLED": {get_param: keystone_auth_enabled} + "$K8S_KEYSTONE_AUTH_TAG": {get_param: k8s_keystone_auth_tag} + "$MONITORING_ENABLED": {get_param: monitoring_enabled} + "$PROJECT_ID": {get_param: project_id} + "$EXTERNAL_NETWORK_ID": {get_param: external_network} + "$TILLER_ENABLED": {get_param: tiller_enabled} + "$TILLER_TAG": {get_param: tiller_tag} + "$TILLER_NAMESPACE": {get_param: tiller_namespace} + "$NODE_PROBLEM_DETECTOR_TAG": {get_param: node_problem_detector_tag} + "$NGINX_INGRESS_CONTROLLER_TAG": {get_param: nginx_ingress_controller_tag} + "$AUTO_HEALING_ENABLED": {get_param: auto_healing_enabled} + "$AUTO_SCALING_ENABLED": {get_param: auto_scaling_enabled} + "$DRAINO_TAG": {get_param: draino_tag} + "$AUTOSCALER_TAG": {get_param: autoscaler_tag} + "$MIN_NODE_COUNT": {get_param: min_node_count} + "$MAX_NODE_COUNT": {get_param: 
max_node_count} + - get_file: ../../common/templates/kubernetes/fragments/make-cert.sh + - get_file: ../../common/templates/kubernetes/fragments/configure-etcd.sh + - get_file: ../../common/templates/kubernetes/fragments/write-kube-os-config.sh + - get_file: ../../common/templates/kubernetes/fragments/configure-kubernetes-master.sh + - str_replace: + template: {get_file: ../../common/templates/fragments/configure-docker-storage.sh} + params: + $configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_atomic.sh} + - get_file: ../../common/templates/kubernetes/fragments/enable-services-master.sh + - get_file: ../../common/templates/kubernetes/fragments/add-proxy.sh - make_cert: - type: OS::Heat::SoftwareConfig + master_config_deployment: + type: OS::Heat::SoftwareDeployment properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/make-cert.sh} - - configure_docker_storage: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - params: - $configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_atomic.sh} - template: {get_file: ../../common/templates/fragments/configure-docker-storage.sh} - - configure_etcd: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/configure-etcd.sh} - - write_kube_os_config: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/write-kube-os-config.sh} - - configure_kubernetes: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/configure-kubernetes-master.sh} - - enable_services: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: 
../../common/templates/kubernetes/fragments/enable-services-master.sh} - - disable_selinux: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/disable-selinux.sh} - - add_proxy: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/add-proxy.sh} - - start_container_agent: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/start-container-agent.sh} - - kube_master_init: - type: OS::Heat::MultipartMime - properties: - parts: - - config: {get_resource: install_openstack_ca} - - config: {get_resource: disable_selinux} - - config: {get_resource: write_heat_params} - - config: {get_resource: configure_etcd} - - config: {get_resource: write_kube_os_config} - - config: {get_resource: configure_docker_storage} - - config: {get_resource: configure_kubernetes} - - config: {get_resource: make_cert} - - config: {get_resource: add_proxy} - - config: {get_resource: start_container_agent} - - config: {get_resource: enable_services} + signal_transport: HEAT_SIGNAL + config: {get_resource: master_config} + server: {get_resource: kube-master} + actions: ['CREATE'] ###################################################################### # @@ -706,7 +662,7 @@ resources: key_name: {get_param: ssh_key_name} user_data_format: SOFTWARE_CONFIG software_config_transport: POLL_SERVER_HEAT - user_data: {get_resource: kube_master_init} + user_data: {get_resource: agent_config} networks: - port: {get_resource: kube_master_eth0} scheduler_hints: { group: { get_param: nodes_server_group_id }} @@ -783,6 +739,25 @@ resources: volume_id: {get_resource: docker_volume} mountpoint: /dev/vdb + upgrade_kubernetes: + type: OS::Heat::SoftwareConfig + properties: + group: script + inputs: + - name: kube_tag_input + config: + get_file: 
../../common/templates/kubernetes/fragments/upgrade-kubernetes.sh + + upgrade_kubernetes_deployment: + type: OS::Heat::SoftwareDeployment + properties: + signal_transport: HEAT_SIGNAL + config: {get_resource: upgrade_kubernetes} + server: {get_resource: kube-master} + actions: ['UPDATE'] + input_values: + kube_tag_input: {get_param: kube_tag} + outputs: OS::stack_id: diff --git a/magnum/drivers/k8s_fedora_atomic_v1/templates/kubeminion.yaml b/magnum/drivers/k8s_fedora_atomic_v1/templates/kubeminion.yaml index 9d12681dae..8c86996841 100644 --- a/magnum/drivers/k8s_fedora_atomic_v1/templates/kubeminion.yaml +++ b/magnum/drivers/k8s_fedora_atomic_v1/templates/kubeminion.yaml @@ -283,21 +283,24 @@ parameters: resources: - start_container_agent: + agent_config: type: OS::Heat::SoftwareConfig properties: group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/start-container-agent.sh} - - minion_wait_handle: - type: OS::Heat::WaitConditionHandle - - minion_wait_condition: - type: OS::Heat::WaitCondition - depends_on: kube-minion - properties: - handle: {get_resource: minion_wait_handle} - timeout: {get_param: wait_condition_timeout} + config: + list_join: + - "\n" + - + - str_replace: + template: {get_file: ../../common/templates/fragments/atomic-install-openstack-ca.sh} + params: + $OPENSTACK_CA: {get_param: openstack_ca} + - str_replace: + template: {get_file: ../../common/templates/kubernetes/fragments/start-container-agent.sh} + params: + $CONTAINER_INFRA_PREFIX: {get_param: container_infra_prefix} + $HEAT_CONTAINER_AGENT_TAG: {get_param: heat_container_agent_tag} + - get_file: ../../common/templates/kubernetes/fragments/disable-selinux.sh ###################################################################### # @@ -305,169 +308,83 @@ resources: # a multipart MIME user-data archive. 
# - write_heat_params: + node_config: type: OS::Heat::SoftwareConfig properties: - group: ungrouped + group: script config: - str_replace: - template: {get_file: ../../common/templates/kubernetes/fragments/write-heat-params.yaml} - params: - $PROMETHEUS_MONITORING: {get_param: prometheus_monitoring} - $KUBE_ALLOW_PRIV: {get_param: kube_allow_priv} - $KUBE_MASTER_IP: {get_param: kube_master_ip} - $KUBE_API_PORT: {get_param: kubernetes_port} - $KUBE_NODE_PUBLIC_IP: {get_attr: [kube_minion_floating, floating_ip_address]} - $KUBE_NODE_IP: {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]} - $ETCD_SERVER_IP: {get_param: etcd_server_ip} - $DOCKER_VOLUME: {get_resource: docker_volume} - $DOCKER_VOLUME_SIZE: {get_param: docker_volume_size} - $DOCKER_STORAGE_DRIVER: {get_param: docker_storage_driver} - $CGROUP_DRIVER: {get_param: cgroup_driver} - $NETWORK_DRIVER: {get_param: network_driver} - $REGISTRY_ENABLED: {get_param: registry_enabled} - $REGISTRY_PORT: {get_param: registry_port} - $SWIFT_REGION: {get_param: swift_region} - $REGISTRY_CONTAINER: {get_param: registry_container} - $REGISTRY_INSECURE: {get_param: registry_insecure} - $REGISTRY_CHUNKSIZE: {get_param: registry_chunksize} - $TLS_DISABLED: {get_param: tls_disabled} - $VERIFY_CA: {get_param: verify_ca} - $CLUSTER_UUID: {get_param: cluster_uuid} - $MAGNUM_URL: {get_param: magnum_url} - $USERNAME: {get_param: username} - $PASSWORD: {get_param: password} - $VOLUME_DRIVER: {get_param: volume_driver} - $REGION_NAME: {get_param: region_name} - $HTTP_PROXY: {get_param: http_proxy} - $HTTPS_PROXY: {get_param: https_proxy} - $NO_PROXY: {get_param: no_proxy} - $KUBE_TAG: {get_param: kube_tag} - $FLANNEL_NETWORK_CIDR: {get_param: flannel_network_cidr} - $PODS_NETWORK_CIDR: {get_param: pods_network_cidr} - $KUBE_VERSION: {get_param: kube_version} - $WAIT_CURL: {get_attr: [minion_wait_handle, curl_cli]} - $TRUSTEE_USER_ID: {get_param: trustee_user_id} - $TRUSTEE_USERNAME: {get_param: trustee_username} - 
$TRUSTEE_PASSWORD: {get_param: trustee_password} - $TRUSTEE_DOMAIN_ID: {get_param: trustee_domain_id} - $TRUST_ID: {get_param: trust_id} - $AUTH_URL: {get_param: auth_url} - $CLOUD_PROVIDER_ENABLED: {get_param: cloud_provider_enabled} - $INSECURE_REGISTRY_URL: {get_param: insecure_registry_url} - $CONTAINER_INFRA_PREFIX: {get_param: container_infra_prefix} - $DNS_SERVICE_IP: {get_param: dns_service_ip} - $DNS_CLUSTER_DOMAIN: {get_param: dns_cluster_domain} - $KUBELET_OPTIONS: {get_param: kubelet_options} - $KUBEPROXY_OPTIONS: {get_param: kubeproxy_options} - $OCTAVIA_ENABLED: {get_param: octavia_enabled} - $HEAT_CONTAINER_AGENT_TAG: {get_param: heat_container_agent_tag} - $AUTO_HEALING_ENABLED: {get_param: auto_healing_enabled} + list_join: + - "\n" + - + - str_replace: + template: {get_file: ../../common/templates/kubernetes/fragments/write-heat-params.sh} + params: + $PROMETHEUS_MONITORING: {get_param: prometheus_monitoring} + $KUBE_ALLOW_PRIV: {get_param: kube_allow_priv} + $KUBE_MASTER_IP: {get_param: kube_master_ip} + $KUBE_API_PORT: {get_param: kubernetes_port} + $KUBE_NODE_PUBLIC_IP: {get_attr: [kube_minion_floating, floating_ip_address]} + $KUBE_NODE_IP: {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]} + $ETCD_SERVER_IP: {get_param: etcd_server_ip} + $DOCKER_VOLUME: {get_resource: docker_volume} + $DOCKER_VOLUME_SIZE: {get_param: docker_volume_size} + $DOCKER_STORAGE_DRIVER: {get_param: docker_storage_driver} + $CGROUP_DRIVER: {get_param: cgroup_driver} + $NETWORK_DRIVER: {get_param: network_driver} + $REGISTRY_ENABLED: {get_param: registry_enabled} + $REGISTRY_PORT: {get_param: registry_port} + $SWIFT_REGION: {get_param: swift_region} + $REGISTRY_CONTAINER: {get_param: registry_container} + $REGISTRY_INSECURE: {get_param: registry_insecure} + $REGISTRY_CHUNKSIZE: {get_param: registry_chunksize} + $TLS_DISABLED: {get_param: tls_disabled} + $VERIFY_CA: {get_param: verify_ca} + $CLUSTER_UUID: {get_param: cluster_uuid} + $MAGNUM_URL: {get_param: 
magnum_url} + $USERNAME: {get_param: username} + $PASSWORD: {get_param: password} + $VOLUME_DRIVER: {get_param: volume_driver} + $REGION_NAME: {get_param: region_name} + $HTTP_PROXY: {get_param: http_proxy} + $HTTPS_PROXY: {get_param: https_proxy} + $NO_PROXY: {get_param: no_proxy} + $KUBE_TAG: {get_param: kube_tag} + $FLANNEL_NETWORK_CIDR: {get_param: flannel_network_cidr} + $PODS_NETWORK_CIDR: {get_param: pods_network_cidr} + $KUBE_VERSION: {get_param: kube_version} + $TRUSTEE_USER_ID: {get_param: trustee_user_id} + $TRUSTEE_PASSWORD: {get_param: trustee_password} + $TRUST_ID: {get_param: trust_id} + $AUTH_URL: {get_param: auth_url} + $CLOUD_PROVIDER_ENABLED: {get_param: cloud_provider_enabled} + $INSECURE_REGISTRY_URL: {get_param: insecure_registry_url} + $CONTAINER_INFRA_PREFIX: {get_param: container_infra_prefix} + $DNS_SERVICE_IP: {get_param: dns_service_ip} + $DNS_CLUSTER_DOMAIN: {get_param: dns_cluster_domain} + $KUBELET_OPTIONS: {get_param: kubelet_options} + $KUBEPROXY_OPTIONS: {get_param: kubeproxy_options} + $OCTAVIA_ENABLED: {get_param: octavia_enabled} + $HEAT_CONTAINER_AGENT_TAG: {get_param: heat_container_agent_tag} + $AUTO_HEALING_ENABLED: {get_param: auto_healing_enabled} + - get_file: ../../common/templates/kubernetes/fragments/write-kube-os-config.sh + - get_file: ../../common/templates/kubernetes/fragments/make-cert-client.sh + - get_file: ../../common/templates/fragments/configure-docker-registry.sh + - get_file: ../../common/templates/kubernetes/fragments/configure-kubernetes-minion.sh + - get_file: ../../common/templates/kubernetes/fragments/add-proxy.sh + - str_replace: + template: {get_file: ../../common/templates/fragments/configure-docker-storage.sh} + params: + $configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_atomic.sh} + - get_file: ../../common/templates/kubernetes/fragments/enable-services-minion.sh + - get_file: ../../common/templates/fragments/enable-docker-registry.sh - 
- install_openstack_ca: - type: OS::Heat::SoftwareConfig + node_config_deployment: + type: OS::Heat::SoftwareDeployment properties: - group: ungrouped - config: - str_replace: - params: - $OPENSTACK_CA: {get_param: openstack_ca} - template: {get_file: ../../common/templates/fragments/atomic-install-openstack-ca.sh} - - write_kube_os_config: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/write-kube-os-config.sh} - - make_cert: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/make-cert-client.sh} - - configure_docker_storage: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - params: - $configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_atomic.sh} - template: {get_file: ../../common/templates/fragments/configure-docker-storage.sh} - - configure_docker_registry: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/fragments/configure-docker-registry.sh} - - configure_kubernetes_minion: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/configure-kubernetes-minion.sh} - - enable_services: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/enable-services-minion.sh} - - enable_docker_registry: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/fragments/enable-docker-registry.sh} - - minion_wc_notify: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: | - #!/bin/bash -v - if [ "verify_ca" == "True" ]; then - VERIFY_CA="" - else - VERIFY_CA="-k" - fi - wc_notify $VERIFY_CA --data-binary 
'{"status": "SUCCESS"}' - params: - wc_notify: {get_attr: [minion_wait_handle, curl_cli]} - verify_ca: {get_param: verify_ca} - - disable_selinux: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/disable-selinux.sh} - - add_proxy: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/add-proxy.sh} - - kube_minion_init: - type: OS::Heat::MultipartMime - properties: - parts: - - config: {get_resource: install_openstack_ca} - - config: {get_resource: disable_selinux} - - config: {get_resource: write_heat_params} - - config: {get_resource: start_container_agent} - - config: {get_resource: write_kube_os_config} - - config: {get_resource: make_cert} - - config: {get_resource: configure_docker_storage} - - config: {get_resource: configure_docker_registry} - - config: {get_resource: configure_kubernetes_minion} - - config: {get_resource: add_proxy} - - config: {get_resource: enable_services} - - config: {get_resource: enable_docker_registry} - - config: {get_resource: minion_wc_notify} + signal_transport: HEAT_SIGNAL + config: {get_resource: node_config} + server: {get_resource: kube-minion} + actions: ['CREATE'] ###################################################################### # @@ -484,8 +401,9 @@ resources: image: {get_param: server_image} flavor: {get_param: minion_flavor} key_name: {get_param: ssh_key_name} - user_data_format: RAW - user_data: {get_resource: kube_minion_init} + user_data: {get_resource: agent_config} + user_data_format: SOFTWARE_CONFIG + software_config_transport: POLL_SERVER_HEAT networks: - port: {get_resource: kube_minion_eth0} scheduler_hints: { group: { get_param: nodes_server_group_id }} @@ -528,6 +446,25 @@ resources: volume_id: {get_resource: docker_volume} mountpoint: /dev/vdb + upgrade_kubernetes: + type: OS::Heat::SoftwareConfig + properties: + group: script + inputs: + - name: 
kube_tag_input + config: + get_file: ../../common/templates/kubernetes/fragments/upgrade-kubernetes.sh + + upgrade_kubernetes_deployment: + type: OS::Heat::SoftwareDeployment + properties: + signal_transport: HEAT_SIGNAL + config: {get_resource: upgrade_kubernetes} + server: {get_resource: kube-minion} + actions: ['UPDATE'] + input_values: + kube_tag_input: {get_param: kube_tag} + outputs: kube_minion_ip: diff --git a/magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py b/magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py index 091710c276..103cbcded2 100644 --- a/magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py +++ b/magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py @@ -333,6 +333,8 @@ class TestClusterConductorWithK8s(base.TestCase): 'portal_network_cidr': '10.254.0.0/16', 'project_id': 'project_id', 'max_node_count': 2, + 'master_image': 'image_id', + 'minion_image': 'image_id', } if missing_attr is not None: expected.pop(mapping[missing_attr], None) @@ -340,6 +342,10 @@ class TestClusterConductorWithK8s(base.TestCase): if missing_attr == 'node_count': expected['max_node_count'] = None + if missing_attr == 'image_id': + expected['master_image'] = None + expected['minion_image'] = None + self.assertEqual(expected, definition) self.assertEqual( ['../../common/templates/environments/no_private_network.yaml', @@ -465,6 +471,8 @@ class TestClusterConductorWithK8s(base.TestCase): 'portal_network_cidr': '10.254.0.0/16', 'project_id': 'project_id', 'max_node_count': 2, + 'master_image': 'image_id', + 'minion_image': 'image_id', } self.assertEqual(expected, definition) @@ -581,6 +589,8 @@ class TestClusterConductorWithK8s(base.TestCase): 'portal_network_cidr': '10.254.0.0/16', 'project_id': 'project_id', 'max_node_count': 2, + 'master_image': None, + 'minion_image': None, } self.assertEqual(expected, definition) self.assertEqual( @@ -1008,6 +1018,8 @@ class TestClusterConductorWithK8s(base.TestCase): 
'portal_network_cidr': '10.254.0.0/16', 'project_id': 'project_id', 'max_node_count': 2, + 'master_image': 'image_id', + 'minion_image': 'image_id', } self.assertEqual(expected, definition) self.assertEqual( diff --git a/magnum/tests/unit/drivers/test_template_definition.py b/magnum/tests/unit/drivers/test_template_definition.py index 9a53d80d52..722fe4662e 100644 --- a/magnum/tests/unit/drivers/test_template_definition.py +++ b/magnum/tests/unit/drivers/test_template_definition.py @@ -528,6 +528,8 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase): autoscaler_tag = mock_cluster.labels.get('autoscaler_tag') min_node_count = mock_cluster.labels.get('min_node_count') max_node_count = mock_cluster.labels.get('max_node_count') + master_image = mock_cluster_template.image_id + minion_image = mock_cluster_template.image_id k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() @@ -600,6 +602,8 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase): 'min_node_count': min_node_count, 'max_node_count': max_node_count, 'traefik_ingress_controller_tag': traefik_ingress_controller_tag, + 'master_image': master_image, + 'minion_image': minion_image, }} mock_get_params.assert_called_once_with(mock_context, mock_cluster_template, @@ -923,6 +927,8 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase): autoscaler_tag = mock_cluster.labels.get('autoscaler_tag') min_node_count = mock_cluster.labels.get('min_node_count') max_node_count = mock_cluster.labels.get('max_node_count') + master_image = mock_cluster_template.image_id + minion_image = mock_cluster_template.image_id k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() @@ -997,6 +1003,8 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase): 'min_node_count': min_node_count, 'max_node_count': max_node_count, 'traefik_ingress_controller_tag': traefik_ingress_controller_tag, + 'master_image': master_image, + 'minion_image': 
minion_image, }} mock_get_params.assert_called_once_with(mock_context, mock_cluster_template, diff --git a/releasenotes/notes/k8s-fedora-atomic-rolling-upgrade-3d8edcdd91fa1529.yaml b/releasenotes/notes/k8s-fedora-atomic-rolling-upgrade-3d8edcdd91fa1529.yaml new file mode 100644 index 0000000000..8af72a788e --- /dev/null +++ b/releasenotes/notes/k8s-fedora-atomic-rolling-upgrade-3d8edcdd91fa1529.yaml @@ -0,0 +1,17 @@ +--- +features: + - | + Now the fedora atomic Kubernetes driver can support rolling upgrade for k8s + version change or the image change. User can call command + `openstack coe cluster upgrade <cluster ID> <new cluster template ID>` to + upgrade current cluster to the new version defined in the new cluster + template. At this moment, only the image change and the kube_tag change + are supported. +issues: + - | + There is a known issue when doing image(operating system) upgrade for k8s + cluster. Because when doing image change for a server resource, Heat will + trigger the Nova rebuild to rebuild the instance and there is no chance to + call kubectl drain to drain the node, so there could be a very minor + downtime when starting the rebuild and meanwhile a request + is routed to that node.