Support verifying the digest for hyperkube image

Given that the default registry is a public container registry, it is
worth verifying the digest of the images we pull from it. Kubernetes
already supports this, so for the addon images users can simply use the
@sha256:xxx format in their tag labels. This patch adds the same
verification for the hyperkube image in the podman-based Fedora CoreOS
driver.
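
For illustration, a hedged sketch of how an operator could supply the
expected digest as a cluster label once this change is in place (the
template name, kube_tag and digest values below are placeholders, not
part of this patch):

    # Sketch: pin the hyperkube image to an expected manifest digest.
    # If the image pulled on the node does not match, the bootstrap
    # fragment below exits with an error and the deployment fails.
    openstack coe cluster create my-cluster \
        --cluster-template k8s-fcos-podman \
        --labels kube_tag=v1.15.7,kube_image_digest=sha256:<expected-digest>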

Task: 37776
Story: 2007001

Change-Id: I970c1b91254d2a375192420a9169f3a629c56ce7
Feilong Wang 2019-12-09 16:16:50 +13:00
parent c2c701f78c
commit a943756aea
9 changed files with 51 additions and 6 deletions


@@ -31,3 +31,9 @@ do
echo "Trying to label master node with node-role.kubernetes.io/master=\"\""
sleep 5s
done
KUBE_DIGEST=$($ssh_cmd podman image inspect hyperkube:${KUBE_TAG} --format "{{.Digest}}")
if [ -n "${KUBE_IMAGE_DIGEST}" ] && [ "${KUBE_IMAGE_DIGEST}" != "${KUBE_DIGEST}" ]; then
printf "The sha256 ${KUBE_DIGEST} of current hyperkube image cannot match the given one: ${KUBE_IMAGE_DIGEST}."
exit 1
fi
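
For reference, a hedged standalone version of the check added above,
with the ssh wrapper dropped and placeholder values (the tag and
expected digest are assumptions for illustration only):

    #!/bin/bash
    # Compare the manifest digest of the locally pulled hyperkube image
    # with the operator-supplied value; skip the check when none is set.
    KUBE_TAG="v1.15.7"                      # placeholder tag
    KUBE_IMAGE_DIGEST="sha256:<expected>"   # placeholder expected digest
    actual=$(podman image inspect "hyperkube:${KUBE_TAG}" --format "{{.Digest}}")
    if [ -n "${KUBE_IMAGE_DIGEST}" ] && [ "${KUBE_IMAGE_DIGEST}" != "${actual}" ]; then
        echo "hyperkube digest ${actual} does not match ${KUBE_IMAGE_DIGEST}" >&2
        exit 1
    fi

The value printed by {{.Digest}} is the digest the image can also be
pulled by, i.e. the image@sha256:... form.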


@@ -11,18 +11,19 @@ else
kubecontrol="/var/lib/containers/atomic/heat-container-agent.0/rootfs/usr/bin/kubectl --kubeconfig $KUBECONFIG"
fi
new_kube_tag="$kube_tag_input"
new_kube_image_digest="$kube_image_digest_input"
new_ostree_remote="$ostree_remote_input"
new_ostree_commit="$ostree_commit_input"
function drain {
# If there is only one master and this is the master node, skip the drain, just cordon it
# If there is only one worker and this is the worker node, skip the drain, just cordon it
all_masters=$(kubectl get nodes --selector=node-role.kubernetes.io/master= -o name)
all_workers=$(kubectl get nodes --selector=node-role.kubernetes.io/master!= -o name)
all_masters=$(${ssh_cmd} ${kubecontrol} get nodes --selector=node-role.kubernetes.io/master= -o name)
all_workers=$(${ssh_cmd} ${kubecontrol} get nodes --selector=node-role.kubernetes.io/master!= -o name)
if [ "node/${INSTANCE_NAME}" != "${all_masters}" ] && [ "node/${INSTANCE_NAME}" != "${all_workers}" ]; then
kubectl drain ${INSTANCE_NAME} --ignore-daemonsets --delete-local-data --force
${ssh_cmd} ${kubecontrol} drain ${INSTANCE_NAME} --ignore-daemonsets --delete-local-data --force
else
kubectl cordon ${INSTANCE_NAME}
${ssh_cmd} ${kubecontrol} cordon ${INSTANCE_NAME}
fi
}
@@ -45,8 +46,14 @@ if [ "${new_kube_tag}" != "${KUBE_TAG}" ]; then
${ssh_cmd} systemctl start ${service}
done
KUBE_DIGEST=$($ssh_cmd podman image inspect hyperkube:${new_kube_tag} --format "{{.Digest}}")
if [ -n "${new_kube_image_digest}" ] && [ "${new_kube_image_digest}" != "${KUBE_DIGEST}" ]; then
printf "The sha256 ${KUBE_DIGEST} of current hyperkube image cannot match the given one: ${new_kube_image_digest}."
exit 1
fi
i=0
until ${kubecontrol} uncordon ${INSTANCE_NAME}
until ${ssh_cmd} ${kubecontrol} uncordon ${INSTANCE_NAME}
do
i=$((i+1))
[ $i -lt 30 ] || break;
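
As a rough sketch, the upgrade path that exercises the check above might
look like the following (template name, tag and digest are placeholders;
the other required template arguments are elided):

    # Create a new template whose labels carry the new tag plus its
    # expected digest, then trigger a rolling upgrade; the fragment
    # above aborts the upgrade when the pulled image does not match.
    openstack coe cluster template create k8s-fcos-v1-16 \
        ... \
        --labels kube_tag=v1.16.3,kube_image_digest=sha256:<expected-digest>
    openstack coe cluster upgrade my-cluster k8s-fcos-v1-16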


@@ -116,6 +116,7 @@ NPD_ENABLED="$NPD_ENABLED"
NODEGROUP_ROLE="$NODEGROUP_ROLE"
NODEGROUP_NAME="$NODEGROUP_NAME"
USE_PODMAN="$USE_PODMAN"
KUBE_IMAGE_DIGEST="$KUBE_IMAGE_DIGEST"
EOF
}


@@ -105,7 +105,7 @@ class K8sFedoraTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
'draino_tag', 'autoscaler_tag',
'min_node_count', 'max_node_count', 'npd_enabled',
'ostree_remote', 'ostree_commit',
'use_podman']
'use_podman', 'kube_image_digest']
labels = self._get_relevant_labels(cluster, kwargs)


@@ -807,6 +807,12 @@ parameters:
default:
false
kube_image_digest:
type: string
description: >
The digest of the image which should match the given kube_tag
default: ''
resources:
######################################################################


@@ -809,6 +809,12 @@ parameters:
constraints:
- allowed_values: [true]
kube_image_digest:
type: string
description: >
The digest of the image which should match the given kube_tag
default: ''
resources:
######################################################################
@@ -1134,6 +1140,7 @@ resources:
ostree_remote: {get_param: ostree_remote}
ostree_commit: {get_param: ostree_commit}
use_podman: {get_param: use_podman}
kube_image_digest: {get_param: kube_image_digest}
kube_cluster_config:
condition: create_cluster_resources


@@ -575,6 +575,12 @@ parameters:
description: >
If true, run system containers for kubernetes, etcd and heat-agent
kube_image_digest:
type: string
description: >
The digest of the image which should match the given kube_tag
default: ''
conditions:
image_based: {equals: [{get_param: boot_volume_size}, 0]}
@@ -750,6 +756,7 @@ resources:
"$NODEGROUP_ROLE": {get_param: nodegroup_role}
"$NODEGROUP_NAME": {get_param: nodegroup_name}
"$USE_PODMAN": {get_param: use_podman}
"$KUBE_IMAGE_DIGEST": {get_param: kube_image_digest}
- get_file: ../../common/templates/kubernetes/fragments/make-cert.sh
- get_file: ../../common/templates/kubernetes/fragments/configure-etcd.sh
- get_file: ../../common/templates/kubernetes/fragments/write-kube-os-config.sh
@@ -894,6 +901,7 @@ resources:
group: script
inputs:
- name: kube_tag_input
- name: kube_image_digest_input
- name: ostree_remote_input
- name: ostree_commit_input
config:
@@ -908,6 +916,7 @@ resources:
actions: ['UPDATE']
input_values:
kube_tag_input: {get_param: kube_tag}
kube_image_digest_input: {get_param: kube_image_digest}
ostree_remote_input: {get_param: ostree_remote}
ostree_commit_input: {get_param: ostree_commit}


@@ -587,6 +587,7 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
ostree_remote = mock_cluster.labels.get('ostree_remote')
ostree_commit = mock_cluster.labels.get('ostree_commit')
use_podman = mock_cluster.labels.get('use_podman')
kube_image_digest = mock_cluster.labels.get('kube_image_digest')
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
@@ -682,6 +683,7 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
'ostree_remote': ostree_remote,
'ostree_commit': ostree_commit,
'use_podman': use_podman,
'kube_image_digest': kube_image_digest,
}}
mock_get_params.assert_called_once_with(mock_context,
mock_cluster_template,
@@ -1051,6 +1053,7 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
ostree_remote = mock_cluster.labels.get('ostree_remote')
ostree_commit = mock_cluster.labels.get('ostree_commit')
use_podman = mock_cluster.labels.get('use_podman')
kube_image_digest = mock_cluster.labels.get('kube_image_digest')
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
@@ -1148,6 +1151,7 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
'ostree_remote': ostree_remote,
'ostree_commit': ostree_commit,
'use_podman': use_podman,
'kube_image_digest': kube_image_digest,
}}
mock_get_params.assert_called_once_with(mock_context,
mock_cluster_template,


@@ -0,0 +1,5 @@
---
features:
- |
The Fedora CoreOS driver now supports sha256 digest verification for the
hyperkube image when bootstrapping the Kubernetes cluster.
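
One hedged way to obtain the expected digest before setting the label
(the registry path and tag are assumptions, not part of this patch):

    # Pull the image once, then read the manifest digest with podman;
    # the printed value is what kube_image_digest should be set to.
    podman pull k8s.gcr.io/hyperkube:v1.15.7
    podman image inspect k8s.gcr.io/hyperkube:v1.15.7 --format "{{.Digest}}"
    # => sha256:<digest>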