Container Infrastructure Management Service for OpenStack
#!/bin/bash

. /etc/sysconfig/heat-params

set -x

ssh_cmd="ssh -F /srv/magnum/.ssh/config root@localhost"
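
# NOTE: host-level tools (podman, atomic, rpm-ostree, systemctl) are invoked
# through ssh to root@localhost because this fragment runs from the
# containerized heat agent rather than directly on the host.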

# Export KUBECONFIG so the bare kubectl calls below use the kubelet credentials.
export KUBECONFIG="/etc/kubernetes/kubelet-config.yaml"

if [ "$(echo "$USE_PODMAN" | tr '[:upper:]' '[:lower:]')" == "true" ]; then
    kubecontrol="/var/lib/containers/atomic/heat-container-agent.0/rootfs/usr/bin/kubectl --kubeconfig $KUBECONFIG"
else
    kubecontrol="/usr/local/bin/kubectl --kubeconfig $KUBECONFIG"
fi

new_kube_tag="$kube_tag_input"
new_ostree_remote="$ostree_remote_input"
new_ostree_commit="$ostree_commit_input"
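
# NOTE: the *_input values above are supplied by the upgrade request (as Heat
# deployment inputs), not by heat-params.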

function drain {
    # If there is only one master and this is the master node, skip the drain, just cordon it
    # If there is only one worker and this is the worker node, skip the drain, just cordon it
    all_masters=$(kubectl get nodes --selector=node-role.kubernetes.io/master= -o name)
    all_workers=$(kubectl get nodes --selector=node-role.kubernetes.io/master!= -o name)
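    # `kubectl get nodes -o name` prints one "node/<name>" entry per line, so the
    # comparisons below match only when this instance is the sole node of its role.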
    if [ "node/${INSTANCE_NAME}" != "${all_masters}" ] && [ "node/${INSTANCE_NAME}" != "${all_workers}" ]; then
        kubectl drain ${INSTANCE_NAME} --ignore-daemonsets --delete-local-data --force
    else
        kubectl cordon ${INSTANCE_NAME}
    fi
}
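
# Upgrade the Kubernetes containers only when the requested tag differs from
# the running KUBE_TAG; the node is drained (or cordoned) first so workloads
# are rescheduled before the services are restarted.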
if [ "${new_kube_tag}" != "${KUBE_TAG}" ]; then
    drain

    if [ "$(echo "$USE_PODMAN" | tr '[:upper:]' '[:lower:]')" == "true" ]; then
        SERVICE_LIST=$($ssh_cmd podman ps -f name=kube --format "{{.Names}}")
        for service in ${SERVICE_LIST}; do
            ${ssh_cmd} systemctl stop ${service}
            ${ssh_cmd} podman rm ${service}
        done
        ${ssh_cmd} podman rmi ${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/}hyperkube:${KUBE_TAG}
        echo "KUBE_TAG=$new_kube_tag" >> /etc/sysconfig/heat-params
        for service in ${SERVICE_LIST}; do
            ${ssh_cmd} systemctl start ${service}
        done
    else
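        # Non-podman path: each kube-* service runs as an atomic system
        # container backed by ostree, so the upgrade pulls the new image and
        # rebases each container onto it.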
        declare -A service_image_mapping
        service_image_mapping=( ["kubelet"]="kubernetes-kubelet" ["kube-controller-manager"]="kubernetes-controller-manager" ["kube-scheduler"]="kubernetes-scheduler" ["kube-proxy"]="kubernetes-proxy" ["kube-apiserver"]="kubernetes-apiserver" )
        SERVICE_LIST=$($ssh_cmd atomic containers list -f container=kube -q --no-trunc)
        for service in ${SERVICE_LIST}; do
            ${ssh_cmd} systemctl stop ${service}
        done
        for service in ${SERVICE_LIST}; do
            ${ssh_cmd} atomic pull --storage ostree "${CONTAINER_INFRA_PREFIX:-docker.io/openstackmagnum/}${service_image_mapping[${service}]}:${new_kube_tag}"
        done
        for service in ${SERVICE_LIST}; do
            ${ssh_cmd} atomic containers update --rebase ${CONTAINER_INFRA_PREFIX:-docker.io/openstackmagnum/}${service_image_mapping[${service}]}:${new_kube_tag} ${service}
        done
        for service in ${SERVICE_LIST}; do
            ${ssh_cmd} systemctl restart ${service}
        done
        ${ssh_cmd} /var/lib/containers/atomic/heat-container-agent.0/rootfs/usr/bin/kubectl --kubeconfig /etc/kubernetes/kubelet-config.yaml uncordon ${INSTANCE_NAME}
        for service in ${SERVICE_LIST}; do
            ${ssh_cmd} atomic --assumeyes images delete "${CONTAINER_INFRA_PREFIX:-docker.io/openstackmagnum/}${service_image_mapping[${service}]}:${KUBE_TAG}"
        done
        ${ssh_cmd} atomic images prune
    fi
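
    # The control plane may still be settling after the restarts above, so
    # retry the uncordon for up to ~150s (30 attempts x 5s) before giving up.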
    i=0
    until kubectl uncordon ${INSTANCE_NAME}
    do
        i=$((i+1))
        [ $i -lt 30 ] || break
        echo "Trying to uncordon node..."
        sleep 5s
    done
fi

function setup_uncordon {
    # Create a service to uncordon the node itself after reboot
    if [ ! -f /etc/systemd/system/uncordon.service ]; then
        $ssh_cmd cat > /etc/systemd/system/uncordon.service << EOF
[Unit]
Description=magnum-uncordon
After=network.target kubelet.service

[Service]
Restart=on-failure
RemainAfterExit=yes
ExecStart=${kubecontrol} uncordon ${INSTANCE_NAME}

[Install]
WantedBy=multi-user.target
EOF
        ${ssh_cmd} systemctl enable uncordon.service
    fi
}
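
# An rpm-ostree deploy/rebase only takes effect after a reboot, so the
# uncordon.service unit created above re-runs "kubectl uncordon" once the
# node comes back up.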

remote_list=$(${ssh_cmd} ostree remote list)

# Fedora Atomic 29 will be the last release before the migration to Fedora CoreOS,
# so it is safe to add the 28 and 29 remotes directly.
if [[ ! " ${remote_list[@]} " =~ "fedora-atomic-28" ]]; then
    ${ssh_cmd} ostree remote add --set=gpgkeypath=/etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-28-primary --contenturl=mirrorlist=https://ostree.fedoraproject.org/mirrorlist fedora-atomic-28 https://kojipkgs.fedoraproject.org/atomic/repo/
fi
if [[ ! " ${remote_list[@]} " =~ "fedora-atomic-29" ]]; then
    ${ssh_cmd} ostree remote add --set=gpgkeypath=/etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-29-primary --contenturl=mirrorlist=https://ostree.fedoraproject.org/mirrorlist fedora-atomic-29 https://kojipkgs.fedoraproject.org/atomic/repo/
fi

# The URL of the existing Fedora Atomic 27 remote is no longer accessible, so
# replace it with the correct one.
if [[ " ${remote_list[@]} " =~ "fedora-atomic" ]]; then
    sed -i '
    /^url=/ s|=.*|=https://kojipkgs.fedoraproject.org/atomic/repo/|
    ' /etc/ostree/remotes.d/fedora-atomic.conf
fi

current_ostree_commit=$(${ssh_cmd} rpm-ostree status | grep Commit | awk '{print $2}')
current_ostree_remote=$(${ssh_cmd} rpm-ostree status | awk '/\* ostree/{print $0}' | awk '{match($0, "\\* ostree://([^ ]+)", a)}END{print a[1]}')
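# Example of the booted-deployment line parsed above (exact layout may vary
# between rpm-ostree releases):
#   * ostree://fedora-atomic:fedora/29/x86_64/atomic-host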

# NOTE(flwang): 1. Either deploy or rebase, but only one upgrade at a time.
#               2. Use the rpm-ostree command instead of the atomic command to
#                  keep the possibility of supporting Fedora CoreOS 30.
if [ "$new_ostree_commit" != "" ] && [ "$current_ostree_commit" != "$new_ostree_commit" ]; then
    drain
    setup_uncordon
    ${ssh_cmd} rpm-ostree deploy $new_ostree_commit
    shutdown --reboot --no-wall -t 1
elif [ "$new_ostree_remote" != "" ] && [ "$current_ostree_remote" != "$new_ostree_remote" ]; then
    drain
    setup_uncordon
    ${ssh_cmd} rpm-ostree rebase $new_ostree_remote
    shutdown --reboot --no-wall -t 1
fi