Update Kubernetes version to 1.16.2
This updates the kubeadm and minikube Kubernetes deployments to deploy
version 1.16.2.

Change-Id: I324f9665a24c9383c59376fb77cdb853facd0f18
Signed-off-by: Steve Wilkerson <sw5822@att.com>
parent 7a165f5772
commit c9acad238c
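Kubernetes 1.16 stopped serving several long-deprecated APIs: Deployment, DaemonSet, and ReplicaSet now exist only under apps/v1, Ingress is served from networking.k8s.io/v1beta1, and PodSecurityPolicy moved to policy/v1beta1. That is why the changes below touch RBAC rules, ingress templates, PSP templates, and the kubeadm/minikube versions together. As a minimal pre-upgrade audit sketch (the ./charts path is illustrative, not from this commit), the removed groups can be grepped for directly:

    # Flag any template still pinning a removed API group (path is hypothetical)
    grep -rnE 'apiVersion: (extensions/v1beta1|apps/v1beta[12])' ./charts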
@@ -54,19 +54,13 @@ rules:
       - get
       - list
       - watch
-  - apiGroups:
-      - extensions
-    resources:
-      - daemonsets
-      - deployments
-      - replicasets
-    verbs:
-      - list
-      - watch
   - apiGroups:
       - apps
     resources:
       - statefulsets
+      - daemonsets
+      - deployments
+      - replicasets
     verbs:
       - get
      - list

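Note: this RBAC trim works because 1.16 serves daemonsets, deployments, and replicasets only from the apps group, so the separate extensions rule grants nothing and the resources fold into the existing apps rule. A quick sanity check after deploying, with a hypothetical service account name:

    kubectl auth can-i list deployments.apps \
      --as=system:serviceaccount:kube-system:prometheus-kube-state-metrics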
@@ -54,19 +54,13 @@ rules:
       - get
       - list
       - watch
-  - apiGroups:
-      - extensions
-    resources:
-      - daemonsets
-      - deployments
-      - replicasets
-    verbs:
-      - list
-      - watch
   - apiGroups:
       - apps
     resources:
       - statefulsets
+      - daemonsets
+      - deployments
+      - replicasets
     verbs:
       - get
       - list

@@ -54,19 +54,13 @@ rules:
       - get
       - list
       - watch
-  - apiGroups:
-      - extensions
-    resources:
-      - daemonsets
-      - deployments
-      - replicasets
-    verbs:
-      - list
-      - watch
   - apiGroups:
       - apps
     resources:
       - statefulsets
+      - daemonsets
+      - deployments
+      - replicasets
     verbs:
       - get
       - list

@@ -26,7 +26,6 @@ metadata:
   name: {{ $serviceAccountName }}
 rules:
   - apiGroups:
-      - extensions
       - ""
     resources:
       - nodes

@@ -53,19 +53,13 @@ rules:
       - get
       - list
       - watch
-  - apiGroups:
-      - extensions
-    resources:
-      - daemonsets
-      - deployments
-      - replicasets
-    verbs:
-      - list
-      - watch
   - apiGroups:
       - apps
     resources:
       - statefulsets
+      - daemonsets
+      - deployments
+      - replicasets
     verbs:
       - get
       - list

@@ -57,19 +57,13 @@ rules:
       - get
       - list
       - watch
-  - apiGroups:
-      - extensions
-    resources:
-      - daemonsets
-      - deployments
-      - replicasets
-    verbs:
-      - list
-      - watch
   - apiGroups:
       - apps
     resources:
       - statefulsets
+      - daemonsets
+      - deployments
+      - replicasets
     verbs:
       - get
       - list

@@ -64,7 +64,7 @@ examples:
     {{- include "helm-toolkit.manifests.ingress" ( dict "envAll" . "backendServiceType" "key-manager" "backendPort" "b-api" "endpoint" "public" ) -}}
   return: |
     ---
-    apiVersion: extensions/v1beta1
+    apiVersion: networking.k8s.io/v1beta1
     kind: Ingress
     metadata:
       name: barbican

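Note: Ingress under extensions/v1beta1 has been deprecated since Kubernetes 1.14 in favor of networking.k8s.io/v1beta1, so the documented examples move to the newer group. To confirm the target cluster serves it (a generic check, not part of this change):

    kubectl api-resources --api-group=networking.k8s.io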
@@ -96,7 +96,7 @@ examples:
               serviceName: barbican-api
               servicePort: b-api
     ---
-    apiVersion: extensions/v1beta1
+    apiVersion: networking.k8s.io/v1beta1
     kind: Ingress
     metadata:
       name: barbican-namespace-fqdn

@@ -118,7 +118,7 @@ examples:
               serviceName: barbican-api
               servicePort: b-api
     ---
-    apiVersion: extensions/v1beta1
+    apiVersion: networking.k8s.io/v1beta1
     kind: Ingress
     metadata:
       name: barbican-cluster-fqdn

@@ -184,7 +184,7 @@ examples:
     {{- include "helm-toolkit.manifests.ingress" ( dict "envAll" . "backendServiceType" "key-manager" "backendPort" "b-api" "endpoint" "public" ) -}}
   return: |
     ---
-    apiVersion: extensions/v1beta1
+    apiVersion: networking.k8s.io/v1beta1
     kind: Ingress
     metadata:
       name: barbican

@@ -247,7 +247,7 @@ examples:
 {{- $hostName := tuple $backendServiceType $endpoint $envAll | include "helm-toolkit.endpoints.hostname_short_endpoint_lookup" }}
 {{- $hostNameFull := tuple $backendServiceType $endpoint $envAll | include "helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup" }}
 ---
-apiVersion: extensions/v1beta1
+apiVersion: networking.k8s.io/v1beta1
 kind: Ingress
 metadata:
   name: {{ $ingressName }}

@@ -282,7 +282,7 @@ spec:
 {{- range $key2, $ingressController := tuple "namespace" "cluster" }}
 {{- $hostNameFullRules := dict "vHost" $hostNameFull "backendName" $backendName "backendPort" $backendPort }}
 ---
-apiVersion: extensions/v1beta1
+apiVersion: networking.k8s.io/v1beta1
 kind: Ingress
 metadata:
   name: {{ printf "%s-%s-%s" $ingressName $ingressController "fqdn" }}

@@ -60,6 +60,7 @@ rules:
       - watch
   - apiGroups:
       - "extensions"
+      - "networking.k8s.io"
     resources:
       - ingresses
     verbs:

@@ -75,6 +76,7 @@ rules:
       - patch
   - apiGroups:
       - "extensions"
+      - "networking.k8s.io"
     resources:
       - ingresses/status
     verbs:

@@ -21,7 +21,7 @@ limitations under the License.
 {{- $_ := set .Values.network.ingress.annotations "kubernetes.io/ingress.class" .Values.deployment.cluster.class -}}
 {{- end -}}
 ---
-apiVersion: extensions/v1beta1
+apiVersion: networking.k8s.io/v1beta1
 kind: Ingress
 metadata:
   name: {{ .Release.Namespace }}-{{ .Release.Name }}

@@ -38,6 +38,7 @@ rules:
       - watch
   - apiGroups:
       - extensions
+      - networking.k8s.io
     resources:
       - ingresses
     verbs:

@@ -53,6 +54,7 @@ rules:
       - patch
   - apiGroups:
       - extensions
+      - networking.k8s.io
     resources:
       - ingresses/status
     verbs:

@@ -78,7 +78,7 @@ rules:
       - update
       - patch
   - apiGroups:
-      - extensions
+      - policy
     resources:
       - podsecuritypolicies
     resourceNames:

@@ -20,7 +20,7 @@ limitations under the License.
 {{/* Create one ClusterRole and PSP per PSP definition in values */}}
 {{- range $pspName, $pspDetails := .Values.data }}
 ---
-apiVersion: extensions/v1beta1
+apiVersion: policy/v1beta1
 kind: PodSecurityPolicy
 metadata:
   name: {{ $pspName }}

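Note: 1.16 dropped PodSecurityPolicy from extensions/v1beta1 entirely; policy/v1beta1 is the group that still serves it. Rendered policies can be verified with a generic listing (not part of this change):

    kubectl get podsecuritypolicies.policy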
@@ -43,19 +43,13 @@ rules:
     verbs:
       - list
       - watch
-  - apiGroups:
-      - extensions
-    resources:
-      - daemonsets
-      - deployments
-      - replicasets
-    verbs:
-      - list
-      - watch
   - apiGroups:
       - apps
     resources:
       - statefulsets
+      - daemonsets
+      - deployments
+      - replicasets
     verbs:
       - get
       - list

@@ -144,8 +144,14 @@ bootstrap:
 dependencies:
   static:
     bootstrap:
-      daemonset:
-        - docker-registry-proxy
+      pod:
+        # NOTE(srwilkers): As the daemonset dependency is currently broken for
+        # kubernetes 1.16, use the pod dependency and require the same node
+        # instead for the same result
+        - requireSameNode: true
+          labels:
+            application: docker
+            component: registry-proxy
       services:
         - endpoint: internal
           service: docker_registry

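As the in-chart NOTE says, the pod dependency plus requireSameNode reproduces the daemonset dependency's guarantee that a registry-proxy pod runs on the bootstrap pod's node. One way to eyeball that co-location after deploy — the docker-registry namespace here is an assumption, not from the diff:

    kubectl get pods -n docker-registry -o wide \
      -l application=docker,component=registry-proxy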
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 version:
-  kubernetes: v1.13.4
+  kubernetes: v1.16.0
   helm: v2.13.0
   cni: v0.6.0
 

@@ -18,9 +18,9 @@
 set -xe
 
 : ${HELM_VERSION:="v2.14.1"}
-: ${KUBE_VERSION:="v1.13.4"}
-: ${MINIKUBE_VERSION:="v0.30.0"}
-: ${CALICO_VERSION:="v3.3"}
+: ${KUBE_VERSION:="v1.16.2"}
+: ${MINIKUBE_VERSION:="v1.3.1"}
+: ${CALICO_VERSION:="v3.9"}
 
 : "${HTTP_PROXY:=""}"
 : "${HTTPS_PROXY:=""}"

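Note: the `: ${VAR:=default}` idiom only assigns when the variable is unset, so these pins stay overridable per run without editing the script. For example (the script path and version value are illustrative only):

    KUBE_VERSION=v1.16.2 MINIKUBE_VERSION=v1.3.1 ./tools/gate/deploy-k8s.sh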
@@ -33,7 +33,12 @@ function configure_resolvconf {
   # kubelet to resolve cluster services.
   sudo mv /etc/resolv.conf /etc/resolv.conf.backup
 
-  sudo bash -c "echo 'search svc.cluster.local cluster.local' > /etc/resolv.conf"
+  # Create symbolic link to the resolv.conf file managed by systemd-resolved, as
+  # the kubelet.resolv-conf extra-config flag is automatically executed by the
+  # minikube start command, regardless of being passed in here
+  sudo ln -s /run/systemd/resolve/resolv.conf /etc/resolv.conf
+
+  sudo bash -c "echo 'search svc.cluster.local cluster.local' >> /etc/resolv.conf"
   sudo bash -c "echo 'nameserver 10.96.0.10' >> /etc/resolv.conf"
 
   # NOTE(drewwalters96): Use the Google DNS servers to prevent local addresses in

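Note: pointing /etc/resolv.conf at the systemd-resolved stub keeps the file consistent with what minikube hands the kubelet via its resolv-conf extra-config. A quick post-run check (generic, not from this change):

    readlink -f /etc/resolv.conf   # expect /run/systemd/resolve/resolv.conf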
@@ -105,14 +110,14 @@ rm -rf "${TMP_DIR}"
 
 # NOTE: Deploy kubenetes using minikube. A CNI that supports network policy is
 # required for validation; use calico for simplicity.
-sudo -E minikube config set embed-certs true
 sudo -E minikube config set kubernetes-version "${KUBE_VERSION}"
 sudo -E minikube config set vm-driver none
+sudo -E minikube addons disable addon-manager
+sudo -E minikube addons disable dashboard
+sudo -E minikube config set embed-certs true
 
 export CHANGE_MINIKUBE_NONE_USER=true
+export MINIKUBE_IN_STYLE=false
 sudo -E minikube start \
+  --wait=false \
   --docker-env HTTP_PROXY="${HTTP_PROXY}" \
   --docker-env HTTPS_PROXY="${HTTPS_PROXY}" \
   --docker-env NO_PROXY="${NO_PROXY},10.96.0.0/12" \

@@ -120,10 +125,22 @@ sudo -E minikube start \
   --extra-config=controller-manager.allocate-node-cidrs=true \
   --extra-config=controller-manager.cluster-cidr=192.168.0.0/16
 
-kubectl apply -f \
-  https://docs.projectcalico.org/"${CALICO_VERSION}"/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
-kubectl apply -f \
-  https://docs.projectcalico.org/"${CALICO_VERSION}"/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
+# Note(srwilkers): With newer versions of Minikube, explicitly disabling the wait
+# in the start command is required, as this wait checks the nodes status which
+# will block until the CNI is deployed. Instead, we now wait for the etcd pod to
+# be present, as this seems to be the last static manifest pod launched by
+# minikube. This allows us to move forward with applying the CNI
+END=$(($(date +%s) + 240))
+until kubectl --namespace=kube-system \
+  get pods -l component=etcd --no-headers -o name | grep -q "^pod/etcd-minikube"; do
+  NOW=$(date +%s)
+  [ "${NOW}" -gt "${END}" ] && exit 1
+  echo "Waiting for kubernetes etcd"
+  sleep 10
+done
+
+curl https://docs.projectcalico.org/"${CALICO_VERSION}"/manifests/calico.yaml -o /tmp/calico.yaml
+kubectl apply -f /tmp/calico.yaml
 
 # Note: Patch calico daemonset to enable Prometheus metrics and annotations
 tee /tmp/calico-node.yaml << EOF

@@ -144,9 +161,6 @@ spec:
 EOF
 kubectl patch daemonset calico-node -n kube-system --patch "$(cat /tmp/calico-node.yaml)"
 
-# NOTE: Wait for node to be ready.
-kubectl wait --timeout=240s --for=condition=Ready nodes/minikube
-
 # NOTE: Wait for dns to be running.
 END=$(($(date +%s) + 240))
 until kubectl --namespace=kube-system \

@@ -175,26 +189,35 @@ subjects:
     namespace: kube-system
 EOF
 
-helm init --service-account helm-tiller
+# NOTE(srwilkers): Required due to tiller deployment spec using extensions/v1beta1
+# which has been removed in Kubernetes 1.16.0.
+# See: https://github.com/helm/helm/issues/6374
+helm init --service-account helm-tiller --output yaml \
+  | sed 's@apiVersion: extensions/v1beta1@apiVersion: apps/v1@' \
+  | sed 's@  replicas: 1@  replicas: 1\n  selector: {"matchLabels": {"app": "helm", "name": "tiller"}}@' \
+  | kubectl apply -f -
+
+# Patch tiller-deploy service to expose metrics port
+tee /tmp/tiller-deploy.yaml << EOF
+metadata:
+  annotations:
+    prometheus.io/scrape: "true"
+    prometheus.io/port: "44135"
+spec:
+  ports:
+  - name: http
+    port: 44135
+    targetPort: http
+EOF
+kubectl patch service tiller-deploy -n kube-system --patch "$(cat /tmp/tiller-deploy.yaml)"
 
 kubectl --namespace=kube-system wait \
   --timeout=240s \
   --for=condition=Ready \
   pod -l app=helm,name=tiller
 
-# Patch tiller-deploy service to expose metrics port
-tee /tmp/tiller-deploy.yaml << EOF
-metadata:
-  annotations:
-    prometheus.io/scrape: "true"
-    prometheus.io/port: "44135"
-spec:
-  ports:
-  - name: http
-    port: 44135
-    targetPort: http
-EOF
-kubectl patch service tiller-deploy -n kube-system --patch "$(cat /tmp/tiller-deploy.yaml)"
-
 helm init --client-only
 
 # Set up local helm server
 sudo -E tee /etc/systemd/system/helm-serve.service << EOF

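Note: the second sed matters because apps/v1 Deployments require an explicit spec.selector, which tiller's extensions/v1beta1 manifest never set. Whether the rewrite landed can be checked after init (a generic verification, not part of the script):

    kubectl -n kube-system get deployment tiller-deploy -o yaml | grep -B1 -A3 matchLabels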
@@ -53,6 +53,6 @@ helm upgrade --install ceph-openstack-config ./ceph-provisioners \
 helm test ceph-openstack-config --timeout 600
 
 #NOTE: Validate Deployment info
-kubectl get -n openstack jobs --show-all
+kubectl get -n openstack jobs
 kubectl get -n openstack secrets
 kubectl get -n openstack configmaps

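Note: newer kubectl clients removed the --show-all flag (completed objects are now listed by default), so the trimmed command is the 1.16-compatible equivalent:

    kubectl get -n openstack jobs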
@@ -53,6 +53,6 @@ helm upgrade --install ceph-osh-infra-config ./ceph-provisioners \
 helm test ceph-osh-infra-config --timeout 600
 
 #NOTE: Validate Deployment info
-kubectl get -n osh-infra jobs --show-all
+kubectl get -n osh-infra jobs
 kubectl get -n osh-infra secrets
 kubectl get -n osh-infra configmaps

@@ -34,7 +34,7 @@ ENV GOOGLE_KUBERNETES_REPO_URL ${GOOGLE_KUBERNETES_REPO_URL}
 ARG GOOGLE_HELM_REPO_URL=https://storage.googleapis.com/kubernetes-helm
 ENV GOOGLE_HELM_REPO_URL ${GOOGLE_HELM_REPO_URL}
 
-ARG KUBE_VERSION="v1.13.4"
+ARG KUBE_VERSION="v1.16.2"
 ENV KUBE_VERSION ${KUBE_VERSION}
 
 ARG CNI_VERSION="v0.6.0"

@@ -1,5 +1,5 @@
 #jinja2: trim_blocks:False
-apiVersion: kubeadm.k8s.io/v1beta1
+apiVersion: kubeadm.k8s.io/v1beta2
 kind: ClusterConfiguration
 kubernetesVersion: {{ k8s.kubernetesVersion }}
 imageRepository: {{ k8s.imageRepository }}

@@ -7,10 +7,6 @@ networking:
   dnsDomain: {{ k8s.networking.dnsDomain }}
   podSubnet: {{ k8s.networking.podSubnet }}
   serviceSubnet: {{ k8s.networking.serviceSubnet }}
-apiServer:
-  extraArgs:
-    service-node-port-range: "1024-65535"
-    feature-gates: "MountPropagation=true,PodShareProcessNamespace=true"
 controllerManager:
   extraArgs:
     address: "0.0.0.0"

@@ -23,7 +19,7 @@ scheduler:
     feature-gates: "PodShareProcessNamespace=true"
 certificatesDir: {{ k8s.certificatesDir }}
 ---
-apiVersion: kubeadm.k8s.io/v1beta1
+apiVersion: kubeadm.k8s.io/v1beta2
 localAPIEndpoint:
   advertiseAddress: {% if k8s.api.advertiseAddress is defined %}{{ k8s.api.advertiseAddress }}{% else %}{% if k8s.api.advertiseAddressDevice is defined %}{{ hostvars[inventory_hostname]['ansible_'+k8s.api.advertiseAddressDevice].ipv4.address }}{% else %}{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}{% endif %}{% endif %}
   bindPort: {{ k8s.api.bindPort }}

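Note: kubeadm can mechanically rewrite configs between schema versions, which is a handy cross-check that a hand-edited v1beta2 file matches what the tool expects (file names here are hypothetical):

    kubeadm config migrate --old-config kubeadm-v1beta1.yaml --new-config kubeadm-v1beta2.yaml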
@@ -1,13 +1,13 @@
 [Service]
 User=root
 Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
-Environment="KUBELET_SYSTEM_PODS_ARGS=--pod-manifest-path=/etc/kubernetes/manifests --allow-privileged=true --cgroup-driver={{ kubelet_cgroup_driver }}"
+Environment="KUBELET_SYSTEM_PODS_ARGS=--pod-manifest-path=/etc/kubernetes/manifests --cgroup-driver={{ kubelet_cgroup_driver }}"
 Environment="KUBELET_NETWORK_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin --node-ip={% if kubelet.bind_addr is defined %}{{ kubelet.bind_addr }}{% else %}{% if kubelet.bind_device is defined %}{{ hostvars[inventory_hostname]['ansible_'+kubelet.bind_device].ipv4.address }}{% else %}{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}{% endif %}{% endif %} --hostname-override={{ kubelet_node_hostname }}"
 Environment="KUBELET_DNS_ARGS=--cluster-dns=10.96.0.10 --cluster-domain={{ k8s.networking.dnsDomain }} --resolv-conf=/etc/kubernetes/kubelet-resolv.conf"
 Environment="KUBELET_AUTHZ_ARGS=--anonymous-auth=false --authorization-mode=Webhook --client-ca-file=/etc/kubernetes/pki/ca.crt"
 Environment="KUBELET_CERTIFICATE_ARGS=--rotate-certificates=true --cert-dir=/var/lib/kubelet/pki"
 Environment="KUBELET_NODE_LABELS=--node-labels {{ kubelet.kubelet_labels }}"
-Environment="KUBELET_EXTRA_ARGS=--max-pods=220 --pods-per-core=0 --feature-gates=MountPropagation=true --feature-gates=PodShareProcessNamespace=true"
+Environment="KUBELET_EXTRA_ARGS=--max-pods=220 --pods-per-core=0 --feature-gates=PodShareProcessNamespace=true"
 #ExecStartPre=-+/sbin/restorecon -v /usr/bin/kubelet #SELinux
 ExecStart=
 ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_SYSTEM_PODS_ARGS $KUBELET_NETWORK_ARGS $KUBELET_DNS_ARGS $KUBELET_AUTHZ_ARGS $KUBELET_CERTIFICATE_ARGS $KUBELET_NODE_LABELS $KUBELET_EXTRA_ARGS

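Note: the kubelet in this release line no longer accepts --allow-privileged, and the MountPropagation feature gate was removed after the feature went GA, so leaving either in the drop-in would stop the service from starting. After editing the unit, the usual reload cycle applies:

    sudo systemctl daemon-reload
    sudo systemctl restart kubelet
    systemctl is-active kubelet   # expect "active"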
@@ -36,7 +36,7 @@ all:
     helm:
       tiller_image: gcr.io/kubernetes-helm/tiller:v2.7.0
     k8s:
-      kubernetesVersion: v1.13.4
+      kubernetesVersion: v1.16.2
       imageRepository: gcr.io/google_containers
       certificatesDir: /etc/kubernetes/pki
       selfHosted: false