This patch allows deploying monitoring in Kubernetes 1.23

Since v1.20 (with changes introduced gradually in subsequent versions),
Kubernetes components expose metrics on 127.0.0.1 by default
and have changed the ports on which metrics are exposed.

Change-Id: I1335c407c57ac857ebc34c6dea41157cc42650ee
This commit is contained in:
root 2022-05-05 11:22:09 +00:00
parent 7b257e94b1
commit 06d47778e1
3 changed files with 27 additions and 29 deletions

View File

@ -269,7 +269,7 @@ CERT_DIR=/etc/kubernetes/certs
# kube-proxy config # kube-proxy config
PROXY_KUBECONFIG=/etc/kubernetes/proxy-kubeconfig.yaml PROXY_KUBECONFIG=/etc/kubernetes/proxy-kubeconfig.yaml
KUBE_PROXY_ARGS="--kubeconfig=${PROXY_KUBECONFIG} --cluster-cidr=${PODS_NETWORK_CIDR} --hostname-override=${INSTANCE_NAME}" KUBE_PROXY_ARGS="--kubeconfig=${PROXY_KUBECONFIG} --cluster-cidr=${PODS_NETWORK_CIDR} --hostname-override=${INSTANCE_NAME} --metrics-bind-address=0.0.0.0"
cat > /etc/kubernetes/proxy << EOF cat > /etc/kubernetes/proxy << EOF
KUBE_PROXY_ARGS="${KUBE_PROXY_ARGS} ${KUBEPROXY_OPTIONS}" KUBE_PROXY_ARGS="${KUBE_PROXY_ARGS} ${KUBEPROXY_OPTIONS}"
EOF EOF
@ -404,6 +404,8 @@ KUBE_CONTROLLER_MANAGER_ARGS="--leader-elect=true --kubeconfig=/etc/kubernetes/a
KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS --cluster-name=${CLUSTER_UUID}" KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS --cluster-name=${CLUSTER_UUID}"
KUBE_CONTROLLER_MANAGER_ARGS="${KUBE_CONTROLLER_MANAGER_ARGS} --allocate-node-cidrs=true" KUBE_CONTROLLER_MANAGER_ARGS="${KUBE_CONTROLLER_MANAGER_ARGS} --allocate-node-cidrs=true"
KUBE_CONTROLLER_MANAGER_ARGS="${KUBE_CONTROLLER_MANAGER_ARGS} --cluster-cidr=${PODS_NETWORK_CIDR}" KUBE_CONTROLLER_MANAGER_ARGS="${KUBE_CONTROLLER_MANAGER_ARGS} --cluster-cidr=${PODS_NETWORK_CIDR}"
KUBE_CONTROLLER_MANAGER_ARGS="${KUBE_CONTROLLER_MANAGER_ARGS} --secure-port=10257"
KUBE_CONTROLLER_MANAGER_ARGS="${KUBE_CONTROLLER_MANAGER_ARGS} --authorization-always-allow-paths=/healthz,/readyz,/livez,/metrics"
KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS $KUBECONTROLLER_OPTIONS" KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS $KUBECONTROLLER_OPTIONS"
if [ -n "${ADMISSION_CONTROL_LIST}" ] && [ "${TLS_DISABLED}" == "False" ]; then if [ -n "${ADMISSION_CONTROL_LIST}" ] && [ "${TLS_DISABLED}" == "False" ]; then
KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS --service-account-private-key-file=$CERT_DIR/service_account_private.key --root-ca-file=$CERT_DIR/ca.crt" KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS --service-account-private-key-file=$CERT_DIR/service_account_private.key --root-ca-file=$CERT_DIR/ca.crt"
@ -426,7 +428,7 @@ sed -i '
/^KUBE_CONTROLLER_MANAGER_ARGS=/ s#\(KUBE_CONTROLLER_MANAGER_ARGS\).*#\1="'"${KUBE_CONTROLLER_MANAGER_ARGS}"'"# /^KUBE_CONTROLLER_MANAGER_ARGS=/ s#\(KUBE_CONTROLLER_MANAGER_ARGS\).*#\1="'"${KUBE_CONTROLLER_MANAGER_ARGS}"'"#
' /etc/kubernetes/controller-manager ' /etc/kubernetes/controller-manager
sed -i '/^KUBE_SCHEDULER_ARGS=/ s#=.*#="--leader-elect=true --kubeconfig=/etc/kubernetes/admin.conf"#' /etc/kubernetes/scheduler sed -i '/^KUBE_SCHEDULER_ARGS=/ s#=.*#="--leader-elect=true --kubeconfig=/etc/kubernetes/admin.conf --authorization-always-allow-paths=/healthz,/readyz,/livez,/metrics "#' /etc/kubernetes/scheduler
$ssh_cmd mkdir -p /etc/kubernetes/manifests $ssh_cmd mkdir -p /etc/kubernetes/manifests
KUBELET_ARGS="--register-node=true --pod-manifest-path=/etc/kubernetes/manifests --hostname-override=${INSTANCE_NAME}" KUBELET_ARGS="--register-node=true --pod-manifest-path=/etc/kubernetes/manifests --hostname-override=${INSTANCE_NAME}"

View File

@ -180,18 +180,11 @@ ${APP_INGRESS_ANNOTATIONS}
endpoints: ${KUBE_MASTERS_PRIVATE} endpoints: ${KUBE_MASTERS_PRIVATE}
## If using kubeControllerManager.endpoints only the port and targetPort are used ## If using kubeControllerManager.endpoints only the port and targetPort are used
service: service:
port: 10252 port: 10257
targetPort: 10252 targetPort: 10257
# selector: # selector:
# component: kube-controller-manager # component: kube-controller-manager
serviceMonitor:
## Enable scraping kube-controller-manager over https.
## Requires proper certs (not self-signed) and delegated authentication/authorization checks
https: "True"
# Skip TLS certificate validation when scraping
insecureSkipVerify: "True"
# Name of the server to use when validating TLS certificate
serverName: null
coreDns: coreDns:
enabled: true enabled: true
@ -224,28 +217,19 @@ ${APP_INGRESS_ANNOTATIONS}
endpoints: ${KUBE_MASTERS_PRIVATE} endpoints: ${KUBE_MASTERS_PRIVATE}
## If using kubeScheduler.endpoints only the port and targetPort are used ## If using kubeScheduler.endpoints only the port and targetPort are used
service: service:
port: 10251 port: 10259
targetPort: 10251 targetPort: 10259
# selector: # selector:
# component: kube-scheduler # component: kube-scheduler
serviceMonitor: serviceMonitor:
## Enable scraping kube-scheduler over https. scheme: https
## Requires proper certs (not self-signed) and delegated authentication/authorization checks insecureSkipVerify: true
https: "True"
## Skip TLS certificate validation when scraping
insecureSkipVerify: "True"
## Name of the server to use when validating TLS certificate
serverName: null
kubeProxy: kubeProxy:
## If your kube proxy is not deployed as a pod, specify IPs it can be found on ## If your kube proxy is not deployed as a pod, specify IPs it can be found on
endpoints: ${KUBE_MASTERS_PRIVATE} # masters + minions endpoints: ${KUBE_MASTERS_PRIVATE} # masters + minions
serviceMonitor:
## Enable scraping kube-proxy over https.
## Requires proper certs (not self-signed) and delegated authentication/authorization checks
https: "True"
## Skip TLS certificate validation when scraping
insecureSkipVerify: "True"
kube-state-metrics: kube-state-metrics:
priorityClassName: "system-cluster-critical" priorityClassName: "system-cluster-critical"
@ -272,7 +256,7 @@ ${APP_INGRESS_ANNOTATIONS}
requests: requests:
cpu: 2m cpu: 2m
limits: limits:
memory: 30M memory: 64M
# clusterDomain: ${CLUSTER_ROOT_DOMAIN_NAME} # clusterDomain: ${CLUSTER_ROOT_DOMAIN_NAME}
priorityClassName: "system-cluster-critical" priorityClassName: "system-cluster-critical"
logFormat: json logFormat: json
@ -281,7 +265,7 @@ ${APP_INGRESS_ANNOTATIONS}
requests: requests:
cpu: 2m cpu: 2m
limits: limits:
memory: 32M memory: 64M
image: image:
repository: ${CONTAINER_INFRA_PREFIX:-quay.io/prometheus-operator/}prometheus-operator repository: ${CONTAINER_INFRA_PREFIX:-quay.io/prometheus-operator/}prometheus-operator
prometheusDefaultBaseImage: ${CONTAINER_INFRA_PREFIX:-quay.io/prometheus/}prometheus prometheusDefaultBaseImage: ${CONTAINER_INFRA_PREFIX:-quay.io/prometheus/}prometheus

View File

@ -1086,6 +1086,18 @@ resources:
- protocol: tcp - protocol: tcp
port_range_min: 9090 port_range_min: 9090
port_range_max: 9090 port_range_max: 9090
- protocol: tcp
port_range_min: 10259
port_range_max: 10259
- protocol: tcp
port_range_min: 10257
port_range_max: 10257
- protocol: tcp
port_range_min: 10249
port_range_max: 10249
- protocol: tcp
port_range_min: 9153
port_range_max: 9153
secgroup_kube_minion: secgroup_kube_minion:
condition: create_cluster_resources condition: create_cluster_resources