This patch allows deploying monitoring in k8s 1.23
Since v1.20 (with changes gradually introduced in subsequent versions), k8s components expose their metrics on 127.0.0.1 by default and have changed the ports on which metrics are exposed. Change-Id: I1335c407c57ac857ebc34c6dea41157cc42650ee
This commit is contained in:
parent
7b257e94b1
commit
06d47778e1
|
@ -269,7 +269,7 @@ CERT_DIR=/etc/kubernetes/certs
|
|||
|
||||
# kube-proxy config
|
||||
PROXY_KUBECONFIG=/etc/kubernetes/proxy-kubeconfig.yaml
|
||||
KUBE_PROXY_ARGS="--kubeconfig=${PROXY_KUBECONFIG} --cluster-cidr=${PODS_NETWORK_CIDR} --hostname-override=${INSTANCE_NAME}"
|
||||
KUBE_PROXY_ARGS="--kubeconfig=${PROXY_KUBECONFIG} --cluster-cidr=${PODS_NETWORK_CIDR} --hostname-override=${INSTANCE_NAME} --metrics-bind-address=0.0.0.0"
|
||||
cat > /etc/kubernetes/proxy << EOF
|
||||
KUBE_PROXY_ARGS="${KUBE_PROXY_ARGS} ${KUBEPROXY_OPTIONS}"
|
||||
EOF
|
||||
|
@ -404,6 +404,8 @@ KUBE_CONTROLLER_MANAGER_ARGS="--leader-elect=true --kubeconfig=/etc/kubernetes/a
|
|||
KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS --cluster-name=${CLUSTER_UUID}"
|
||||
KUBE_CONTROLLER_MANAGER_ARGS="${KUBE_CONTROLLER_MANAGER_ARGS} --allocate-node-cidrs=true"
|
||||
KUBE_CONTROLLER_MANAGER_ARGS="${KUBE_CONTROLLER_MANAGER_ARGS} --cluster-cidr=${PODS_NETWORK_CIDR}"
|
||||
KUBE_CONTROLLER_MANAGER_ARGS="${KUBE_CONTROLLER_MANAGER_ARGS} --secure-port=10257"
|
||||
KUBE_CONTROLLER_MANAGER_ARGS="${KUBE_CONTROLLER_MANAGER_ARGS} --authorization-always-allow-paths=/healthz,/readyz,/livez,/metrics"
|
||||
KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS $KUBECONTROLLER_OPTIONS"
|
||||
if [ -n "${ADMISSION_CONTROL_LIST}" ] && [ "${TLS_DISABLED}" == "False" ]; then
|
||||
KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS --service-account-private-key-file=$CERT_DIR/service_account_private.key --root-ca-file=$CERT_DIR/ca.crt"
|
||||
|
@ -426,7 +428,7 @@ sed -i '
|
|||
/^KUBE_CONTROLLER_MANAGER_ARGS=/ s#\(KUBE_CONTROLLER_MANAGER_ARGS\).*#\1="'"${KUBE_CONTROLLER_MANAGER_ARGS}"'"#
|
||||
' /etc/kubernetes/controller-manager
|
||||
|
||||
sed -i '/^KUBE_SCHEDULER_ARGS=/ s#=.*#="--leader-elect=true --kubeconfig=/etc/kubernetes/admin.conf"#' /etc/kubernetes/scheduler
|
||||
sed -i '/^KUBE_SCHEDULER_ARGS=/ s#=.*#="--leader-elect=true --kubeconfig=/etc/kubernetes/admin.conf --authorization-always-allow-paths=/healthz,/readyz,/livez,/metrics "#' /etc/kubernetes/scheduler
|
||||
|
||||
$ssh_cmd mkdir -p /etc/kubernetes/manifests
|
||||
KUBELET_ARGS="--register-node=true --pod-manifest-path=/etc/kubernetes/manifests --hostname-override=${INSTANCE_NAME}"
|
||||
|
|
|
@ -180,18 +180,11 @@ ${APP_INGRESS_ANNOTATIONS}
|
|||
endpoints: ${KUBE_MASTERS_PRIVATE}
|
||||
## If using kubeControllerManager.endpoints only the port and targetPort are used
|
||||
service:
|
||||
port: 10252
|
||||
targetPort: 10252
|
||||
port: 10257
|
||||
targetPort: 10257
|
||||
# selector:
|
||||
# component: kube-controller-manager
|
||||
serviceMonitor:
|
||||
## Enable scraping kube-controller-manager over https.
|
||||
## Requires proper certs (not self-signed) and delegated authentication/authorization checks
|
||||
https: "True"
|
||||
# Skip TLS certificate validation when scraping
|
||||
insecureSkipVerify: "True"
|
||||
# Name of the server to use when validating TLS certificate
|
||||
serverName: null
|
||||
|
||||
|
||||
coreDns:
|
||||
enabled: true
|
||||
|
@ -224,28 +217,19 @@ ${APP_INGRESS_ANNOTATIONS}
|
|||
endpoints: ${KUBE_MASTERS_PRIVATE}
|
||||
## If using kubeScheduler.endpoints only the port and targetPort are used
|
||||
service:
|
||||
port: 10251
|
||||
targetPort: 10251
|
||||
port: 10259
|
||||
targetPort: 10259
|
||||
# selector:
|
||||
# component: kube-scheduler
|
||||
serviceMonitor:
|
||||
## Enable scraping kube-scheduler over https.
|
||||
## Requires proper certs (not self-signed) and delegated authentication/authorization checks
|
||||
https: "True"
|
||||
## Skip TLS certificate validation when scraping
|
||||
insecureSkipVerify: "True"
|
||||
## Name of the server to use when validating TLS certificate
|
||||
serverName: null
|
||||
scheme: https
|
||||
insecureSkipVerify: true
|
||||
|
||||
|
||||
kubeProxy:
|
||||
## If your kube proxy is not deployed as a pod, specify IPs it can be found on
|
||||
endpoints: ${KUBE_MASTERS_PRIVATE} # masters + minions
|
||||
serviceMonitor:
|
||||
## Enable scraping kube-proxy over https.
|
||||
## Requires proper certs (not self-signed) and delegated authentication/authorization checks
|
||||
https: "True"
|
||||
## Skip TLS certificate validation when scraping
|
||||
insecureSkipVerify: "True"
|
||||
|
||||
|
||||
kube-state-metrics:
|
||||
priorityClassName: "system-cluster-critical"
|
||||
|
@ -272,7 +256,7 @@ ${APP_INGRESS_ANNOTATIONS}
|
|||
requests:
|
||||
cpu: 2m
|
||||
limits:
|
||||
memory: 30M
|
||||
memory: 64M
|
||||
# clusterDomain: ${CLUSTER_ROOT_DOMAIN_NAME}
|
||||
priorityClassName: "system-cluster-critical"
|
||||
logFormat: json
|
||||
|
@ -281,7 +265,7 @@ ${APP_INGRESS_ANNOTATIONS}
|
|||
requests:
|
||||
cpu: 2m
|
||||
limits:
|
||||
memory: 32M
|
||||
memory: 64M
|
||||
image:
|
||||
repository: ${CONTAINER_INFRA_PREFIX:-quay.io/prometheus-operator/}prometheus-operator
|
||||
prometheusDefaultBaseImage: ${CONTAINER_INFRA_PREFIX:-quay.io/prometheus/}prometheus
|
||||
|
|
|
@ -1086,6 +1086,18 @@ resources:
|
|||
- protocol: tcp
|
||||
port_range_min: 9090
|
||||
port_range_max: 9090
|
||||
- protocol: tcp
|
||||
port_range_min: 10259
|
||||
port_range_max: 10259
|
||||
- protocol: tcp
|
||||
port_range_min: 10257
|
||||
port_range_max: 10257
|
||||
- protocol: tcp
|
||||
port_range_min: 10249
|
||||
port_range_max: 10249
|
||||
- protocol: tcp
|
||||
port_range_min: 9153
|
||||
port_range_max: 9153
|
||||
|
||||
secgroup_kube_minion:
|
||||
condition: create_cluster_resources
|
||||
|
|
Loading…
Reference in New Issue