openstack-helm/nova/templates/deployment-scheduler.yaml
Oleksii Grudev d467d685a3 Fix health probe for several conductor workers
It was observed that when the number of conductor workers is
increased from the default of "1" to a higher value, the readiness
probe fails to check RabbitMQ connections for the conductor
processes. This happens because the script tries to obtain RabbitMQ
connections for the parent conductor process, which with workers>1
does not open RabbitMQ connections itself but instead spawns child
processes that handle them.

This patch removes the "check-all-pids" option, keeps the logic, but
simplifies and speeds up the code: instead of checking all processes
when "check-all-pids" was set (which, regardless of the "sock_count"
value, returned a positive result as soon as any single process had
an open connection), processes are now checked one by one until the
first one with open RabbitMQ connection(s) is found.

Change-Id: I72be0bbdefcba77a55b6ceed6e192c9621c069eb
2020-01-31 10:43:06 +00:00
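
For reference, the behaviour described in the commit message boils down
to walking the parent service process and its spawned workers and
succeeding at the first one that holds an open RabbitMQ connection. A
minimal Python sketch of that idea (not the chart's actual
health-probe.py; psutil, the function name and the rabbit_addrs
parameter are illustrative assumptions):

# Sketch only: check the parent process and each worker child, stopping
# at the first process that owns a connection to one of the RabbitMQ
# (host, port) endpoints parsed from transport_url.
import psutil

def any_worker_has_rabbit_connection(parent_pid, rabbit_addrs):
    parent = psutil.Process(parent_pid)
    for proc in [parent] + parent.children(recursive=True):
        try:
            conns = proc.connections(kind="inet")
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            continue
        # Succeed on the first process with at least one open connection
        # to a known RabbitMQ endpoint.
        if any(c.raddr and (c.raddr.ip, c.raddr.port) in rabbit_addrs
               for c in conns):
            return True
    return False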


{{/*
Copyright 2017 The Openstack-Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
{{- if .Values.manifests.deployment_scheduler }}
{{- $envAll := . }}
{{- $mounts_nova_scheduler := .Values.pod.mounts.nova_scheduler.nova_scheduler }}
{{- $mounts_nova_scheduler_init := .Values.pod.mounts.nova_scheduler.init_container }}
{{- $serviceAccountName := "nova-scheduler" }}
{{ tuple $envAll "scheduler" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nova-scheduler
  annotations:
    {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
  labels:
{{ tuple $envAll "nova" "scheduler" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
spec:
  replicas: {{ .Values.pod.replicas.scheduler }}
  selector:
    matchLabels:
{{ tuple $envAll "nova" "scheduler" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 6 }}
{{ tuple $envAll | include "helm-toolkit.snippets.kubernetes_upgrades_deployment" | indent 2 }}
  template:
    metadata:
      labels:
{{ tuple $envAll "nova" "scheduler" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
      annotations:
{{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
        configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
        configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
    spec:
      serviceAccountName: {{ $serviceAccountName }}
{{ dict "envAll" $envAll "application" "nova" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
      affinity:
{{ tuple $envAll "nova" "scheduler" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
      nodeSelector:
        {{ .Values.labels.scheduler.node_selector_key }}: {{ .Values.labels.scheduler.node_selector_value }}
      initContainers:
{{ tuple $envAll "scheduler" $mounts_nova_scheduler_init | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
      containers:
        - name: nova-scheduler
{{ tuple $envAll "nova_scheduler" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.scheduler | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
{{ dict "envAll" $envAll "application" "nova" "container" "nova_scheduler" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
          readinessProbe:
            exec:
              command:
                - python
                - /tmp/health-probe.py
                - --config-file
                - /etc/nova/nova.conf
                - --service-queue-name
                - scheduler
            initialDelaySeconds: 80
            periodSeconds: 90
            timeoutSeconds: 70
          livenessProbe:
            exec:
              command:
                - python
                - /tmp/health-probe.py
                - --config-file
                - /etc/nova/nova.conf
                - --service-queue-name
                - scheduler
                - --liveness-probe
            initialDelaySeconds: 120
            periodSeconds: 90
            timeoutSeconds: 70
          command:
            - /tmp/nova-scheduler.sh
          volumeMounts:
            - name: pod-tmp
              mountPath: /tmp
            - name: nova-bin
              mountPath: /tmp/nova-scheduler.sh
              subPath: nova-scheduler.sh
              readOnly: true
            - name: nova-bin
              mountPath: /tmp/health-probe.py
              subPath: health-probe.py
              readOnly: true
            - name: nova-etc
              mountPath: /etc/nova/nova.conf
              subPath: nova.conf
              readOnly: true
            {{- if .Values.conf.nova.DEFAULT.log_config_append }}
            - name: nova-etc
              mountPath: {{ .Values.conf.nova.DEFAULT.log_config_append }}
              subPath: {{ base .Values.conf.nova.DEFAULT.log_config_append }}
              readOnly: true
            {{- end }}
            - name: nova-etc
              mountPath: /etc/nova/policy.yaml
              subPath: policy.yaml
              readOnly: true
{{ if $mounts_nova_scheduler.volumeMounts }}{{ toYaml $mounts_nova_scheduler.volumeMounts | indent 12 }}{{ end }}
      volumes:
        - name: pod-tmp
          emptyDir: {}
        - name: nova-bin
          configMap:
            name: nova-bin
            defaultMode: 0555
        - name: nova-etc
          secret:
            secretName: nova-etc
            defaultMode: 0444
{{ if $mounts_nova_scheduler.volumes }}{{ toYaml $mounts_nova_scheduler.volumes | indent 8 }}{{ end }}
{{- end }}