armada/charts/tiller/templates/deployment-tiller.yaml
Phil Sphicas a3f11e5873 Tiller: listen on localhost by default
This change introduces a configuration option to control whether Tiller
listens on all IP addresses (the previous default) or binds only to
127.0.0.1 (the new default).

The same option is used for both the Armada and Tiller charts:
    .conf.tiller.listen_on_any (default: false)

The affected Tiller command-line argument is:
    -listen 127.0.0.1:port (if false)
    -listen :port (if true)
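
For example, with the default of false, the container entrypoint renders
roughly as follows (port and verbosity values shown are illustrative):

    /tiller -listen 127.0.0.1:24134 -probe-listen :24135 -logtostderr -v 5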

Listening on all addresses allows the Helm client direct access to
Tiller, via 'helm --host pod_ip:port'.

Listening on localhost prevents direct connections to the pod IP, but it
does not preclude using 'kubectl port-forward' to establish a connection
to Tiller.
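
For example, a connection can still be established by forwarding a local
port to the Tiller pod (names and ports below are placeholders):

    kubectl port-forward -n namespace tiller_pod local_port:tiller_port
    helm --host 127.0.0.1:local_port version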

The Tiller container in the Armada pod exists only to service Armada via
127.0.0.1. The Helm client automatically sets up port forwarding (if it
has access to the Kubernetes API). As a result, this change should have
no impact. However, the previous behavior can be restored by setting
.conf.tiller.listen_on_any=true.
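
As a values override, that is:

    conf:
      tiller:
        listen_on_any: true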

Change-Id: Id308976bac21cc521e8470516ce49ebd1942da68
2021-04-22 20:29:02 +00:00

{{/*
Copyright 2017 AT&T Intellectual Property. All other rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
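{{/* The probe templates below are consumed by the helm-toolkit
"kubernetes_probe" snippet when rendering the tiller container's
readiness and liveness probes. */}}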
{{- define "tillerReadinessProbeTemplate" }}
httpGet:
  scheme: HTTP
  path: /readiness
  port: {{ .Values.conf.tiller.probe_port }}
{{- end }}
{{- define "tillerLivenessProbeTemplate" }}
httpGet:
  scheme: HTTP
  path: /liveness
  port: {{ .Values.conf.tiller.probe_port }}
{{- end }}
{{- if .Values.manifests.deployment_tiller }}
{{- $envAll := . }}
{{- $serviceAccountName := "tiller-deploy" }}
{{- $mounts_tiller := .Values.pod.mounts.tiller.tiller }}
{{ tuple $envAll "tiller_deploy" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
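# Grant the Tiller service account cluster-admin so that Tiller can
# manage releases in any namespace.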
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: run-tiller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: {{ $serviceAccountName }}
    namespace: {{ .Release.Namespace }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: helm
    name: tiller
  name: tiller-deploy
  annotations:
{{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 4 }}
spec:
  replicas: 1
  selector:
    matchLabels:
      app: helm
      name: tiller
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: helm
        name: tiller
{{ tuple $envAll "tiller" "deploy" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
      annotations:
{{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
{{ dict "envAll" $envAll "podName" "tiller" "containerNames" (list "tiller") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
    spec:
{{ dict "envAll" $envAll "application" "tiller" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
      serviceAccountName: {{ $serviceAccountName }}
      nodeSelector:
        {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
      containers:
        - name: tiller
{{ tuple $envAll "tiller" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.tiller | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
{{ dict "envAll" $envAll "application" "tiller" "container" "tiller" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
          env:
            - name: TILLER_NAMESPACE
              value: {{ .Values.Name }}
            - name: TILLER_HISTORY_MAX
              value: {{ .Values.deployment.tiller_history | quote }}
          volumeMounts:
{{ toYaml $mounts_tiller.volumeMounts | indent 12 }}
          command:
            - /tiller
{{- if .Values.conf.tiller.storage }}
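            # Optional storage backend override; when --storage is not
            # set, Tiller defaults to ConfigMap-based release storage.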
            - --storage={{ .Values.conf.tiller.storage }}
{{- if and (eq .Values.conf.tiller.storage "sql") (.Values.conf.tiller.sql_dialect) (.Values.conf.tiller.sql_connection) }}
            - --sql-dialect={{ .Values.conf.tiller.sql_dialect }}
            - --sql-connection-string={{ .Values.conf.tiller.sql_connection }}
{{- end }}
{{- end }}
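            # Bind the gRPC endpoint to localhost only, unless
            # conf.tiller.listen_on_any is set. The probe endpoint below
            # always binds to all interfaces so kubelet can reach it.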
            - -listen
            - "{{ if not .Values.conf.tiller.listen_on_any }}127.0.0.1{{ end }}:{{ .Values.conf.tiller.port }}"
            - -probe-listen
            - ":{{ .Values.conf.tiller.probe_port }}"
            - -logtostderr
            - -v
            - {{ .Values.conf.tiller.verbosity | quote }}
{{- if .Values.conf.tiller.trace }}
            - -trace
{{- end }}
          lifecycle:
            preStop:
              exec:
                command:
                  # Delay tiller termination so that it has a chance to finish
                  # deploying releases including marking them with
                  # DEPLOYED/FAILED status, otherwise they can get stuck in
                  # PENDING_*** status.
                  - sleep
                  - "{{ .Values.conf.tiller.prestop_sleep }}"
          ports:
            - name: tiller
              containerPort: {{ .Values.conf.tiller.port }}
              protocol: TCP
{{ dict "envAll" $envAll "component" "tiller" "container" "tiller" "type" "readiness" "probeTemplate" (include "tillerReadinessProbeTemplate" $envAll | fromYaml) | include "helm-toolkit.snippets.kubernetes_probe" | trim | indent 10 }}
{{ dict "envAll" $envAll "component" "tiller" "container" "tiller" "type" "liveness" "probeTemplate" (include "tillerLivenessProbeTemplate" $envAll | fromYaml) | include "helm-toolkit.snippets.kubernetes_probe" | trim | indent 10 }}
      volumes:
{{ toYaml $mounts_tiller.volumes | indent 8 }}
status: {}
{{- end }}