armada/charts/armada/values.yaml
Phil Sphicas a3f11e5873 Tiller: listen on localhost by default
This change introduces a configuration option to control whether Tiller
listens on any IP address (the previous default), or binds only to
127.0.0.1 (the new default).

The same option is used for both the Armada and Tiller charts:
    .conf.tiller.listen_on_any (default: false)

The affected tiller command line argument is:
    -listen 127.0.0.1:port (if false)
    -listen :port (if true)

Listening on any address allows the Helm client direct access to Tiller
via 'helm --host pod_ip:port'.

Listening on localhost prevents direct connections to the pod IP, but it
does not preclude using 'kubectl port-forward' to establish a connection
to Tiller.

The Tiller container in the Armada pod exists only to serve Armada via
127.0.0.1, and the Helm client automatically sets up port forwarding
when it has access to the Kubernetes API, so this change should have no
impact. If needed, the previous behavior can be restored by setting
.conf.tiller.listen_on_any=true.
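
For example, via a minimal values override (keys as defined in this
chart):

    conf:
      tiller:
        listen_on_any: true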

Change-Id: Id308976bac21cc521e8470516ce49ebd1942da68
2021-04-22 20:29:02 +00:00

# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file provides defaults for armada
release_group: null
labels:
  job:
    node_selector_key: ucp-control-plane
    node_selector_value: enabled
  test:
    node_selector_key: ucp-control-plane
    node_selector_value: enabled
  node_selector_key: ucp-control-plane
  node_selector_value: enabled
images:
  tags:
    api: 'quay.io/airshipit/armada:latest'
    dep_check: 'quay.io/stackanetes/kubernetes-entrypoint:v0.3.1'
    ks_endpoints: 'docker.io/openstackhelm/heat:newton'
    ks_service: 'docker.io/openstackhelm/heat:newton'
    ks_user: 'docker.io/openstackhelm/heat:newton'
    image_repo_sync: docker.io/docker:17.07.0
    tiller: gcr.io/kubernetes-helm/tiller:v2.16.9
  pull_policy: "IfNotPresent"
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync
      - tiller
network:
  api:
    ingress:
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
      classes:
        cluster: nginx-cluster
        namespace: nginx
      public: true
    node_port:
      enabled: false
      port: 31903
dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
          - armada-image-repo-sync
        services:
          - endpoint: node
            service: local_image_registry
  static:
    api:
      jobs:
        - armada-ks-service
        - armada-ks-user
      services:
        - endpoint: internal
          service: identity
    ks_endpoints:
      jobs:
        - armada-ks-service
      services:
        - endpoint: internal
          service: identity
    ks_service:
      services:
        - endpoint: internal
          service: identity
    ks_user:
      services:
        - endpoint: internal
          service: identity
    image_repo_sync:
      services:
        - endpoint: internal
          service: local_image_registry
# typically overridden by environmental
# values, but should include all endpoints
# required by this chart
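# For example, a site-level override might pin the public FQDN of the
# armada endpoint (armada.example.com is a hypothetical value):
#   endpoints:
#     armada:
#       host_fqdn_override:
#         public: armada.example.com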
endpoints:
  cluster_domain_suffix: cluster.local
  identity:
    name: keystone
    auth:
      admin:
        password: password
        project_domain_name: default
        project_name: admin
        region_name: RegionOne
        user_domain_name: default
        username: admin
      armada:
        password: password
        project_domain_name: default
        project_name: service
        region_name: RegionOne
        role: admin
        user_domain_name: default
        username: armada
    hosts:
      default: keystone
      internal: keystone-api
    path:
      default: /v3
    scheme:
      default: http
    port:
      api:
        default: 80
        internal: 5000
    host_fqdn_override:
      default: null
  armada:
    name: armada
    hosts:
      default: armada-api
      public: armada
    port:
      api:
        default: 8000
    path:
      default: /api/v1.0
    scheme:
      default: http
    host_fqdn_override:
      default: null
      # NOTE(lamt): This chart supports TLS for fqdn overridden public
      # endpoints using the following format:
      #   public:
      #     host: null
      #     tls:
      #       crt: null
      #       key: null
  local_image_registry:
    name: docker-registry
    namespace: docker-registry
    hosts:
      default: localhost
      internal: docker-registry
      node: localhost
    host_fqdn_override:
      default: null
    port:
      registry:
        node: 5000
secrets:
  identity:
    admin: armada-keystone-admin
    armada: armada-keystone-user
  tls:
    armada:
      api:
        public: armada-tls-public
conf:
  armada:
    DEFAULT: {}
    # When .conf.tiller.enabled is true `tiller_host` and `tiller_port` will
    # be overridden by 127.0.0.1 and `.conf.tiller.port` respectively
    armada_api:
      bind_port: 8000
    keystone_authtoken:
      auth_type: password
      auth_version: 3
      delay_auth_decision: true
    oslo_policy:
      policy_file: policy.yaml
  paste:
    'app:armada-api':
      paste.app_factory: 'armada.api.server:paste_start_armada'
    'filter:authtoken':
      paste.filter_factory: 'keystonemiddleware.auth_token:filter_factory'
    'pipeline:main':
      pipeline: authtoken armada-api
  policy:
    admin_required: 'role:admin or role:admin_ucp'
    service_or_admin: 'rule:admin_required or rule:service_role'
    service_role: 'role:service'
    admin_viewer: 'role:admin_ucp_viewer or rule:service_or_admin'
    'armada:create_endpoints': 'rule:admin_required'
    'armada:rollback_release': 'rule:admin_required'
    'armada:test_manifest': 'rule:admin_required'
    'armada:test_release': 'rule:admin_required'
    'armada:validate_manifest': 'rule:admin_viewer'
    'tiller:get_release': 'rule:admin_viewer'
    'tiller:get_status': 'rule:admin_viewer'
  tiller:
    # If set to false then some form of Tiller needs to be provided
    enabled: true
    # To have Tiller bind to all interfaces, allowing direct connections from
    # the Helm client to pod_ip:port, set 'listen_on_any: true'.
    # The default setting 'listen_on_any: false' binds Tiller to 127.0.0.1.
    # The Armada container talks directly to Tiller via 127.0.0.1, so the
    # default value is appropriate for normal operation.
    listen_on_any: false
    port: 24134
    probe_port: 24135
    verbosity: 5
    trace: false
    storage: null
    # Only postgres is supported so far
    sql_dialect: postgres
    sql_connection: null
    namespace: kube-system
    # Limit the maximum number of revisions saved per release. 0 for no limit.
    history_max: 0
    # Note: Defaulting to the (default) kubernetes grace period, as anything
    # greater than that will have no effect.
    prestop_sleep: 30
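    # Sketch of the resulting flag, derived from the options above: with the
    # default 'listen_on_any: false' Tiller is started with
    # `-listen 127.0.0.1:24134`; with 'listen_on_any: true' it is started with
    # `-listen :24134`, allowing direct `helm --host pod_ip:24134` connections.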
monitoring:
  prometheus:
    armada:
      scrape: true
      path: /api/v1.0/metrics
      port: 8000
pod:
  mandatory_access_control:
    type: apparmor
    armada-api:
      init: runtime/default
      armada-api: runtime/default
      tiller: runtime/default
    armada-api-test:
      armada-api-test: runtime/default
  probes:
    armada:
      api:
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 15
            periodSeconds: 10
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 15
            periodSeconds: 10
      tiller:
        readiness:
          enabled: true
          params:
            failureThreshold: 3
            initialDelaySeconds: 1
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
        liveness:
          enabled: true
          params:
            failureThreshold: 3
            initialDelaySeconds: 1
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
  security_context:
    armada:
      pod:
        runAsUser: 1000
      container:
        armada_api_init:
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
        armada_api:
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
        tiller:
          runAsUser: 65534
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
    api_test:
      pod:
        runAsUser: 1000
      container:
        armada_api_test:
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
  env:
    # NOTE(@drewwalters96): These configuration values change the Armada API's
    # uWSGI configuration.
    armada_api:
#      # NOTE: ARMADA_UWSGI_PORT should match conf.armada.armada_api.bind_port in
#      # standard use cases
#      - name: ARMADA_UWSGI_PORT
#        value: "8000"
#      - name: ARMADA_UWSGI_TIMEOUT
#        value: "3600"
#      - name: ARMADA_UWSGI_WORKERS
#        value: "4"
#      - name: ARMADA_UWSGI_THREADS
#        value: "1"
#      - name: http_proxy
#        value: http://proxy.example.com:8080
#      - name: https_proxy
#        value: http://proxy.example.com:8080
#      - name: no_proxy
#        value: 10.96.0.1
#      - name: HTTP_PROXY
#        value: http://proxy.example.com:8080
#      - name: HTTPS_PROXY
#        value: http://proxy.example.com:8080
#      - name: NO_PROXY
#        value: 10.96.0.1
  mounts:
    armada_api:
      init_container: null
      armada_api:
        volumes: []
        volumeMounts: []
    tiller:
      volumes:
        - name: kubernetes-client-cache
          emptyDir: {}
      volumeMounts:
        - name: kubernetes-client-cache
          # Should be the `$HOME/.kube` of the `runAsUser` above
          # as this is where tiller's kubernetes client roots its cache dir.
          mountPath: /tmp/.kube
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
  replicas:
    api: 1
  lifecycle:
    upgrades:
      deployments:
        revision_history: 3
        pod_replacement_strategy: RollingUpdate
        rolling_update:
          max_unavailable: 1
          max_surge: 3
    disruption_budget:
      api:
        min_available: 0
    termination_grace_period:
      api:
        timeout: 30
  resources:
    enabled: false
    api:
      limits:
        memory: "128Mi"
        cpu: "100m"
      requests:
        memory: "128Mi"
        cpu: "100m"
    tiller:
      limits:
        memory: "128Mi"
        cpu: "100m"
      requests:
        memory: "128Mi"
        cpu: "100m"
    jobs:
      ks_user:
        limits:
          memory: "128Mi"
          cpu: "100m"
        requests:
          memory: "128Mi"
          cpu: "100m"
      ks_service:
        limits:
          memory: "128Mi"
          cpu: "100m"
        requests:
          memory: "128Mi"
          cpu: "100m"
      ks_endpoints:
        limits:
          memory: "128Mi"
          cpu: "100m"
        requests:
          memory: "128Mi"
          cpu: "100m"
      image_repo_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
    test:
      api:
        limits:
          memory: "128Mi"
          cpu: "100m"
        requests:
          memory: "128Mi"
          cpu: "100m"
network_policy:
  armada:
    ingress:
      - {}
    egress:
      - {}
manifests:
  configmap_bin: true
  configmap_etc: true
  deployment_api: true
  ingress_api: true
  job_image_repo_sync: true
  job_ks_endpoints: true
  job_ks_service: true
  job_ks_user: true
  secret_ingress_tls: true
  secret_keystone: true
  service: true
  service_ingress: true
  test_armada_api: true
  network_policy: false