# shipyard/charts/shipyard/values.yaml
# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file provides defaults for shipyard and airflow
labels:
job:
node_selector_key: ucp-control-plane
node_selector_value: enabled
shipyard:
node_selector_key: ucp-control-plane
node_selector_value: enabled
airflow_webserver_ui:
node_selector_key: ucp-control-plane
node_selector_value: enabled
airflow:
node_selector_key: ucp-control-plane
node_selector_value: enabled
test:
node_selector_key: ucp-control-plane
node_selector_value: enabled
images:
tags:
airflow: quay.io/airshipit/airflow:latest-ubuntu_focal
apache_proxy: docker.io/library/httpd:2.4
shipyard: quay.io/airshipit/shipyard:latest-ubuntu_focal
dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
shipyard_db_init: docker.io/postgres:14.8
shipyard_db_auxiliary: docker.io/postgres:14.8
shipyard_db_sync: quay.io/airshipit/shipyard:latest-ubuntu_focal
airflow_db_init: docker.io/postgres:14.8
rabbit_init: docker.io/rabbitmq:3.7-management
airflow_db_sync: quay.io/airshipit/airflow:latest-ubuntu_focal
ks_user: docker.io/openstackhelm/heat:ocata
ks_service: docker.io/openstackhelm/heat:ocata
ks_endpoints: docker.io/openstackhelm/heat:ocata
image_repo_sync: docker.io/docker:17.07.0
pull_policy: "IfNotPresent"
local_registry:
active: false
exclude:
- dep_check
- image_repo_sync
release_group: null
network:
shipyard:
ingress:
public: true
classes:
namespace: "nginx"
cluster: "nginx-cluster"
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
nginx.ingress.kubernetes.io/proxy-body-size: "10m"
nginx.ingress.kubernetes.io/configuration-snippet: |
more_clear_headers "Server";
more_set_headers "X-Content-Type-Options: 'nosniff'";
more_set_headers "X-Frame-Options: 'deny'";
node_port: 31901
enable_node_port: false
airflow:
worker:
name: airflow-worker
port: 8793
enable_node_port: false
airflow_webserver_ui:
ingress:
public: true
classes:
namespace: "nginx"
cluster: "nginx-cluster"
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
nginx.ingress.kubernetes.io/proxy-body-size: "10m"
nginx.ingress.kubernetes.io/configuration-snippet: |
more_clear_headers "Server";
more_set_headers "X-Content-Type-Options: 'nosniff'";
more_set_headers "X-Frame-Options: 'deny'";
node_port: 31902
enable_node_port: false
dependencies:
static:
shipyard_db_init:
jobs:
- airflow-db-init
- airflow-db-sync
services:
- service: postgresql_shipyard_db
endpoint: internal
shipyard_db_auxiliary:
jobs:
- shipyard-db-init
shipyard_db_sync:
jobs:
- shipyard-db-auxiliary
services:
- service: postgresql_shipyard_db
endpoint: internal
airflow_db_init:
services:
- service: postgresql_airflow_db
endpoint: internal
rabbit_init:
services:
- service: oslo_messaging
endpoint: internal
airflow_db_sync:
jobs:
- airflow-db-init
services:
- service: postgresql_airflow_db
endpoint: internal
ks_user:
services:
- service: identity
endpoint: internal
ks_service:
services:
- service: identity
endpoint: internal
ks_endpoints:
jobs:
- shipyard-ks-service
services:
- service: identity
endpoint: internal
shipyard:
jobs:
- shipyard-db-sync
- shipyard-ks-endpoints
- shipyard-ks-user
services:
- service: identity
endpoint: internal
- service: postgresql_shipyard_db
endpoint: internal
airflow_server:
jobs:
- airflow-rabbit-init
- airflow-db-sync
services:
- service: postgresql_airflow_db
endpoint: internal
- service: oslo_messaging
endpoint: internal
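  # The static dependency lists above are consumed by kubernetes-entrypoint
  # init containers (the dep_check image), which typically receive them as
  # DEPENDENCY_JOBS / DEPENDENCY_SERVICE environment variables and hold pod
  # startup until the named jobs and services are ready.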
volume_worker:
class_name: general
size: 5Gi
logrotate:
days_before_deletion: 30
percent_max_log_fs_usage: 80
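  # Presumably enforced by the airflow-logrotate sidecar: worker logs older
  # than 30 days are pruned, and pruning also kicks in once log filesystem
  # usage passes 80%.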
# Typically overridden by environment-specific values, but this section should
# include all endpoints required by this chart.
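# As an illustration (values are hypothetical), a site-level override might
# point the public identity endpoint at an external FQDN:
#   endpoints:
#     identity:
#       host_fqdn_override:
#         public: keystone.example.domain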
endpoints:
cluster_domain_suffix: cluster.local
identity:
name: keystone
auth:
shipyard:
region_name: RegionOne
role: admin
project_name: service
project_domain_name: default
user_domain_name: default
username: shipyard
password: password
admin:
region_name: RegionOne
project_name: admin
password: password
username: admin
user_domain_name: default
project_domain_name: default
hosts:
default: keystone
internal: keystone-api
path:
default: /v3
scheme:
default: http
port:
api:
default: 80
internal: 5000
host_fqdn_override:
default: null
shipyard:
name: shipyard
hosts:
default: shipyard-int
public: shipyard-api
port:
api:
default: 9000
public: 80
path:
default: /api/v1.0
scheme:
default: http
host_fqdn_override:
default: null
    # NOTE(bryan-strassner): this chart supports TLS for FQDN-overridden public
# endpoints using the following format:
# public:
# host: null
# tls:
# crt: null
# key: null
airflow_webserver_ui:
name: airflow-webserver-ui
hosts:
default: airflow-int
public: airflow-api
port:
api:
default: 80
public: 80
auth:
admin:
username: airflow
password: password
path:
default: /
scheme:
default: http
host_fqdn_override:
default: null
    # NOTE(bryan-strassner): this chart supports TLS for FQDN-overridden public
# endpoints using the following format:
# public:
# host: null
# tls:
# crt: null
# key: null
airflow_worker:
name: airflow-worker
hosts:
default: airflow-worker
discovery: airflow-worker-discovery
host_fqdn_override:
default: null
path: null
scheme: 'http'
port:
airflow_worker:
default: 8793
postgresql_shipyard_db:
name: postgresql_shipyard_db
auth:
admin:
username: postgres
password: password
user:
username: shipyard
password: password
database: shipyard
hosts:
default: postgresql
path: /shipyard
scheme: postgresql+psycopg2
port:
postgresql:
default: 5432
host_fqdn_override:
default: null
postgresql_airflow_db:
name: postgresql_airflow_db
auth:
admin:
username: postgres
password: password
user:
username: airflow
password: password
database: airflow
hosts:
default: postgresql
path: /airflow
scheme: postgresql+psycopg2
port:
postgresql:
default: 5432
host_fqdn_override:
default: null
postgresql_airflow_celery_db:
name: postgresql_airflow_celery_db
auth:
admin:
username: postgres
password: password
user:
username: airflow
password: password
database: airflow
hosts:
default: postgresql
path: /airflow
scheme: db+postgresql
port:
postgresql:
default: 5432
host_fqdn_override:
default: null
oslo_messaging:
statefulset:
replicas: 1
name: rabbitmq-rabbitmq
auth:
user:
username: airflow
password: password
admin:
username: rabbitmq
password: password
secret:
tls:
internal: rabbitmq-tls-direct
hosts:
default: rabbitmq
host_fqdn_override:
default: null
path: /airflow
scheme: rabbit
port:
amqp:
default: 5672
http:
default: 15672
oslo_cache:
hosts:
default: memcached
host_fqdn_override:
default: null
port:
memcache:
default: 11211
ldap:
hosts:
default: ldap
auth:
admin:
bind: "cn=admin,dc=cluster,dc=local"
password: password
host_fqdn_override:
default: null
path:
default: "/ou=People,dc=cluster,dc=local"
scheme:
default: ldap
port:
ldap:
default: 389
secrets:
identity:
admin: shipyard-keystone-admin
shipyard: shipyard-keystone-user
oslo_messaging:
admin: airflow-rabbitmq-admin
airflow: airflow-rabbitmq-user
postgresql_shipyard_db:
admin: shipyard-db-admin
user: shipyard-db-user
postgresql_airflow_db:
admin: airflow-db-admin
user: airflow-db-user
airflow_webserver_ui:
admin: airflow-admin-creds
tls:
shipyard:
shipyard:
public: shipyard-tls-public
airflow_webserver_ui:
airflow_webserver_ui:
public: airflow-tls-public
conf:
httpd: |
ServerRoot "/usr/local/apache2"
Listen 80
LoadModule mpm_event_module modules/mod_mpm_event.so
LoadModule authn_file_module modules/mod_authn_file.so
LoadModule authn_core_module modules/mod_authn_core.so
LoadModule authz_host_module modules/mod_authz_host.so
LoadModule authz_groupfile_module modules/mod_authz_groupfile.so
LoadModule authz_user_module modules/mod_authz_user.so
LoadModule authz_core_module modules/mod_authz_core.so
LoadModule access_compat_module modules/mod_access_compat.so
LoadModule auth_basic_module modules/mod_auth_basic.so
LoadModule ldap_module modules/mod_ldap.so
LoadModule authnz_ldap_module modules/mod_authnz_ldap.so
LoadModule reqtimeout_module modules/mod_reqtimeout.so
LoadModule filter_module modules/mod_filter.so
LoadModule proxy_html_module modules/mod_proxy_html.so
LoadModule log_config_module modules/mod_log_config.so
LoadModule env_module modules/mod_env.so
LoadModule headers_module modules/mod_headers.so
LoadModule setenvif_module modules/mod_setenvif.so
LoadModule version_module modules/mod_version.so
LoadModule proxy_module modules/mod_proxy.so
LoadModule proxy_connect_module modules/mod_proxy_connect.so
LoadModule proxy_http_module modules/mod_proxy_http.so
LoadModule proxy_balancer_module modules/mod_proxy_balancer.so
LoadModule remoteip_module modules/mod_remoteip.so
LoadModule slotmem_shm_module modules/mod_slotmem_shm.so
LoadModule slotmem_plain_module modules/mod_slotmem_plain.so
LoadModule unixd_module modules/mod_unixd.so
LoadModule status_module modules/mod_status.so
LoadModule autoindex_module modules/mod_autoindex.so
<IfModule unixd_module>
User daemon
Group daemon
</IfModule>
<Directory />
AllowOverride none
Require all denied
</Directory>
<Files ".ht*">
Require all denied
</Files>
ErrorLog /dev/stderr
LogLevel warn
<IfModule log_config_module>
LogFormat "%a %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" proxy
LogFormat "%h %l %u %t \"%r\" %>s %b" common
<IfModule logio_module>
LogFormat "%a %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio
</IfModule>
SetEnvIf X-Forwarded-For "^.*\..*\..*\..*" forwarded
CustomLog /dev/stdout common
CustomLog /dev/stdout combined
CustomLog /dev/stdout proxy env=forwarded
</IfModule>
<Directory "/usr/local/apache2/cgi-bin">
AllowOverride None
Options None
Require all granted
</Directory>
<IfModule headers_module>
RequestHeader unset Proxy early
</IfModule>
<IfModule proxy_html_module>
Include conf/extra/proxy-html.conf
</IfModule>
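    # The virtual host below reverse-proxies the Airflow webserver (bound to
    # localhost:8080 per conf.airflow.webserver) and protects it with HTTP
    # Basic auth backed by the bundled htpasswd file and the LDAP endpoint
    # configured in this chart's values.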
<VirtualHost *:80>
RemoteIPHeader X-Original-Forwarded-For
ProxyPreserveHost On
ProxyPass / http://localhost:8080/
ProxyPassReverse / http://localhost:8080/
<Proxy *>
AuthName "Airflow"
AuthType Basic
AuthBasicProvider file ldap
AuthUserFile /usr/local/apache2/conf/.htpasswd
AuthLDAPBindDN {{ .Values.endpoints.ldap.auth.admin.bind }}
AuthLDAPBindPassword {{ .Values.endpoints.ldap.auth.admin.password }}
AuthLDAPURL {{ tuple "ldap" "default" "ldap" . | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup" | quote }}
Require valid-user
</Proxy>
</VirtualHost>
uwsgi:
    # For more info about these default overrides, please read
    # https://uwsgi-docs.readthedocs.io/en/latest/ThingsToKnow.html
threads: 1 # Number of threads per worker/process
workers: 16 # maximum number of workers/processes
cheaper_algo: busyness # Use cheaper busyness plugin for dynamic scaling of workers
cheaper: 4 # Minimum number of workers allowed
cheaper_initial: 8 # Workers created at startup
cheaper_step: 4 # How many workers to spawn at a time
cheaper_overload: 1 # Length of a cycle in seconds
cheaper_busyness_multiplier: 60 # Cycles to wait before killing workers
cheaper_busyness_min: 20 # Below this threshold, kill workers
cheaper_busyness_max: 75 # Above this threshold, spawn new workers
cheaper_busyness_backlog_alert: 40 # Spawn emergency workers if more than this many requests are waiting in the queue
    cheaper_busyness_backlog_step: 1 # Emergency workers to create if there are too many requests in the queue
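    # A rough sketch of how these defaults interact: 8 workers start up
    # (cheaper_initial) and busyness is sampled every 1-second cycle
    # (cheaper_overload). Busyness above 75% spawns up to 4 more workers
    # (cheaper_step) toward the cap of 16 (workers); busyness below 20% for 60
    # consecutive cycles (cheaper_busyness_multiplier) retires workers down to
    # the floor of 4 (cheaper). More than 40 queued requests
    # (cheaper_busyness_backlog_alert) spawns 1 emergency worker at a time
    # (cheaper_busyness_backlog_step).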
policy:
admin_create: role:admin or role:admin_ucp
admin_read_access: rule:admin_create or role:admin_ucp_viewer
workflow_orchestrator:list_actions: rule:admin_read_access
workflow_orchestrator:create_action: rule:admin_create
workflow_orchestrator:get_action: rule:admin_read_access
workflow_orchestrator:get_action_step: rule:admin_read_access
workflow_orchestrator:get_action_step_logs: rule:admin_read_access
workflow_orchestrator:get_action_validation: rule:admin_read_access
workflow_orchestrator:invoke_action_control: rule:admin_create
workflow_orchestrator:get_configdocs_status: rule:admin_read_access
workflow_orchestrator:create_configdocs: rule:admin_create
workflow_orchestrator:get_configdocs: rule:admin_read_access
workflow_orchestrator:get_configdocs_cleartext: rule:admin_create
workflow_orchestrator:commit_configdocs: rule:admin_create
workflow_orchestrator:get_renderedconfigdocs: rule:admin_read_access
workflow_orchestrator:get_renderedconfigdocs_cleartext: rule:admin_create
workflow_orchestrator:list_workflows: rule:admin_read_access
workflow_orchestrator:get_workflow: rule:admin_read_access
workflow_orchestrator:get_notedetails: rule:admin_read_access
workflow_orchestrator:get_site_statuses: rule:admin_read_access
workflow_orchestrator:action_deploy_site: rule:admin_create
workflow_orchestrator:action_update_site: rule:admin_create
workflow_orchestrator:action_update_software: rule:admin_create
workflow_orchestrator:action_redeploy_server: rule:admin_create
workflow_orchestrator:action_relabel_nodes: rule:admin_create
workflow_orchestrator:action_test_site: rule:admin_create
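    # As a hypothetical site override, read-only calls could be opened to an
    # extra role by redefining the base rule, e.g.:
    #   admin_read_access: rule:admin_create or role:admin_ucp_viewer or role:auditor
    # (role:auditor is illustrative only.)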
rabbitmq:
    # Adds a RabbitMQ policy to mirror messages from Celery queues
policies:
- vhost: "airflow"
name: "ha_celery"
definition:
ha-mode: "all"
ha-sync-mode: "automatic"
priority: 0
apply-to: all
pattern: 'celery|default'
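    # For reference, the policy above is roughly equivalent to what the
    # rabbitmqctl CLI (assuming it is available in the rabbitmq pod) would
    # create with:
    #   rabbitmqctl set_policy -p airflow --priority 0 --apply-to all \
    #     ha_celery 'celery|default' '{"ha-mode":"all","ha-sync-mode":"automatic"}'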
paste:
app:shipyard-api:
paste.app_factory: shipyard_airflow.shipyard_api:paste_start_shipyard
pipeline:main:
pipeline: authtoken shipyard-api
filter:authtoken:
paste.filter_factory: keystonemiddleware.auth_token:filter_factory
shipyard:
base:
web_server: http://localhost:8080/
pool_size: 15
pool_pre_ping: true
pool_timeout: 30
pool_overflow: 10
connection_recycle: -1
profiler: false
shipyard:
service_type: shipyard
deckhand:
service_type: deckhand
armada:
service_type: armada
drydock:
service_type: physicalprovisioner
promenade:
service_type: kubernetesprovisioner
keystone_authtoken:
delay_auth_decision: true
auth_type: password
auth_section: keystone_authtoken
auth_version: v3
memcache_security_strategy: ENCRYPT
requests_config:
airflow_log_connect_timeout: 5
airflow_log_read_timeout: 300
validation_connect_timeout: 20
validation_read_timeout: 300
notes_connect_timeout: 5
notes_read_timeout: 10
deckhand_client_connect_timeout: 20
deckhand_client_read_timeout: 300
drydock_client_connect_timeout: 20
drydock_client_read_timeout: 300
airflow:
worker_endpoint_scheme: 'http'
worker_port: 8793
k8s_logs:
ucp_namespace: 'ucp'
deployment_status_configmap:
name: 'deployment-status'
namespace: 'ucp'
oslo_policy:
policy_file: /etc/shipyard/policy.yaml
      # If a non-existent rule is used, the request should be denied. The
      # deny_all rule is hard-coded in policy.py to allow no access.
policy_default_rule: deny_all
document_info:
# The name of the deployment version document that Shipyard validates
deployment_version_name: deployment-version
# The schema of the deployment version document that Shipyard validates
deployment_version_schema: pegleg/DeploymentData/v1
# The name of the deployment configuration document that Shipyard expects
# and validates
deployment_configuration_name: deployment-configuration
# The schema of the deployment configuration document that Shipyard
# expects and validates
deployment_configuration_schema: shipyard/DeploymentConfiguration/v1
# The schema of the deployment strategy document that Shipyard expects
# and validates.
deployment_strategy_schema: shipyard/DeploymentStrategy/v1
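      # A minimal sketch (illustrative only; the real payload comes from the
      # site definition) of a deployment-version document matching the
      # name/schema pair above:
      #   schema: pegleg/DeploymentData/v1
      #   metadata:
      #     schema: metadata/Document/v1
      #     name: deployment-version
      #   data: {}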
validations:
# Control the severity of the deployment-version document validation
# that Shipyard performs during create configdocs.
# Possible values are Skip, Info, Warning, and Error
deployment_version_create: Skip
# Control the severity of the deployment-version document validation
# that Shipyard performs during commit configdocs.
# Possible values are Skip, Info, Warning, and Error
deployment_version_commit: Skip
airflow_config_file:
path: /usr/local/airflow/airflow.cfg
airflow_webserver_config_file:
path: /usr/local/airflow/webserver_config.py
airflow_unittests_file:
path: /usr/local/airflow/unittests.cfg
airflow:
# NOTE: Airflow 1.10 introduces a need to declare all config options:
# https://issues.apache.org/jira/browse/AIRFLOW-3099
core:
      # core.airflow_home is not used in 1.10.3 and later.
      # The environment variable AIRFLOW_HOME is used instead.
#airflow_home: /usr/local/airflow
dags_folder: /usr/local/airflow/dags
sensitive_var_conn_names:
- sql_alchemy_conn
- broker_url
- result_backend
- fernet_key
lazy_discover_providers: "True"
lazy_load_plugins: "True"
hostname_callable: "socket:getfqdn"
default_timezone: "utc"
executor: "CeleryExecutor"
# sql_alchemy_conn is extracted from endpoints by the configmap template
sql_engine_encoding: "utf-8"
sql_alchemy_pool_enabled: "True"
sql_alchemy_pool_size: 5
sql_alchemy_pool_recycle: 1800
sql_alchemy_reconnect_timeout: 30
sql_alchemy_schema: ""
parallelism: 32
dag_concurrency: 8
dags_are_paused_at_creation: "False"
# In 1.10.4, non_pooled_task_slot_count and non_pooled_backfill_task_slot_count are
# removed in favor of default_pool, which is initialized with 128 slots by default.
#non_pooled_task_slot_count: 128
max_active_runs_per_dag: 8
load_examples: "False"
plugins_folder: /usr/local/airflow/plugins
fernet_key: fKp7omMJ4QlTxfZzVBSiyXVgeCK-6epRjGgMpEIsjvs=
donot_pickle: "False"
dagbag_import_timeout: 30
      # BashTaskRunner was renamed to StandardTaskRunner in 1.10.3
task_runner: "StandardTaskRunner"
default_impersonation: ""
security: ""
secure_mode: "True"
unit_test_mode: "False"
task_log_reader: "task"
enable_xcom_pickling: "False"
killed_task_cleanup_time: 60
dag_run_conf_overrides_params: "False"
worker_precheck: "False"
cli:
api_client: airflow.api.client.local_client
# if endpoint_url is not set, it is extracted from endpoints by the
# configmap template
endpoint_url: http://localhost
api:
auth_backend: airflow.api.auth.backend.default
enable_experimental_api: "True"
lineage:
# Shipyard is not using this
backend: ""
logging:
# See image-bundled log_config.py.
# Adds console logging of task/step logs.
# logging_config_class: log_config.LOGGING_CONFIG
logging_config_class: log_config.DEFAULT_LOGGING_CONFIG
# NOTE: Airflow 1.10 introduces extra newline characters between log
# records. Version 1.10.1 should resolve this issue
# https://issues.apache.org/jira/browse/AIRFLOW-1917
#
# NOTE: The log format ends up repeated for each log record that we log
# in our custom operators, once for the logging_mixin class of
# Airflow itself, and once again for the message we want to log.
# E.g.:
# 2018-09-21 19:38:48,950 INFO logging_mixin(95) write - 2018-09-21 19:38:48,950 INFO deployment_configuration_operator(135) get_doc - Deckhand Client acquired
#
# NOTE: Updated from default to match Shipyard logging as much as
# possible without more aggressive techniques
#
log_format: "%%(asctime)s %%(levelname)-8s %%(filename)s:%%(lineno)3d:%%(funcName)s %%(module)s %%(message)s"
log_filename_template: "{{ ti.dag_id }}/{{ ti.task_id }}/{{ execution_date.strftime('%%Y-%%m-%%dT%%H:%%M:%%S') }}/{{ try_number }}.log"
log_processor_filename_template: "{{ filename }}.log"
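      # In the templates above, "%%" is ConfigParser escaping for a literal
      # "%" (log_format renders starting with "%(asctime)s"), while the
      # {{ ... }} expressions are Jinja that Airflow evaluates at runtime with
      # the task instance (ti), execution_date, and try_number in scope.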
dag_processor_manager_log_location: /usr/local/airflow/logs/dag_processor_manager/dag_processor_manager.log
logging_level: "DEBUG"
fab_logging_level: "WARNING"
celery_logging_level: "DEBUG"
base_log_folder: /usr/local/airflow/logs
remote_logging: "False"
remote_log_conn_id: ""
remote_base_log_folder: ""
encrypt_s3_logs: "False"
atlas:
# Shipyard is not using this
sasl_enabled: "False"
host: ""
port: 21000
username: ""
password: ""
operators:
default_owner: "airflow"
default_cpus: 1
default_ram: 512
default_disk: 512
default_gpus: 0
hive:
# Shipyard is not using this
default_hive_mapred_queue: ""
webserver:
      # If base_url is not set, it is extracted from endpoints by the configmap
      # template.
base_url: http://localhost
      # Set web_server_host to 0.0.0.0 to bind to all interfaces. By default it
      # binds only to the loopback interface.
web_server_host: 127.0.0.1
web_server_port: 8080
web_server_ssl_cert: ""
web_server_ssl_key: ""
web_server_master_timeout: 120
web_server_worker_timeout: 120
worker_refresh_batch_size: 1
worker_refresh_interval: 120
secret_key: "{SECRET_KEY}"
workers: 4
worker_class: "sync"
access_logfile: "-"
error_logfile: "-"
expose_config: "False"
authenticate: "False"
filter_by_owner: "False"
owner_mode: "user"
dag_default_view: "tree"
dag_orientation: "LR"
demo_mode: "False"
log_fetch_timeout_sec: 20
hide_paused_dags_by_default: "False"
page_size: 100
rbac: "False"
navbar_color: "#007A87"
default_dag_run_display_number: 25
enable_proxy_fix: "False"
email:
# Shipyard is not using this
      email_backend: airflow.utils.email.send_email_smtp
smtp:
# Shipyard is not using this
smtp_host: "localhost"
smtp_starttls: "True"
smtp_ssl: "False"
smtp_user: "airflow"
smtp_password: "airflow"
smtp_port: 25
smtp_mail_from: airflow@airflow.local
celery:
celery_app_name: airflow.providers.celery.executors.celery_executor
# The concurrency that will be used when starting workers with the
# "airflow worker" command. This defines the number of task instances
# that a worker will take, so size up your workers based on the resources
# on your worker box and the nature of your tasks
worker_concurrency: 4
worker_log_server_port: 8793
# broker_url is extracted from endpoints by the configmap template
# result_backend is extracted from endpoints by the configmap template
flower_host: 0.0.0.0
flower_url_prefix: ""
flower_port: 5555
flower_basic_auth: ""
operation_timeout: 30.0
broker_connection_retry_on_startup: "True"
broker_connection_retry: "True"
broker_connection_max_retries: 0
broker_connection_timeout: 30.0
default_queue: "default"
# How many processes CeleryExecutor uses to sync task state.
# 0 means to use max(1, number of cores - 1) processes.
# set to 1 for low-volume use
sync_parallelism: 1
celery_config_options: airflow.config_templates.default_celery.DEFAULT_CELERY_CONFIG
# TODO: Enable this for security
ssl_active: "False"
      ssl_key: /etc/rabbitmq/certs/tls.key
      ssl_cert: /etc/rabbitmq/certs/tls.crt
      ssl_cacert: /etc/rabbitmq/certs/ca.crt
celery_broker_transport_options:
visibility_timeout: 21600
dask:
# Shipyard is not using this
      cluster_address: "127.0.0.1:8786"
tls_ca: ""
tls_cert: ""
tls_key: ""
scheduler:
# Task instances listen for external kill signal (when you clear tasks
# from the CLI or the UI), this defines the frequency at which they
# should listen (in seconds).
job_heartbeat_sec: 10
# The scheduler constantly tries to trigger new tasks (look at the
# scheduler section in the docs for more information). This defines
# how often the scheduler should run (in seconds).
scheduler_heartbeat_sec: 10
      # Parse DAG definition files no more than once every 10 seconds.
min_file_process_interval: 10
dag_dir_list_interval: 300
# Stats for the scheduler are minimally useful - every 5 mins is enough
print_stats_interval: 300
child_process_log_directory: /usr/local/airflow/logs/scheduler
scheduler_zombie_task_threshold: 300
catchup_by_default: "True"
max_tis_per_query: 512
# Shipyard's use of Airflow is low volume. 1 Thread is probably enough.
# deprecated in 1.10.14. Replaced with "parsing_processes"
# max_threads: 1
parsing_processes: 1
authenticate: "False"
# Turn off scheduler use of cron intervals by setting this to False.
# DAGs submitted manually in the web UI or with trigger_dag will still run.
use_job_schedule: "False"
metrics:
statsd_on: "False"
statsd_host: "localhost"
statsd_port: 8125
statsd_prefix: "airflow"
ldap:
# Shipyard is not using this
uri: ""
user_filter: "objectClass=*"
user_name_attr: "uid"
group_member_attr: "memberOf"
superuser_filter: ""
data_profiler_filter: ""
bind_user: "cn=Manager,dc=example,dc=com"
bind_password: "insecure"
basedn: "dc=example,dc=com"
cacert: "/etc/ca/ldap_ca.crt"
search_scope: "LEVEL"
mesos:
# Shipyard is not using this
master: ""
framework_name: ""
task_cpu: ""
task_memory: ""
checkpoint: ""
authenticate: ""
kerberos:
# Shipyard is not using this
ccache: ""
principal: ""
reinit_frequency: ""
kinit_path: ""
keytab: ""
github_enterprise:
# Shipyard is not using this
api_rev: v3
admin:
hide_sensitive_variable_fields: "True"
elasticsearch:
# Shipyard is not using this
elasticsearch_host: ""
elasticsearch_log_id_template: ""
elasticsearch_end_of_log_mark: ""
kubernetes:
# Shipyard is not using this (maybe future for spawning own workers)
worker_container_repository: ""
worker_container_tag: ""
delete_worker_pods: "True"
namespace: "default"
airflow_configmap: ""
dags_volume_subpath: ""
dags_volume_claim: ""
logs_volume_subpath: ""
logs_volume_claim: ""
git_repo: ""
git_branch: ""
git_user: ""
git_password: ""
git_subpath: ""
git_sync_container_repository: ""
git_sync_container_tag: ""
git_sync_init_container_name: ""
worker_service_account_name: ""
image_pull_secrets: ""
gcp_service_account_keys: ""
in_cluster: ""
kubernetes_secrets:
      # Shipyard is not using this
# End of Airflow config options
pod:
mandatory_access_control:
type: apparmor
shipyard-api:
init: runtime/default
shipyard-api: runtime/default
airflow-web: runtime/default
airflow-worker:
init: runtime/default
worker-perms: runtime/default
airflow-worker: runtime/default
airflow-scheduler: runtime/default
airflow-logrotate: runtime/default
airflow-scheduler:
init: runtime/default
airflow-scheduler: runtime/default
airflow-webserver-ui:
init: runtime/default
apache-proxy: runtime/default
airflow-webserver-ui: runtime/default
shipyard-db-auxiliary:
init: runtime/default
shipyard-db-auxiliary: runtime/default
shipyard-db-init:
init: runtime/default
shipyard-db-init: runtime/default
shipyard-db-sync:
init: runtime/default
shipyard-db-sync: runtime/default
airflow-db-init:
init: runtime/default
airflow-db-init: runtime/default
airflow-db-sync:
init: runtime/default
airflow-db-sync: runtime/default
shipyard-api-test:
shipyard-api-test: runtime/default
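    # With type: apparmor, each container entry above is expected to render as
    # a pod annotation using Kubernetes' beta AppArmor syntax, e.g.:
    #   container.apparmor.security.beta.kubernetes.io/<container-name>: runtime/default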
security_context:
shipyard:
pod:
runAsUser: 1000
container:
shipyard_api:
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
airflow_web:
readOnlyRootFilesystem: false
allowPrivilegeEscalation: false
airflow_scheduler:
pod:
runAsUser: 1000
container:
airflow_scheduler:
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
airflow_webserver_ui:
container:
apache_proxy:
runAsUser: 0
readOnlyRootFilesystem: false
airflow_webserver_ui:
runAsUser: 1000
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
airflow_worker:
pod:
runAsUser: 1000
container:
worker_perms:
runAsUser: 0
readOnlyRootFilesystem: true
airflow_scheduler:
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
airflow_worker:
readOnlyRootFilesystem: false
allowPrivilegeEscalation: false
airflow_logrotate:
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
db_auxiliary:
pod:
runAsUser: 1000
container:
shipyard_db_auxiliary:
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
db_init:
pod:
runAsUser: 1000
container:
shipyard_db_init:
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
airflow_db_init:
readOnlyRootFilesystem: false
allowPrivilegeEscalation: false
db_sync:
pod:
runAsUser: 1000
container:
shipyard_db_sync:
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
airflow_db_sync:
readOnlyRootFilesystem: false
allowPrivilegeEscalation: false
api_test:
pod:
runAsUser: 1000
container:
shipyard_api_test:
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
mounts:
airflow_scheduler:
# TODO: This is only used if the standalone scheduler is enabled.
airflow_scheduler:
init_container: null
airflow_worker:
init_container: null
airflow_worker:
airflow_scheduler:
airflow_webserver_ui:
airflow_webserver_ui:
init_container: null
shipyard:
init_container: null
shipyard:
shipyard_db_init:
init_container: null
shipyard_db_init:
shipyard_db_auxiliary:
init_container: null
shipyard_db_auxiliary:
shipyard_db_sync:
init_container: null
shipyard_db_sync:
affinity:
anti:
type:
default: preferredDuringSchedulingIgnoredDuringExecution
topologyKey:
default: kubernetes.io/hostname
replicas:
shipyard:
api: 2
airflow:
worker: 2
scheduler: 2
airflow_webserver_ui:
airflow_webserver_ui: 1
probes:
airflow_worker:
airflow_worker:
readiness:
enabled: true
params:
periodSeconds: 15
timeoutSeconds: 10
airflow_logrotate:
readiness:
enabled: true
params:
initialDelaySeconds: 15
periodSeconds: 15
timeoutSeconds: 10
lifecycle:
upgrades:
deployments:
revision_history: 3
pod_replacement_strategy: RollingUpdate
rolling_update:
max_unavailable: 1
max_surge: 3
termination_grace_period:
airflow:
timeout: 30
shipyard:
timeout: 30
resources:
enabled: false
airflow:
logrotate:
limits:
memory: "128Mi"
cpu: "100m"
requests:
memory: "128Mi"
cpu: "100m"
scheduler:
limits:
memory: "128Mi"
cpu: "100m"
requests:
memory: "128Mi"
cpu: "100m"
web:
limits:
memory: "128Mi"
cpu: "100m"
requests:
memory: "128Mi"
cpu: "100m"
worker:
limits:
memory: "128Mi"
cpu: "100m"
requests:
memory: "128Mi"
cpu: "100m"
apache_proxy:
limits:
memory: "1024Mi"
cpu: "2000m"
requests:
memory: "128Mi"
cpu: "100m"
airflow_webserver_ui:
airflow_webserver_ui:
limits:
memory: "128Mi"
cpu: "100m"
shipyard_api:
limits:
memory: "128Mi"
cpu: "100m"
requests:
memory: "128Mi"
cpu: "100m"
jobs:
rabbit_init:
limits:
memory: "128Mi"
cpu: "500m"
requests:
memory: "128Mi"
cpu: "500m"
airflow_db_init:
limits:
memory: "128Mi"
cpu: "500m"
requests:
memory: "128Mi"
cpu: "500m"
airflow_db_sync:
limits:
memory: "128Mi"
cpu: "500m"
requests:
memory: "128Mi"
cpu: "500m"
ks_endpoints:
limits:
memory: "128Mi"
cpu: "100m"
requests:
memory: "128Mi"
cpu: "100m"
ks_service:
limits:
memory: "128Mi"
cpu: "100m"
requests:
memory: "128Mi"
cpu: "100m"
ks_user:
limits:
memory: "128Mi"
cpu: "100m"
requests:
memory: "128Mi"
cpu: "100m"
shipyard_db_init:
limits:
memory: "128Mi"
cpu: "500m"
requests:
memory: "128Mi"
cpu: "500m"
shipyard_db_auxiliary:
limits:
memory: "128Mi"
cpu: "500m"
requests:
memory: "128Mi"
cpu: "500m"
shipyard_db_sync:
limits:
memory: "128Mi"
cpu: "500m"
requests:
memory: "128Mi"
cpu: "500m"
test:
shipyard:
limits:
memory: "128Mi"
cpu: "100m"
requests:
memory: "128Mi"
cpu: "100m"
network_policy:
airflow:
ingress:
- {}
egress:
- {}
shipyard:
ingress:
- {}
egress:
- {}
tls:
oslo_messaging: false
manifests:
configmap_shipyard_bin: true
configmap_shipyard_etc: true
configmap_airflow_bin: true
configmap_airflow_etc: true
configmap_airflow_usr: true
  # TODO: Set this to false only for a new deployment, or if the worker pod is
  # running the scheduler.
deployment_airflow_scheduler: true
deployment_airflow_webserver_ui: false
deployment_shipyard: true
statefulset_airflow_worker: true
ingress_shipyard_api: true
ingress_airflow_webserver_ui: false
job_shipyard_db_init: true
job_shipyard_db_auxiliary: true
job_shipyard_db_sync: true
job_rabbit_init: true
job_airflow_db_init: true
job_airflow_db_sync: true
job_ks_endpoints: true
job_ks_service: true
job_ks_user: true
secret_apache_etc: false
secret_airflow_webserver_ui: false
secret_airflow_webserver_ui_ingress_tls: false
secret_airflow_db: true
secret_shipyard_db: true
secret_ingress_tls: true
secret_keystone: true
secret_rabbitmq: true
service_shipyard: true
service_shipyard_ingress: true
service_airflow_webserver_ui: false
service_airflow_worker: true
service_discovery_airflow_worker: true
test_shipyard_api: true
network_policy: false
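  # Example (hypothetical overrides.yaml) disabling the standalone scheduler,
  # per the TODO above, when the worker pod runs its own scheduler:
  #   manifests:
  #     deployment_airflow_scheduler: false
  # applied with something like:
  #   helm upgrade --install shipyard ./shipyard --values overrides.yaml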