53e863954b
This change adds a new Shipyard operator that creates/updates a ConfigMap with information on the version and status of the currently running deployment. The ConfigMap is created at the start of a deployment and updated at the end, even if earlier steps fail. The operator has been added to the deploy_site, update_site, and update_software DAGs.

Change-Id: Iab9ea84d5e1edd6a8635cc4e4fa93647ee485194
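For reference, a minimal sketch of the kind of ConfigMap the operator maintains, assuming the chart defaults below (a ConfigMap named deployment-status in the ucp namespace); the data keys shown are illustrative placeholders, not the operator's actual schema:

apiVersion: v1
kind: ConfigMap
metadata:
  name: deployment-status
  namespace: ucp
data:
  # Illustrative placeholders only; the operator defines the real keys and values.
  version: <version of the committed configdocs being deployed>
  status: <running / completed / failed>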
# Copyright 2017 The Openstack-Helm Authors.
# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This file provides defaults for shipyard and airflow

labels:
  job:
    node_selector_key: ucp-control-plane
    node_selector_value: enabled
  shipyard:
    node_selector_key: ucp-control-plane
    node_selector_value: enabled
  airflow:
    node_selector_key: ucp-control-plane
    node_selector_value: enabled

images:
  tags:
    airflow: quay.io/airshipit/airflow:latest
    shipyard: quay.io/airshipit/shipyard:latest
    dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.3.1
    shipyard_db_init: docker.io/postgres:9.5
    shipyard_db_auxiliary: docker.io/postgres:9.5
    shipyard_db_sync: quay.io/airshipit/shipyard:latest
    airflow_db_init: docker.io/postgres:9.5
    rabbit_init: docker.io/rabbitmq:3.7-management
    airflow_db_sync: quay.io/airshipit/airflow:latest
    ks_user: docker.io/openstackhelm/heat:ocata
    ks_service: docker.io/openstackhelm/heat:ocata
    ks_endpoints: docker.io/openstackhelm/heat:ocata
    image_repo_sync: docker.io/docker:17.07.0
  pull_policy: "IfNotPresent"
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync

release_group: null

network:
  shipyard:
    ingress:
      public: true
      classes:
        namespace: "nginx"
        cluster: "nginx-cluster"
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
        nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
        nginx.ingress.kubernetes.io/proxy-body-size: "10m"
        nginx.ingress.kubernetes.io/configuration-snippet: |
          more_clear_headers "Server";
          more_set_headers "X-Content-Type-Options: 'nosniff'";
          more_set_headers "X-Frame-Options: 'deny'";
    node_port: 31901
    enable_node_port: false
  airflow:
    worker:
      name: airflow-worker
      port: 8793
    enable_node_port: false

dependencies:
  static:
    shipyard_db_init:
      jobs:
        - airflow-db-init
        - airflow-db-sync
      services:
        - service: postgresql_shipyard_db
          endpoint: internal
    shipyard_db_auxiliary:
      jobs:
        - shipyard-db-init
    shipyard_db_sync:
      jobs:
        - shipyard-db-auxiliary
      services:
        - service: postgresql_shipyard_db
          endpoint: internal
    airflow_db_init:
      services:
        - service: postgresql_airflow_db
          endpoint: internal
    rabbit_init:
      services:
        - service: oslo_messaging
          endpoint: internal
    airflow_db_sync:
      jobs:
        - airflow-db-init
      services:
        - service: postgresql_airflow_db
          endpoint: internal
    ks_user:
      services:
        - service: identity
          endpoint: internal
    ks_service:
      services:
        - service: identity
          endpoint: internal
    ks_endpoints:
      jobs:
        - shipyard-ks-service
      services:
        - service: identity
          endpoint: internal
    shipyard:
      jobs:
        - shipyard-db-sync
        - shipyard-ks-endpoints
        - shipyard-ks-user
        - shipyard-ks-endpoints
      services:
        - service: identity
          endpoint: internal
        - service: postgresql_shipyard_db
          endpoint: internal
    airflow_server:
      jobs:
        - airflow-rabbit-init
        - airflow-db-sync
      services:
        - service: postgresql_airflow_db
          endpoint: internal
        - service: oslo_messaging
          endpoint: internal

volume_worker:
  class_name: general
  size: 5Gi

logrotate:
  days_before_deletion: 30

# typically overridden by environmental
# values, but should include all endpoints
# required by this chart
endpoints:
  cluster_domain_suffix: cluster.local
  identity:
    name: keystone
    auth:
      shipyard:
        region_name: RegionOne
        role: admin
        project_name: service
        project_domain_name: default
        user_domain_name: default
        username: shipyard
        password: password
      admin:
        region_name: RegionOne
        project_name: admin
        password: password
        username: admin
        user_domain_name: default
        project_domain_name: default
    hosts:
      default: keystone
      internal: keystone-api
    path:
      default: /v3
    scheme:
      default: http
    port:
      api:
        default: 80
        internal: 5000
    host_fqdn_override:
      default: null
  shipyard:
    name: shipyard
    hosts:
      default: shipyard-int
      public: shipyard-api
    port:
      api:
        default: 9000
        public: 80
    path:
      default: /api/v1.0
    scheme:
      default: http
    host_fqdn_override:
      default: null
      # NOTE(bryan-strassner): this chart supports TLS for fqdn over-ridden public
      # endpoints using the following format:
      # public:
      #   host: null
      #   tls:
      #     crt: null
      #     key: null
  airflow_worker:
    name: airflow-worker
    hosts:
      default: airflow-worker
      discovery: airflow-worker-discovery
    host_fqdn_override:
      default: null
    path: null
    scheme: 'http'
    port:
      airflow_worker:
        default: 8793
  postgresql_shipyard_db:
    name: postgresql_shipyard_db
    auth:
      admin:
        username: postgres
        password: password
      user:
        username: shipyard
        password: password
        database: shipyard
    hosts:
      default: postgresql
    path: /shipyard
    scheme: postgresql+psycopg2
    port:
      postgresql:
        default: 5432
    host_fqdn_override:
      default: null
  postgresql_airflow_db:
    name: postgresql_airflow_db
    auth:
      admin:
        username: postgres
        password: password
      user:
        username: airflow
        password: password
        database: airflow
    hosts:
      default: postgresql
    path: /airflow
    scheme: postgresql+psycopg2
    port:
      postgresql:
        default: 5432
    host_fqdn_override:
      default: null
  postgresql_airflow_celery_db:
    name: postgresql_airflow_celery_db
    auth:
      admin:
        username: postgres
        password: password
      user:
        username: airflow
        password: password
        database: airflow
    hosts:
      default: postgresql
    path: /airflow
    scheme: db+postgresql
    port:
      postgresql:
        default: 5432
    host_fqdn_override:
      default: null
  oslo_messaging:
    auth:
      user:
        username: airflow
        password: password
      admin:
        username: rabbitmq
        password: password
    hosts:
      default: rabbitmq
    host_fqdn_override:
      default: null
    path: /airflow
    scheme: amqp
    port:
      amqp:
        default: 5672
      http:
        default: 15672
  oslo_cache:
    hosts:
      default: memcached
    host_fqdn_override:
      default: null
    port:
      memcache:
        default: 11211

secrets:
  identity:
    admin: shipyard-keystone-admin
    shipyard: shipyard-keystone-user
  oslo_messaging:
    admin: airflow-rabbitmq-admin
    airflow: airflow-rabbitmq-user
  postgresql_shipyard_db:
    admin: shipyard-db-admin
    user: shipyard-db-user
  postgresql_airflow_db:
    admin: airflow-db-admin
    user: airflow-db-user
  tls:
    shipyard:
      shipyard:
        public: shipyard-tls-public

conf:
  uwsgi:
    threads: 1
    workers: 4
  policy:
    admin_create: role:admin or role:admin_ucp
    admin_read_access: rule:admin_create or role:admin_ucp_viewer
    workflow_orchestrator:list_actions: rule:admin_read_access
    workflow_orchestrator:create_action: rule:admin_create
    workflow_orchestrator:get_action: rule:admin_read_access
    workflow_orchestrator:get_action_step: rule:admin_read_access
    workflow_orchestrator:get_action_step_logs: rule:admin_read_access
    workflow_orchestrator:get_action_validation: rule:admin_read_access
    workflow_orchestrator:invoke_action_control: rule:admin_create
    workflow_orchestrator:get_configdocs_status: rule:admin_read_access
    workflow_orchestrator:create_configdocs: rule:admin_create
    workflow_orchestrator:get_configdocs: rule:admin_read_access
    workflow_orchestrator:get_configdocs_cleartext: rule:admin_create
    workflow_orchestrator:commit_configdocs: rule:admin_create
    workflow_orchestrator:get_renderedconfigdocs: rule:admin_read_access
    workflow_orchestrator:get_renderedconfigdocs_cleartext: rule:admin_create
    workflow_orchestrator:list_workflows: rule:admin_read_access
    workflow_orchestrator:get_workflow: rule:admin_read_access
    workflow_orchestrator:get_notedetails: rule:admin_read_access
    workflow_orchestrator:get_site_statuses: rule:admin_read_access
    workflow_orchestrator:action_deploy_site: rule:admin_create
    workflow_orchestrator:action_update_site: rule:admin_create
    workflow_orchestrator:action_update_software: rule:admin_create
    workflow_orchestrator:action_redeploy_server: rule:admin_create
    workflow_orchestrator:action_relabel_nodes: rule:admin_create
    workflow_orchestrator:action_test_site: rule:admin_create
  rabbitmq:
    # adding rmq policy to mirror messages from celery queues
    policies:
      - vhost: "airflow"
        name: "ha_celery"
        definition:
          ha-mode: "all"
          ha-sync-mode: "automatic"
        priority: 0
        apply-to: all
        pattern: 'celery|default'
  paste:
    app:shipyard-api:
      paste.app_factory: shipyard_airflow.shipyard_api:paste_start_shipyard
    pipeline:main:
      pipeline: authtoken shipyard-api
    filter:authtoken:
      paste.filter_factory: keystonemiddleware.auth_token:filter_factory
  shipyard:
    base:
      web_server: http://localhost:8080/
      pool_size: 15
      pool_pre_ping: true
      pool_timeout: 30
      pool_overflow: 10
      connection_recycle: -1
      profiler: false
    shipyard:
      service_type: shipyard
    deckhand:
      service_type: deckhand
    armada:
      service_type: armada
    drydock:
      service_type: physicalprovisioner
    promenade:
      service_type: kubernetesprovisioner
    keystone_authtoken:
      delay_auth_decision: true
      auth_type: password
      auth_section: keystone_authtoken
      auth_version: v3
      memcache_security_strategy: ENCRYPT
    requests_config:
      airflow_log_connect_timeout: 5
      airflow_log_read_timeout: 300
      deckhand_client_connect_timeout: 5
      deckhand_client_read_timeout: 300
      validation_connect_timeout: 5
      validation_read_timeout: 300
      notes_connect_timeout: 5
      notes_read_timeout: 10
      drydock_client_connect_timeout: 20
      drydock_client_read_timeout: 300
    airflow:
      worker_endpoint_scheme: 'http'
      worker_port: 8793
    k8s_logs:
      ucp_namespace: 'ucp'
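    # Identifies the ConfigMap that records the version and status of the
    # currently running deployment; it is created at the start of the
    # deploy_site, update_site and update_software workflows and updated at
    # the end, even if earlier steps fail.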
    deployment_status_configmap:
      name: 'deployment-status'
      namespace: 'ucp'
    oslo_policy:
      policy_file: /etc/shipyard/policy.yaml
      # If non-existent rule is used, the request should be denied. The
      # deny_all rule is hard coded in the policy.py code to allow no access.
      policy_default_rule: deny_all
    document_info:
      # The name of the deployment version document that Shipyard validates
      deployment_version_name: deployment-version
      # The schema of the deployment version document that Shipyard validates
      deployment_version_schema: pegleg/DeploymentData/v1
      # The name of the deployment configuration document that Shipyard expects
      # and validates
      deployment_configuration_name: deployment-configuration
      # The schema of the deployment configuration document that Shipyard
      # expects and validates
      deployment_configuration_schema: shipyard/DeploymentConfiguration/v1
      # The schema of the deployment strategy document that Shipyard expects
      # and validates.
      deployment_strategy_schema: shipyard/DeploymentStrategy/v1
    validations:
      # Control the severity of the deployment-version document validation
      # that Shipyard performs during create configdocs.
      # Possible values are Skip, Info, Warning, and Error
      deployment_version_create: Skip
      # Control the severity of the deployment-version document validation
      # that Shipyard performs during commit configdocs.
      # Possible values are Skip, Info, Warning, and Error
      deployment_version_commit: Skip
  airflow_config_file:
    path: /usr/local/airflow/airflow.cfg
  airflow:
    # NOTE: Airflow 1.10 introduces a need to declare all config options:
    #       https://issues.apache.org/jira/browse/AIRFLOW-3099
    core:
      airflow_home: /usr/local/airflow
      dags_folder: /usr/local/airflow/dags
      base_log_folder: /usr/local/airflow/logs
      remote_logging: "False"
      remote_log_conn_id: ""
      remote_base_log_folder: ""
      encrypt_s3_logs: "False"
      logging_level: "INFO"
      fab_logging_level: "WARN"
      # See image-bundled log_config.py.
      # Adds console logging of task/step logs.
      logging_config_class: log_config.LOGGING_CONFIG
      # NOTE: Airflow 1.10 introduces extra newline characters between log
      #       records. Version 1.10.1 should resolve this issue
      #       https://issues.apache.org/jira/browse/AIRFLOW-1917
      #
      # NOTE: The log format ends up repeated for each log record that we log
      #       in our custom operators, once for the logging_mixin class of
      #       Airflow itself, and once again for the message we want to log.
      #       E.g.:
      #       2018-09-21 19:38:48,950 INFO logging_mixin(95) write - 2018-09-21 19:38:48,950 INFO deployment_configuration_operator(135) get_doc - Deckhand Client acquired
      #
      # NOTE: Updated from default to match Shipyard logging as much as
      #       possible without more aggressive techniques
      #
      log_format: "%%(asctime)s %%(levelname)-8s %%(filename)s:%%(lineno)3d:%%(funcName)s %%(module)s %%(message)s"
      simple_log_format: "%%(asctime)s %%(levelname)s - %%(message)s"
      log_filename_template: "{{ ti.dag_id }}/{{ ti.task_id }}/{{ execution_date.strftime('%%Y-%%m-%%dT%%H:%%M:%%S') }}/{{ try_number }}.log"
      log_processor_filename_template: "{{ filename }}.log"
      dag_processor_manager_log_location: /usr/local/airflow/logs/dag_processor_manager/dag_processor_manager.log
      hostname_callable: "socket:getfqdn"
      default_timezone: "utc"
      executor: "CeleryExecutor"
      # sql_alchemy_conn is extracted from endpoints by the configmap template
      sql_engine_encoding: "utf-8"
      sql_alchemy_pool_enabled: "True"
      sql_alchemy_pool_size: 5
      sql_alchemy_pool_recycle: 1800
      sql_alchemy_reconnect_timeout: 30
      sql_alchemy_schema: ""
      parallelism: 32
      dag_concurrency: 8
      dags_are_paused_at_creation: "False"
      non_pooled_task_slot_count: 128
      max_active_runs_per_dag: 8
      load_examples: "False"
      plugins_folder: /usr/local/airflow/plugins
      fernet_key: fKp7omMJ4QlTxfZzVBSiyXVgeCK-6epRjGgMpEIsjvs=
      donot_pickle: "False"
      dagbag_import_timeout: 30
      task_runner: "BashTaskRunner"
      # task_runner: "StandardTaskRunner" -- coming soon?
      default_impersonation: ""
      security: ""
      secure_mode: "True"
      unit_test_mode: "False"
      task_log_reader: "task"
      enable_xcom_pickling: "False"
      killed_task_cleanup_time: 60
      dag_run_conf_overrides_params: "False"
      worker_precheck: "False"
    cli:
      api_client: airflow.api.client.local_client
      # if endpoint_url is not set, it is extracted from endpoints by the
      # configmap template
      endpoint_url: http://localhost/
    api:
      auth_backend: airflow.api.auth.backend.default
    lineage:
      # Shipyard is not using this
      backend: ""
    atlas:
      # Shipyard is not using this
      sasl_enabled: "False"
      host: ""
      port: 21000
      username: ""
      password: ""
    operators:
      default_owner: "airflow"
      default_cpus: 1
      default_ram: 512
      default_disk: 512
      default_gpus: 0
    hive:
      # Shipyard is not using this
      default_hive_mapred_queue: ""
    webserver:
      # if base_url is not set, it is extracted from endpoints by the configmap
      # template
      base_url: http://localhost/
      # set web_server_host to 0.0.0.0 to bind to all interfaces. By default
      # only bind to loopback
      web_server_host: 127.0.0.1
      web_server_port: 8080
      web_server_ssl_cert: ""
      web_server_ssl_key: ""
      web_server_master_timeout: 120
      web_server_worker_timeout: 120
      worker_refresh_batch_size: 1
      worker_refresh_interval: 120
      secret_key: "{SECRET_KEY}"
      workers: 4
      worker_class: "sync"
      access_logfile: "-"
      error_logfile: "-"
      expose_config: "False"
      authenticate: "False"
      filter_by_owner: "False"
      owner_mode: "user"
      dag_default_view: "tree"
      dag_orientation: "LR"
      demo_mode: "False"
      log_fetch_timeout_sec: 20
      hide_paused_dags_by_default: "False"
      page_size: 100
      rbac: "False"
      navbar_color: "#007A87"
      default_dag_run_display_number: 25
      enable_proxy_fix: "False"
    email:
      # Shipyard is not using this
      email_backend: airflow.utils.send_email_smtp
    smtp:
      # Shipyard is not using this
      smtp_host: "localhost"
      smtp_starttls: "True"
      smtp_ssl: "False"
      smtp_user: "airflow"
      smtp_password: "airflow"
      smtp_port: 25
      smtp_mail_from: airflow@airflow.local
    celery:
      celery_app_name: airflow.executors.celery_executor
      # The concurrency that will be used when starting workers with the
      # "airflow worker" command. This defines the number of task instances
      # that a worker will take, so size up your workers based on the resources
      # on your worker box and the nature of your tasks
      worker_concurrency: 4
      worker_log_server_port: 8793
      # broker_url is extracted from endpoints by the configmap template
      # result_backend is extracted from endpoints by the configmap template
      flower_host: 0.0.0.0
      flower_url_prefix: ""
      flower_port: 5555
      flower_basic_auth: ""
      default_queue: "default"
      # How many processes CeleryExecutor uses to sync task state.
      # 0 means to use max(1, number of cores - 1) processes.
      # set to 1 for low-volume use
      sync_parallelism: 1
      celery_config_options: airflow.config_templates.default_celery.DEFAULT_CELERY_CONFIG
      # TODO: Enable this for security
      ssl_active: "False"
      ssl_key: ""
      ssl_cert: ""
      ssl_cacert: ""
      celery_broker_transport_options:
        visibility_timeout: 21600
    dask:
      # Shipyard is not using this
      cluster_adresss: "127.0.0.1:8786"
      tls_ca: ""
      tls_cert: ""
      tls_key: ""
    scheduler:
      # Task instances listen for external kill signal (when you clear tasks
      # from the CLI or the UI), this defines the frequency at which they
      # should listen (in seconds).
      job_heartbeat_sec: 10
      # The scheduler constantly tries to trigger new tasks (look at the
      # scheduler section in the docs for more information). This defines
      # how often the scheduler should run (in seconds).
      scheduler_heartbeat_sec: 10
      run_duration: -1
      # Check for pending dag runs no more than every 10 seconds
      min_file_process_interval: 10
      dag_dir_list_interval: 300
      # Stats for the scheduler are minimally useful - every 5 mins is enough
      print_stats_interval: 300
      child_process_log_directory: /usr/local/airflow/logs/scheduler
      scheduler_zombie_task_threshold: 300
      catchup_by_default: "True"
      max_tis_per_query: 512
      statsd_on: "False"
      statsd_host: "localhost"
      statsd_port: 8125
      statsd_prefix: "airflow"
      # Shipyard's use of Airflow is low volume. 1 Thread is probably enough.
      max_threads: 1
      authenticate: "False"
      # Turn off scheduler use of cron intervals by setting this to False.
      # DAGs submitted manually in the web UI or with trigger_dag will still run.
      use_job_schedule: "False"
    ldap:
      # Shipyard is not using this
      uri: ""
      user_filter: "objectClass=*"
      user_name_attr: "uid"
      group_member_attr: "memberOf"
      superuser_filter: ""
      data_profiler_filter: ""
      bind_user: "cn=Manager,dc=example,dc=com"
      bind_password: "insecure"
      basedn: "dc=example,dc=com"
      cacert: "/etc/ca/ldap_ca.crt"
      search_scope: "LEVEL"
    mesos:
      # Shipyard is not using this
      master: ""
      framework_name: ""
      task_cpu: ""
      task_memory: ""
      checkpoint: ""
      authenticate: ""
    kerberos:
      # Shipyard is not using this
      ccache: ""
      principal: ""
      reinit_frequency: ""
      kinit_path: ""
      keytab: ""
    github_enterprise:
      # Shipyard is not using this
      api_rev: v3
    admin:
      hide_sensitive_variable_fields: "True"
    elasticsearch:
      # Shipyard is not using this
      elasticsearch_host: ""
      elasticsearch_log_id_template: ""
      elasticsearch_end_of_log_mark: ""
    kubernetes:
      # Shipyard is not using this (maybe future for spawning own workers)
      worker_container_repository: ""
      worker_container_tag: ""
      delete_worker_pods: "True"
      namespace: "default"
      airflow_configmap: ""
      dags_volume_subpath: ""
      dags_volume_claim: ""
      logs_volume_subpath: ""
      logs_volume_claim: ""
      git_repo: ""
      git_branch: ""
      git_user: ""
      git_password: ""
      git_subpath: ""
      git_sync_container_repository: ""
      git_sync_container_tag: ""
      git_sync_init_container_name: ""
      worker_service_account_name: ""
      image_pull_secrets: ""
      gcp_service_account_keys: ""
      in_cluster: ""
    kubernetes_secrets:
      # Shipyard is not using this
    # End of Airflow config options
pod:
  security_context:
    shipyard:
      pod:
        runAsUser: 1000
      container:
        shipyard_api:
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
        airflow_web:
          allowPrivilegeEscalation: false
  mounts:
    airflow_scheduler:
      # TODO: This is only used if the standalone scheduler is enabled.
      airflow_scheduler:
      init_container: null
    airflow_worker:
      init_container: null
      airflow_worker:
      airflow_scheduler:
    shipyard:
      init_container: null
      shipyard:
    shipyard_db_init:
      init_container: null
      shipyard_db_init:
    shipyard_db_auxiliary:
      init_container: null
      shipyard_db_auxiliary:
    shipyard_db_sync:
      init_container: null
      shipyard_db_sync:
  replicas:
    shipyard:
      api: 2
    airflow:
      worker: 2
      scheduler: 2
  lifecycle:
    upgrades:
      deployments:
        revision_history: 3
        pod_replacement_strategy: RollingUpdate
        rolling_update:
          max_unavailable: 1
          max_surge: 3
    termination_grace_period:
      airflow:
        timeout: 30
      shipyard:
        timeout: 30
  resources:
    enabled: false
    airflow:
      logrotate:
        limits:
          memory: "128Mi"
          cpu: "100m"
        requests:
          memory: "128Mi"
          cpu: "100m"
      scheduler:
        limits:
          memory: "128Mi"
          cpu: "100m"
        requests:
          memory: "128Mi"
          cpu: "100m"
      web:
        limits:
          memory: "128Mi"
          cpu: "100m"
        requests:
          memory: "128Mi"
          cpu: "100m"
      worker:
        limits:
          memory: "128Mi"
          cpu: "100m"
        requests:
          memory: "128Mi"
          cpu: "100m"
    shipyard_api:
      limits:
        memory: "128Mi"
        cpu: "100m"
      requests:
        memory: "128Mi"
        cpu: "100m"
    jobs:
      rabbit_init:
        limits:
          memory: "128Mi"
          cpu: "500m"
        requests:
          memory: "128Mi"
          cpu: "500m"
      airflow_db_init:
        limits:
          memory: "128Mi"
          cpu: "500m"
        requests:
          memory: "128Mi"
          cpu: "500m"
      airflow_db_sync:
        limits:
          memory: "128Mi"
          cpu: "500m"
        requests:
          memory: "128Mi"
          cpu: "500m"
      ks_endpoints:
        limits:
          memory: "128Mi"
          cpu: "100m"
        requests:
          memory: "128Mi"
          cpu: "100m"
      ks_service:
        limits:
          memory: "128Mi"
          cpu: "100m"
        requests:
          memory: "128Mi"
          cpu: "100m"
      ks_user:
        limits:
          memory: "128Mi"
          cpu: "100m"
        requests:
          memory: "128Mi"
          cpu: "100m"
      shipyard_db_init:
        limits:
          memory: "128Mi"
          cpu: "500m"
        requests:
          memory: "128Mi"
          cpu: "500m"
      shipyard_db_auxiliary:
        limits:
          memory: "128Mi"
          cpu: "500m"
        requests:
          memory: "128Mi"
          cpu: "500m"
      shipyard_db_sync:
        limits:
          memory: "128Mi"
          cpu: "500m"
        requests:
          memory: "128Mi"
          cpu: "500m"
    test:
      shipyard:
        limits:
          memory: "128Mi"
          cpu: "100m"
        requests:
          memory: "128Mi"
          cpu: "100m"

manifests:
  configmap_shipyard_bin: true
  configmap_shipyard_etc: true
  configmap_airflow_bin: true
  configmap_airflow_etc: true
  # TODO: Set this to false only if a new deployment, or if the worker pod is
  # running the scheduler
  deployment_airflow_scheduler: true
  deployment_shipyard: true
  statefulset_airflow_worker: true
  ingress_shipyard_api: true
  job_shipyard_db_init: true
  job_shipyard_db_auxiliary: true
  job_shipyard_db_sync: true
  job_rabbit_init: true
  job_airflow_db_init: true
  job_airflow_db_sync: true
  job_ks_endpoints: true
  job_ks_service: true
  job_ks_user: true
  secret_airflow_db: true
  secret_shipyard_db: true
  secret_ingress_tls: true
  secret_keystone: true
  secret_rabbitmq: true
  service_shipyard: true
  service_shipyard_ingress: true
  service_airflow_worker: true
  service_discovery_airflow_worker: true
  test_shipyard_api: true