---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: helm-toolkit
data:
  chart_name: helm-toolkit
  release: helm-toolkit
  namespace: helm-toolkit
  values: {}
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/helm-toolkit-0.1.0.tgz
    subpath: helm-toolkit
    reference: master
  dependencies: []
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: kube-system-ingress
data:
  chart_name: ingress
  release: kube-system-ingress
  namespace: kube-system
  wait:
    timeout: 1800
    labels:
      release_group: osh-kube-system-ingress
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-kube-system-ingress
  values:
    labels:
      server:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      error_server:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    images:
      tags:
        image_repo_sync: null
        ingress_module_init: null
        ingress_routed_vip: null
        keepalived: null
    pod:
      replicas:
        error_page: 2
        ingress: 2
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
    conf:
      ingress:
        worker-processes: '4'
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/ingress-0.1.0.tgz
    subpath: ingress
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-ingress
data:
  chart_name: ingress
  release: openstack-ingress
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-ingress
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-ingress
  values:
    labels:
      server:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      error_server:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    images:
      tags:
        image_repo_sync: null
        ingress_module_init: null
        ingress_routed_vip: null
        keepalived: null
    pod:
      replicas:
        error_page: 2
        ingress: 2
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
    conf:
      ingress:
        worker-processes: '4'
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/ingress-0.1.0.tgz
    subpath: ingress
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-nginx-ports-control
data:
  chart_name: nginx-ports-control
  release: openstack-nginx-ports-control
  namespace: openstack
  values: {}
  wait:
    timeout: 1800
    resources: []
    labels:
      release_group: osh-openstack-nginx-ports-control
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/nginx-ports-control-0.1.0.tgz
    subpath: nginx-ports-control
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-nginx-ports-control
  dependencies: []
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-mariadb
data:
  chart_name: mariadb
  release: openstack-mariadb
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-mariadb
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-mariadb
  values:
    monitoring:
      prometheus:
        enabled: false
    labels:
      server:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      prometheus_mysql_exporter:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    images:
      tags:
        image_repo_sync: null
        prometheus_mysql_exporter: null
        prometheus_mysql_exporter_helm_tests: null
    pod:
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
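    # NOTE: this hard anti-affinity default recurs throughout the manifest;
    # requiredDuringSchedulingIgnoredDuringExecution makes the scheduler
    # refuse to co-locate two replicas of a service on one host (the rule is
    # not re-evaluated against pods that are already running).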
    conf:
      ingress_conf:
        worker-processes: '4'
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/mariadb-0.1.0.tgz
    subpath: mariadb
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-garbd
data:
  chart_name: garbd
  release: openstack-garbd
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-garbd
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-garbd
  values:
    labels:
      server:
        node_selector_key: openstack-compute-node
        node_selector_value: enabled
    images:
      tags:
        garbd: docker.io/starlingx/stx-mariadb:master-centos-stable-latest
        image_repo_sync: null
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/garbd-0.1.0.tgz
    subpath: garbd
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-memcached
data:
  chart_name: memcached
  release: openstack-memcached
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-memcached
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-memcached
  values:
    labels:
      server:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      prometheus_memcached_exporter:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    monitoring:
      prometheus:
        enabled: false
    images:
      tags:
        image_repo_sync: null
        prometheus_memcached_exporter: null
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/memcached-0.1.0.tgz
    subpath: memcached
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-rabbitmq
data:
  chart_name: rabbitmq
  release: openstack-rabbitmq
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-rabbitmq
  test:
    enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-rabbitmq
        - type: pod
          labels:
            release_group: osh-openstack-rabbitmq
            component: test
  values:
    monitoring:
      prometheus:
        enabled: false
    labels:
      server:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      prometheus_rabbitmq_exporter:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    images:
      tags:
        image_repo_sync: null
        prometheus_rabbitmq_exporter: null
        prometheus_rabbitmq_exporter_helm_tests: null
        rabbitmq_init: docker.io/starlingx/stx-heat:master-centos-stable-latest
    pod:
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
      probes:
        readiness:
          periodSeconds: 30
        liveness:
          periodSeconds: 30
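      # NOTE: probe polling is relaxed to every 30s here, presumably to
      # avoid flapping restarts while the controllers are under load.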
    # TODO: Revert to upstream defaults once the following LP is resolved:
    # https://bugs.launchpad.net/starlingx/+bug/1814595. By changing this PV
    # size to 1Gi from the default 265Mi, this avoids the kernel hang from the
    # filesystem race as seen in the LP.
    volume:
      size: 1Gi
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/rabbitmq-0.1.0.tgz
    subpath: rabbitmq
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-keystone
data:
  chart_name: keystone
  release: openstack-keystone
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-keystone
  test:
    enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-keystone
        - type: pod
          labels:
            release_group: osh-openstack-keystone
            component: test
  values:
    endpoints:
      identity:
        name: keystone
        namespace: openstack
    labels:
      api:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      job:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    images:
      tags:
        bootstrap: docker.io/starlingx/stx-heat:master-centos-stable-latest
        db_drop: docker.io/starlingx/stx-heat:master-centos-stable-latest
        db_init: docker.io/starlingx/stx-heat:master-centos-stable-latest
        image_repo_sync: null
        keystone_api: docker.io/starlingx/stx-keystone:master-centos-stable-latest
        keystone_credential_rotate: docker.io/starlingx/stx-keystone:master-centos-stable-latest
        keystone_credential_setup: docker.io/starlingx/stx-keystone:master-centos-stable-latest
        keystone_db_sync: docker.io/starlingx/stx-keystone:master-centos-stable-latest
        keystone_domain_manage: docker.io/starlingx/stx-keystone:master-centos-stable-latest
        keystone_fernet_rotate: docker.io/starlingx/stx-keystone:master-centos-stable-latest
        keystone_fernet_setup: docker.io/starlingx/stx-keystone:master-centos-stable-latest
        ks_user: docker.io/starlingx/stx-heat:master-centos-stable-latest
        test: null
    pod:
      user:
        keystone:
          uid: 0
      replicas:
        api: 2
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
      security_context:
        keystone:
          pod:
            runAsUser: 0
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/keystone-0.1.0.tgz
    subpath: keystone
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-keystone-api-proxy
data:
  chart_name: keystone-api-proxy
  release: openstack-keystone-api-proxy
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-keystone-api-proxy
  test:
    enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-keystone-api-proxy
        - type: pod
          labels:
            release_group: osh-openstack-keystone-api-proxy
            component: test
  values:
    images:
      tags:
        keystone_api_proxy: docker.io/starlingx/stx-keystone-api-proxy:master-centos-stable-latest
        ks_endpoints: docker.io/starlingx/stx-heat:master-centos-stable-latest
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/keystone-api-proxy-0.1.0.tgz
    subpath: keystone-api-proxy
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-barbican
data:
  chart_name: barbican
  release: openstack-barbican
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-barbican
  test:
    enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-barbican
        - type: pod
          labels:
            release_group: osh-openstack-barbican
            component: test
  values:
    labels:
      api:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      job:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
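    # NOTE: values.images.tags pins each container in a chart to a
    # StarlingX-built image; a null tag appears to be how images for unused
    # components (e.g. image_repo_sync) are left undefined.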
    images:
      tags:
        barbican_api: docker.io/starlingx/stx-barbican:master-centos-stable-latest
        barbican_db_sync: docker.io/starlingx/stx-barbican:master-centos-stable-latest
        bootstrap: docker.io/starlingx/stx-heat:master-centos-stable-latest
        db_drop: docker.io/starlingx/stx-heat:master-centos-stable-latest
        db_init: docker.io/starlingx/stx-heat:master-centos-stable-latest
        image_repo_sync: null
        ks_endpoints: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_service: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_user: docker.io/starlingx/stx-heat:master-centos-stable-latest
        scripted_test: docker.io/starlingx/stx-heat:master-centos-stable-latest
    pod:
      replicas:
        api: 2
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/barbican-0.1.0.tgz
    subpath: barbican
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-ceph-rgw
data:
  chart_name: ceph-rgw
  release: openstack-ceph-rgw
  namespace: openstack
  wait:
    timeout: 300
    resources:
      - type: job
        labels:
          release_group: osh-openstack-ceph-rgw
  test:
    enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-ceph-rgw
        - type: pod
          labels:
            release_group: osh-openstack-ceph-rgw
            component: test
  values:
    conf:
      ceph:
        global:
          cephx: false
      rgw_ks:
        enabled: true
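    # NOTE: cephx: false above disables Ceph's native authentication between
    # the gateway and the cluster, and rgw_ks.enabled wires radosgw to
    # Keystone, so the endpoint below serves a Swift-compatible API
    # (/swift/v1 on port 7480).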
    endpoints:
      object_store:
        path:
          default: '/swift/v1'
        port:
          api:
            default: null
            admin: 7480
            internal: 7480
            public: 7480
    images:
      tags:
        ceph_config_helper: docker.io/starlingx/ceph-config-helper:v1.15.0
        ceph_rgw: null
        image_repo_sync: null
        ks_endpoints: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_service: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_user: docker.io/starlingx/stx-heat:master-centos-stable-latest
        rgw_s3_admin: docker.io/starlingx/ceph-config-helper:v1.15.0
    labels:
      api:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      job:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      registry:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    pod:
      replicas:
        api: 2
        registry: 2
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/ceph-rgw-0.1.0.tgz
    subpath: ceph-rgw
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-glance
data:
  chart_name: glance
  release: openstack-glance
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-glance
  test:
    enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-glance
        - type: pod
          labels:
            release_group: osh-openstack-glance
            component: test
  values:
    labels:
      api:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      job:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      registry:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    images:
      tags:
        bootstrap: docker.io/starlingx/stx-heat:master-centos-stable-latest
        db_drop: docker.io/starlingx/stx-heat:master-centos-stable-latest
        db_init: docker.io/starlingx/stx-heat:master-centos-stable-latest
        glance_api: docker.io/starlingx/stx-glance:master-centos-stable-latest
        glance_db_sync: docker.io/starlingx/stx-glance:master-centos-stable-latest
        glance_registry: docker.io/starlingx/stx-glance:master-centos-stable-latest
        glance_storage_init: docker.io/starlingx/ceph-config-helper:v1.15.0
        image_repo_sync: null
        ks_endpoints: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_service: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_user: docker.io/starlingx/stx-heat:master-centos-stable-latest
        test: null
    pod:
      replicas:
        api: 2
        registry: 2
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/glance-0.1.0.tgz
    subpath: glance
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-cinder
data:
  chart_name: cinder
  release: openstack-cinder
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-cinder
  test:
    enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-cinder
        - type: pod
          labels:
            release_group: osh-openstack-cinder
            component: test
  values:
    labels:
      api:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      backup:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      job:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      scheduler:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      volume:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    images:
      tags:
        bootstrap: docker.io/starlingx/stx-heat:master-centos-stable-latest
        cinder_api: docker.io/starlingx/stx-cinder:master-centos-stable-latest
        cinder_backup: docker.io/starlingx/stx-cinder:master-centos-stable-latest
        cinder_backup_storage_init: docker.io/starlingx/ceph-config-helper:v1.15.0
        cinder_db_sync: docker.io/starlingx/stx-cinder:master-centos-stable-latest
        cinder_scheduler: docker.io/starlingx/stx-cinder:master-centos-stable-latest
        cinder_storage_init: docker.io/starlingx/ceph-config-helper:v1.15.0
        cinder_volume: docker.io/starlingx/stx-cinder:master-centos-stable-latest
        cinder_volume_usage_audit: docker.io/starlingx/stx-cinder:master-centos-stable-latest
        db_drop: docker.io/starlingx/stx-heat:master-centos-stable-latest
        db_init: docker.io/starlingx/stx-heat:master-centos-stable-latest
        image_repo_sync: null
        ks_endpoints: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_service: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_user: docker.io/starlingx/stx-heat:master-centos-stable-latest
        test: null
    pod:
      replicas:
        api: 2
        volume: 1
        scheduler: 1
        backup: 1
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
    conf:
      cinder:
        DEFAULT:
          backup_driver: cinder.backup.drivers.ceph.CephBackupDriver
    storage: rbd
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/cinder-0.1.0.tgz
    subpath: cinder
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-libvirt
data:
  chart_name: libvirt
  release: openstack-libvirt
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-libvirt
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
  values:
    ceph_client:
      user_secret_name: cinder-volume-rbd-keyring
    labels:
      agent:
        libvirt:
          node_selector_key: openstack-compute-node
          node_selector_value: enabled
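    # NOTE: conf.kubernetes.cgroup below keeps libvirt (and the qemu
    # processes it spawns) under the platform's "k8s-infra" cgroup, and
    # ceph_client above appears to reuse the cinder-volume RBD keyring for
    # libvirt's Ceph secret.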
    conf:
      ceph:
        enabled: true
      kubernetes:
        cgroup: "k8s-infra"
    images:
      tags:
        image_repo_sync: null
        libvirt: docker.io/starlingx/stx-libvirt:master-centos-stable-latest
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/libvirt-0.1.0.tgz
    subpath: libvirt
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-openvswitch
data:
  chart_name: openvswitch
  release: openstack-openvswitch
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-openvswitch
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-openvswitch
  values:
    labels:
      ovs:
        node_selector_key: openvswitch
        node_selector_value: enabled
    images:
      tags:
        image_repo_sync: null
        openvswitch_db_server: docker.io/starlingx/stx-ovs:master-centos-stable-latest
        openvswitch_vswitchd: docker.io/starlingx/stx-ovs:master-centos-stable-latest
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/openvswitch-0.1.0.tgz
    subpath: openvswitch
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-nova
data:
  chart_name: nova
  release: openstack-nova
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-nova
  test:
    enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-nova
  values:
    manifests:
      job_ks_endpoints: false
      ingress_osapi: false
      service_ingress_osapi: false
      cron_job_cell_setup: false
      statefulset_compute_ironic: false
      deployment_placement: false
      ingress_placement: false
      job_db_init_placement: false
      job_ks_placement_endpoints: false
      job_ks_placement_service: false
      job_ks_placement_user: false
      pdb_placement: false
      secret_keystone_placement: false
      service_ingress_placement: false
      service_placement: false
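    # NOTE: the *_placement manifests are disabled above because placement is
    # deployed from its own chart (openstack-placement, further down). The
    # labels below split services between hosts tagged openstack-control-plane
    # and openstack-compute-node.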
    labels:
      agent:
        compute:
          node_selector_key: openstack-compute-node
          node_selector_value: enabled
        compute_ironic:
          node_selector_key: openstack-ironic
          node_selector_value: enabled
      api_metadata:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      conductor:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      consoleauth:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      job:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      novncproxy:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      osapi:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      scheduler:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      spiceproxy:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      test:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    images:
      tags:
        bootstrap: docker.io/starlingx/stx-heat:master-centos-stable-latest
        db_drop: docker.io/starlingx/stx-heat:master-centos-stable-latest
        db_init: docker.io/starlingx/stx-heat:master-centos-stable-latest
        image_repo_sync: null
        ks_user: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_service: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_endpoints: docker.io/starlingx/stx-heat:master-centos-stable-latest
        nova_api: docker.io/starlingx/stx-nova:master-centos-stable-latest
        nova_cell_setup: docker.io/starlingx/stx-nova:master-centos-stable-latest
        nova_cell_setup_init: docker.io/starlingx/stx-heat:master-centos-stable-latest
        nova_compute: docker.io/starlingx/stx-nova:master-centos-stable-latest
        nova_compute_ironic: docker.io/starlingx/stx-nova:master-centos-stable-latest
        nova_compute_ssh: docker.io/starlingx/stx-nova:master-centos-stable-latest
        nova_conductor: docker.io/starlingx/stx-nova:master-centos-stable-latest
        nova_consoleauth: docker.io/starlingx/stx-nova:master-centos-stable-latest
        nova_db_sync: docker.io/starlingx/stx-nova:master-centos-stable-latest
        nova_novncproxy: docker.io/starlingx/stx-nova:master-centos-stable-latest
        nova_novncproxy_assets: docker.io/starlingx/stx-nova:master-centos-stable-latest
        nova_placement: docker.io/starlingx/stx-nova:master-centos-stable-latest
        nova_scheduler: docker.io/starlingx/stx-nova:master-centos-stable-latest
        nova_service_cleaner: docker.io/starlingx/ceph-config-helper:v1.15.0
        nova_spiceproxy: docker.io/starlingx/stx-nova:master-centos-stable-latest
        nova_spiceproxy_assets: docker.io/starlingx/stx-nova:master-centos-stable-latest
        nova_storage_init: docker.io/starlingx/ceph-config-helper:v1.15.0
        test: null
    pod:
      # TODO(rchurch):
      # Change-Id: I5a60efd133c156ce2ecac31d22e94b25e4e837bf broke armada apply
      # of this manifest. Turning it off for now. Need to determine way forward
      # here.
      mandatory_access_control:
        type: null
      useHostNetwork:
        novncproxy: false
      replicas:
        api_metadata: 1
        osapi: 1
        conductor: 1
        consoleauth: 1
        scheduler: 1
        novncproxy: 1
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
      user:
        nova:
          uid: 0
      # TODO:(rchurch) Change-Id: Ib8e4b93486588320fd2d562c3bc90b65844e52e5
      # introduced readiness/liveness probes for nova. nova_scheduler probes
      # fail and cause a long delay and eventual failure of the armada
      # application apply. Need to determine the fix to re-enable this.
      probes:
        readiness:
          nova_scheduler:
            enabled: false
        liveness:
          nova_scheduler:
            enabled: false
    conf:
      ceph:
        enabled: true
      nova:
        DEFAULT:
          allow_resize_to_same_host: true
          default_mempages_size: 2048
          reserved_host_memory_mb: 0
          compute_monitors: cpu.virt_driver
          running_deleted_instance_poll_interval: 60
          mkisofs_cmd: /usr/bin/genisoimage
          network_allocate_retries: 2
          force_raw_images: false
          concurrent_disk_operations: 2
          # Set number of block device allocate retries and interval
          # for volume create when VM boots and creates a new volume.
          # The total block allocate retries time is set to 2 hours
          # to satisfy the volume allocation time on slow RPM disks
          # which may take 1 hour and a half per volume when several
          # volumes are created in parallel.
          block_device_allocate_retries_interval: 3
          block_device_allocate_retries: 2400
          disk_allocation_ratio: 1.0
          cpu_allocation_ratio: 16.0
          ram_allocation_ratio: 1.0
          remove_unused_original_minimum_age_seconds: 3600
          enable_new_services: false
          map_new_hosts: false
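          # NOTE: enable_new_services: false registers newly discovered
          # compute services in a disabled state, letting the platform bring
          # them into service deliberately.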
          # Increase from default of 60 seconds to avoid services being
          # declared down during controller swacts, reboots, etc...
          service_down_time: 90
          long_rpc_timeout: 400
        libvirt:
          cpu_mode: host-model
          live_migration_completion_timeout: 180
          live_migration_permit_auto_converge: true
          mem_stats_period_seconds: 0
          rbd_user: cinder
          # Allow up to 1 day for resize conf
          remove_unused_resized_minimum_age_seconds: 86400
        database:
          idle_timeout: 60
          max_overflow: 64
          max_pool_size: 1
        api_database:
          idle_timeout: 60
          max_overflow: 64
          max_pool_size: 1
        cell0_database:
          idle_timeout: 60
          max_overflow: 64
          max_pool_size: 1
        neutron:
          default_floating_pool: public
        notifications:
          notification_format: unversioned
        filter_scheduler:
          build_failure_weight_multiplier: 0.0
          cpu_weight_multiplier: 0.0
          disk_weight_multiplier: 0.0
          enabled_filters:
            - RetryFilter
            - ComputeFilter
            - AvailabilityZoneFilter
            - AggregateInstanceExtraSpecsFilter
            - ComputeCapabilitiesFilter
            - ImagePropertiesFilter
            - NUMATopologyFilter
            - ServerGroupAffinityFilter
            - ServerGroupAntiAffinityFilter
            - PciPassthroughFilter
          pci_weight_multiplier: 0.0
          ram_weight_multiplier: 0.0
          shuffle_best_same_weighed_hosts: true
          soft_affinity_weight_multiplier: 20.0
          soft_anti_affinity_weight_multiplier: 20.0
        scheduler:
          workers: 1
          discover_hosts_in_cells_interval: 30
          periodic_task_interval: -1
        service_user:
          send_service_user_token: true
        upgrade_levels: None
        metrics:
          required: false
        workarounds:
          enable_numa_live_migration: true
      hypervisor:
        address_search_enabled: false
      network:
        sshd:
          enabled: true
      console:
        address_search_enabled: false
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/nova-0.1.0.tgz
    subpath: nova
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-placement
data:
  chart_name: placement
  release: openstack-placement
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-placement
  test:
    enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-placement
  values:
    labels:
      placement:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      job:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    images:
      tags:
        db_drop: docker.io/starlingx/stx-heat:master-centos-stable-latest
        db_init: docker.io/starlingx/stx-heat:master-centos-stable-latest
        image_repo_sync: null
        ks_user: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_service: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_endpoints: docker.io/starlingx/stx-heat:master-centos-stable-latest
        placement: docker.io/starlingx/stx-placement:master-centos-stable-latest
        placement_db_sync: docker.io/starlingx/stx-placement:master-centos-stable-latest
    pod:
      replicas:
        placement: 1
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
      user:
        placement:
          uid: 42424
    conf:
      placement:
        DEFAULT:
          log_config_append: /etc/placement/logging.conf
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/placement-0.1.0.tgz
    subpath: placement
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-nova-api-proxy
data:
  chart_name: nova-api-proxy
  release: openstack-nova-api-proxy
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-nova-api-proxy
  test:
    enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-nova-api-proxy
        - type: pod
          labels:
            release_group: osh-openstack-nova-api-proxy
            component: test
  values:
    images:
      tags:
        nova_api_proxy: docker.io/starlingx/stx-nova-api-proxy:master-centos-stable-latest
        ks_endpoints: docker.io/starlingx/stx-heat:master-centos-stable-latest
    pod:
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/nova-api-proxy-0.1.0.tgz
    subpath: nova-api-proxy
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-neutron
data:
  chart_name: neutron
  release: openstack-neutron
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-neutron
  test:
    enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-neutron
  values:
    pod:
      replicas:
        server: 2
      user:
        neutron:
          uid: 0
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
      # TODO:(rchurch) Change-Id: Ib99ceaabbad1d1e0faf34cc74314da9aa688fa0a
      # introduced readiness/liveness probes for neutron. Four of the probes
      # fail and cause a long delay and eventual failure of the armada
      # application apply. Need to determine the fix to re-enable these.
      probes:
        readiness:
          dhcp_agent:
            enabled: false
          l3_agent:
            enabled: false
          metadata_agent:
            enabled: false
          sriov_agent:
            enabled: false
        liveness:
          dhcp_agent:
            enabled: false
          l3_agent:
            enabled: false
          metadata_agent:
            enabled: false
          sriov_agent:
            enabled: false
    labels:
      agent:
        dhcp:
          node_selector_key: openstack-compute-node
          node_selector_value: enabled
        l3:
          node_selector_key: openstack-compute-node
          node_selector_value: enabled
        metadata:
          node_selector_key: openstack-compute-node
          node_selector_value: enabled
      job:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      lb:
        node_selector_key: linuxbridge
        node_selector_value: enabled
      # ovs is a special case, requiring a special
      # label that can apply to both control hosts
      # and compute hosts, until we get more sophisticated
      # with our daemonset scheduling
      ovs:
        node_selector_key: openvswitch
        node_selector_value: enabled
      server:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      test:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    images:
      tags:
        bootstrap: docker.io/starlingx/stx-heat:master-centos-stable-latest
        db_init: docker.io/starlingx/stx-heat:master-centos-stable-latest
        db_drop: docker.io/starlingx/stx-heat:master-centos-stable-latest
        image_repo_sync: null
        ks_user: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_service: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_endpoints: docker.io/starlingx/stx-heat:master-centos-stable-latest
        neutron_db_sync: docker.io/starlingx/stx-neutron:master-centos-stable-latest
        neutron_dhcp: docker.io/starlingx/stx-neutron:master-centos-stable-latest
        neutron_l3: docker.io/starlingx/stx-neutron:master-centos-stable-latest
        neutron_linuxbridge_agent: docker.io/starlingx/stx-neutron:master-centos-stable-latest
        neutron_metadata: docker.io/starlingx/stx-neutron:master-centos-stable-latest
        neutron_openvswitch_agent: docker.io/starlingx/stx-neutron:master-centos-stable-latest
        neutron_server: docker.io/starlingx/stx-neutron:master-centos-stable-latest
        neutron_sriov_agent: docker.io/starlingx/stx-neutron:master-centos-stable-latest
        neutron_sriov_agent_init: docker.io/starlingx/stx-neutron:master-centos-stable-latest
        test: null
    network:
      interface:
        tunnel: docker0
      backend:
        - openvswitch
        - sriov
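    # NOTE: network.interface.tunnel names the interface whose address the
    # VXLAN endpoints bind to (docker0 here); the conf overrides below turn
    # off L3 HA in favour of a single dvr_snat agent per router.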
    conf:
      neutron:
        DEFAULT:
          l3_ha: false
          min_l3_agents_per_router: 1
          max_l3_agents_per_router: 1
          l3_ha_network_type: vxlan
          dhcp_agents_per_network: 1
          max_overflow: 64
          max_pool_size: 1
          idle_timeout: 60
          rpc_response_max_timeout: 60
          router_status_managed: true
          vlan_transparent: true
          wsgi_default_pool_size: 100
          notify_nova_on_port_data_changes: true
          notify_nova_on_port_status_changes: true
          control_exchange: neutron
          core_plugin: neutron.plugins.ml2.plugin.Ml2Plugin
          state_path: /var/run/neutron
          syslog_log_facility: local2
          use_syslog: true
          pnet_audit_enabled: false
          driver: messagingv2
          enable_proxy_headers_parsing: true
          lock_path: /var/run/neutron/lock
          log_format: '[%(name)s] %(message)s'
          policy_file: /etc/neutron/policy.json
          service_plugins: router,network_segment_range
          dns_domain: openstacklocal
          enable_new_agents: false
          allow_automatic_dhcp_failover: true
          allow_automatic_l3agent_failover: true
          # Increase from default of 75 seconds to avoid agents being declared
          # down during controller swacts, reboots, etc...
          agent_down_time: 180
        agent:
          root_helper: sudo
        vhost:
          vhost_user_enabled: true
      dhcp_agent:
        DEFAULT:
          enable_isolated_metadata: true
          enable_metadata_network: false
          interface_driver: openvswitch
          resync_interval: 30
      l3_agent:
        DEFAULT:
          agent_mode: dvr_snat
          interface_driver: openvswitch
          metadata_port: 80
      plugins:
        ml2_conf:
          ml2:
            mechanism_drivers: openvswitch,sriovnicswitch,l2population
            path_mtu: 0
            tenant_network_types: vlan,vxlan
            type_drivers: flat,vlan,vxlan
          ml2_type_vxlan:
            vni_ranges: ''
            vxlan_group: ''
          ovs_driver:
            vhost_user_enabled: true
          securitygroup:
            firewall_driver: openvswitch
        openvswitch_agent:
          agent:
            tunnel_types: vxlan
          ovs:
            bridge_mappings: public:br-ex
          securitygroup:
            firewall_driver: openvswitch
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/neutron-0.1.0.tgz
    subpath: neutron
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-ironic
data:
  chart_name: ironic
  release: openstack-ironic
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-ironic
  test:
    enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-ironic
  values:
    pod:
      replicas:
        api: 2
        conductor: 2
      user:
        ironic:
          uid: 0
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
    labels:
      api:
        node_selector_key: openstack-ironic
        node_selector_value: enabled
      conductor:
        node_selector_key: openstack-ironic
        node_selector_value: enabled
      job:
        node_selector_key: openstack-ironic
        node_selector_value: enabled
    images:
      tags:
        bootstrap: docker.io/starlingx/stx-heat:master-centos-stable-latest
        db_drop: docker.io/starlingx/stx-heat:master-centos-stable-latest
        db_init: docker.io/starlingx/stx-heat:master-centos-stable-latest
        image_repo_sync: null
        ironic_api: docker.io/starlingx/stx-ironic:master-centos-stable-latest
        ironic_conductor: docker.io/starlingx/stx-ironic:master-centos-stable-latest
        ironic_db_sync: docker.io/starlingx/stx-ironic:master-centos-stable-latest
        ironic_manage_cleaning_network: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ironic_pxe: docker.io/starlingx/stx-ironic:master-centos-stable-latest
        ironic_pxe_init: docker.io/starlingx/stx-ironic:master-centos-stable-latest
        ironic_pxe_http: docker.io/nginx:1.13.3
        ironic_retrive_cleaning_network: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ironic_retrive_swift_config: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_endpoints: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_service: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_user: docker.io/starlingx/stx-heat:master-centos-stable-latest
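    # NOTE: the interface lists below follow the Stein hardware-type model,
    # which is why the deprecated enabled_drivers list is explicitly emptied.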
    conf:
      ironic:
        DEFAULT:
          # enabled_drivers is deprecated in stein
          enabled_drivers: ''
          enabled_hardware_types: ipmi
          enabled_bios_interfaces: no-bios
          enabled_boot_interfaces: pxe,ipxe
          enabled_console_interfaces: ipmitool-socat
          enabled_deploy_interfaces: iscsi,direct
          enabled_inspect_interfaces: no-inspect
          enabled_management_interfaces: ipmitool
          enabled_network_interfaces: flat,noop
          enabled_power_interfaces: ipmitool
          enabled_raid_interfaces: no-raid
          enabled_storage_interfaces: cinder,noop
          enabled_vendor_interfaces: ipmitool,no-vendor
        api:
          port: 6385
        pxe:
          pxe_append_params: "nofb nomodeset vga=normal console=ttyS0,115200n8"
        dhcp:
          dhcp_provider: neutron
    # Disable ipa image downloading during bootstrap
    bootstrap:
      image:
        enabled: false
    endpoints:
      baremetal:
        port:
          pxe_http:
            default: 28080
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/ironic-0.1.0.tgz
    subpath: ironic
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-heat
data:
  chart_name: heat
  release: openstack-heat
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-heat
  test:
    enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-heat
        - type: pod
          labels:
            release_group: osh-openstack-heat
            component: test
  values:
    endpoints:
      oslo_cache:
        hosts:
          default: heat-memcached
    labels:
      api:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      cfn:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      cloudwatch:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      engine:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      job:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    images:
      tags:
        bootstrap: docker.io/starlingx/stx-heat:master-centos-stable-latest
        db_drop: docker.io/starlingx/stx-heat:master-centos-stable-latest
        db_init: docker.io/starlingx/stx-heat:master-centos-stable-latest
        heat_api: docker.io/starlingx/stx-heat:master-centos-stable-latest
        heat_cfn: docker.io/starlingx/stx-heat:master-centos-stable-latest
        heat_cloudwatch: docker.io/starlingx/stx-heat:master-centos-stable-latest
        heat_db_sync: docker.io/starlingx/stx-heat:master-centos-stable-latest
        heat_engine: docker.io/starlingx/stx-heat:master-centos-stable-latest
        heat_engine_cleaner: docker.io/starlingx/stx-heat:master-centos-stable-latest
        heat_purge_deleted: docker.io/starlingx/stx-heat:master-centos-stable-latest
        image_repo_sync: null
        ks_endpoints: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_service: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_user: docker.io/starlingx/stx-heat:master-centos-stable-latest
        test: null
    conf:
      policy:
        stacks:global_index: rule:context_is_admin
        software_configs:global_index: rule:context_is_admin
    pod:
      replicas:
        api: 2
        cfn: 2
        cloudwatch: 2
        engine: 2
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/heat-0.1.0.tgz
    subpath: heat
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-aodh
data:
  chart_name: aodh
  release: openstack-aodh
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-aodh
  test:
    enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-aodh
        - type: pod
          labels:
            release_group: osh-openstack-aodh
            component: test
  values:
    images:
      tags:
        aodh_alarms_cleaner: docker.io/starlingx/stx-aodh:master-centos-stable-latest
        aodh_api: docker.io/starlingx/stx-aodh:master-centos-stable-latest
        aodh_db_sync: docker.io/starlingx/stx-aodh:master-centos-stable-latest
        aodh_evaluator: docker.io/starlingx/stx-aodh:master-centos-stable-latest
        aodh_listener: docker.io/starlingx/stx-aodh:master-centos-stable-latest
        aodh_notifier: docker.io/starlingx/stx-aodh:master-centos-stable-latest
        bootstrap: docker.io/starlingx/stx-heat:master-centos-stable-latest
        db_drop: docker.io/starlingx/stx-heat:master-centos-stable-latest
        db_init: docker.io/starlingx/stx-heat:master-centos-stable-latest
        image_repo_sync: null
        ks_endpoints: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_service: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_user: docker.io/starlingx/stx-heat:master-centos-stable-latest
    pod:
      user:
        aodh:
          uid: 0
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
    jobs:
      alarms_cleaner:
        # daily at the 35 minute mark
        cron: "35 */24 * * *"
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/aodh-0.1.0.tgz
    subpath: aodh
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-gnocchi
data:
  chart_name: gnocchi
  release: openstack-gnocchi
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-gnocchi
  test:
    enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-gnocchi
        - type: pod
          labels:
            release_group: osh-openstack-gnocchi
            component: test
  values:
    images:
      tags:
        db_init: docker.io/starlingx/stx-gnocchi:master-centos-stable-latest
        db_init_indexer: docker.io/starlingx/stx-gnocchi:master-centos-stable-latest
        db_sync: docker.io/starlingx/stx-gnocchi:master-centos-stable-latest
        gnocchi_api: docker.io/starlingx/stx-gnocchi:master-centos-stable-latest
        gnocchi_metricd: docker.io/starlingx/stx-gnocchi:master-centos-stable-latest
        gnocchi_resources_cleaner: docker.io/starlingx/stx-gnocchi:master-centos-stable-latest
        gnocchi_statsd: docker.io/starlingx/stx-gnocchi:master-centos-stable-latest
        gnocchi_storage_init: docker.io/starlingx/ceph-config-helper:v1.15.0
        image_repo_sync: null
        ks_endpoints: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_service: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_user: docker.io/starlingx/stx-heat:master-centos-stable-latest
    conf:
      gnocchi:
        indexer:
          driver: mariadb
        keystone_authtoken:
          interface: internal
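      # NOTE: the apache block below is rendered verbatim into the
      # gnocchi-api vhost config; the helm-toolkit endpoint_port_lookup call
      # resolves the internal "metric" API port at deploy time.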
      apache: |
        Listen 0.0.0.0:{{ tuple "metric" "internal" "api" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}

        SetEnvIf X-Forwarded-For "^.*\..*\..*\..*" forwarded
        CustomLog /dev/stdout combined env=!forwarded
        CustomLog /dev/stdout proxy env=forwarded

        <VirtualHost *:{{ tuple "metric" "internal" "api" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}>
            WSGIDaemonProcess gnocchi processes=1 threads=2 user=gnocchi group=gnocchi display-name=%{GROUP}
            WSGIProcessGroup gnocchi
            WSGIScriptAlias / "/var/lib/openstack/bin/gnocchi-api"
            WSGIApplicationGroup %{GLOBAL}

            ErrorLog /dev/stdout
            SetEnvIf X-Forwarded-For "^.*\..*\..*\..*" forwarded
            CustomLog /dev/stdout combined env=!forwarded
            CustomLog /dev/stdout proxy env=forwarded

            <Directory "/var/lib/openstack/bin">
                Require all granted
            </Directory>
        </VirtualHost>
      paste:
        composite:gnocchi+basic:
          use: egg:Paste#urlmap
          /: gnocchiversions_pipeline
          /v1: gnocchiv1+noauth
          /healthcheck: healthcheck
        composite:gnocchi+keystone:
          use: egg:Paste#urlmap
          /: gnocchiversions_pipeline
          /v1: gnocchiv1+keystone
          /healthcheck: healthcheck
        composite:gnocchi+remoteuser:
          use: egg:Paste#urlmap
          /: gnocchiversions_pipeline
          /v1: gnocchiv1+noauth
          /healthcheck: healthcheck
        pipeline:gnocchiv1+noauth:
          pipeline: gnocchiv1
        pipeline:gnocchiv1+keystone:
          pipeline: keystone_authtoken gnocchiv1
        pipeline:gnocchiversions_pipeline:
          pipeline: gnocchiversions
        app:gnocchiversions:
          paste.app_factory: gnocchi.rest.app:app_factory
          root: gnocchi.rest.api.VersionsController
        app:gnocchiv1:
          paste.app_factory: gnocchi.rest.app:app_factory
          root: gnocchi.rest.api.V1Controller
        filter:keystone_authtoken:
          use: egg:keystonemiddleware#auth_token
          oslo_config_project: gnocchi
        app:healthcheck:
          use: egg:oslo.middleware#healthcheck
          oslo_config_project: gnocchi
    pod:
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
    dependencies:
      static:
        db_sync:
          jobs:
            - gnocchi-storage-init
            - gnocchi-db-init
          services:
            - endpoint: internal
              service: oslo_db
        metricd:
          services:
            - endpoint: internal
              service: oslo_db
            - endpoint: internal
              service: oslo_cache
            - endpoint: internal
              service: metric
        tests:
          services:
            - endpoint: internal
              service: identity
            - endpoint: internal
              service: oslo_db
            - endpoint: internal
              service: metric
    manifests:
      daemonset_statsd: false
      job_db_init_indexer: false
      secret_db_indexer: false
      service_statsd: false
    endpoints:
      oslo_cache:
        hosts:
          default: memcached
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/gnocchi-0.1.0.tgz
    subpath: gnocchi
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-panko
data:
  chart_name: panko
  release: openstack-panko
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-panko
  test:
    enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-panko
        - type: pod
          labels:
            release_group: osh-openstack-panko
            component: test
  values:
    pod:
      user:
        panko:
          uid: 0
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
    images:
      tags:
        bootstrap: docker.io/starlingx/stx-heat:master-centos-stable-latest
        db_drop: docker.io/starlingx/stx-heat:master-centos-stable-latest
        db_init: docker.io/starlingx/stx-heat:master-centos-stable-latest
        image_repo_sync: null
        ks_endpoints: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_service: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_user: docker.io/starlingx/stx-heat:master-centos-stable-latest
        panko_api: docker.io/starlingx/stx-panko:master-centos-stable-latest
        panko_db_sync: docker.io/starlingx/stx-panko:master-centos-stable-latest
        panko_events_cleaner: docker.io/starlingx/stx-panko:master-centos-stable-latest
        test: null
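    # NOTE: the paste override below defines Panko's WSGI pipelines; the
    # keystone variant runs cors, http_proxy_to_wsgi, request_id and
    # authtoken middleware ahead of the v2 API app.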
    conf:
      paste:
        composite:panko+noauth:
          use: egg:Paste#urlmap
          /: pankoversions_pipeline
          /v2: pankov2_noauth_pipeline
        composite:panko+keystone:
          use: egg:Paste#urlmap
          /: pankoversions_pipeline
          /v2: pankov2_keystone_pipeline
        pipeline:pankoversions_pipeline:
          pipeline: cors http_proxy_to_wsgi pankoversions
        app:pankoversions:
          paste.app_factory: panko.api.app:app_factory
          root: panko.api.controllers.root.VersionsController
        pipeline:pankov2_keystone_pipeline:
          pipeline: cors http_proxy_to_wsgi request_id authtoken pankov2
        pipeline:pankov2_noauth_pipeline:
          pipeline: cors http_proxy_to_wsgi request_id pankov2
        app:pankov2:
          paste.app_factory: panko.api.app:app_factory
          root: panko.api.controllers.v2.root.V2Controller
        filter:authtoken:
          paste.filter_factory: keystonemiddleware.auth_token:filter_factory
          oslo_config_project: panko
        filter:request_id:
          paste.filter_factory: oslo_middleware:RequestId.factory
        filter:cors:
          paste.filter_factory: oslo_middleware.cors:filter_factory
          oslo_config_project: panko
        filter:http_proxy_to_wsgi:
          paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
          oslo_config_project: panko
    jobs:
      events_cleaner:
        # hourly at the 10 minute mark
        cron: "10 * * * *"
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/panko-0.1.0.tgz
    subpath: panko
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-ceilometer
data:
  chart_name: ceilometer
  release: openstack-ceilometer
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-ceilometer
  test:
    enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-ceilometer
        - type: pod
          labels:
            release_group: osh-openstack-ceilometer
            component: test
  values:
    images:
      tags:
        ceilometer_api: docker.io/starlingx/stx-ceilometer:master-centos-stable-latest
        ceilometer_central: docker.io/starlingx/stx-ceilometer:master-centos-stable-latest
        ceilometer_collector: docker.io/starlingx/stx-ceilometer:master-centos-stable-latest
        ceilometer_compute: docker.io/starlingx/stx-ceilometer:master-centos-stable-latest
        ceilometer_db_sync: docker.io/starlingx/stx-ceilometer:master-centos-stable-latest
        ceilometer_ipmi: docker.io/starlingx/stx-ceilometer:master-centos-stable-latest
        ceilometer_notification: docker.io/starlingx/stx-ceilometer:master-centos-stable-latest
        db_init: docker.io/starlingx/stx-ceilometer:master-centos-stable-latest
        db_init_mongodb: docker.io/starlingx/stx-ceilometer:master-centos-stable-latest
        image_repo_sync: null
        ks_endpoints: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_service: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_user: docker.io/starlingx/stx-heat:master-centos-stable-latest
        test: null
    dependencies:
      static:
        central:
          jobs:
            - ceilometer-db-sync
            - ceilometer-rabbit-init
          services: null
        compute:
          jobs:
            - ceilometer-db-sync
            - ceilometer-rabbit-init
          services: null
        ipmi:
          jobs:
            - ceilometer-db-sync
            - ceilometer-rabbit-init
          services: null
        notification:
          jobs:
            - ceilometer-db-sync
            - ceilometer-rabbit-init
          services:
            - endpoint: internal
              service: event
        db_sync:
          jobs:
            - ceilometer-ks-user
            - ceilometer-ks-service
          services:
            - endpoint: internal
              service: identity
            - endpoint: internal
              service: metric
    manifests:
      deployment_api: false
      deployment_collector: false
      service_api: false
      job_db_init: false
      job_db_init_mongodb: false
      job_ks_endpoints: false
      secret_db: false
      secret_mongodb: false
    endpoints:
      oslo_cache:
        hosts:
          default: memcached
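      # NOTE: the event endpoint is pointed at Panko so the event_pipeline's
      # panko:// publisher (below) has somewhere to deliver.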
      event:
        name: panko
        hosts:
          default: panko-api
          public: panko
        host_fqdn_override:
          default: null
        path:
          default: null
        scheme:
          default: 'http'
        port:
          api:
            default: 8977
            public: 80
    conf:
      ceilometer:
        cache:
          expiration_time: 86400
        compute:
          resource_update_interval: 60
          instance_discovery_method: workload_partitioning
        oslo_messaging_notifications:
          topics:
            - notifications
      pipeline:
        sources:
          - name: meter_source
            meters:
              - "*"
            sinks:
              - meter_sink
        sinks:
          - name: meter_sink
            publishers:
              - gnocchi://
      event_pipeline:
        sources:
          - name: event_source
            events:
              - "*"
            sinks:
              - event_sink
        sinks:
          - name: event_sink
            publishers:
              - panko://
              - gnocchi://
      polling:
        sources:
          - name: instance_cpu_pollster
            interval: 30
            meters:
              - cpu
          - name: instance_disk_pollster
            interval: 600
            meters:
              - disk.capacity
              - disk.allocation
              - disk.usage
              - disk.device.read.requests
              - disk.device.write.requests
              - disk.device.read.bytes
              - disk.device.write.bytes
              - disk.device.capacity
              - disk.device.allocation
              - disk.device.usage
          - name: ipmi_pollster
            interval: 600
            meters:
              - hardware.ipmi.node.power
              - hardware.ipmi.node.temperature
              - hardware.ipmi.node.outlet_temperature
              - hardware.ipmi.node.airflow
              - hardware.ipmi.node.cups
              - hardware.ipmi.node.cpu_util
              - hardware.ipmi.node.mem_util
              - hardware.ipmi.node.io_util
              - hardware.ipmi.temperature
              - hardware.ipmi.voltage
              - hardware.ipmi.current
              - hardware.ipmi.fan
          - name: ceph_pollster
            interval: 600
            meters:
              - radosgw.objects
              - radosgw.objects.size
              - radosgw.objects.containers
              - radosgw.api.request
              - radosgw.containers.objects
              - radosgw.containers.objects.size
          - name: image_pollster
            interval: 600
            meters:
              - image.size
          - name: volume_pollster
            interval: 600
            meters:
              - volume.size
              - volume.snapshot.size
              - volume.backup.size
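      # NOTE: gnocchi_resources maps each meter onto a Gnocchi resource type;
      # both archive policies keep 7 days of 5-minute datapoints, the -rate
      # variant adding rate:mean aggregation for cumulative meters like cpu.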
      gnocchi_resources:
        archive_policy_default: ceilometer-low
        archive_policies:
          - name: ceilometer-low
            aggregation_methods:
              - mean
            back_window: 0
            definition:
              - granularity: 5 minutes
                timespan: 7 days
          - name: ceilometer-low-rate
            aggregation_methods:
              - mean
              - rate:mean
            back_window: 0
            definition:
              - granularity: 5 minutes
                timespan: 7 days
        resources:
          - resource_type: identity
            metrics:
              identity.authenticate.success:
              identity.authenticate.pending:
              identity.authenticate.failure:
              identity.user.created:
              identity.user.deleted:
              identity.user.updated:
              identity.group.created:
              identity.group.deleted:
              identity.group.updated:
              identity.role.created:
              identity.role.deleted:
              identity.role.updated:
              identity.project.created:
              identity.project.deleted:
              identity.project.updated:
              identity.trust.created:
              identity.trust.deleted:
              identity.role_assignment.created:
              identity.role_assignment.deleted:
          - resource_type: ceph_account
            metrics:
              radosgw.objects:
              radosgw.objects.size:
              radosgw.objects.containers:
              radosgw.api.request:
              radosgw.containers.objects:
              radosgw.containers.objects.size:
          - resource_type: instance
            metrics:
              memory:
              memory.usage:
              memory.resident:
              memory.swap.in:
              memory.swap.out:
              memory.bandwidth.total:
              memory.bandwidth.local:
              vcpus:
                archive_policy_name: ceilometer-low-rate
              cpu:
                archive_policy_name: ceilometer-low-rate
              cpu_l3_cache:
              disk.root.size:
              disk.ephemeral.size:
              disk.latency:
              disk.iops:
              disk.capacity:
              disk.allocation:
              disk.usage:
              compute.instance.booting.time:
              perf.cpu.cycles:
              perf.instructions:
              perf.cache.references:
              perf.cache.misses:
            attributes:
              host: resource_metadata.(instance_host|host)
              image_ref: resource_metadata.image_ref
              launched_at: resource_metadata.launched_at
              created_at: resource_metadata.created_at
              deleted_at: resource_metadata.deleted_at
              display_name: resource_metadata.display_name
              flavor_id: resource_metadata.(instance_flavor_id|(flavor.id)|flavor_id)
              flavor_name: resource_metadata.(instance_type|(flavor.name)|flavor_name)
              server_group: resource_metadata.user_metadata.server_group
            event_delete: compute.instance.delete.start
            event_attributes:
              id: instance_id
            event_associated_resources:
              instance_network_interface: '{"=": {"instance_id": "%s"}}'
              instance_disk: '{"=": {"instance_id": "%s"}}'
          - resource_type: instance_network_interface
            metrics:
              network.outgoing.packets:
                archive_policy_name: ceilometer-low-rate
              network.incoming.packets:
                archive_policy_name: ceilometer-low-rate
              network.outgoing.packets.drop:
                archive_policy_name: ceilometer-low-rate
              network.incoming.packets.drop:
                archive_policy_name: ceilometer-low-rate
              network.outgoing.packets.error:
                archive_policy_name: ceilometer-low-rate
              network.incoming.packets.error:
                archive_policy_name: ceilometer-low-rate
              network.outgoing.bytes:
                archive_policy_name: ceilometer-low-rate
              network.incoming.bytes:
                archive_policy_name: ceilometer-low-rate
            attributes:
              name: resource_metadata.vnic_name
              instance_id: resource_metadata.instance_id
          - resource_type: instance_disk
            metrics:
              disk.device.read.requests:
                archive_policy_name: ceilometer-low-rate
              disk.device.write.requests:
                archive_policy_name: ceilometer-low-rate
              disk.device.read.bytes:
                archive_policy_name: ceilometer-low-rate
              disk.device.write.bytes:
                archive_policy_name: ceilometer-low-rate
              disk.device.latency:
              disk.device.read.latency:
              disk.device.write.latency:
              disk.device.iops:
              disk.device.capacity:
              disk.device.allocation:
              disk.device.usage:
            attributes:
              name: resource_metadata.disk_name
              instance_id: resource_metadata.instance_id
          - resource_type: image
            metrics:
              image.size:
              image.download:
              image.serve:
            attributes:
              name: resource_metadata.name
              container_format: resource_metadata.container_format
              disk_format: resource_metadata.disk_format
            event_delete: image.delete
            event_attributes:
              id: resource_id
          - resource_type: ipmi
            metrics:
              hardware.ipmi.node.power:
              hardware.ipmi.node.temperature:
              hardware.ipmi.node.inlet_temperature:
              hardware.ipmi.node.outlet_temperature:
              hardware.ipmi.node.fan:
              hardware.ipmi.node.current:
              hardware.ipmi.node.voltage:
              hardware.ipmi.node.airflow:
              hardware.ipmi.node.cups:
              hardware.ipmi.node.cpu_util:
              hardware.ipmi.node.mem_util:
              hardware.ipmi.node.io_util:
          - resource_type: ipmi_sensor
            metrics:
              hardware.ipmi.fan:
              hardware.ipmi.temperature:
              hardware.ipmi.current:
              hardware.ipmi.voltage:
            attributes:
              node: resource_metadata.node
          - resource_type: network
            metrics:
              bandwidth:
              ip.floating:
            event_delete: floatingip.delete.end
            event_attributes:
              id: resource_id
          - resource_type: stack
            metrics:
              stack.create:
              stack.update:
              stack.delete:
              stack.resume:
              stack.suspend:
          - resource_type: swift_account
            metrics:
              storage.objects.incoming.bytes:
              storage.objects.outgoing.bytes:
              storage.objects.size:
              storage.objects:
              storage.objects.containers:
              storage.containers.objects:
              storage.containers.objects.size:
          - resource_type: volume
            metrics:
              volume:
              volume.size:
              snapshot.size:
              volume.snapshot.size:
              volume.backup.size:
            attributes:
              display_name: resource_metadata.(display_name|name)
              volume_type: resource_metadata.volume_type
              image_id: resource_metadata.image_id
              instance_id: resource_metadata.instance_id
            event_delete: volume.delete.start
            event_attributes:
              id: resource_id
          - resource_type: volume_provider
            metrics:
              volume.provider.capacity.total:
              volume.provider.capacity.free:
              volume.provider.capacity.allocated:
              volume.provider.capacity.provisioned:
              volume.provider.capacity.virtual_free:
          - resource_type: volume_provider_pool
            metrics:
              volume.provider.pool.capacity.total:
              volume.provider.pool.capacity.free:
              volume.provider.pool.capacity.allocated:
              volume.provider.pool.capacity.provisioned:
              volume.provider.pool.capacity.virtual_free:
            attributes:
              provider: resource_metadata.provider
          - resource_type: host
            metrics:
              hardware.cpu.load.1min:
              hardware.cpu.load.5min:
              hardware.cpu.load.15min:
              hardware.cpu.util:
              hardware.memory.total:
              hardware.memory.used:
              hardware.memory.swap.total:
              hardware.memory.swap.avail:
              hardware.memory.buffer:
              hardware.memory.cached:
              hardware.network.ip.outgoing.datagrams:
              hardware.network.ip.incoming.datagrams:
              hardware.system_stats.cpu.idle:
              hardware.system_stats.io.outgoing.blocks:
              hardware.system_stats.io.incoming.blocks:
            attributes:
              host_name: resource_metadata.resource_url
          - resource_type: host_disk
            metrics:
              hardware.disk.size.total:
              hardware.disk.size.used:
              hardware.disk.read.bytes:
              hardware.disk.write.bytes:
              hardware.disk.read.requests:
              hardware.disk.write.requests:
            attributes:
              host_name: resource_metadata.resource_url
              device_name: resource_metadata.device
          - resource_type: host_network_interface
            metrics:
              hardware.network.incoming.bytes:
              hardware.network.outgoing.bytes:
              hardware.network.outgoing.errors:
            attributes:
              host_name: resource_metadata.resource_url
              device_name: resource_metadata.name
          - resource_type: nova_compute
            metrics:
              compute.node.cpu.frequency:
              compute.node.cpu.idle.percent:
              compute.node.cpu.idle.time:
              compute.node.cpu.iowait.percent:
              compute.node.cpu.iowait.time:
              compute.node.cpu.kernel.percent:
              compute.node.cpu.kernel.time:
              compute.node.cpu.percent:
              compute.node.cpu.user.percent:
              compute.node.cpu.user.time:
            attributes:
              host_name: resource_metadata.host
          - resource_type: manila_share
            metrics:
              manila.share.size:
            attributes:
              name: resource_metadata.name
              host: resource_metadata.host
              status: resource_metadata.status
              availability_zone: resource_metadata.availability_zone
              protocol: resource_metadata.protocol
          - resource_type: switch
            metrics:
              switch:
              switch.ports:
            attributes:
              controller: resource_metadata.controller
          - resource_type: switch_port
            metrics:
              switch.port:
              switch.port.uptime:
              switch.port.receive.packets:
              switch.port.transmit.packets:
              switch.port.receive.bytes:
              switch.port.transmit.bytes:
              switch.port.receive.drops:
              switch.port.transmit.drops:
              switch.port.receive.errors:
              switch.port.transmit.errors:
              switch.port.receive.frame_error:
              switch.port.receive.overrun_error:
              switch.port.receive.crc_error:
              switch.port.collision.count:
            attributes:
              switch: resource_metadata.switch
              port_number_on_switch: resource_metadata.port_number_on_switch
              neutron_port_id: resource_metadata.neutron_port_id
              controller: resource_metadata.controller
          - resource_type: port
            metrics:
              port:
              port.uptime:
              port.receive.packets:
              port.transmit.packets:
              port.receive.bytes:
              port.transmit.bytes:
              port.receive.drops:
              port.receive.errors:
            attributes:
              controller: resource_metadata.controller
          - resource_type: switch_table
            metrics:
              switch.table.active.entries:
            attributes:
              controller: resource_metadata.controller
              switch: resource_metadata.switch
    pod:
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/ceilometer-0.1.0.tgz
    subpath: ceilometer
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-fm-rest-api
data:
  chart_name: fm-rest-api
  release: openstack-fm-rest-api
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-fm-rest-api
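  # NOTE: unlike the other charts, fm-rest-api enumerates its pre-upgrade job
  # deletions per component (db-init, db-sync, ks-*) rather than sweeping
  # everything under the release_group label alone.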
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-fm-rest-api
data:
  chart_name: fm-rest-api
  release: openstack-fm-rest-api
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-fm-rest-api
  test:
    enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-fm-rest-api
            component: db-init
        - type: job
          labels:
            release_group: osh-openstack-fm-rest-api
            component: db-sync
        - type: job
          labels:
            release_group: osh-openstack-fm-rest-api
            component: ks-user
        - type: job
          labels:
            release_group: osh-openstack-fm-rest-api
            component: ks-service
        - type: job
          labels:
            release_group: osh-openstack-fm-rest-api
            component: ks-endpoints
  values:
    pod:
      affinity:
        anti:
          type:
            default: requiredDuringSchedulingIgnoredDuringExecution
    labels:
      api:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      job:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    images:
      tags:
        fm_rest_api: docker.io/starlingx/stx-fm-rest-api:master-centos-stable-latest
        ks_user: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_service: docker.io/starlingx/stx-heat:master-centos-stable-latest
        ks_endpoints: docker.io/starlingx/stx-heat:master-centos-stable-latest
        fm_db_sync: docker.io/starlingx/stx-fm-rest-api:master-centos-stable-latest
        db_init: docker.io/starlingx/stx-heat:master-centos-stable-latest
        db_drop: docker.io/starlingx/stx-heat:master-centos-stable-latest
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/fm-rest-api-0.1.0.tgz
    subpath: fm-rest-api
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-horizon
data:
  chart_name: horizon
  release: openstack-horizon
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: osh-openstack-horizon
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: osh-openstack-horizon
  values:
    images:
      tags:
        db_drop: docker.io/starlingx/stx-heat:master-centos-stable-latest
        db_init: docker.io/starlingx/stx-heat:master-centos-stable-latest
        horizon: docker.io/starlingx/stx-horizon:master-centos-stable-latest
        horizon_db_sync: docker.io/starlingx/stx-horizon:master-centos-stable-latest
        image_repo_sync: null
    network:
      node_port:
        enabled: 'true'
        port: 31000
    pod:
      mounts:
        horizon:
          horizon:
            # Branding directory mount
            volumeMounts:
              - mountPath: /opt/branding
                name: horizon-branding
            volumes:
              - hostPath:
                  path: /opt/branding
                  type: Directory
                name: horizon-branding
    conf:
      horizon:
        local_settings:
          config:
            # Region Modes
            ss_enabled: 'False'
            dc_mode: 'False'
            # Security
            https_enabled: 'False'
            lockout_period_sec: '300'
            lockout_retries_num: '3'
            # Turn off domain support as we aren't using it
            keystone_multidomain_support: 'False'
          template: |
            import os
            from django.utils.translation import ugettext_lazy as _
            from openstack_dashboard import exceptions
            DEBUG = {{ .Values.conf.horizon.local_settings.config.debug }}
            TEMPLATE_DEBUG = DEBUG
            COMPRESS_OFFLINE = True
            COMPRESS_CSS_HASHING_METHOD = "hash"
            # WEBROOT is the location relative to Webserver root
            # should end with a slash.
            WEBROOT = '/'
            # LOGIN_URL = WEBROOT + 'auth/login/'
            # LOGOUT_URL = WEBROOT + 'auth/logout/'
            #
            # LOGIN_REDIRECT_URL can be used as an alternative for
            # HORIZON_CONFIG.user_home, if user_home is not set.
            # Do not set it to '/home/', as this will cause circular redirect loop
            # LOGIN_REDIRECT_URL = WEBROOT
            # Required for Django 1.5.
            # If horizon is running in production (DEBUG is False), set this
            # with the list of host/domain names that the application can serve.
            # For more information see:
            # https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
            ALLOWED_HOSTS = ['*']
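            # Illustrative note (not part of the upstream template): the
            # wildcard above accepts any Host header; a hardened deployment
            # would typically list the dashboard's real names instead, e.g.
            # (hypothetical values):
            #ALLOWED_HOSTS = ['horizon.example.com', '10.10.10.2']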
            # Set SSL proxy settings:
            # For Django 1.4+ pass this header from the proxy after terminating the SSL,
            # and don't forget to strip it from the client's request.
            # For more information see:
            # https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header
            #SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
            # https://docs.djangoproject.com/en/1.5/ref/settings/#secure-proxy-ssl-header
            #SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
            # If Horizon is being served through SSL, then uncomment the following two
            # settings to better secure the cookies from security exploits
            #CSRF_COOKIE_SECURE = True
            #SESSION_COOKIE_SECURE = True
            # Overrides for OpenStack API versions. Use this setting to force the
            # OpenStack dashboard to use a specific API version for a given service API.
            # Versions specified here should be integers or floats, not strings.
            # NOTE: The version should be formatted as it appears in the URL for the
            # service API. For example, the identity service APIs have inconsistent
            # use of the decimal point, so valid options would be 2.0 or 3.
            #OPENSTACK_API_VERSIONS = {
            #    "data-processing": 1.1,
            #    "identity": 3,
            #    "volume": 2,
            #}
            OPENSTACK_API_VERSIONS = {
                "identity": 3,
            }
            # Set this to True if running on multi-domain model. When this is enabled,
            # it will require the user to enter the Domain name in addition to username
            # for login.
            OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = {{ .Values.conf.horizon.local_settings.config.keystone_multidomain_support }}
            # Overrides the default domain used when running on single-domain model
            # with Keystone V3. All entities will be created in the default domain.
            OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = '{{ .Values.conf.horizon.local_settings.config.keystone_default_domain }}'
            # Set Console type:
            # valid options are "AUTO"(default), "VNC", "SPICE", "RDP", "SERIAL" or None
            # Set to None explicitly if you want to deactivate the console.
            #CONSOLE_TYPE = "AUTO"
            # Default OpenStack Dashboard configuration.
            HORIZON_CONFIG = {
                'user_home': 'openstack_dashboard.views.get_user_home',
                'ajax_queue_limit': 10,
                'auto_fade_alerts': {
                    'delay': 3000,
                    'fade_duration': 1500,
                    'types': ['alert-success', 'alert-info']
                },
                'help_url': "http://docs.openstack.org",
                'exceptions': {'recoverable': exceptions.RECOVERABLE,
                               'not_found': exceptions.NOT_FOUND,
                               'unauthorized': exceptions.UNAUTHORIZED},
                'modal_backdrop': 'static',
                'angular_modules': [],
                'js_files': [],
                'js_spec_files': [],
            }
            # Specify a regular expression to validate user passwords.
            #HORIZON_CONFIG["password_validator"] = {
            #    "regex": '.*',
            #    "help_text": _("Your password does not meet the requirements."),
            #}
            # Disable simplified floating IP address management for deployments with
            # multiple floating IP pools or complex network requirements.
            #HORIZON_CONFIG["simple_ip_management"] = False
            # Turn off browser autocompletion for forms including the login form and
            # the database creation workflow if so desired.
            #HORIZON_CONFIG["password_autocomplete"] = "off"
            # Setting this to True will disable the reveal button for password fields,
            # including on the login form.
            #HORIZON_CONFIG["disable_password_reveal"] = False
            LOCAL_PATH = '/tmp'
            # Set custom secret key:
            # You can either set it to a specific value or you can let horizon generate
            # a default secret key that is unique on this machine, i.e. regardless of
            # the amount of Python WSGI workers (if used behind Apache+mod_wsgi).
            # However, there may be situations where you would want to set this
            # explicitly, e.g. when multiple dashboard instances are distributed on
            # different machines (usually behind a load-balancer). Either you have to
            # make sure that a session gets all requests routed to the same dashboard
            # instance or you set the same SECRET_KEY for all of them.
            SECRET_KEY='{{ .Values.conf.horizon.local_settings.config.horizon_secret_key }}'
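            # Illustrative note: if this horizon deployment is ever scaled to
            # multiple replicas, every pod renders this same template, so all
            # replicas share the one key supplied via the chart value
            # conf.horizon.local_settings.config.horizon_secret_key and
            # sessions stay valid no matter which pod answers. A hypothetical
            # install-time override might look like:
            #   --set conf.horizon.local_settings.config.horizon_secret_key=<random-string>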
            CACHES = {
                'default': {
                    'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
                    'LOCATION': '{{ tuple "oslo_cache" "internal" "memcache" . | include "helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup" }}',
                }
            }
            DATABASES = {
                'default': {
                    # Database configuration here
                    'ENGINE': 'django.db.backends.mysql',
                    'NAME': '{{ .Values.endpoints.oslo_db.path | base }}',
                    'USER': '{{ .Values.endpoints.oslo_db.auth.horizon.username }}',
                    'PASSWORD': '{{ .Values.endpoints.oslo_db.auth.horizon.password }}',
                    'HOST': '{{ tuple "oslo_db" "internal" . | include "helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup" }}',
                    'default-character-set': 'utf8',
                    'PORT': '{{ tuple "oslo_db" "internal" "mysql" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}'
                }
            }
            SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
            # Send email to the console by default
            EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
            # Or send them to /dev/null
            #EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
            # Configure these for your outgoing email host
            #EMAIL_HOST = 'smtp.my-company.com'
            #EMAIL_PORT = 25
            #EMAIL_HOST_USER = 'djangomail'
            #EMAIL_HOST_PASSWORD = 'top-secret!'
            # For multiple regions uncomment this configuration, and add (endpoint, title).
            #AVAILABLE_REGIONS = [
            #    ('http://cluster1.example.com:5000/v2.0', 'cluster1'),
            #    ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
            #]
            OPENSTACK_KEYSTONE_URL = "{{ tuple "identity" "internal" "api" . | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup" }}"
            OPENSTACK_KEYSTONE_DEFAULT_ROLE = "member"
            {{- if .Values.conf.horizon.local_settings.config.auth.sso.enabled }}
            # Enables keystone web single-sign-on if set to True.
            WEBSSO_ENABLED = True
            # Determines which authentication choice to show as default.
            WEBSSO_INITIAL_CHOICE = "{{ .Values.conf.horizon.local_settings.config.auth.sso.initial_choice }}"
            # The list of authentication mechanisms
            # which include keystone federation protocols.
            # Current supported protocol IDs are 'saml2' and 'oidc'
            # which represent SAML 2.0, OpenID Connect respectively.
            # Do not remove the mandatory credentials mechanism.
            WEBSSO_CHOICES = (
                ("credentials", _("Keystone Credentials")),
                {{- range $i, $sso := .Values.conf.horizon.local_settings.config.auth.idp_mapping }}
                ({{ $sso.name | quote }}, {{ $sso.label | quote }}),
                {{- end }}
            )
            WEBSSO_IDP_MAPPING = {
                {{- range $i, $sso := .Values.conf.horizon.local_settings.config.auth.idp_mapping }}
                {{ $sso.name | quote }}: ({{ $sso.idp | quote }}, {{ $sso.protocol | quote }}),
                {{- end }}
            }
            {{- end }}
            # Disable SSL certificate checks (useful for self-signed certificates):
            #OPENSTACK_SSL_NO_VERIFY = True
            # The CA certificate to use to verify SSL connections
            #OPENSTACK_SSL_CACERT = '/path/to/cacert.pem'
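            # Illustrative note: given a hypothetical values override such as
            #   conf.horizon.local_settings.config.auth.idp_mapping:
            #     - name: acme_oidc
            #       label: Acme OIDC
            #       idp: myidp
            #       protocol: oidc
            # the WEBSSO loops above would render
            #   WEBSSO_CHOICES = (("credentials", _("Keystone Credentials")), ("acme_oidc", "Acme OIDC"),)
            #   WEBSSO_IDP_MAPPING = { "acme_oidc": ("myidp", "oidc"), }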
            # The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
            # capabilities of the auth backend for Keystone.
            # If Keystone has been configured to use LDAP as the auth backend then set
            # can_edit_user to False and name to 'ldap'.
            #
            # TODO(tres): Remove these once Keystone has an API to identify auth backend.
            OPENSTACK_KEYSTONE_BACKEND = {
                'name': 'native',
                'can_edit_user': True,
                'can_edit_group': True,
                'can_edit_project': True,
                'can_edit_domain': True,
                'can_edit_role': True,
            }
            # Setting this to True, will add a new "Retrieve Password" action on instance,
            # allowing Admin session password retrieval/decryption.
            #OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False
            # The Launch Instance user experience has been significantly enhanced.
            # You can choose whether to enable the new launch instance experience,
            # the legacy experience, or both. The legacy experience will be removed
            # in a future release, but is available as a temporary backup setting to ensure
            # compatibility with existing deployments. Further development will not be
            # done on the legacy experience. Please report any problems with the new
            # experience via the Launchpad tracking system.
            #
            # Toggle LAUNCH_INSTANCE_LEGACY_ENABLED and LAUNCH_INSTANCE_NG_ENABLED to
            # determine the experience to enable. Set them both to true to enable
            # both.
            #LAUNCH_INSTANCE_LEGACY_ENABLED = True
            #LAUNCH_INSTANCE_NG_ENABLED = False
            # The Xen Hypervisor has the ability to set the mount point for volumes
            # attached to instances (other Hypervisors currently do not). Setting
            # can_set_mount_point to True will add the option to set the mount point
            # from the UI.
            OPENSTACK_HYPERVISOR_FEATURES = {
                'can_set_mount_point': False,
                'can_set_password': False,
            }
            # The OPENSTACK_CINDER_FEATURES settings can be used to enable optional
            # services provided by cinder that are not exposed by its extension API.
            OPENSTACK_CINDER_FEATURES = {
                'enable_backup': {{ .Values.conf.horizon.local_settings.config.openstack_cinder_features.enable_backup }},
            }
            # The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional
            # services provided by neutron. Options currently available are load
            # balancer service, security groups, quotas, VPN service.
            OPENSTACK_NEUTRON_NETWORK = {
                'enable_router': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_router }},
                'enable_quotas': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_quotas }},
                'enable_ipv6': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_ipv6 }},
                'enable_distributed_router': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_distributed_router }},
                'enable_ha_router': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_ha_router }},
                'enable_lb': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_lb }},
                'enable_firewall': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_firewall }},
                'enable_vpn': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_vpn }},
                'enable_fip_topology_check': {{ .Values.conf.horizon.local_settings.config.openstack_neutron_network.enable_fip_topology_check }},
                # The profile_support option is used to detect if an external router can be
                # configured via the dashboard. When using specific plugins the
                # profile_support can be turned on if needed.
                'profile_support': None,
                #'profile_support': 'cisco',
                # Set which provider network types are supported. Only the network types
                # in this list will be available to choose from when creating a network.
                # Network types include local, flat, vlan, gre, and vxlan.
                'supported_provider_types': ['*'],
                # Set which VNIC types are supported for port binding. Only the VNIC
                # types in this list will be available to choose from when creating a
                # port.
                # VNIC types include 'normal', 'macvtap' and 'direct'.
                'supported_vnic_types': ['*']
            }
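            # Illustrative note: a deployment that only offers VLAN and VXLAN
            # tenant networks with normal ports could narrow the wildcards
            # above, e.g. (hypothetical values):
            #    'supported_provider_types': ['vlan', 'vxlan'],
            #    'supported_vnic_types': ['normal'],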
            # The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
            # in the OpenStack Dashboard related to the Image service, such as the list
            # of supported image formats.
            #OPENSTACK_IMAGE_BACKEND = {
            #    'image_formats': [
            #        ('', _('Select format')),
            #        ('aki', _('AKI - Amazon Kernel Image')),
            #        ('ami', _('AMI - Amazon Machine Image')),
            #        ('ari', _('ARI - Amazon Ramdisk Image')),
            #        ('docker', _('Docker')),
            #        ('iso', _('ISO - Optical Disk Image')),
            #        ('ova', _('OVA - Open Virtual Appliance')),
            #        ('qcow2', _('QCOW2 - QEMU Emulator')),
            #        ('raw', _('Raw')),
            #        ('vdi', _('VDI - Virtual Disk Image')),
            #        ('vhd', _('VHD - Virtual Hard Disk')),
            #        ('vmdk', _('VMDK - Virtual Machine Disk')),
            #    ]
            #}
            # The IMAGE_CUSTOM_PROPERTY_TITLES setting is used to customize the titles
            # for image custom property attributes that appear on image detail pages.
            IMAGE_CUSTOM_PROPERTY_TITLES = {
                "architecture": _("Architecture"),
                "kernel_id": _("Kernel ID"),
                "ramdisk_id": _("Ramdisk ID"),
                "image_state": _("Euca2ools state"),
                "project_id": _("Project ID"),
                "image_type": _("Image Type"),
            }
            # The IMAGE_RESERVED_CUSTOM_PROPERTIES setting is used to specify which image
            # custom properties should not be displayed in the Image Custom Properties
            # table.
            IMAGE_RESERVED_CUSTOM_PROPERTIES = []
            # OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
            # in the Keystone service catalog. Use this setting when Horizon is running
            # external to the OpenStack environment. The default is 'publicURL'.
            OPENSTACK_ENDPOINT_TYPE = "internalURL"
            # SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the
            # case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints
            # in the Keystone service catalog. Use this setting when Horizon is running
            # external to the OpenStack environment. The default is None. This
            # value should differ from OPENSTACK_ENDPOINT_TYPE if used.
            SECONDARY_ENDPOINT_TYPE = "publicURL"
            # The number of objects (Swift containers/objects or images) to display
            # on a single page before providing a paging element (a "more" link)
            # to paginate results.
            API_RESULT_LIMIT = 1000
            API_RESULT_PAGE_SIZE = 20
            # The size of chunk in bytes for downloading objects from Swift
            SWIFT_FILE_TRANSFER_CHUNK_SIZE = 512 * 1024
            # Specify a maximum number of items to display in a dropdown.
            DROPDOWN_MAX_ITEMS = 30
            # The timezone of the server. This should correspond with the timezone
            # of your entire OpenStack installation, and hopefully be in UTC.
            TIME_ZONE = "UTC"
            # When launching an instance, the menu of available flavors is
            # sorted by RAM usage, ascending. If you would like a different sort order,
            # you can provide another flavor attribute as sorting key. Alternatively, you
            # can provide a custom callback method to use for sorting. You can also provide
            # a flag for reverse sort. For more info, see
            # http://docs.python.org/2/library/functions.html#sorted
            #CREATE_INSTANCE_FLAVOR_SORT = {
            #    'key': 'name',
            #     # or
            #    'key': my_awesome_callback_method,
            #    'reverse': False,
            #}
            # Set this to True to display an 'Admin Password' field on the Change Password
            # form to verify that it is indeed the admin logged-in who wants to change
            # the password.
            # ENFORCE_PASSWORD_CHECK = False
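            # Illustrative note: following the CREATE_INSTANCE_FLAVOR_SORT
            # example above, sorting the flavor menu by vCPU count, largest
            # first, would be:
            #CREATE_INSTANCE_FLAVOR_SORT = {
            #    'key': 'vcpus',
            #    'reverse': True,
            #}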
            # Modules that provide /auth routes that can be used to handle different
            # types of user authentication. Add auth plugins that require extra route
            # handling to this list.
            #AUTHENTICATION_URLS = [
            #    'openstack_auth.urls',
            #]
            # The Horizon Policy Enforcement engine uses these values to load per service
            # policy rule files. The content of these files should match the files the
            # OpenStack services are using to determine role based access control in the
            # target installation.
            # Path to directory containing policy.json files
            POLICY_FILES_PATH = '/etc/openstack-dashboard'
            # Map of local copy of service policy files
            #POLICY_FILES = {
            #    'identity': 'keystone_policy.json',
            #    'compute': 'nova_policy.json',
            #    'volume': 'cinder_policy.json',
            #    'image': 'glance_policy.json',
            #    'orchestration': 'heat_policy.json',
            #    'network': 'neutron_policy.json',
            #    'telemetry': 'ceilometer_policy.json',
            #}
            # Trove user and database extension support. By default support for
            # creating users and databases on database instances is turned on.
            # To disable these extensions set the permission here to something
            # unusable such as ["!"].
            # TROVE_ADD_USER_PERMS = []
            # TROVE_ADD_DATABASE_PERMS = []
            # Change this path to the appropriate static directory containing
            # two files: _variables.scss and _styles.scss
            #CUSTOM_THEME_PATH = 'static/themes/default'
            LOGGING = {
                'version': 1,
                # When set to True this will disable all logging except
                # for loggers specified in this configuration dictionary. Note that
                # if nothing is specified here and disable_existing_loggers is True,
                # django.db.backends will still log unless it is disabled explicitly.
                'disable_existing_loggers': False,
                'handlers': {
                    'null': {
                        'level': 'DEBUG',
                        'class': 'logging.NullHandler',
                    },
                    'console': {
                        # Set the level to "DEBUG" for verbose output logging.
                        'level': 'INFO',
                        'class': 'logging.StreamHandler',
                    },
                },
                'loggers': {
                    # Logging from django.db.backends is VERY verbose, send to null
                    # by default.
                    'django.db.backends': {
                        'handlers': ['null'],
                        'propagate': False,
                    },
                    'requests': {
                        'handlers': ['null'],
                        'propagate': False,
                    },
                    'horizon': {
                        'handlers': ['console'],
                        'level': 'DEBUG',
                        'propagate': False,
                    },
                    'openstack_dashboard': {
                        'handlers': ['console'],
                        'level': 'DEBUG',
                        'propagate': False,
                    },
                    'novaclient': {
                        'handlers': ['console'],
                        'level': 'DEBUG',
                        'propagate': False,
                    },
                    'cinderclient': {
                        'handlers': ['console'],
                        'level': 'DEBUG',
                        'propagate': False,
                    },
                    'glanceclient': {
                        'handlers': ['console'],
                        'level': 'DEBUG',
                        'propagate': False,
                    },
                    'neutronclient': {
                        'handlers': ['console'],
                        'level': 'DEBUG',
                        'propagate': False,
                    },
                    'heatclient': {
                        'handlers': ['console'],
                        'level': 'DEBUG',
                        'propagate': False,
                    },
                    'ceilometerclient': {
                        'handlers': ['console'],
                        'level': 'DEBUG',
                        'propagate': False,
                    },
                    'troveclient': {
                        'handlers': ['console'],
                        'level': 'DEBUG',
                        'propagate': False,
                    },
                    'swiftclient': {
                        'handlers': ['console'],
                        'level': 'DEBUG',
                        'propagate': False,
                    },
                    'openstack_auth': {
                        'handlers': ['console'],
                        'level': 'DEBUG',
                        'propagate': False,
                    },
                    'nose.plugins.manager': {
                        'handlers': ['console'],
                        'level': 'DEBUG',
                        'propagate': False,
                    },
                    'django': {
                        'handlers': ['console'],
                        'level': 'DEBUG',
                        'propagate': False,
                    },
                    'iso8601': {
                        'handlers': ['null'],
                        'propagate': False,
                    },
                    'scss': {
                        'handlers': ['null'],
                        'propagate': False,
                    },
                }
            }
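            # Illustrative note: for verbose troubleshooting, raising the
            # 'console' handler defined above from 'INFO' to 'DEBUG' makes
            # every logger routed to it (horizon, openstack_dashboard, the
            # python-*client loggers, django) emit debug output on the pod's
            # stdout, where it can be collected with kubectl logs.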
            # 'direction' should not be specified for all_tcp/udp/icmp.
            # It is specified in the form.
            SECURITY_GROUP_RULES = {
                'all_tcp': {
                    'name': _('All TCP'),
                    'ip_protocol': 'tcp',
                    'from_port': '1',
                    'to_port': '65535',
                },
                'all_udp': {
                    'name': _('All UDP'),
                    'ip_protocol': 'udp',
                    'from_port': '1',
                    'to_port': '65535',
                },
                'all_icmp': {
                    'name': _('All ICMP'),
                    'ip_protocol': 'icmp',
                    'from_port': '-1',
                    'to_port': '-1',
                },
                'ssh': {
                    'name': 'SSH',
                    'ip_protocol': 'tcp',
                    'from_port': '22',
                    'to_port': '22',
                },
                'smtp': {
                    'name': 'SMTP',
                    'ip_protocol': 'tcp',
                    'from_port': '25',
                    'to_port': '25',
                },
                'dns': {
                    'name': 'DNS',
                    'ip_protocol': 'tcp',
                    'from_port': '53',
                    'to_port': '53',
                },
                'http': {
                    'name': 'HTTP',
                    'ip_protocol': 'tcp',
                    'from_port': '80',
                    'to_port': '80',
                },
                'pop3': {
                    'name': 'POP3',
                    'ip_protocol': 'tcp',
                    'from_port': '110',
                    'to_port': '110',
                },
                'imap': {
                    'name': 'IMAP',
                    'ip_protocol': 'tcp',
                    'from_port': '143',
                    'to_port': '143',
                },
                'ldap': {
                    'name': 'LDAP',
                    'ip_protocol': 'tcp',
                    'from_port': '389',
                    'to_port': '389',
                },
                'https': {
                    'name': 'HTTPS',
                    'ip_protocol': 'tcp',
                    'from_port': '443',
                    'to_port': '443',
                },
                'smtps': {
                    'name': 'SMTPS',
                    'ip_protocol': 'tcp',
                    'from_port': '465',
                    'to_port': '465',
                },
                'imaps': {
                    'name': 'IMAPS',
                    'ip_protocol': 'tcp',
                    'from_port': '993',
                    'to_port': '993',
                },
                'pop3s': {
                    'name': 'POP3S',
                    'ip_protocol': 'tcp',
                    'from_port': '995',
                    'to_port': '995',
                },
                'ms_sql': {
                    'name': 'MS SQL',
                    'ip_protocol': 'tcp',
                    'from_port': '1433',
                    'to_port': '1433',
                },
                'mysql': {
                    'name': 'MYSQL',
                    'ip_protocol': 'tcp',
                    'from_port': '3306',
                    'to_port': '3306',
                },
                'rdp': {
                    'name': 'RDP',
                    'ip_protocol': 'tcp',
                    'from_port': '3389',
                    'to_port': '3389',
                },
            }
            # Deprecation Notice:
            #
            # The setting FLAVOR_EXTRA_KEYS has been deprecated.
            # Please load extra spec metadata into the Glance Metadata Definition Catalog.
            #
            # The sample quota definitions can be found in:
            # /etc/metadefs/compute-quota.json
            #
            # The metadata definition catalog supports CLI and API:
            #  $glance --os-image-api-version 2 help md-namespace-import
            #  $glance-manage db_load_metadefs
            #
            # See Metadata Definitions on: http://docs.openstack.org/developer/glance/
            # Indicate to the Sahara data processing service whether or not
            # automatic floating IP allocation is in effect. If it is not
            # in effect, the user will be prompted to choose a floating IP
            # pool for use in their cluster. False by default. You would want
            # to set this to True if you were running Nova Networking with
            # auto_assign_floating_ip = True.
            #SAHARA_AUTO_IP_ALLOCATION_ENABLED = False
            # The hash algorithm to use for authentication tokens. This must
            # match the hash algorithm that the identity server and the
            # auth_token middleware are using. Allowed values are the
            # algorithms supported by Python's hashlib library.
            #OPENSTACK_TOKEN_HASH_ALGORITHM = 'md5'
            # AngularJS requires some settings to be made available to
            # the client side. Some settings are required by in-tree / built-in horizon
            # features. These settings must be added to REST_API_REQUIRED_SETTINGS in the
            # form of ['SETTING_1','SETTING_2'], etc.
            #
            # You may remove settings from this list for security purposes, but do so at
            # the risk of breaking a built-in horizon feature. These settings are required
            # for horizon to function properly. Only remove them if you know what you
            # are doing. These settings may in the future be moved to be defined within
            # the enabled panel configuration.
            # You should not add settings to this list for out of tree extensions.
            # See: https://wiki.openstack.org/wiki/Horizon/RESTAPI
            REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES',
                                          'LAUNCH_INSTANCE_DEFAULTS',
                                          'OPENSTACK_IMAGE_FORMATS']
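            # Illustrative note: an extra rule for a hypothetical service on
            # TCP 8443 would follow the same shape as the entries above:
            #SECURITY_GROUP_RULES['custom_tls'] = {
            #    'name': 'Custom TLS',
            #    'ip_protocol': 'tcp',
            #    'from_port': '8443',
            #    'to_port': '8443',
            #}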
            # Additional settings can be made available to the client side for
            # extensibility by specifying them in REST_API_ADDITIONAL_SETTINGS
            # !! Please use extreme caution as the settings are transferred via HTTP/S
            # and are not encrypted on the browser. This is an experimental API and
            # may be deprecated in the future without notice.
            #REST_API_ADDITIONAL_SETTINGS = []
            # DISALLOW_IFRAME_EMBED can be used to prevent Horizon from being embedded
            # within an iframe. Legacy browsers are still vulnerable to a Cross-Frame
            # Scripting (XFS) vulnerability, so this option allows extra security hardening
            # where iframes are not used in deployment. Default setting is True.
            # For more information see:
            # http://tinyurl.com/anticlickjack
            # DISALLOW_IFRAME_EMBED = True
            STATIC_ROOT = '/var/www/html/horizon'
            #OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
            #present OPENSTACK_API_VERSIONS={"identity":3}
            # Use region configuration to access platform deployment and containerized
            # deployment from a single horizon deployment
            OPENSTACK_KEYSTONE_URL = "{{ tuple "identity" "internal" "api" . | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup" }}"
            OPENSTACK_NEUTRON_NETWORK['enable_distributed_router'] = True
            # TODO(tsmith) remove this, only for HP custom, this isn't being used
            # Load Region Config params, if present
            # Config OPENSTACK_HOST is still required in region mode since StarlingX
            # does not use the local_settings populated via packstack
            {{- if eq .Values.conf.horizon.local_settings.config.ss_enabled "True"}}
            SS_ENABLED = "True"
            OPENSTACK_KEYSTONE_URL = {{ .Values.conf.horizon.local_settings.config.openstack_keystone_url }}
            AVAILABLE_REGIONS = [(OPENSTACK_KEYSTONE_URL, {{ .Values.conf.horizon.local_settings.config.region_name }}),]
            REGION_NAME = {{ .Values.conf.horizon.local_settings.config.region_name }}
            {{- else }}
            SS_ENABLED = "False"
            {{- end }}
            DC_MODE = {{ .Values.conf.horizon.local_settings.config.dc_mode }}
            # Override openstack-dashboard NG_CACHE_TEMPLATE_AGE
            NG_TEMPLATE_CACHE_AGE = 300
            # OperationLogMiddleware Configuration
            OPERATION_LOG_ENABLED = True
            OPERATION_LOG_OPTIONS = {
                'mask_fields': ['password', 'bm_password', 'bm_confirm_password',
                                'current_password', 'confirm_password', 'new_password'],
                'target_methods': ['POST', 'PUT', 'DELETE'],
                'format': ("[%(project_name)s %(project_id)s] [%(user_name)s %(user_id)s]"
                           " [%(method)s %(request_url)s %(http_status)s]"
                           " parameters:[%(param)s] message:[%(message)s]"),
            }
            # Custom Theme Override
            for root, dirs, files in os.walk('/opt/branding/applied'):
                if 'manifest.py' in files:
                    execfile(os.path.join(root, 'manifest.py'))
            AVAILABLE_THEMES = [
                ('default', 'Default', 'themes/default'),
                ('material', 'Material', 'themes/material'),
                ('starlingx', 'StarlingX', 'themes/starlingx'),
                ('custom', 'Custom', '/opt/branding/applied'),
            ]
            DEFAULT_THEME = 'custom'
            # Secure site configuration
            SESSION_COOKIE_HTTPONLY = True
            {{- if eq .Values.conf.horizon.local_settings.config.https_enabled "True"}}
            CSRF_COOKIE_SECURE = True
            SESSION_COOKIE_SECURE = True
            {{- end }}
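            # Illustrative note: because of the os.walk() above, a branding
            # package extracted to /opt/branding/applied (the hostPath this
            # chart mounts into the pod) may ship a manifest.py that overrides
            # any of these settings, e.g. a hypothetical one-liner:
            #   DEFAULT_THEME = 'starlingx'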
            # The OPENSTACK_HEAT_STACK settings can be used to disable the password
            # field required while launching the stack.
            OPENSTACK_HEAT_STACK = {
                'enable_user_pass': False,
            }
            HORIZON_CONFIG["password_autocomplete"] = "off"
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/starlingx/horizon-0.1.0.tgz
    subpath: horizon
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: kube-system-ingress
data:
  description: "System Ingress Controller"
  sequenced: false
  chart_group:
    - kube-system-ingress
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-ingress
data:
  description: "OpenStack Ingress Controller"
  sequenced: false
  chart_group:
    - openstack-ingress
    - openstack-nginx-ports-control
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-mariadb
data:
  description: "Mariadb"
  sequenced: true
  chart_group:
    - openstack-mariadb
    - openstack-garbd
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-memcached
data:
  description: "Memcached"
  sequenced: true
  chart_group:
    - openstack-memcached
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-rabbitmq
data:
  description: "Rabbitmq"
  sequenced: true
  chart_group:
    - openstack-rabbitmq
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-keystone
data:
  description: "Deploy keystone"
  sequenced: true
  chart_group:
    - openstack-keystone
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-keystone-api-proxy
data:
  description: "Deploy keystone api proxy"
  sequenced: true
  chart_group:
    - openstack-keystone-api-proxy
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-barbican
data:
  description: "Deploy barbican"
  sequenced: true
  chart_group:
    - openstack-barbican
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-glance
data:
  description: "Deploy glance"
  sequenced: true
  chart_group:
    - openstack-glance
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-ceph-rgw
data:
  description: "Deploy swift"
  sequenced: true
  chart_group:
    - openstack-ceph-rgw
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-cinder
data:
  description: "Deploy cinder"
  sequenced: true
  chart_group:
    - openstack-cinder
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-compute-kit
data:
  description: "Deploy nova and neutron, as well as supporting services"
  sequenced: false
  chart_group:
    - openstack-libvirt
    - openstack-nova
    - openstack-nova-api-proxy
    - openstack-neutron
    - openstack-placement
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-heat
data:
  description: "Deploy heat"
  sequenced: true
  chart_group:
    - openstack-heat
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-fm-rest-api
data:
  description: "Deploy Fault management"
  sequenced: true
  chart_group:
    - openstack-fm-rest-api
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-horizon
data:
  description: "Deploy horizon"
  sequenced: false
  chart_group:
    - openstack-horizon
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-telemetry
data:
  description: "Deploy telemetry"
  sequenced: true
  chart_group:
    - openstack-aodh
    - openstack-gnocchi
    - openstack-panko
    - openstack-ceilometer
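# Note on ordering: Armada applies the chart groups in the order listed in the
# manifest below, and release_prefix is prepended to each chart's release name
# to form the release_group labels used throughout this file (for example,
# release "openstack-horizon" with prefix "osh" yields "osh-openstack-horizon").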
---
schema: armada/Manifest/v1
metadata:
  schema: metadata/Document/v1
  name: armada-manifest
data:
  release_prefix: osh
  chart_groups:
    - kube-system-ingress
    - openstack-ingress
    - openstack-mariadb
    - openstack-memcached
    - openstack-rabbitmq
    - openstack-keystone
    - openstack-barbican
    - openstack-glance
    - openstack-cinder
    - openstack-ceph-rgw
    - openstack-compute-kit
    - openstack-heat
    - openstack-fm-rest-api
    - openstack-horizon
    - openstack-telemetry
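# A quick sanity check before deploying (assumes the Armada CLI is available;
# the file name below is a hypothetical placeholder):
#   armada validate stx-openstack.yaml
#   armada apply stx-openstack.yaml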