openstack-helm/ironic/values.yaml
Thiago Brito 8ab6013409 Changing all policies to yaml format
In the Victoria cycle oslo.policy changed all default policies
to yaml format. Today openstack-helm has a mix of json and yaml
across projects and, after a bad time debugging policies that
should have been mounted in one place but were being mounted
elsewhere, I'm proposing this change so we can unify the delivery
method for all policies across components on yaml (which has been
supported for quite some time). This will also avoid problems in
the future as the services move from json to yaml.

[1] https://specs.openstack.org/openstack/oslo-specs/specs/victoria/policy-json-to-yaml.html

Signed-off-by: Thiago Brito <thiago.brito@windriver.com>
Change-Id: Id170bf184e44fd77cd53929d474582022a5b6d4f
2021-05-26 18:15:41 -03:00

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default values for ironic.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
---
release_group: null
labels:
  api:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  conductor:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
images:
  tags:
    ironic_manage_cleaning_network: docker.io/openstackhelm/heat:ocata-ubuntu_xenial
    ironic_retrive_cleaning_network: docker.io/openstackhelm/heat:ocata-ubuntu_xenial
    ironic_retrive_swift_config: docker.io/openstackhelm/heat:ocata-ubuntu_xenial
    # Bootstrap image requires curl
    bootstrap: docker.io/openstackhelm/heat:ocata-ubuntu_xenial
    db_drop: docker.io/openstackhelm/heat:ocata-ubuntu_xenial
    db_init: docker.io/openstackhelm/heat:ocata-ubuntu_xenial
    ironic_db_sync: docker.io/openstackhelm/ironic:ocata-ubuntu_xenial
    ks_user: docker.io/openstackhelm/heat:ocata-ubuntu_xenial
    ks_service: docker.io/openstackhelm/heat:ocata-ubuntu_xenial
    ks_endpoints: docker.io/openstackhelm/heat:ocata-ubuntu_xenial
    rabbit_init: docker.io/rabbitmq:3.7-management
    ironic_api: docker.io/openstackhelm/ironic:ocata-ubuntu_xenial
    ironic_conductor: docker.io/openstackhelm/ironic:ocata-ubuntu_xenial
    ironic_pxe: docker.io/openstackhelm/ironic:ocata-ubuntu_xenial
    ironic_pxe_init: docker.io/openstackhelm/ironic:ocata-ubuntu_xenial
    ironic_pxe_http: docker.io/nginx:1.13.3
    dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
    image_repo_sync: docker.io/docker:17.07.0
  pull_policy: "IfNotPresent"
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync
conf:
  paste:
    override:
    append:
  policy: {}
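  # NOTE: with policies delivered in yaml format, operator overrides can be set
  # under conf.policy and should end up in the file named by
  # [oslo_policy]/policy_file further down (/etc/ironic/policy.yaml). A minimal
  # sketch only; the rule name and check string below are illustrative, not
  # defaults shipped by this chart:
  # policy:
  #   "baremetal:node:get": "rule:is_admin"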
  tftp_map_file: |
    re ^(/tftpboot/) /tftpboot/\2
    re ^/tftpboot/ /tftpboot/
    re ^(^/) /tftpboot/\1
    re ^([^/]) /tftpboot/\1
  nginx: |
    user nginx;
    worker_processes 1;
    error_log /var/log/nginx/error.log warn;
    pid /var/run/nginx.pid;
    events {
        worker_connections 1024;
    }
    http {
        include /etc/nginx/mime.types;
        default_type application/octet-stream;
        log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                        '$status $body_bytes_sent "$http_referer" '
                        '"$http_user_agent" "$http_x_forwarded_for"';
        access_log /var/log/nginx/access.log main;
        sendfile on;
        #tcp_nopush on;
        keepalive_timeout 65;
        #gzip on;
        server {
            listen OSH_PXE_IP:{{ tuple "baremetal" "internal" "pxe_http" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }};
            server_name localhost;
            #charset koi8-r;
            #access_log /var/log/nginx/host.access.log main;
            location / {
                root /var/lib/openstack-helm/httpboot;
            }
        }
    }
  ironic:
    DEFAULT:
      log_config_append: /etc/ironic/logging.conf
    api:
      port: null
    conductor:
      api_url: null
    database:
      connection: null
    deploy:
      http_root: /var/lib/openstack-helm/httpboot
    glance:
      auth_type: password
      swift_temp_url_duration: 86400
      temp_url_endpoint_type: swift
      swift_container: glance
      swift_api_version: v1
      auth_section: glance
    inspector:
      auth_type: password
    keystone_authtoken:
      auth_type: password
      auth_version: v3
    neutron:
      auth_type: password
    pxe:
      pxe_append_params: "nofb nomodeset vga=normal ipa-debug=1"
      images_path: /var/lib/openstack-helm/ironic/images
      instance_master_path: /var/lib/openstack-helm/ironic/master_images
      pxe_config_template: $pybasedir/drivers/modules/ipxe_config.template
      uefi_pxe_config_template: $pybasedir/drivers/modules/ipxe_config.template
      tftp_root: /var/lib/openstack-helm/tftpboot
      tftp_master_path: /var/lib/openstack-helm/tftpboot/master_images
      pxe_bootfile_name: undionly.kpxe
      uefi_pxe_bootfile_name: ipxe.efi
      ipxe_enabled: true
    service_catalog:
      auth_type: password
    swift:
      auth_url: null
    oslo_policy:
      policy_file: /etc/ironic/policy.yaml
  logging:
    loggers:
      keys:
        - root
        - ironic
    handlers:
      keys:
        - stdout
        - stderr
        - "null"
    formatters:
      keys:
        - context
        - default
    logger_root:
      level: WARNING
      handlers: 'null'
    logger_ironic:
      level: INFO
      handlers:
        - stdout
      qualname: ironic
    logger_amqp:
      level: WARNING
      handlers: stderr
      qualname: amqp
    logger_amqplib:
      level: WARNING
      handlers: stderr
      qualname: amqplib
    logger_eventletwsgi:
      level: WARNING
      handlers: stderr
      qualname: eventlet.wsgi.server
    logger_sqlalchemy:
      level: WARNING
      handlers: stderr
      qualname: sqlalchemy
    logger_boto:
      level: WARNING
      handlers: stderr
      qualname: boto
    handler_null:
      class: logging.NullHandler
      formatter: default
      args: ()
    handler_stdout:
      class: StreamHandler
      args: (sys.stdout,)
      formatter: context
    handler_stderr:
      class: StreamHandler
      args: (sys.stderr,)
      formatter: context
    formatter_context:
      class: oslo_log.formatters.ContextFormatter
      datefmt: "%Y-%m-%d %H:%M:%S"
    formatter_default:
      format: "%(message)s"
      datefmt: "%Y-%m-%d %H:%M:%S"
network:
  pxe:
    device: ironic-pxe
    neutron_network_name: baremetal
    neutron_subnet_name: baremetal
    neutron_provider_network: ironic
    neutron_subnet_gateway: 172.24.6.1/24
    neutron_subnet_cidr: 172.24.6.0/24
    neutron_subnet_alloc_start: 172.24.6.100
    neutron_subnet_alloc_end: 172.24.6.200
    neutron_subnet_dns_nameserver: 10.96.0.10
  api:
    ingress:
      public: true
      classes:
        namespace: "nginx"
        cluster: "nginx-cluster"
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
    node_port:
      enabled: false
      port: 30511
bootstrap:
  image:
    enabled: true
    openstack:
      enabled: true
      ks_user: ironic
    # NOTE: if source_base is null the source will be used as is
    source_base: http://tarballs.openstack.org/ironic-python-agent/tinyipa/files
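    # For example (values are illustrative, not chart defaults), to fetch the
    # agent ramdisk from a fully qualified URL, leave source_base unset and put
    # the complete location in source:
    #   source_base: null
    #   structured:
    #     ironic-agent.initramfs:
    #       source: http://example.com/images/ironic-agent.initramfs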
    structured:
      ironic-agent.initramfs:
        source: tinyipa-stable-ocata.gz
        disk_format: ari
        container_format: ari
      ironic-agent.kernel:
        source: tinyipa-stable-ocata.vmlinuz
        disk_format: aki
        container_format: aki
  network:
    enabled: true
    openstack:
      enabled: true
  object_store:
    enabled: true
    openstack:
      enabled: true
dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
          - ironic-image-repo-sync
        services:
          - endpoint: node
            service: local_image_registry
  static:
    api:
      jobs:
        - ironic-db-sync
        - ironic-ks-user
        - ironic-ks-endpoints
        - ironic-manage-cleaning-network
        - ironic-rabbit-init
      services:
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: oslo_messaging
    bootstrap:
      jobs: null
      services:
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: image
        - endpoint: internal
          service: baremetal
    conductor:
      jobs:
        - ironic-db-sync
        - ironic-ks-user
        - ironic-ks-endpoints
        - ironic-manage-cleaning-network
        - ironic-rabbit-init
      services:
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: baremetal
        - endpoint: internal
          service: oslo_messaging
    db_drop:
      services:
        - endpoint: internal
          service: oslo_db
    db_init:
      services:
        - endpoint: internal
          service: oslo_db
    db_sync:
      jobs:
        - ironic-db-init
      services:
        - endpoint: internal
          service: oslo_db
    ks_endpoints:
      jobs:
        - ironic-ks-service
      services:
        - endpoint: internal
          service: identity
    ks_service:
      services:
        - endpoint: internal
          service: identity
    ks_user:
      services:
        - endpoint: internal
          service: identity
    rabbit_init:
      services:
        - endpoint: internal
          service: oslo_messaging
    manage_cleaning_network:
      services:
        - endpoint: internal
          service: network
    image_repo_sync:
      services:
        - endpoint: internal
          service: local_image_registry
# Names of secrets used by bootstrap and environmental checks
secrets:
  identity:
    admin: ironic-keystone-admin
    ironic: ironic-keystone-user
    glance: ironic-glance-keystone-user
  oslo_db:
    admin: ironic-db-admin
    ironic: ironic-db-user
  oslo_messaging:
    admin: ironic-rabbitmq-admin
    ironic: ironic-rabbitmq-user
# typically overridden by environmental
# values, but should include all endpoints
# required by this chart
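# For example (hostname is illustrative), a site-level values file can point
# the chart at an external Keystone by overriding just the pieces it needs:
#   endpoints:
#     identity:
#       host_fqdn_override:
#         default: keystone.example.com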
endpoints:
  cluster_domain_suffix: cluster.local
  local_image_registry:
    name: docker-registry
    namespace: docker-registry
    hosts:
      default: localhost
      internal: docker-registry
      node: localhost
    host_fqdn_override:
      default: null
    port:
      registry:
        node: 5000
  identity:
    name: keystone
    auth:
      admin:
        region_name: RegionOne
        username: admin
        password: password
        project_name: admin
        user_domain_name: default
        project_domain_name: default
      glance:
        role: admin
        region_name: RegionOne
        username: glance
        password: password
        project_name: service
        user_domain_name: service
        project_domain_name: service
      ironic:
        role: admin
        region_name: RegionOne
        username: ironic
        password: password
        project_name: service
        user_domain_name: service
        project_domain_name: service
    hosts:
      default: keystone
      internal: keystone-api
    host_fqdn_override:
      default: null
    path:
      default: /v3
    scheme:
      default: http
    port:
      api:
        default: 80
        internal: 5000
  baremetal:
    name: ironic
    hosts:
      default: ironic-api
      public: ironic
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme:
      default: http
    port:
      api:
        default: 6385
        public: 80
      pxe_http:
        default: 8080
  image:
    name: glance
    hosts:
      default: glance-api
      public: glance
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme:
      default: http
    port:
      api:
        default: 9292
        public: 80
  oslo_db:
    auth:
      admin:
        username: root
        password: password
      ironic:
        username: ironic
        password: password
    hosts:
      default: mariadb
    host_fqdn_override:
      default: null
    path: /ironic
    scheme: mysql+pymysql
    port:
      mysql:
        default: 3306
  oslo_cache:
    auth:
      # NOTE(portdirect): this is used to define the value for the keystone
      # authtoken cache encryption key. If not set, it will be populated
      # automatically with a random value, but to take advantage of
      # this feature all services should be set to use the same key
      # and the same memcache service.
      memcache_secret_key: null
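      # For example (value is illustrative), set the same key here and in the
      # other OpenStack charts so they share one token cache:
      #   memcache_secret_key: 4f3d2s1a-shared-key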
    hosts:
      default: memcached
    host_fqdn_override:
      default: null
    port:
      memcache:
        default: 11211
  oslo_messaging:
    auth:
      admin:
        username: rabbitmq
        password: password
      ironic:
        username: ironic
        password: password
    statefulset:
      replicas: 2
      name: rabbitmq-rabbitmq
    hosts:
      default: rabbitmq
    host_fqdn_override:
      default: null
    path: /ironic
    scheme: rabbit
    port:
      amqp:
        default: 5672
      http:
        default: 15672
  network:
    name: neutron
    hosts:
      default: neutron-server
      public: neutron
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme:
      default: 'http'
    port:
      api:
        default: 9696
        public: 80
  object_store:
    name: swift
    namespace: ceph
    auth:
      glance:
        tmpurlkey: supersecret
    hosts:
      default: ceph-rgw
    host_fqdn_override:
      default: null
    path:
      default: /swift/v1/KEY_$(tenant_id)s
    scheme:
      default: http
    port:
      api:
        default: 8088
  fluentd:
    namespace: null
    name: fluentd
    hosts:
      default: fluentd-logging
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme: 'http'
    port:
      service:
        default: 24224
      metrics:
        default: 24220
pod:
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
      weight:
        default: 10
  mounts:
    ironic_api:
      init_container: null
      ironic_api:
        volumeMounts:
        volumes:
    ironic_conductor:
      init_container: null
      ironic_conductor:
        volumeMounts:
        volumes:
    ironic_bootstrap:
      init_container: null
      ironic_bootstrap:
        volumeMounts:
        volumes:
    ironic_db_sync:
      ironic_db_sync:
        volumeMounts:
        volumes:
  replicas:
    api: 1
    conductor: 1
  lifecycle:
    upgrades:
      deployments:
        revision_history: 3
        pod_replacement_strategy: RollingUpdate
        rolling_update:
          max_unavailable: 1
          max_surge: 3
    disruption_budget:
      api:
        min_available: 0
    termination_grace_period:
      api:
        timeout: 30
  resources:
    enabled: false
    api:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    conductor:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    jobs:
      bootstrap:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_drop:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_endpoints:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_service:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_user:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      rabbit_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      tests:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      image_repo_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
network_policy:
  ironic:
    ingress:
      - {}
    egress:
      - {}
manifests:
  configmap_bin: true
  configmap_etc: true
  deployment_api: true
  ingress_api: true
  job_bootstrap: true
  job_db_drop: false
  job_db_init: true
  job_db_sync: true
  job_image_repo_sync: true
  job_ks_endpoints: true
  job_ks_service: true
  job_ks_user: true
  job_manage_cleaning_network: true
  job_rabbit_init: true
  pdb_api: true
  network_policy: false
  secret_db: true
  secret_keystone: true
  secret_rabbitmq: true
  service_api: true
  service_ingress_api: true
  statefulset_conductor: true
...