openstack-helm/neutron/values.yaml

# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default values for neutron.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
release_group: null
images:
  bootstrap: docker.io/kolla/ubuntu-source-neutron-server:3.0.3
  test: docker.io/kolla/ubuntu-binary-rally:4.0.0
  db_init: docker.io/kolla/ubuntu-source-neutron-server:3.0.3
  db_sync: docker.io/kolla/ubuntu-source-neutron-server:3.0.3
  ks_user: docker.io/kolla/ubuntu-source-kolla-toolbox:3.0.3
  ks_service: docker.io/kolla/ubuntu-source-kolla-toolbox:3.0.3
  ks_endpoints: docker.io/kolla/ubuntu-source-kolla-toolbox:3.0.3
  server: docker.io/kolla/ubuntu-source-neutron-server:3.0.3
  dhcp: docker.io/kolla/ubuntu-source-neutron-dhcp-agent:3.0.3
  metadata: docker.io/kolla/ubuntu-source-neutron-metadata-agent:3.0.3
  l3: docker.io/kolla/ubuntu-source-neutron-l3-agent:3.0.3
  neutron_openvswitch_agent: docker.io/kolla/ubuntu-source-neutron-openvswitch-agent:3.0.3
  openvswitch_db_server: docker.io/kolla/ubuntu-source-openvswitch-db-server:3.0.3
  openvswitch_vswitchd: docker.io/kolla/ubuntu-source-openvswitch-vswitchd:3.0.3
  dep_check: docker.io/kolla/ubuntu-source-kubernetes-entrypoint:4.0.0
  pull_policy: "IfNotPresent"
labels:
  # ovs is a special case: it needs a label that can apply to both control
  # hosts and compute hosts, until we get more sophisticated with our
  # daemonset scheduling
  ovs:
    node_selector_key: openvswitch
    node_selector_value: enabled
  agent:
    dhcp:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
    l3:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
    metadata:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
  server:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
network:
  dns:
    kubernetes_domain: cluster.local
    # The skydns server must be listed first; with calico this is
    # consistently 10.96.0.10
    servers:
      - 10.96.0.10
      - 8.8.8.8
  external_bridge: br-ex
  ip_address: 0.0.0.0
  interface:
    # The external interface is automatically added to external_bridge;
    # default is null. The tunnel interface is used for VXLAN tunneling;
    # default is null, with a fallback mechanism that searches for the
    # interface carrying the default route.
    external: null
    tunnel: null
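  # For example (assumption: interface names are host-specific and purely
  # illustrative), a node with eth1 cabled to the provider network and eth0
  # carrying the default route could set:
  #   interface:
  #     external: eth1
  #     tunnel: eth0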
  # To automatically add a physical interface to a specific bridge, for
  # example to add eth3 to bridge br-physnet1, define the following
  # key/value pair under auto_bridge_add:
  #   br-physnet1: eth3
  auto_bridge_add:
  #  br0: if0
  #  br1: iface_two
  server:
    port: 9696
    ingress:
      public: true
      node_port:
        enabled: false
        port: 30096
  metadata:
    port: 8775
bootstrap:
  enabled: false
  script: |
    neutron agent-list
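# A sketch of a fuller bootstrap override (assumption: the script runs with
# admin credentials inside the bootstrap image, so any neutron CLI call is
# valid here; the network shown is illustrative):
#   bootstrap:
#     enabled: true
#     script: |
#       neutron net-create ext-net --router:external \
#         --provider:network_type flat --provider:physical_network external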
dependencies:
  db_init:
    services:
      - service: oslo_db
        endpoint: internal
  db_sync:
    jobs:
      - neutron-db-init
    services:
      - service: oslo_db
        endpoint: internal
  ks_user:
    services:
      - service: identity
        endpoint: internal
  ks_service:
    services:
      - service: identity
        endpoint: internal
  ks_endpoints:
    jobs:
      - neutron-ks-service
    services:
      - service: identity
        endpoint: internal
  server:
    jobs:
      - neutron-db-sync
      - neutron-ks-user
      - neutron-ks-endpoints
    services:
      - service: oslo_db
        endpoint: internal
      - service: oslo_messaging
        endpoint: internal
      - service: oslo_cache
        endpoint: internal
      - service: identity
        endpoint: internal
  dhcp:
    services:
      - service: oslo_messaging
        endpoint: internal
      - service: network
        endpoint: internal
      - service: compute
        endpoint: internal
    daemonset:
      - ovs-agent
  metadata:
    services:
      - service: oslo_messaging
        endpoint: internal
      - service: network
        endpoint: internal
      - service: compute
        endpoint: internal
    daemonset:
      - ovs-agent
  ovs_agent:
    services:
      - service: oslo_messaging
        endpoint: internal
      - service: network
        endpoint: internal
  l3:
    services:
      - service: oslo_messaging
        endpoint: internal
      - service: network
        endpoint: internal
      - service: compute
        endpoint: internal
    daemonset:
      - ovs-agent
  tests:
    services:
      - service: network
        endpoint: internal
      - service: compute
        endpoint: internal
  bootstrap:
    services:
      - service: network
        endpoint: internal
      - service: compute
        endpoint: internal
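# How the lists above behave (assumption: the kubernetes-entrypoint semantics
# used by the dep_check image): each component's init container blocks until
# the named jobs have completed, the named service endpoints resolve, and the
# named daemonsets have a pod running on the local node. Note that helm
# replaces lists wholesale on override, so to add a dependency you restate
# the defaults, e.g. (my-extra-job is hypothetical):
#   dependencies:
#     server:
#       jobs:
#         - neutron-db-sync
#         - neutron-ks-user
#         - neutron-ks-endpoints
#         - my-extra-job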
pod:
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
  mounts:
    neutron_server:
      init_container: null
      neutron_server:
    neutron_dhcp_agent:
      init_container: null
      neutron_dhcp_agent:
    neutron_l3_agent:
      init_container: null
      neutron_l3_agent:
    neutron_metadata_agent:
      init_container: null
      neutron_metadata_agent:
    neutron_ovs_agent:
      init_container: null
      neutron_ovs_agent:
    neutron_tests:
      init_container: null
      neutron_tests:
    neutron_bootstrap:
      init_container: null
      neutron_bootstrap:
  replicas:
    server: 1
  lifecycle:
    upgrades:
      deployments:
        revision_history: 3
        pod_replacement_strategy: RollingUpdate
        rolling_update:
          max_unavailable: 1
          max_surge: 3
      daemonsets:
        pod_replacement_strategy: RollingUpdate
        dhcp_agent:
          enabled: false
          min_ready_seconds: 0
          max_unavailable: 1
        l3_agent:
          enabled: false
          min_ready_seconds: 0
          max_unavailable: 1
        metadata_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        ovs_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        ovs_db:
          enabled: false
          min_ready_seconds: 0
          max_unavailable: 1
        ovs_vswitchd:
          enabled: false
          min_ready_seconds: 0
          max_unavailable: 1
    disruption_budget:
      server:
        min_available: 0
    termination_grace_period:
      server:
        timeout: 30
  resources:
    enabled: false
    agent:
      dhcp:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      l3:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      metadata:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ovs:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
    ovs:
      db:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      vswitchd:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
    server:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    jobs:
      bootstrap:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_endpoints:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_service:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_user:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      tests:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
conf:
  paste:
    override:
    append:
  policy:
    override:
    append:
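  # A hedged note on the override/append pattern (assumption: the
  # helm-toolkit behaviour of this era): `override` replaces the rendered
  # file wholesale with the string supplied, while `append` tacks raw
  # content onto the end of it. For example:
  #   conf:
  #     neutron:
  #       append: |
  #         [DEFAULT]
  #         debug = True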
  neutron:
    override:
    append:
    default:
      neutron:
        default_availability_zones: nova
        api_workers: 4
        allow_overlapping_ips: True
        core_plugin: ml2
        service_plugins: router
        interface_driver: openvswitch
        metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy
        db:
          allow_automatic_l3agent_failover: True
          l3_ha: True
          min_l3_agents_per_router: 2
          max_l3_agents_per_router: 2
          l3_ha_network_type: vxlan
          dhcp_agents_per_network: 3
          network_auto_schedule: True
          router_auto_schedule: True
        agent:
          interface_driver: openvswitch
    oslo_concurrency:
      oslo:
        concurrency:
          lock_path: /var/lib/neutron/tmp
    database:
      oslo:
        db:
          max_retries: -1
    agent:
      neutron:
        agent:
          root_helper: sudo /var/lib/kolla/venv/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
    oslo_messaging_notifications:
      oslo:
        messaging:
          driver:
            - noop
    nova:
      nova:
        auth:
          auth_type: password
          region_name: RegionOne
          project_domain_name: default
          project_name: service
          user_domain_name: default
          username: nova
          password: password
    keystone_authtoken:
      keystonemiddleware:
        auth_token:
          auth_type: password
          auth_version: v3
  ml2_conf:
    override:
    append:
    ml2:
      neutron:
        ml2:
          extension_drivers: port_security
          mechanism_drivers: openvswitch,l2population
          type_drivers: flat,vlan,vxlan
          tenant_network_types: vxlan
    ml2_type_vxlan:
      neutron:
        ml2:
          vni_ranges: 1:1000
          vxlan_group: 239.1.1.1
    ml2_type_flat:
      neutron:
        ml2:
          flat_networks: "*"
    # If you want to use the external network as a tagged provider network,
    # specify a range that includes the intended VLAN target using
    # ml2_type_vlan.neutron.ml2.network_vlan_ranges:
    #   ml2_type_vlan:
    #     neutron:
    #       ml2:
    #         network_vlan_ranges: "external:1100:1110"
  ml2_conf_sriov:
    override:
    append:
  dhcp_agent:
    override:
    append:
    default:
      neutron:
        base:
          agent:
            interface_driver: openvswitch
        dhcp:
          agent:
            dnsmasq_config_file: /etc/neutron/dnsmasq.conf
            enable_isolated_metadata: True
            force_metadata: True
  l3_agent:
    override:
    append:
    default:
      neutron:
        base:
          agent:
            interface_driver: openvswitch
        l3:
          agent:
            agent_mode: legacy
            enable_metadata_proxy: True
            enable_isolated_metadata: True
  openvswitch_agent:
    override:
    append:
    agent:
      neutron:
        ml2:
          ovs:
            agent:
              tunnel_types: vxlan
              l2_population: True
              arp_responder: True
    ovs:
      neutron:
        ml2:
          ovs:
            agent:
              bridge_mappings: "external:br-ex"
              ovsdb_connection: unix:/var/run/openvswitch/db.sock
    securitygroup:
      neutron:
        ml2:
          ovs:
            agent:
              firewall_driver: openvswitch
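  # Worth noting (general neutron ML2/OVS behaviour, not specific to this
  # chart): the physnet name on the left of bridge_mappings ("external")
  # is the same label that flat_networks and network_vlan_ranges in
  # ml2_conf above refer to, e.g. (names illustrative)
  #   bridge_mappings: "physnet1:br-physnet1"
  # pairs with
  #   network_vlan_ranges: "physnet1:1100:1110"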
  metering_agent:
    override:
    append:
  metadata_agent:
    override:
    append:
    default:
      neutron:
        metadata:
          agent:
            nova_metadata_ip: 10.97.120.234
            nova_metadata_port: 80
            nova_metadata_protocol: http
            metadata_proxy_shared_secret: "password"
    cache:
      oslo:
        cache:
          enabled: true
          backend: dogpile.cache.memcached
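  # A caution (general neutron/nova behaviour, not chart-specific):
  # metadata_proxy_shared_secret must match the secret configured on nova's
  # metadata API, or instance metadata requests will be rejected.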
  sriov_agent:
    override:
    append:
  macvtap_agent:
    override:
    append:
  linuxbridge_agent:
    override:
    append:
# Names of secrets used by bootstrap and environmental checks
secrets:
  identity:
    admin: neutron-keystone-admin
    user: neutron-keystone-user
  oslo_db:
    admin: neutron-db-admin
    user: neutron-db-user
# Typically overridden by environmental
# values, but should include all endpoints
# required by this chart
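# A sketch of how these entries are consumed (assumption: the helm-toolkit
# endpoint lookup helpers assemble URLs from these parts): the oslo_db entry
# below, for instance, resolves to
#   mysql+pymysql://neutron:password@mariadb:3306/neutron
# for the 'user' credentials at the 'internal' endpoint.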
endpoints:
  oslo_db:
    auth:
      admin:
        username: root
        password: password
      user:
        username: neutron
        password: password
    hosts:
      default: mariadb
    path: /neutron
    scheme: mysql+pymysql
    port:
      mysql:
        default: 3306
  oslo_messaging:
    auth:
      admin:
        username: admin
        password: password
      user:
        username: rabbitmq
        password: password
    hosts:
      default: rabbitmq
    path: /
    scheme: rabbit
    port:
      amqp:
        default: 5672
  oslo_cache:
    hosts:
      default: memcached
    port:
      memcache:
        default: 11211
  compute:
    name: nova
    hosts:
      default: nova-api
      public: nova
    path:
      default: "/v2/%(tenant_id)s"
    scheme:
      default: 'http'
    port:
      api:
        default: 8774
        public: 80
      metadata:
        default: 8775
      novncproxy:
        default: 6080
  identity:
    name: keystone
    auth:
      admin:
        region_name: RegionOne
        username: admin
        password: password
        project_name: admin
        user_domain_name: default
        project_domain_name: default
      user:
        role: admin
        region_name: RegionOne
        username: neutron
        password: password
        project_name: service
        user_domain_name: default
        project_domain_name: default
    hosts:
      default: keystone-api
      public: keystone
    path:
      default: /v3
    scheme:
      default: http
    port:
      admin:
        default: 35357
      api:
        default: 80
  network:
    name: neutron
    hosts:
      default: neutron-server
      public: neutron
    path:
      default: null
    scheme:
      default: 'http'
    port:
      api:
        default: 9696
        public: 80
manifests:
  configmap_bin: true
  configmap_etc: true
  daemonset_dhcp_agent: true
  daemonset_l3_agent: true
  daemonset_metadata_agent: true
  daemonset_ovs_agent: true
  daemonset_ovs_db: true
  daemonset_ovs_vswitchd: true
  deployment_server: true
  ingress_server: true
  job_bootstrap: true
  job_db_init: true
  job_db_sync: true
  job_ks_endpoints: true
  job_ks_service: true
  job_ks_user: true
  pdb_server: true
  pod_rally_test: true
  secret_db: true
  secret_keystone: true
  service_ingress_server: true
  service_server: true
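# Each flag above gates whether the corresponding template is rendered at
# all, so individual Kubernetes objects can be dropped per deployment. A
# minimal override sketch (assumption: standard helm value overrides),
# skipping the bootstrap job and the ingress:
#   manifests:
#     job_bootstrap: false
#     ingress_server: false
#     service_ingress_server: false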