# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Default values for neutron.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
---
release_group: null

images:
  bootstrap: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
  test: docker.io/kolla/ubuntu-source-rally:4.0.0
  db_init: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
  db_sync: docker.io/kolla/ubuntu-source-neutron-server:3.0.3
  ks_user: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
  ks_service: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
  ks_endpoints: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
  server: docker.io/kolla/ubuntu-source-neutron-server:3.0.3
  dhcp: docker.io/kolla/ubuntu-source-neutron-dhcp-agent:3.0.3
  metadata: docker.io/kolla/ubuntu-source-neutron-metadata-agent:3.0.3
  l3: docker.io/kolla/ubuntu-source-neutron-l3-agent:3.0.3
  neutron_openvswitch_agent: docker.io/kolla/ubuntu-source-neutron-openvswitch-agent:3.0.3
  neutron_linuxbridge_agent: docker.io/kolla/ubuntu-source-neutron-linuxbridge-agent:3.0.3
  dep_check: docker.io/kolla/ubuntu-source-kubernetes-entrypoint:4.0.0
  pull_policy: "IfNotPresent"

labels:
  # ovs is a special case, requiring a special
  # label that can apply to both control hosts
  # and compute hosts, until we get more sophisticated
  # with our daemonset scheduling
  ovs:
    node_selector_key: openvswitch
    node_selector_value: enabled
  lb:
    node_selector_key: linuxbridge
    node_selector_value: enabled
  agent:
    dhcp:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
    l3:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
    metadata:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
  server:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled

network:
  # provide what type of network wiring will be used
  # possible options: ovs, linuxbridge
  backend: ovs
  external_bridge: br-ex
  ip_address: 0.0.0.0
  interface:
    # External interface will be automatically added to external_bridge.
    # Default is null.
    # Tunnel interface will be used for VXLAN tunneling. Default is null, with
    # fallback mechanism to search for interface with default routing.
    external: null
    tunnel: null
  # To automatically add a physical interface to a specific bridge using,
  # for example eth3 to bridge br-physnet1 define the following key/value
  # in auto_bridge_add:
  #   br-physnet1: eth3
  auto_bridge_add:
    # br0: if0
    # br1: iface_two
  server:
    port: 9696
    ingress:
      public: true
    node_port:
      enabled: false
      port: 30096
  metadata:
    port: 8775

bootstrap:
  enabled: false
  script: |
    neutron agent-list

dependencies:
  db_init:
    services:
      - service: oslo_db
        endpoint: internal
  db_sync:
    jobs:
      - neutron-db-init
    services:
      - service: oslo_db
        endpoint: internal
  ks_user:
    services:
      - service: identity
        endpoint: internal
  ks_service:
    services:
      - service: identity
        endpoint: internal
  ks_endpoints:
    jobs:
      - neutron-ks-service
    services:
      - service: identity
        endpoint: internal
  server:
    jobs:
      - neutron-db-sync
      - neutron-ks-user
      - neutron-ks-endpoints
    services:
      - service: oslo_db
        endpoint: internal
      - service: oslo_messaging
        endpoint: internal
      - service: oslo_cache
        endpoint: internal
      - service: identity
        endpoint: internal
  dhcp:
    services:
      - service: oslo_messaging
        endpoint: internal
      - service: network
        endpoint: internal
      - service: compute
        endpoint: internal
    daemonset:
      # this should be set to corresponding neutron L2 agent
      - ovs-agent
  metadata:
    services:
      - service: oslo_messaging
        endpoint: internal
      - service: network
        endpoint: internal
      - service: compute
        endpoint: internal
    daemonset:
      # this should be set to corresponding neutron L2 agent
      - ovs-agent
  ovs_agent:
    services:
      - service: oslo_messaging
        endpoint: internal
      - service: network
        endpoint: internal
    daemonset:
      - ovs-vswitchd
      - ovs-db
  lb_agent:
    services:
      - service: oslo_messaging
        endpoint: internal
      - service: network
        endpoint: internal
  l3:
    services:
      - service: oslo_messaging
        endpoint: internal
      - service: network
        endpoint: internal
      - service: compute
        endpoint: internal
    daemonset:
      # this should be set to corresponding neutron L2 agent
      - ovs-agent
  tests:
    services:
      - service: network
        endpoint: internal
      - service: compute
        endpoint: internal
  bootstrap:
    services:
      - service: network
        endpoint: internal
      - service: compute
        endpoint: internal

pod:
  user:
    neutron:
      uid: 1000
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
  mounts:
    neutron_server:
      init_container: null
      neutron_server:
    neutron_dhcp_agent:
      init_container: null
      neutron_dhcp_agent:
    neutron_l3_agent:
      init_container: null
      neutron_l3_agent:
    neutron_lb_agent:
      init_container: null
      neutron_lb_agent:
    neutron_metadata_agent:
      init_container: null
      neutron_metadata_agent:
    neutron_ovs_agent:
      init_container: null
      neutron_ovs_agent:
    neutron_tests:
      init_container: null
      neutron_tests:
    neutron_bootstrap:
      init_container: null
      neutron_bootstrap:
  replicas:
    server: 1
  lifecycle:
    upgrades:
      deployments:
        revision_history: 3
        pod_replacement_strategy: RollingUpdate
        rolling_update:
          max_unavailable: 1
          max_surge: 3
      daemonsets:
        pod_replacement_strategy: RollingUpdate
        dhcp_agent:
          enabled: false
          min_ready_seconds: 0
          max_unavailable: 1
        l3_agent:
          enabled: false
          min_ready_seconds: 0
          max_unavailable: 1
        lb_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        metadata_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        ovs_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
    disruption_budget:
      server:
        min_available: 0
    termination_grace_period:
      server:
        timeout: 30
  resources:
    enabled: false
    agent:
      dhcp:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      l3:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      lb:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      metadata:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ovs:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
    server:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    jobs:
      bootstrap:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_endpoints:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_service:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_user:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      tests:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"

conf:
  rally_tests:
    run_tempest: false
    override:
    append:
  paste:
    override:
    append:
  policy:
    context_is_admin: role:admin
    owner: tenant_id:%(tenant_id)s
    admin_or_owner: rule:context_is_admin or rule:owner
    context_is_advsvc: role:advsvc
    admin_or_network_owner: rule:context_is_admin or tenant_id:%(network:tenant_id)s
    admin_owner_or_network_owner: rule:owner or rule:admin_or_network_owner
    admin_only: rule:context_is_admin
    regular_user: ''
    shared: field:networks:shared=True
    shared_subnetpools: field:subnetpools:shared=True
    shared_address_scopes: field:address_scopes:shared=True
    external: field:networks:router:external=True
    default: rule:admin_or_owner
    create_subnet: rule:admin_or_network_owner
    create_subnet:segment_id: rule:admin_only
    create_subnet:service_types: rule:admin_only
    get_subnet: rule:admin_or_owner or rule:shared
    get_subnet:segment_id: rule:admin_only
    update_subnet: rule:admin_or_network_owner
    update_subnet:service_types: rule:admin_only
    delete_subnet: rule:admin_or_network_owner
    create_subnetpool: ''
    create_subnetpool:shared: rule:admin_only
    create_subnetpool:is_default: rule:admin_only
    get_subnetpool: rule:admin_or_owner or rule:shared_subnetpools
    update_subnetpool: rule:admin_or_owner
    update_subnetpool:is_default: rule:admin_only
    delete_subnetpool: rule:admin_or_owner
    create_address_scope: ''
    create_address_scope:shared: rule:admin_only
    get_address_scope: rule:admin_or_owner or rule:shared_address_scopes
    update_address_scope: rule:admin_or_owner
    update_address_scope:shared: rule:admin_only
    delete_address_scope: rule:admin_or_owner
    create_network: ''
    get_network: rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc
    get_network:router:external: rule:regular_user
    get_network:segments: rule:admin_only
    get_network:provider:network_type: rule:admin_only
    get_network:provider:physical_network: rule:admin_only
    get_network:provider:segmentation_id: rule:admin_only
    get_network:queue_id: rule:admin_only
    get_network_ip_availabilities: rule:admin_only
    get_network_ip_availability: rule:admin_only
    create_network:shared: rule:admin_only
    create_network:router:external: rule:admin_only
    create_network:is_default: rule:admin_only
    create_network:segments: rule:admin_only
    create_network:provider:network_type: rule:admin_only
    create_network:provider:physical_network: rule:admin_only
    create_network:provider:segmentation_id: rule:admin_only
    update_network: rule:admin_or_owner
    update_network:segments: rule:admin_only
    update_network:shared: rule:admin_only
    update_network:provider:network_type: rule:admin_only
    update_network:provider:physical_network: rule:admin_only
    update_network:provider:segmentation_id: rule:admin_only
    update_network:router:external: rule:admin_only
    delete_network: rule:admin_or_owner
    create_segment: rule:admin_only
    get_segment: rule:admin_only
    update_segment: rule:admin_only
    delete_segment: rule:admin_only
    network_device: 'field:port:device_owner=~^network:'
    create_port: ''
    create_port:device_owner: not rule:network_device or rule:context_is_advsvc or rule:admin_or_network_owner
    create_port:mac_address: rule:context_is_advsvc or rule:admin_or_network_owner
    create_port:fixed_ips: rule:context_is_advsvc or rule:admin_or_network_owner
    create_port:port_security_enabled: rule:context_is_advsvc or rule:admin_or_network_owner
    create_port:binding:host_id: rule:admin_only
    create_port:binding:profile: rule:admin_only
    create_port:mac_learning_enabled: rule:context_is_advsvc or rule:admin_or_network_owner
    create_port:allowed_address_pairs: rule:admin_or_network_owner
    get_port: rule:context_is_advsvc or rule:admin_owner_or_network_owner
    get_port:queue_id: rule:admin_only
    get_port:binding:vif_type: rule:admin_only
    get_port:binding:vif_details: rule:admin_only
    get_port:binding:host_id: rule:admin_only
    get_port:binding:profile: rule:admin_only
    update_port: rule:admin_or_owner or rule:context_is_advsvc
    update_port:device_owner: not rule:network_device or rule:context_is_advsvc or rule:admin_or_network_owner
    update_port:mac_address: rule:admin_only or rule:context_is_advsvc
    update_port:fixed_ips: rule:context_is_advsvc or rule:admin_or_network_owner
    update_port:port_security_enabled: rule:context_is_advsvc or rule:admin_or_network_owner
    update_port:binding:host_id: rule:admin_only
    update_port:binding:profile: rule:admin_only
    update_port:mac_learning_enabled: rule:context_is_advsvc or rule:admin_or_network_owner
    update_port:allowed_address_pairs: rule:admin_or_network_owner
    delete_port: rule:context_is_advsvc or rule:admin_owner_or_network_owner
    get_router:ha: rule:admin_only
    create_router: rule:regular_user
    create_router:external_gateway_info:enable_snat: rule:admin_only
    create_router:distributed: rule:admin_only
    create_router:ha: rule:admin_only
    get_router: rule:admin_or_owner
    get_router:distributed: rule:admin_only
    update_router:external_gateway_info:enable_snat: rule:admin_only
    update_router:distributed: rule:admin_only
    update_router:ha: rule:admin_only
    delete_router: rule:admin_or_owner
    add_router_interface: rule:admin_or_owner
    remove_router_interface: rule:admin_or_owner
    create_router:external_gateway_info:external_fixed_ips: rule:admin_only
    update_router:external_gateway_info:external_fixed_ips: rule:admin_only
    insert_rule: rule:admin_or_owner
    remove_rule: rule:admin_or_owner
    create_qos_queue: rule:admin_only
    get_qos_queue: rule:admin_only
    update_agent: rule:admin_only
    delete_agent: rule:admin_only
    get_agent: rule:admin_only
    create_dhcp-network: rule:admin_only
    delete_dhcp-network: rule:admin_only
    get_dhcp-networks: rule:admin_only
    create_l3-router: rule:admin_only
    delete_l3-router: rule:admin_only
    get_l3-routers: rule:admin_only
    get_dhcp-agents: rule:admin_only
    get_l3-agents: rule:admin_only
    get_loadbalancer-agent: rule:admin_only
    get_loadbalancer-pools: rule:admin_only
    get_agent-loadbalancers: rule:admin_only
    get_loadbalancer-hosting-agent: rule:admin_only
    create_floatingip: rule:regular_user
    create_floatingip:floating_ip_address: rule:admin_only
    update_floatingip: rule:admin_or_owner
    delete_floatingip: rule:admin_or_owner
    get_floatingip: rule:admin_or_owner
    create_network_profile: rule:admin_only
    update_network_profile: rule:admin_only
    delete_network_profile: rule:admin_only
    get_network_profiles: ''
    get_network_profile: ''
    update_policy_profiles: rule:admin_only
    get_policy_profiles: ''
    get_policy_profile: ''
    create_metering_label: rule:admin_only
    delete_metering_label: rule:admin_only
    get_metering_label: rule:admin_only
    create_metering_label_rule: rule:admin_only
    delete_metering_label_rule: rule:admin_only
    get_metering_label_rule: rule:admin_only
    get_service_provider: rule:regular_user
    get_lsn: rule:admin_only
    create_lsn: rule:admin_only
    create_flavor: rule:admin_only
    update_flavor: rule:admin_only
    delete_flavor: rule:admin_only
    get_flavors: rule:regular_user
    get_flavor: rule:regular_user
    create_service_profile: rule:admin_only
    update_service_profile: rule:admin_only
    delete_service_profile: rule:admin_only
    get_service_profiles: rule:admin_only
    get_service_profile: rule:admin_only
    get_policy: rule:regular_user
    create_policy: rule:admin_only
    update_policy: rule:admin_only
    delete_policy: rule:admin_only
    get_policy_bandwidth_limit_rule: rule:regular_user
    create_policy_bandwidth_limit_rule: rule:admin_only
    delete_policy_bandwidth_limit_rule: rule:admin_only
    update_policy_bandwidth_limit_rule: rule:admin_only
    get_policy_dscp_marking_rule: rule:regular_user
    create_policy_dscp_marking_rule: rule:admin_only
    delete_policy_dscp_marking_rule: rule:admin_only
    update_policy_dscp_marking_rule: rule:admin_only
    get_rule_type: rule:regular_user
    get_policy_minimum_bandwidth_rule: rule:regular_user
    create_policy_minimum_bandwidth_rule: rule:admin_only
    delete_policy_minimum_bandwidth_rule: rule:admin_only
    update_policy_minimum_bandwidth_rule: rule:admin_only
    restrict_wildcard: "(not field:rbac_policy:target_tenant=*) or rule:admin_only"
    create_rbac_policy: ''
    create_rbac_policy:target_tenant: rule:restrict_wildcard
    update_rbac_policy: rule:admin_or_owner
    update_rbac_policy:target_tenant: rule:restrict_wildcard and rule:admin_or_owner
    get_rbac_policy: rule:admin_or_owner
    delete_rbac_policy: rule:admin_or_owner
    create_flavor_service_profile: rule:admin_only
    delete_flavor_service_profile: rule:admin_only
    get_flavor_service_profile: rule:regular_user
    get_auto_allocated_topology: rule:admin_or_owner
    create_trunk: rule:regular_user
    get_trunk: rule:admin_or_owner
    delete_trunk: rule:admin_or_owner
    get_subports: ''
    add_subports: rule:admin_or_owner
    remove_subports: rule:admin_or_owner
  neutron_sudoers:
    override:
    append:
  rootwrap:
    override:
    append:
  rootwrap_filters:
    debug:
      override:
      append:
    dibbler:
      override:
      append:
    ipset_firewall:
      override:
      append:
    l3:
      override:
      append:
    netns_cleanup:
      override:
      append:
    dhcp:
      override:
      append:
    ebtables:
      override:
      append:
    iptables_firewall:
      override:
      append:
    linuxbridge_plugin:
      override:
      append:
    openvswitch_plugin:
      override:
      append:
  neutron:
    override:
    append:
    default:
      neutron:
        default_availability_zones: nova
        api_workers: 4
        allow_overlapping_ips: True
        # core_plugin can be: ml2, calico
        core_plugin: ml2
        # service_plugin can be: router, odl-router, empty for calico,
        # networking_ovn.l3.l3_ovn.OVNL3RouterPlugin for OVN
        service_plugins: router
        metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy
        db:
          allow_automatic_l3agent_failover: True
          l3_ha: True
          min_l3_agents_per_router: 2
          max_l3_agents_per_router: 2
          l3_ha_network_type: vxlan
          dhcp_agents_per_network: 3
          network_auto_schedule: True
          router_auto_schedule: True
        agent:
          # we can define here, which driver we are using: openvswitch or linuxbridge
          interface_driver: openvswitch
    oslo_concurrency:
      oslo:
        concurrency:
          lock_path: /var/lib/neutron/tmp
    database:
      oslo:
        db:
          max_retries: -1
    agent:
      neutron:
        agent:
          root_helper: sudo /var/lib/kolla/venv/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
    oslo_messaging_notifications:
      oslo:
        messaging:
          driver:
            - noop
    nova:
      nova:
        auth:
          auth_type: password
          region_name: RegionOne
          project_domain_name: default
          project_name: service
          user_domain_name: default
          username: nova
          password: password
    keystone_authtoken:
      keystonemiddleware:
        auth_token:
          auth_type: password
          auth_version: v3
          memcache_security_strategy: ENCRYPT
  ml2_conf:
    override:
    append:
    ml2:
      neutron:
        ml2:
          extension_drivers: port_security
          # mechanism_drivers can be: openvswitch, linuxbridge,
          # opendaylight, ovn
          mechanism_drivers: openvswitch,l2population
          type_drivers: flat,vlan,vxlan
          tenant_network_types: vxlan
    ml2_type_vxlan:
      neutron:
        ml2:
          vni_ranges: 1:1000
          vxlan_group: 239.1.1.1
    ml2_type_flat:
      neutron:
        ml2:
          flat_networks: "*"
    # If you want to use the external network as a tagged provider network,
    # a range should be specified including the intended VLAN target
    # using ml2_type_vlan.neutron.ml2.network_vlan_ranges:
    # ml2_type_vlan:
    #   neutron:
    #     ml2:
    #       network_vlan_ranges: "external:1100:1110"
  ml2_conf_sriov:
    override:
    append:
  dhcp_agent:
    override:
    append:
    default:
      neutron:
        base:
          agent:
            # we can define here, which driver we are using:
            # openvswitch or linuxbridge
            interface_driver: openvswitch
        dhcp:
          agent:
            dnsmasq_config_file: /etc/neutron/dnsmasq.conf
            enable_isolated_metadata: True
            force_metadata: True
  l3_agent:
    override:
    append:
    default:
      neutron:
        base:
          agent:
            # we can define here, which driver we are using:
            # openvswitch or linuxbridge
            interface_driver: openvswitch
        l3:
          agent:
            agent_mode: legacy
            enable_metadata_proxy: True
            enable_isolated_metadata: True
  openvswitch_agent:
    override:
    append:
    agent:
      neutron:
        ml2:
          ovs:
            agent:
              tunnel_types: vxlan
              l2_population: True
              arp_responder: True
    ovs:
      neutron:
        ml2:
          ovs:
            agent:
              bridge_mappings: "external:br-ex"
              ovsdb_connection: unix:/var/run/openvswitch/db.sock
    securitygroup:
      neutron:
        ml2:
          ovs:
            agent:
              firewall_driver: openvswitch
  metering_agent:
    override:
    append:
  metadata_agent:
    override:
    append:
    default:
      neutron:
        metadata:
          agent:
            nova_metadata_ip: 10.97.120.234
            nova_metadata_port: 80
            nova_metadata_protocol: http
            metadata_proxy_shared_secret: "password"
    cache:
      oslo:
        cache:
          enabled: true
          backend: dogpile.cache.memcached
  sriov_agent:
    override:
    append:
  macvtap_agent:
    override:
    append:
  linuxbridge_agent:
    override:
    append:
    linux_bridge:
      neutron:
        ml2:
          linuxbridge:
            agent:
              # To define Flat and VLAN connections, in LB we can assign
              # specific interface to the flat/vlan network name using:
              # physical_interface_mappings: "external:eth3"
              # Or we can set the mapping between the network and bridge:
              bridge_mappings: "external:br-ex"
              # The two above options are exclusive, do not use both of them at once
    securitygroup:
      neutron:
        ml2:
          linuxbridge:
            agent:
              firewall_driver: neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
    vxlan:
      neutron:
        ml2:
          linuxbridge:
            agent:
              l2_population: True
              arp_responder: True

# Names of secrets used by bootstrap and environmental checks
secrets:
  identity:
    admin: neutron-keystone-admin
    user: neutron-keystone-user
  oslo_db:
    admin: neutron-db-admin
    user: neutron-db-user

# typically overridden by environmental
# values, but should include all endpoints
# required by this chart
# NOTE(review): the key spelling 'host_fqdn_overide' (sic) is preserved as-is
# throughout — confirm against the chart templates before renaming it.
endpoints:
  cluster_domain_suffix: cluster.local
  oslo_db:
    auth:
      admin:
        username: root
        password: password
      user:
        username: neutron
        password: password
    hosts:
      default: mariadb
    host_fqdn_overide:
      default: null
    path: /neutron
    scheme: mysql+pymysql
    port:
      mysql:
        default: 3306
  oslo_messaging:
    auth:
      admin:
        username: admin
        password: password
      user:
        username: rabbitmq
        password: password
    hosts:
      default: rabbitmq
    host_fqdn_overide:
      default: null
    path: /
    scheme: rabbit
    port:
      amqp:
        default: 5672
  oslo_cache:
    hosts:
      default: memcached
    host_fqdn_overide:
      default: null
    port:
      memcache:
        default: 11211
  compute:
    name: nova
    hosts:
      default: nova-api
      public: nova
    host_fqdn_overide:
      default: null
    path:
      default: "/v2/%(tenant_id)s"
    scheme:
      default: 'http'
    port:
      api:
        default: 8774
        public: 80
      metadata:
        default: 8775
      novncproxy:
        default: 6080
  identity:
    name: keystone
    auth:
      admin:
        region_name: RegionOne
        username: admin
        password: password
        project_name: admin
        user_domain_name: default
        project_domain_name: default
      user:
        role: admin
        region_name: RegionOne
        username: neutron
        password: password
        project_name: service
        user_domain_name: default
        project_domain_name: default
    hosts:
      default: keystone-api
      public: keystone
    host_fqdn_overide:
      default: null
    path:
      default: /v3
    scheme:
      default: http
    port:
      admin:
        default: 35357
      api:
        default: 80
  network:
    name: neutron
    hosts:
      default: neutron-server
      public: neutron
    host_fqdn_overide:
      default: null
    path:
      default: null
    scheme:
      default: 'http'
    port:
      api:
        default: 9696
        public: 80

manifests:
  configmap_bin: true
  configmap_etc: true
  daemonset_dhcp_agent: true
  daemonset_l3_agent: true
  daemonset_lb_agent: false
  daemonset_metadata_agent: true
  daemonset_ovs_agent: true
  deployment_server: true
  ingress_server: true
  job_bootstrap: true
  job_db_init: true
  job_db_sync: true
  job_ks_endpoints: true
  job_ks_service: true
  job_ks_user: true
  pdb_server: true
  pod_rally_test: true
  secret_db: true
  secret_keystone: true
  service_ingress_server: true
  service_server: true