[[local|localrc]]

enable_plugin kuryr-kubernetes \
    https://opendev.org/openstack/kuryr-kubernetes

# If you do not want stacking to clone new versions of the enabled services,
# for example when you have made local modifications and need to ./unstack.sh
# and ./stack.sh again, uncomment the following:
# RECLONE="no"

# Log settings for better readability
LOGFILE=devstack.log
LOG_COLOR=False

# Credentials
ADMIN_PASSWORD=pass
DATABASE_PASSWORD=pass
RABBIT_PASSWORD=pass
SERVICE_PASSWORD=pass
SERVICE_TOKEN=pass
# Enable Keystone v3
IDENTITY_API_VERSION=3

# In the interest of speed and keeping the deployment lightweight, we are
# explicit about which services we enable
ENABLED_SERVICES=""

# Neutron services
enable_plugin neutron https://opendev.org/openstack/neutron
enable_service q-dhcp
enable_service q-api
enable_service q-meta
enable_service q-svc

# LBaaSv2 service and Haproxy agent
enable_plugin neutron-lbaas \
    https://opendev.org/openstack/neutron-lbaas
enable_service q-lbaasv2

# Currently there is a problem with the ODL LBaaS driver integration, so we
# default to the standard Neutron Haproxy one
#NEUTRON_LBAAS_SERVICE_PROVIDERV2="LOADBALANCERV2:opendaylight:networking_odl.lbaas.driver_v2.OpenDaylightLbaasDriverV2:default"
NEUTRON_LBAAS_SERVICE_PROVIDERV2="LOADBALANCERV2:Haproxy:neutron_lbaas.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default"

ODL_MODE=allinone
ODL_RELEASE=carbon-snapshot-0.6
Q_USE_PUBLIC_VETH=False
PUBLIC_BRIDGE=br-ex
PUBLIC_PHYSICAL_NETWORK=public
ODL_PROVIDER_MAPPINGS=public:br-ex
ODL_L3=True
ODL_NETVIRT_KARAF_FEATURE=odl-neutron-service,odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs,odl-netvirt-openstack,odl-neutron-logger,odl-neutron-hostconfig-ovs
ODL_PORT_BINDING_CONTROLLER=pseudo-agentdb-binding
ODL_TIMEOUT=60
ODL_V2DRIVER=True
ODL_NETVIRT_DEBUG_LOGS=True
EBTABLES_RACE_FIX=True
enable_plugin networking-odl https://opendev.org/openstack/networking-odl

# Keystone
enable_service key

# Dependencies
enable_service mysql
enable_service rabbit

# By default use all the services from the kuryr-kubernetes plugin

# Docker
# ======
# If you already have docker configured, running, and with its socket writable
# by the stack user, you can omit the following line.
enable_plugin devstack-plugin-container https://opendev.org/openstack/devstack-plugin-container

# Etcd
# ====
# If you already have etcd configured and running, you can comment out the
# following line
enable_service etcd3
# and then uncomment and set this one instead:
# KURYR_ETCD_CLIENT_URL="http://etcd_ip:etcd_client_port"
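#
# For instance, assuming an external etcd whose client endpoint is reachable
# at the illustrative address 192.168.1.10, on the standard etcd client
# port 2379:
# KURYR_ETCD_CLIENT_URL="http://192.168.1.10:2379"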

# Kubernetes
# ==========
#
# Kubernetes is run from the hyperkube docker image.
# If you already have a Kubernetes deployment, you can use it instead and omit
# enabling the Kubernetes services (except the Kubelet, which must be run by
# devstack so that it uses our development CNI driver).
#
# The default is, again, for devstack to run the Kubernetes services:
enable_service kubernetes-api
enable_service kubernetes-controller-manager
enable_service kubernetes-scheduler

# We use hyperkube to run the services. You can select the hyperkube image
# and/or version by uncommenting the following ENV vars and setting them to
# values different from these defaults:
# KURYR_HYPERKUBE_IMAGE="gcr.io/google_containers/hyperkube-amd64"
# KURYR_HYPERKUBE_VERSION="v1.3.7"
#
# If port 8080 is already bound to another service, you will need to have the
# Kubernetes API server bind to another port. In order to do that, uncomment
# and set a different port number in:
# KURYR_K8S_API_PORT="8080"
#
# If you want to test with a different range for the Cluster IPs, uncomment
# and set the following ENV var to a different CIDR:
# KURYR_K8S_CLUSTER_IP_RANGE="10.0.0.0/24"
#
# If, however, you are reusing an existing deployment, you should uncomment
# and set an ENV var so that the Kubelet devstack runs can find the API
# server:
# KURYR_K8S_API_URL="http://k8s_api_ip:k8s_api_port"
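#
# For instance, assuming an existing API server at the illustrative address
# 192.168.1.20, listening on the default 8080 port:
# KURYR_K8S_API_URL="http://192.168.1.20:8080"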
#

# Kubelet
# =======
#
# The Kubelet should almost invariably be run by devstack
enable_service kubelet

# You can specify a different location for the hyperkube binary that will be
# extracted from the hyperkube container into the host filesystem:
# KURYR_HYPERKUBE_BINARY=/usr/local/bin/hyperkube
#
# NOTE: KURYR_HYPERKUBE_IMAGE and KURYR_HYPERKUBE_VERSION also affect which
# binary is selected for the Kubelet.

# Kuryr watcher
# =============
#
# Just like the Kubelet, you'll want to have the watcher enabled. It is the
# part of the codebase that connects to the Kubernetes API server to read
# resource events and convert them into Neutron actions:
enable_service kuryr-kubernetes

# Kuryr Daemon
# ============
#
# Kuryr runs the CNI plugin in a daemonized way - i.e. kubelet runs the kuryr
# CNI driver and the driver passes requests to the Kuryr daemon running on
# the node, instead of processing them on its own. This limits the number of
# Kubernetes API requests (as only the Kuryr daemon watches for new pod
# events) and should increase scalability in environments that often delete
# and create pods. Since the Rocky release this is the default deployment
# configuration.
enable_service kuryr-daemon

# Kuryr POD VIF Driver
# ====================
#
# Set up the VIF Driver to be used. The default one is neutron-vif, but if a
# nested deployment is desired, the corresponding driver needs to be set,
# e.g. nested-vlan or nested-macvlan:
# KURYR_POD_VIF_DRIVER=neutron-vif
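#
# For instance, for a nested (VM-based) deployment you would set:
# KURYR_POD_VIF_DRIVER=nested-vlan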

# Kuryr Enabled Handlers
# ======================
#
# By default, some Kuryr handlers are set for the DevStack installation. This
# can be further tweaked in order to enable additional ones, such as the
# Network Policy handlers. If you want to add additional handlers, they can
# be set here (note there must be no spaces around the '='):
# KURYR_ENABLED_HANDLERS=vif,endpoints,service,kuryrloadbalancer,kuryrport
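#
# For example, to also watch Network Policy resources you would extend the
# list with the policy-related handlers. The handler names below are only
# illustrative and vary between releases, so check the kuryr-kubernetes
# documentation for your version:
# KURYR_ENABLED_HANDLERS=vif,endpoints,service,kuryrloadbalancer,kuryrport,policy,pod_label,namespace,kuryrnetworkpolicy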

# Kuryr Ports Pools
# =================
#
# To speed up container boot time, the kuryr ports pool driver can be enabled
# by uncommenting the next line, so that neutron port resources are precreated
# and ready to be used by the pods when needed:
# KURYR_USE_PORTS_POOLS=True
#
# By default the pool driver is noop, i.e., there is no pool. If you want to
# use the pool optimizations, you need to set it to 'neutron' for the
# baremetal case, or to 'nested' for the nested case:
# KURYR_VIF_POOL_DRIVER=noop
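#
# For instance, to match the nested-vlan pod VIF driver suggested above:
# KURYR_VIF_POOL_DRIVER=nested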
#
# There are extra configuration options for the pools. They control the
# minimum number of ports that should be ready to use in each pool, the
# maximum (0 meaning no upper limit), and the batch size for the repopulation
# actions, i.e., the number of neutron ports to create in bulk operations.
# Finally, the update frequency (in seconds) between actions over the pool
# can be set too:
# KURYR_VIF_POOL_MIN=2
# KURYR_VIF_POOL_MAX=0
# KURYR_VIF_POOL_BATCH=5
# KURYR_VIF_POOL_UPDATE_FREQ=30
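#
# With the values above, each pool keeps at least 2 precreated ports ready,
# has no upper bound on its size, repopulates 5 ports per bulk request, and
# re-evaluates the pools every 30 seconds.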