[[local|localrc]]
enable_plugin kuryr-kubernetes https://opendev.org/openstack/kuryr-kubernetes
# If you do not want stacking to clone new versions of the enabled services,
# for example when you have made local modifications and need to ./unstack.sh
# and ./stack.sh again, uncomment the following:
# RECLONE="no"
# Log settings for better readability
LOGFILE=devstack.log
LOG_COLOR=False
# Credentials
ADMIN_PASSWORD=pass
DATABASE_PASSWORD=pass
RABBIT_PASSWORD=pass
SERVICE_PASSWORD=pass
SERVICE_TOKEN=pass
# Disable services to conserve resource usage
disable_service cinder
disable_service dstat
disable_service n-novnc
disable_service horizon
# If you plan to run tempest tests on devstack, you should comment out or
# remove the line below
disable_service tempest
# Neutron services
# ================
enable_plugin neutron https://opendev.org/openstack/neutron
enable_service q-agt
enable_service q-dhcp
enable_service q-l3
enable_service q-meta
enable_service q-svc
enable_service neutron-tag-ports-during-bulk-creation
# Disable OVN in favor of OVS
Q_AGENT="openvswitch"
Q_ML2_PLUGIN_MECHANISM_DRIVERS="openvswitch"
Q_ML2_TENANT_NETWORK_TYPE="vxlan"
# Set workaround for
FLOATING_RANGE="172.24.5.0/24"
PUBLIC_NETWORK_GATEWAY="172.24.5.1"
# VAR RUN PATH
# =============
# VAR_RUN_PATH=/var/run
# OCTAVIA
# =======
# Uncomment it to use L2 communication between loadbalancer and member pods
# KURYR_K8S_OCTAVIA_MEMBER_MODE=L2
# Uncomment to change the Octavia load balancer listener client and member
# inactivity timeouts from the default of 50000 ms.
# KURYR_TIMEOUT_CLIENT_DATA=50000
# KURYR_TIMEOUT_MEMBER_DATA=50000
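# For example, to raise both inactivity timeouts to two minutes (the value is
# purely illustrative):
# KURYR_TIMEOUT_CLIENT_DATA=120000
# KURYR_TIMEOUT_MEMBER_DATA=120000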
# Octavia LBaaSv2
LIBS_FROM_GIT+=python-octaviaclient
enable_plugin octavia https://opendev.org/openstack/octavia
enable_service octavia
enable_service o-api
enable_service o-cw
enable_service o-hm
enable_service o-hk
## Octavia Deps
# In order to skip building the Octavia Amphora image you can fetch a
# pre-created qcow2 image from [1] and set up Octavia to use it by
# uncommenting the following lines.
# [1] https://tarballs.openstack.org/octavia/test-images/test-only-amphora-x64-haproxy-ubuntu-xenial.qcow2
# OCTAVIA_AMP_IMAGE_FILE=/tmp/test-only-amphora-x64-haproxy-ubuntu-xenial.qcow2
# OCTAVIA_AMP_IMAGE_SIZE=3
# OCTAVIA_AMP_IMAGE_NAME=test-only-amphora-x64-haproxy-ubuntu-xenial
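# A quick way to fetch that image to the path assumed above (the destination
# path simply matches the example setting):
# curl -L -o /tmp/test-only-amphora-x64-haproxy-ubuntu-xenial.qcow2 https://tarballs.openstack.org/octavia/test-images/test-only-amphora-x64-haproxy-ubuntu-xenial.qcow2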
# CRI
# ===
# If you already have either CRI-O or Docker configured, running and with its
# socket writable by the stack user, you can omit the following lines.
enable_plugin devstack-plugin-container https://opendev.org/openstack/devstack-plugin-container
# We are using CRI-O by default. Its version should match the K8s version:
CONTAINER_ENGINE="crio"
CRIO_VERSION="1.25"
# Etcd
# ====
# The default is for devstack to run etcd for you. Uncomment the next line to
# disable it if you already have etcd running.
#disable_service etcd3
# If you already have an etcd cluster configured and running, you can just
# uncomment the disable_service etcd3 line above,
# then uncomment and set the following line:
# KURYR_ETCD_CLIENT_URL="http://etcd_ip:etcd_client_port"
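# For instance, for an external etcd listening on the standard client port
# (the address below is hypothetical):
# KURYR_ETCD_CLIENT_URL="http://192.168.0.10:2379"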
# Kubernetes
# ==========
#
# Kubernetes is installed by kubeadm (which is installed from the proper
# repository).
# If you already have a Kubernetes deployment, you can use it instead and omit
# enabling the Kubernetes service.
# TODO(gryf): review the part with an existing cluster for the kubelet
# configuration instead of running it via devstack - it needs to be
# configured to use our CNI.
#
# The default is, again, for devstack to run the Kubernetes services:
enable_service kubernetes-master
# If port 6443 is already bound to another service, you will need to have the
# kubernetes API server bind to another port. In order to do that, uncomment
# and set a different port number in:
# KURYR_K8S_API_PORT="6443"
#
# If, however, you are reusing an existing deployment, you should uncomment and
# set an ENV var so that the Kubelet run by devstack can find the API server:
#
# TODO(gryf): revisit this scenario. Do we even support this in devstack?
#
# KURYR_K8S_API_URL="http (or https, if K8S is SSL/TLS enabled)://k8s_api_ip:k8s_api_port"
#
# If the kubernetes API server is 'https' enabled, set the paths of the SSL
# cert files
# KURYR_K8S_API_CERT="/etc/kubernetes/certs/kubecfg.crt"
# KURYR_K8S_API_KEY="/etc/kubernetes/certs/kubecfg.key"
# KURYR_K8S_API_CACERT="/etc/kubernetes/certs/ca.crt"
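# For example, for a TLS-enabled API server reachable on the default port
# (the address is only illustrative):
# KURYR_K8S_API_URL="https://192.168.0.20:6443"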
# Kuryr watcher
# =============
#
# Just like the Kubelet, you'll want to have the watcher enabled. It is the
# part of the codebase that connects to the Kubernetes API server to read the
# resource events and convert them into Neutron actions.
enable_service kuryr-kubernetes
# Kuryr Daemon
# ============
#
# Kuryr can run the CNI plugin in a daemonized way - i.e. kubelet will run the
# kuryr CNI driver and the driver will pass requests to the Kuryr daemon
# running on the node, instead of processing them on its own. This limits the
# number of Kubernetes API requests (as only the Kuryr daemon will watch for
# new pod events) and should increase scalability in environments that often
# delete and create pods. Since the Rocky release this is the default
# deployment configuration.
enable_service kuryr-daemon
# Containerized Kuryr
# ===================
#
# Kuryr can be installed on Kubernetes as a pair of Deployment
# (kuryr-controller) and DaemonSet (kuryr-cni) or as systemd services. If you
# want DevStack to deploy the Kuryr services as pods on Kubernetes, comment out
# (or remove) the next line.
KURYR_K8S_CONTAINERIZED_DEPLOYMENT=False
# Kuryr POD VIF Driver
# ====================
#
# Set up the VIF driver to be used. The default one is neutron-vif, but if
# a nested deployment is desired, the corresponding driver needs to be set,
# e.g. nested-vlan or nested-macvlan
# KURYR_POD_VIF_DRIVER=neutron-vif
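# For example, when running DevStack inside an OpenStack VM (a nested
# deployment), you would typically use (a sketch, adjust to your setup):
# KURYR_POD_VIF_DRIVER=nested-vlan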
# Kuryr Enabled Handlers
# ======================
#
# By default, a basic set of Kuryr handlers is enabled for the DevStack
# installation. This can be further tweaked in order to enable additional ones,
# such as Network Policy. If you want to change the enabled handlers, they can
# be set here:
# KURYR_ENABLED_HANDLERS=vif,endpoints,service,kuryrloadbalancer,kuryrport
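# For instance, Network Policy support needs the policy-related handlers on
# top of the defaults. The exact handler names depend on the Kuryr release,
# so treat the line below as an illustration and check the Kuryr
# documentation for your version:
# KURYR_ENABLED_HANDLERS=vif,endpoints,service,kuryrloadbalancer,kuryrport,policy,pod_label,namespace,kuryrnetworkpolicy,kuryrnetwork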
# Kuryr Ports Pools
# =================
#
# To speed up container boot time the kuryr ports pool driver can be enabled
# by uncommenting the next line, so that neutron port resources are precreated
# and ready to be used by the pods when needed
# KURYR_USE_PORTS_POOLS=True
#
# By default the pool driver is noop, i.e., there is no pool. If you want to
# use the pool optimizations you need to set it to 'neutron' for the baremetal
# case, or to 'nested' for the nested case
# KURYR_VIF_POOL_DRIVER=noop
#
# There are extra configuration options for the pools: the minimum number of
# ports that should be ready to use in each pool, the maximum (0 to unset), and
# the batch size for the repopulation actions, i.e., the number of neutron
# ports to create in bulk operations. Finally, the update frequency between
# actions over the pool can be set too
# KURYR_VIF_POOL_MIN=2
# KURYR_VIF_POOL_MAX=0
# KURYR_VIF_POOL_BATCH=5
# KURYR_VIF_POOL_UPDATE_FREQ=30
# Kuryr VIF Pool Manager
# ======================
#
# Uncomment the next line to enable the pool manager. Note that it requires the
# nested-vlan pod VIF driver, as well as the ports pool being enabled and
# configured with the nested driver (see the combined sketch below)
# KURYR_VIF_POOL_MANAGER=True
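# A minimal combination for the pool manager could look like this (a sketch
# assuming a nested deployment; adjust the values to your environment):
# KURYR_POD_VIF_DRIVER=nested-vlan
# KURYR_USE_PORTS_POOLS=True
# KURYR_VIF_POOL_DRIVER=nested
# KURYR_VIF_POOL_MANAGER=True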
# Kuryr Multi-VIF Driver
# ======================
# Uncomment the next line to enable the npwg multi-vif driver.
# Default value: noop
# KURYR_MULTI_VIF_DRIVER=npwg_multiple_interfaces
# Kuryr own router
# ================
# Uncomment the next line to force devstack to create a new router for kuryr
# networks instead of using the default one created by devstack
# KURYR_NEUTRON_DEFAULT_ROUTER=kuryr-router
IMAGE_URLS+=",http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img"
# Increase Octavia amphorae timeout so that the first LB amphora has time to
# build and boot
[[post-config|$OCTAVIA_CONF]]
[controller_worker]
amp_active_retries=9999