[[local|localrc]]
enable_plugin kuryr-kubernetes \
https://opendev.org/openstack/kuryr-kubernetes
# If you do not want stacking to clone new versions of the enabled services,
# for example when you have made local modifications and need to ./unstack.sh
# and ./stack.sh again, uncomment the following
# RECLONE="no"
# Log settings for better readability
LOGFILE=devstack.log
LOG_COLOR=False
# Credentials
ADMIN_PASSWORD=pass
DATABASE_PASSWORD=pass
RABBIT_PASSWORD=pass
SERVICE_PASSWORD=pass
SERVICE_TOKEN=pass
# Enable Keystone v3
IDENTITY_API_VERSION=3
# For the sake of speed and to keep the deployment lightweight, we will be
# explicit about which services we enable
ENABLED_SERVICES=""
# Neutron services
enable_plugin neutron https://opendev.org/openstack/neutron
enable_service q-agt
enable_service q-dhcp
enable_service q-l3
enable_service q-api
enable_service q-meta
enable_service q-svc
enable_service neutron-tag-ports-during-bulk-creation
# OCTAVIA
# Uncomment it to use L2 communication between loadbalancer and member pods
# KURYR_K8S_OCTAVIA_MEMBER_MODE=L2
# Octavia LBaaSv2
LIBS_FROM_GIT+=python-octaviaclient
enable_plugin octavia https://opendev.org/openstack/octavia
enable_service octavia
enable_service o-api
enable_service o-cw
enable_service o-hm
enable_service o-hk
## Octavia Deps
### Nova
enable_service n-api
enable_service n-api-meta
enable_service n-cpu
enable_service n-cond
enable_service n-sch
enable_service placement-api
enable_service placement-client
### Glance
enable_service g-api
enable_service g-reg
# Keystone
enable_service key
# dependencies
enable_service mysql
enable_service rabbit
# By default use all the services from the kuryr-kubernetes plugin
# Docker
# ======
# If you already have docker configured, running and with its socket writable
# by the stack user, you can omit the following line.
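# (one common way to make the socket writable, assuming the default 'docker'
# group setup, is: sudo usermod -aG docker stack)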
enable_plugin devstack-plugin-container https://opendev.org/openstack/devstack-plugin-container
# Etcd
# ====
# The default is for devstack to run etcd for you.
enable_service etcd3
# If you already have an etcd cluster configured and running, you can just
# comment out the line enabling etcd3 above, then uncomment and set the
# following line:
# KURYR_ETCD_CLIENT_URL="http://etcd_ip:etcd_client_port"
# OpenShift
# =========
#
# OpenShift is run from the binaries contained in a binary release tarball
enable_service openshift-master
enable_service openshift-node
enable_service openshift-dnsmasq
enable_service openshift-dns
# OpenShift node uses systemd as its cgroup driver. Thus we need Docker to
# use the same.
DOCKER_CGROUP_DRIVER="systemd"
# We default to the 3.6 release, but you should be able to replace with other
# releases by redefining the following
# OPENSHIFT_BINARY_URL=https://github.com/openshift/origin/releases/download/v3.6.0/openshift-origin-server-v3.6.0-c4dd4cf-linux-64bit.tar.gz
#
# If you want to test with a different range for the Cluster IPs uncomment and
# set the following ENV var to a different CIDR
# KURYR_K8S_CLUSTER_IP_RANGE="10.0.0.0/24"
#
# If, however, you are reusing an existing deployment, you should uncomment and
# set an ENV var so that the kubelet run by devstack can find the API server:
# OPENSHIFT_API_URL="http (or https, if OpenShift is SSL/TLS enabled)://openshift_api_ip:openshift_api_port"
#
# Since OpenShift defaults to having its API server 'https' enabled, set the
# paths of the SSL cert files if you are reusing an environment; otherwise
# devstack will do it for you.
# KURYR_K8S_API_CERT="/etc/origin/master/kuryr.crt"
# KURYR_K8S_API_KEY="/etc/origin/master/kuryr.key"
# KURYR_K8S_API_CACERT="/etc/origin/master/ca.crt"
# Kuryr watcher
# =============
#
# Just like the Kubelet, you'll want to have the watcher enabled. It is the
# part of the codebase that connects to the Kubernetes API server to read the
# resource events and convert them to Neutron actions
enable_service kuryr-kubernetes
# Kuryr Daemon
# ============
#
# Kuryr runs the CNI plugin in a daemonized way - i.e. kubelet will run the
# kuryr CNI driver and the driver will pass requests to the Kuryr daemon
# running on the node, instead of processing them on its own. This limits the
# number of Kubernetes API requests (as only the Kuryr daemon will watch for
# new pod events) and should increase scalability in environments that often
# delete and create pods. Since the Rocky release this is the default
# deployment configuration.
enable_service kuryr-daemon
# Containerized Kuryr
# ===================
#
# Kuryr can be installed on Kubernetes as a Deployment (kuryr-controller) and a
# DaemonSet (kuryr-cni). If you want DevStack to deploy Kuryr services as pods
# on Kubernetes, uncomment the next line.
# KURYR_K8S_CONTAINERIZED_DEPLOYMENT=True
# Kuryr POD VIF Driver
# ====================
#
# Set up the VIF Driver to be used. The default one is neutron-vif, but if a
# nested deployment is desired, the corresponding driver needs to be set,
# e.g.: nested-vlan or nested-macvlan
# KURYR_POD_VIF_DRIVER=neutron-vif
# Kuryr Enabled Handlers
# ======================
#
# By default, some Kuryr Handlers are enabled for the DevStack installation.
# This can be further tweaked in order to enable additional ones, such as the
# Network Policy handlers. If you want to add handlers, set them here:
# KURYR_ENABLED_HANDLERS=vif,endpoints,service,kuryrloadbalancer,kuryrport
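#
# As an illustration, enabling Network Policy support would mean appending the
# policy-related handlers and drivers to the defaults. The exact names below
# are assumptions for this sketch; check the kuryr-kubernetes documentation
# for your release:
# KURYR_ENABLED_HANDLERS=vif,endpoints,service,kuryrloadbalancer,kuryrport,namespace,pod_label,policy,kuryrnetwork,kuryrnetworkpolicy
# KURYR_SG_DRIVER=policy
# KURYR_SUBNET_DRIVER=namespace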
# Kuryr Ports Pools
# =================
#
# To speed up container boot time the kuryr ports pool driver can be enabled
# by uncommenting the next line, so that neutron port resources are precreated
# and ready to be used by the pods when needed
# KURYR_USE_PORTS_POOLS=True
#
# By default the pool driver is noop, i.e., there is no pool. If you want to
# use the pool optimizations you need to set it to 'neutron' for the baremetal
# case, or to 'nested' for the nested case
# KURYR_VIF_POOL_DRIVER=noop
#
# There are extra configuration options for the pools that can be set to decide
# on the minimum number of ports that should be ready to use in each pool, the
# maximum (0 meaning no maximum), and the batch size for the repopulation
# actions, i.e., the number of neutron ports to create in bulk operations.
# Finally, the update frequency between actions over the pool can be set too
# KURYR_VIF_POOL_MIN=2
# KURYR_VIF_POOL_MAX=0
# KURYR_VIF_POOL_BATCH=5
# KURYR_VIF_POOL_UPDATE_FREQ=30
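#
# As an illustration (values are examples only), a baremetal deployment
# pre-creating ports could combine the options above like this:
# KURYR_USE_PORTS_POOLS=True
# KURYR_VIF_POOL_DRIVER=neutron
# KURYR_VIF_POOL_MIN=5
# KURYR_VIF_POOL_BATCH=10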
# Kuryr VIF Pool Manager
# ======================
#
# Uncomment the next line to enable the pool manager. Note it requires the
# nested-vlan pod vif driver, as well as the ports pool being enabled and
# configured with the nested driver
# KURYR_VIF_POOL_MANAGER=True
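#
# Putting the previous options together, a nested (pod-in-VM) deployment using
# the ports pools and the pool manager would look like this sketch:
# KURYR_POD_VIF_DRIVER=nested-vlan
# KURYR_USE_PORTS_POOLS=True
# KURYR_VIF_POOL_DRIVER=nested
# KURYR_VIF_POOL_MANAGER=True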
IMAGE_URLS+=",http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img"

# Increase Octavia amphorae timeout so that the first LB amphora has time to
# build and boot
[[post-config|$OCTAVIA_CONF]]
[controller_worker]
amp_active_retries=9999
[[post-config|/$Q_PLUGIN_CONF_FILE]]
[securitygroup]
firewall_driver = openvswitch