285456a672
When Octavia stopped needing the neutron-lbaasv2 devstack plugin we tried it out with success, but we still kept using the plugin so that our devstack plugin could target releases older than Pike. Unfortunately, during this time the proxy became unnecessary and not well maintained and, since our gates were still using it, when we tried Queens with the supported proxy-less Octavia we realized that it fails. This patch addresses it by making the existing neutronclient usage point to the load-balancer endpoint when the proxy is not in place. Change-Id: Iafd74f23bdf336a4d78ba4759f702cf989c8bc30 Closes-Bug: #1763045 Signed-off-by: Antoni Segura Puimedon <antonisp@celebdor.com>
225 lines
7.5 KiB
Plaintext
225 lines
7.5 KiB
Plaintext
[[local|localrc]]

enable_plugin kuryr-kubernetes \
    https://git.openstack.org/openstack/kuryr-kubernetes

# If you do not want stacking to clone new versions of the enabled services,
# like for example when you did local modifications and need to ./unstack.sh
# and ./stack.sh again, uncomment the following
# RECLONE="no"

# Log settings for better readability
LOGFILE=devstack.log
LOG_COLOR=False

# Credentials
ADMIN_PASSWORD=pass
DATABASE_PASSWORD=pass
RABBIT_PASSWORD=pass
SERVICE_PASSWORD=pass
SERVICE_TOKEN=pass
# Enable Keystone v3
IDENTITY_API_VERSION=3

# For the sake of speed and being lightweight, we will be explicit in regards
# to which services we enable
ENABLED_SERVICES=""

# Neutron services
enable_service neutron
enable_service q-agt
enable_service q-dhcp
enable_service q-l3
enable_service q-svc

# OCTAVIA
KURYR_K8S_LBAAS_USE_OCTAVIA=True
# Uncomment it to use L2 communication between loadbalancer and member pods
# KURYR_K8S_OCTAVIA_MEMBER_MODE=L2

if [[ "$KURYR_K8S_LBAAS_USE_OCTAVIA" == "True" ]]; then
    # Octavia LBaaSv2
    LIBS_FROM_GIT+=python-octaviaclient
    enable_plugin octavia https://git.openstack.org/openstack/octavia
    enable_service octavia
    enable_service o-api
    enable_service o-cw
    enable_service o-hm
    enable_service o-hk
    ## Octavia Deps
    ### Image
    ### Barbican
    enable_plugin barbican https://git.openstack.org/openstack/barbican
    ### Nova
    enable_service n-api
    enable_service n-api-meta
    enable_service n-cpu
    enable_service n-cond
    enable_service n-sch
    enable_service placement-api
    enable_service placement-client
    ### Glance
    enable_service g-api
    enable_service g-reg
else
    # LBaaSv2 service and Haproxy agent
    enable_plugin neutron-lbaas \
        git://git.openstack.org/openstack/neutron-lbaas
    enable_service q-lbaasv2
fi

# Keystone
enable_service key

# dependencies
enable_service mysql
enable_service rabbit

# By default use all the services from the kuryr-kubernetes plugin

# Docker
# ======
# If you already have docker configured, running and with its socket writable
# by the stack user, you can omit the following line.
enable_plugin devstack-plugin-container https://git.openstack.org/openstack/devstack-plugin-container

# Etcd
# ====
# The default is for devstack to run etcd for you.
enable_service etcd3

# You can also run the deprecated etcd containerized and select the image and
# version of it by commenting the etcd3 service enablement and uncommenting
#
# enable legacy_etcd
#
# You can also modify the following defaults.
# KURYR_ETCD_IMAGE="quay.io/coreos/etcd"
# KURYR_ETCD_VERSION="v3.0.8"
#
# You can select the listening and advertising client and peering Etcd
# addresses by uncommenting and changing from the following defaults:
# KURYR_ETCD_ADVERTISE_CLIENT_URL=http://my_host_ip:2379
# KURYR_ETCD_ADVERTISE_PEER_URL=http://my_host_ip:2380
# KURYR_ETCD_LISTEN_CLIENT_URL=http://0.0.0.0:2379
# KURYR_ETCD_LISTEN_PEER_URL=http://0.0.0.0:2380
#
# If you already have an etcd cluster configured and running, you can just
# comment out the lines enabling legacy_etcd and etcd3
# then uncomment and set the following line:
# KURYR_ETCD_CLIENT_URL="http://etcd_ip:etcd_client_port"

# OpenShift
# ==========
#
# OpenShift is run from the binaries contained in a binary release tarball
enable_service openshift-master
enable_service openshift-node

# OpenShift node uses systemd as its cgroup driver. Thus we need Docker to
# use the same.
DOCKER_CGROUP_DRIVER="systemd"

# We default to the 3.6 release, but you should be able to replace with other
# releases by redefining the following
# OPENSHIFT_BINARY_URL=https://github.com/openshift/origin/releases/download/v3.6.0/openshift-origin-server-v3.6.0-c4dd4cf-linux-64bit.tar.gz
#
# If you want to test with a different range for the Cluster IPs uncomment and
# set the following ENV var to a different CIDR
# KURYR_K8S_CLUSTER_IP_RANGE="10.0.0.0/24"
#
# If, however, you are reusing an existing deployment, you should uncomment and
# set an ENV var so that the Kubelet devstack runs can find the API server:
# OPENSHIFT_API_URL="http (or https, if OpenShift is SSL/TLS enabled)://openshift_api_ip:openshift_api_port"
#
# Since OpenShift defaults to its API server being 'https' enabled, set path of
# the ssl cert files if you are reusing an environment, otherwise devstack will
# do it for you.
# KURYR_K8S_API_CERT="/etc/origin/master/kuryr.crt"
# KURYR_K8S_API_KEY="/etc/origin/master/kuryr.key"
# KURYR_K8S_API_CACERT="/etc/origin/master/ca.crt"

# Kuryr watcher
# =============
#
# Just like the Kubelet, you'll want to have the watcher enabled. It is the
# part of the codebase that connects to the Kubernetes API server to read the
# resource events and convert them to Neutron actions
enable_service kuryr-kubernetes

# Kuryr Daemon
# ============
#
# Kuryr runs CNI plugin in daemonized way - i.e. kubelet will run kuryr CNI
# driver and the driver will pass requests to Kuryr daemon running on the node,
# instead of processing them on its own. This limits the number of Kubernetes
# API requests (as only Kuryr Daemon will watch for new pod events) and should
# increase scalability in environments that often delete and create pods.
# Since Rocky release this is a default deployment configuration.
enable_service kuryr-daemon

# Containerized Kuryr
# ===================
#
# Kuryr can be installed on Kubernetes as a pair of Deployment
# (kuryr-controller) and DaemonSet (kuryr-cni). If you want DevStack to deploy
# Kuryr services as pods on Kubernetes uncomment next line.
# KURYR_K8S_CONTAINERIZED_DEPLOYMENT=True

# Kuryr POD VIF Driver
# ====================
#
# Set up the VIF Driver to be used. The default one is the neutron-vif, but if
# a nested deployment is desired, the corresponding driver needs to be set,
# e.g.: nested-vlan or nested-macvlan
# KURYR_POD_VIF_DRIVER=neutron-vif

# Kuryr Ports Pools
# =================
#
# To speed up containers boot time the kuryr ports pool driver can be enabled
# by uncommenting the next line, so that neutron port resources are precreated
# and ready to be used by the pods when needed
# KURYR_USE_PORTS_POOLS=True
#
# By default the pool driver is noop, i.e., there is no pool. If pool
# optimizations want to be used you need to set it to 'neutron' for the
# baremetal case, or to 'nested' for the nested case
# KURYR_VIF_POOL_DRIVER=noop
#
# There are extra configuration options for the pools that can be set to decide
# on the minimum number of ports that should be ready to use at each pool, the
# maximum (0 to unset), and the batch size for the repopulation actions, i.e.,
# the number of neutron ports to create in bulk operations. Finally, the update
# frequency between actions over the pool can be set too
# KURYR_VIF_POOL_MIN=5
# KURYR_VIF_POOL_MAX=0
# KURYR_VIF_POOL_BATCH=10
# KURYR_VIF_POOL_UPDATE_FREQ=20

# Kuryr VIF Pool Manager
# ======================
#
# Uncomment the next line to enable the pool manager. Note it requires the
# nested-vlan pod vif driver, as well as the ports pool being enabled and
# configured with the nested driver
# KURYR_VIF_POOL_MANAGER=True

# Increase Octavia amphorae timeout so that the first LB amphora has time to
# build and boot
if [[ "$KURYR_K8S_LBAAS_USE_OCTAVIA" == "True" ]]; then
    IMAGE_URLS+=",http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img"
else
    NEUTRON_LBAAS_SERVICE_PROVIDERV2="LOADBALANCERV2:Haproxy:neutron_lbaas.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default"
fi

[[post-config|$OCTAVIA_CONF]]
[controller_worker]
amp_active_retries=9999

[[post-config|/$Q_PLUGIN_CONF_FILE]]
[securitygroup]
firewall_driver = openvswitch