[[local|localrc]]

enable_plugin kuryr-kubernetes \
    https://opendev.org/openstack/kuryr-kubernetes

# If you do not want stacking to clone new versions of the enabled services,
# for example when you have made local modifications and need to ./unstack.sh
# and ./stack.sh again, uncomment the following
# RECLONE="no"

# Log settings for better readability
LOGFILE=devstack.log
LOG_COLOR=False

# Credentials
ADMIN_PASSWORD=pass
DATABASE_PASSWORD=pass
RABBIT_PASSWORD=pass
SERVICE_PASSWORD=pass
SERVICE_TOKEN=pass
# Enable Keystone v3
IDENTITY_API_VERSION=3

# For the sake of speed and a lightweight deployment, we are explicit about
# which services we enable
ENABLED_SERVICES=""

# OVN components
Q_AGENT=ovn
Q_ML2_PLUGIN_MECHANISM_DRIVERS=ovn,logger
Q_ML2_PLUGIN_TYPE_DRIVERS=local,flat,vlan,geneve
Q_ML2_TENANT_NETWORK_TYPE=geneve
enable_service ovn-northd
enable_service ovn-controller
enable_service q-ovn-metadata-agent

# Neutron services
enable_plugin neutron https://opendev.org/openstack/neutron
enable_service q-svc

# VAR RUN PATH
# ============
# VAR_RUN_PATH=/var/run
VAR_RUN_PATH=/usr/local/var/run

# OCTAVIA
# Uncomment it to use L2 communication between the loadbalancer and member
# pods
# KURYR_K8S_OCTAVIA_MEMBER_MODE=L2

# Kuryr K8S-Endpoint driver Octavia provider
# ==========================================
# Kuryr uses LBaaS to provide the Kubernetes Services functionality.
# In case Octavia is used for LBaaS, you can choose Octavia's load balancer
# provider.
# KURYR_EP_DRIVER_OCTAVIA_PROVIDER=default
# Uncomment the next lines to enable the ovn provider. Note that only one
# member mode is supported with ovn-octavia; as the member subnet must be
# provided when adding members, the member mode must be set to L2
# KURYR_EP_DRIVER_OCTAVIA_PROVIDER=ovn
# KURYR_K8S_OCTAVIA_MEMBER_MODE=L2
# KURYR_K8S_OCTAVIA_SG_MODE=create
# KURYR_ENFORCE_SG_RULES=False
# KURYR_LB_ALGORITHM=SOURCE_IP_PORT

# Octavia LBaaSv2
LIBS_FROM_GIT+=python-octaviaclient
enable_plugin octavia https://opendev.org/openstack/octavia
enable_service octavia
enable_service o-api
enable_service o-cw
enable_service o-hm
enable_service o-hk
## Octavia Deps
### Nova
enable_service n-api
enable_service n-api-meta
enable_service n-cpu
enable_service n-cond
enable_service n-sch
enable_service placement-api
enable_service placement-client
### Glance
enable_service g-api
enable_service g-reg

# OVN octavia provider plugin
enable_plugin ovn-octavia-provider https://opendev.org/openstack/ovn-octavia-provider

# Keystone
enable_service key

# dependencies
enable_service mysql
enable_service rabbit

# By default use all the services from the kuryr-kubernetes plugin

# Docker
# ======
# If you already have docker configured, running and with its socket writable
# by the stack user, you can omit the following line.
enable_plugin devstack-plugin-container https://opendev.org/openstack/devstack-plugin-container

# Etcd
# ====
# The default is for devstack to run etcd for you.
enable_service etcd3

# If you already have an etcd cluster configured and running, you can just
# comment out the line enabling etcd3, then uncomment and set the following
# line:
# KURYR_ETCD_CLIENT_URL="http://etcd_ip:etcd_client_port"
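#
# As an illustration only: with a hypothetical pre-existing etcd member on
# host 192.0.2.10 and etcd's default client port 2379, the line above would
# become:
# KURYR_ETCD_CLIENT_URL="http://192.0.2.10:2379"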
# Kubernetes
# ==========
#
# Kubernetes is run from the hyperkube docker image.
# If you already have a Kubernetes deployment, you can use it instead and omit
# enabling the Kubernetes services (except the Kubelet, which must be run by
# devstack so that it uses our development CNI driver).
#
# The default is, again, for devstack to run the Kubernetes services:
enable_service kubernetes-api
enable_service kubernetes-controller-manager
enable_service kubernetes-scheduler

# We use hyperkube to run the services. You can select the hyperkube image
# and/or version by uncommenting the following ENV vars and setting them to
# values different from these defaults:
# KURYR_HYPERKUBE_IMAGE="gcr.io/google_containers/hyperkube-amd64"
# KURYR_HYPERKUBE_VERSION="v1.6.2"
#
# If you already have port 8080 bound to another service, you will need to
# have the Kubernetes API server bind to another port. In order to do that,
# uncomment and set a different port number in:
# KURYR_K8S_API_PORT="8080"
#
# If you want to test with a different range for the Cluster IPs, uncomment
# and set the following ENV var to a different CIDR
# KURYR_K8S_CLUSTER_IP_RANGE="10.0.0.0/24"
#
# If, however, you are reusing an existing deployment, you should uncomment
# and set an ENV var so that the Kubelet devstack runs can find the API
# server:
# KURYR_K8S_API_URL="http (or https, if K8S is SSL/TLS enabled)://k8s_api_ip:k8s_api_port"
#
# If the Kubernetes API server is 'https' enabled, set the paths of the SSL
# cert files:
# KURYR_K8S_API_CERT="/etc/kubernetes/certs/kubecfg.crt"
# KURYR_K8S_API_KEY="/etc/kubernetes/certs/kubecfg.key"
# KURYR_K8S_API_CACERT="/etc/kubernetes/certs/ca.crt"

# Kubelet
# =======
#
# Kubelet should almost invariably be run by devstack
enable_service kubelet

# You can specify a different location for the hyperkube binary that will be
# extracted from the hyperkube container into the host filesystem:
# KURYR_HYPERKUBE_BINARY=/usr/local/bin/hyperkube
#
# NOTE: KURYR_HYPERKUBE_IMAGE and KURYR_HYPERKUBE_VERSION also affect which
# binary is selected for the Kubelet.

# Kuryr watcher
# =============
#
# Just like the Kubelet, you'll want to have the watcher enabled. It is the
# part of the codebase that connects to the Kubernetes API server to read the
# resource events and convert them into Neutron actions
enable_service kuryr-kubernetes

# Kuryr Daemon
# ============
#
# Kuryr can run the CNI plugin in a daemonized way - i.e. the kubelet will run
# the kuryr CNI driver and the driver will pass requests to the Kuryr daemon
# running on the node, instead of processing them on its own. This limits the
# number of Kubernetes API requests (as only the Kuryr daemon will watch for
# new pod events) and should increase scalability in environments that often
# delete and create pods. Since the Rocky release this is the default
# deployment configuration.
enable_service kuryr-daemon

# Containerized Kuryr
# ===================
#
# Kuryr can be installed on Kubernetes as a pair of a Deployment
# (kuryr-controller) and a DaemonSet (kuryr-cni). If you want DevStack to
# deploy the Kuryr services as pods on Kubernetes, keep the next line set to
# True.
KURYR_K8S_CONTAINERIZED_DEPLOYMENT=True

# Kuryr POD VIF Driver
# ====================
#
# Set up the VIF Driver to be used. The default one is neutron-vif, but if a
# nested deployment is desired, the corresponding driver needs to be set,
# e.g. nested-vlan or nested-macvlan, as sketched below:
# KURYR_POD_VIF_DRIVER=neutron-vif
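#
# For instance, a nested deployment (this DevStack running inside an
# OpenStack VM) would typically use the VLAN-based nested driver. This is
# only an illustration of the knob, not a recommended default:
# KURYR_POD_VIF_DRIVER=nested-vlan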
# Kuryr Enabled Handlers
# ======================
#
# By default, some Kuryr handlers are set for the DevStack installation. This
# can be further tweaked in order to enable additional ones such as Network
# Policy. If you want to add additional handlers, they can be set here:
# KURYR_ENABLED_HANDLERS=vif,endpoints,service,kuryrloadbalancer,kuryrport

# Kuryr Ports Pools
# =================
#
# To speed up container boot time the kuryr ports pool driver can be enabled
# by uncommenting the next line, so that neutron port resources are precreated
# and ready to be used by the pods when needed
# KURYR_USE_PORTS_POOLS=True
#
# By default the pool driver is noop, i.e., there is no pool. If you want to
# use the pool optimizations, you need to set it to 'neutron' for the
# baremetal case, or to 'nested' for the nested case
# KURYR_VIF_POOL_DRIVER=noop
#
# There are extra configuration options for the pools that can be set to
# decide on the minimum number of ports that should be ready to use at each
# pool, the maximum (0 to unset), and the batch size for the repopulation
# actions, i.e., the number of neutron ports to create in bulk operations.
# Finally, the update frequency between actions over the pool can be set too
# KURYR_VIF_POOL_MIN=2
# KURYR_VIF_POOL_MAX=0
# KURYR_VIF_POOL_BATCH=5
# KURYR_VIF_POOL_UPDATE_FREQ=30

# Kuryr VIF Pool Manager
# ======================
#
# Uncomment the next line to enable the pool manager. Note that it requires
# the nested-vlan pod VIF driver, as well as the ports pool being enabled and
# configured with the nested driver
# KURYR_VIF_POOL_MANAGER=True

IMAGE_URLS+=",http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img"

# Increase the Octavia amphora retries so that the first LB amphora has time
# to build and boot
[[post-config|$OCTAVIA_CONF]]
[controller_worker]
amp_active_retries=9999

[api_settings]
enabled_provider_drivers = amphora:'Octavia Amphora driver',ovn:'Octavia OVN driver'
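# Once the stack is up, you can sanity-check the providers configured above
# with the Octavia CLI (a usage sketch; assumes the devstack credentials have
# been sourced):
# openstack loadbalancer provider list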