openstack-helm/roles/deploy-env/defaults/main.yaml
Vladimir Kozhukalov 27b59c6df6 [deploy-env] Setup vxlan overlay before k8s deployment
Sometimes multinode nodesets consist of nodes with private
IPs from different L2 segments, so the nodes cannot reach
each other via the IPs assigned to their gateway interfaces.

This change sets up a VXLAN overlay using the nodes' public
inventory IPs (the IPs Zuul uses to access the nodes), so the
nodes behave as if they were in the same L2 segment.
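
A minimal sketch of the underlying mechanism, using iproute2 via
Ansible (the task names and the FDB loop are illustrative, not the
role's actual tasks):

- name: Create a VXLAN interface on top of the public (underlay) interface
  ansible.builtin.command: >
    ip link add {{ overlay_network_vxlan_iface }} type vxlan
    id {{ overlay_network_vxlan_id }}
    dstport {{ overlay_network_vxlan_port }}
    dev {{ overlay_network_underlay_dev }}

- name: Point the VXLAN FDB at every other node's public IP (unicast head-end replication)
  ansible.builtin.command: >
    bridge fdb append 00:00:00:00:00:00
    dev {{ overlay_network_vxlan_iface }}
    dst {{ hostvars[item]['ansible_default_ipv4']['address'] }}
  loop: "{{ groups['all'] | difference([inventory_hostname]) }}"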

We can use this overlay for internal K8s communication. For
this we have to disable the overlay setup in the CNI
implementation (Calico, Cilium, etc.). The same overlay
interface is also used for internal OpenStack communication.

Also:
- Upgrade the kubeadm config API version to kubeadm.k8s.io/v1beta4
- Ignore single-CPU kubeadm preflight errors; by default kubeadm
  requires at least 2 CPUs per node (see the sketch after this list)
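
In kubeadm config terms, the two items above combine roughly as
follows (a sketch assuming an InitConfiguration is templated;
v1beta4 keeps ignorePreflightErrors under nodeRegistration):

apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
nodeRegistration:
  ignorePreflightErrors:
    - NumCPU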

Change-Id: I34663de277951135c44607f093f8f1f93eafab86
Signed-off-by: Vladimir Kozhukalov <kozhukalov@gmail.com>
2025-07-09 09:58:03 -05:00


# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
kube_version_repo: "v1.32"
# The list of available k8s package versions can be found here:
# https://pkgs.k8s.io/core:/stable:/{{ kube_version_repo }}/deb/Packages
kube_version: "1.32.5-1.1"
helm_version: "v3.18.1"
crictl_version: "v1.33.0"
calico_setup: true
calico_version: "v3.30.1"
calico_manifest_url: "https://raw.githubusercontent.com/projectcalico/calico/{{ calico_version }}/manifests/calico.yaml"
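# NOTE: For the node-level VXLAN overlay below to carry pod traffic, the CNI's
# own encapsulation has to be turned off. For Calico this is typically done by
# setting CALICO_IPV4POOL_IPIP and CALICO_IPV4POOL_VXLAN to "Never" in the
# calico-node container env (an assumption here; the actual patching, if any,
# lives in the role's tasks rather than in these defaults).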
cilium_setup: false
cilium_version: "1.17.4"
flannel_setup: false
flannel_version: "v0.26.7"
ingress_setup: false
ingress_nginx_version: "4.12.2"
ingress_openstack_setup: true
ingress_ceph_setup: true
ingress_osh_infra_setup: false
kubectl:
  user: zuul
  group: zuul
osh_plugin_repo: "https://opendev.org/openstack/openstack-helm-plugin.git"
kubeadm:
  pod_network_cidr: "10.244.0.0/16"
  service_cidr: "10.96.0.0/16"
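# NOTE: These two CIDRs presumably end up in the kubeadm ClusterConfiguration
# as the standard networking fields, e.g.:
#   networking:
#     podSubnet: "{{ kubeadm.pod_network_cidr }}"
#     serviceSubnet: "{{ kubeadm.service_cidr }}"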
docker:
  root_path: /var/lib/docker
docker_users:
  - zuul
containerd:
  root_path: /var/lib/containerd
loopback_setup: false
loopback_device: /dev/loop100
loopback_image: /var/lib/openstack-helm/ceph-loop.img
loopback_image_size: 12G
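# NOTE: These loopback defaults translate to roughly the following standard
# tooling (a sketch, not the role's exact tasks):
#   truncate -s 12G /var/lib/openstack-helm/ceph-loop.img
#   losetup /dev/loop100 /var/lib/openstack-helm/ceph-loop.img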
coredns_resolver_setup: false
metallb_setup: true
metallb_version: "0.14.9"
metallb_pool_cidr: "172.24.128.0/24"
metallb_openstack_endpoint_cidr: "172.24.128.100/24"
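# NOTE: With metallb_setup enabled, metallb_pool_cidr would feed a MetalLB
# IPAddressPool along these lines (the resource name here is an assumption;
# the apiVersion/kind are MetalLB's standard API):
#   apiVersion: metallb.io/v1beta1
#   kind: IPAddressPool
#   metadata:
#     name: public
#     namespace: metallb-system
#   spec:
#     addresses:
#       - 172.24.128.0/24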
client_cluster_ssh_setup: true
client_ssh_user: zuul
cluster_ssh_user: zuul
openstack_provider_gateway_setup: false
openstack_provider_network_cidr: "172.24.4.0/24"
openstack_provider_gateway_cidr: "172.24.4.1/24"
tunnel_network_cidr: "172.24.5.0/24"
tunnel_client_cidr: "172.24.5.2/24"
tunnel_cluster_cidr: "172.24.5.1/24"
dnsmasq_image: "quay.io/airshipit/neutron:2024.2-ubuntu_jammy"
nginx_image: "quay.io/airshipit/nginx:alpine3.18"
overlay_network_setup: true
overlay_network_prefix: "10.248.0."
overlay_network_vxlan_iface: vxlan42
overlay_network_vxlan_id: 42
# NOTE: A non-default port is used here to avoid conflicts with the VXLAN
# overlay managed by OpenStack, which uses 4789 by default. Some alternative
# implementations have used 8472, so let's use that.
overlay_network_vxlan_port: 8472
overlay_network_bridge_name: brvxlan
overlay_network_bridge_ip: "{{ overlay_network_prefix }}{{ (groups['all'] | sort).index(inventory_hostname) + 1 }}"
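# NOTE: The expression above gives every node a deterministic overlay address:
# with a sorted inventory [nodeA, nodeB, nodeC] (names illustrative), nodeA
# gets 10.248.0.1, nodeB 10.248.0.2, and so on.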
overlay_network_underlay_dev: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['interface'] }}"
...