#!/bin/bash
#
# lib/kuryr
# Utilities for kuryr-kubernetes devstack
# bind_for_kubelet
#   Description: Creates an OVS internal port so that baremetal kubelet will be
#                able to make both liveness and readiness http/tcp probes.
#   Params:
#      project - Id or name of the project used for kuryr devstack
#      port    - Port to open for K8s API, relevant only for OpenStack infra
#
# Dependencies:
# (none)

KURYR_CONF_NEUTRON=$(trueorfalse True KURYR_CONFIGURE_NEUTRON_DEFAULTS)
KURYR_IPV6=$(trueorfalse False KURYR_IPV6)
KURYR_DUAL_STACK=$(trueorfalse False KURYR_DUAL_STACK)
KURYR_USE_LC=$(trueorfalse False KURYR_CONTAINERS_USE_LOWER_CONSTRAINTS)

function container_runtime {
    # Ignore errors from killing/removing a container that isn't running, so
    # that unstack isn't terminated.
    # TODO: Support for CRI-O if it's required.
    local regex_cmds_ignore="(kill|rm)\s+"

    if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
        sudo podman "$@" || die $LINENO "Error when running podman command"
    else
        if [[ $@ =~ $regex_cmds_ignore ]]; then
            docker "$@"
        else
            docker "$@" || die $LINENO "Error when running docker command"
        fi
    fi
}

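# Usage sketch (illustrative invocations): kill/rm failures are tolerated so
# repeated unstack runs don't abort, while any other failing subcommand still
# aborts via die().
#
#   container_runtime rm -f kuryr-controller    # missing container: ignored
#   container_runtime build -t kuryr/controller -f controller.Dockerfile .
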
function ovs_bind_for_kubelet {
    local port_id
    local port_mac
    local fixed_ips
    local port_ips
    local port_subnets
    local prefix
    local project_id
    local port_number
    local security_group
    local ifname
    local service_subnet_cidr
    local pod_subnet_gw
    local cidrs
    local _sp_id=${KURYR_NEUTRON_DEFAULT_SUBNETPOOL_ID}

    project_id="$1"
    port_number="$2"
    security_group=$(openstack security group list \
        --project "$project_id" -c ID -c Name -f value | \
        awk '{if ($2=="default") print $1}')
    port_id=$(openstack port create \
        --device-owner compute:kuryr \
        --project "$project_id" \
        --security-group "$security_group" \
        --security-group service_pod_access \
        --host "${HOSTNAME}" \
        --network "${KURYR_NEUTRON_DEFAULT_POD_NET}" \
        -f value -c id \
        kubelet-"${HOSTNAME}")

    ifname="kubelet${port_id}"
    ifname="${ifname:0:14}"
    port_mac=$(openstack port show "$port_id" -c mac_address -f value)
    fixed_ips=$(openstack port show "$port_id" -f value -c fixed_ips)
    port_ips=($(python3 -c "print(' '.join([x['ip_address'] for x in ${fixed_ips}]))"))
    port_subnets=($(python3 -c "print(' '.join([x['subnet_id'] for x in ${fixed_ips}]))"))

    sudo ovs-vsctl -- --may-exist add-port $OVS_BRIDGE "$ifname" \
        -- set Interface "$ifname" type=internal \
        -- set Interface "$ifname" external-ids:iface-status=active \
        -- set Interface "$ifname" external-ids:attached-mac="$port_mac" \
        -- set Interface "$ifname" external-ids:iface-id="$port_id"

    sudo ip link set dev "$ifname" address "$port_mac"
    sudo ip link set dev "$ifname" up
    for i in "${!port_ips[@]}"; do
        prefix=$(openstack subnet show "${port_subnets[$i]}" \
            -c cidr -f value | \
            cut -f2 -d/)
        sudo ip addr add "${port_ips[$i]}/${prefix}" dev "$ifname"
    done

    # TODO(dulek): This hack is for compatibility with the multinode job, we
    #              might want to do it better one day and actually support
    #              dual stack and NP here.
    if [[ -z ${KURYR_SERVICE_SUBNETS_IDS} ]]; then
        KURYR_SERVICE_SUBNETS_IDS=(${KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET}-IPv4)
        KURYR_POD_SUBNETS_IDS=(${KURYR_NEUTRON_DEFAULT_POD_SUBNET}-IPv4)
    fi

    if [[ -z ${KURYR_SUBNETPOOLS_IDS} ]]; then
        # NOTE(gryf): In case the KURYR_SUBNETPOOLS_IDS variable is not
        # populated, which probably means that the kuryr-kubernetes service is
        # not enabled, but the kuryr-daemon service is (which is the case for
        # multi node setups, where worker nodes should have it enabled), we
        # need to have it filled.
        export KURYR_SUBNETPOOLS_IDS=()
        export KURYR_ETHERTYPES=()
        if [[ "$KURYR_IPV6" == "False" ]]; then
            export KURYR_ETHERTYPE=IPv4
            KURYR_ETHERTYPES+=("IPv4")
            KURYR_SUBNETPOOLS_IDS+=(${_sp_id:-${SUBNETPOOL_V4_ID}})
        else
            KURYR_ETHERTYPES+=("IPv6")
            KURYR_SUBNETPOOLS_IDS+=($(openstack \
                --os-cloud devstack-admin \
                --os-region "${REGION_NAME}" \
                subnet pool show ${SUBNETPOOL_KURYR_NAME_V6} -c id -f value))
        fi
    fi

    for i in "${!KURYR_SERVICE_SUBNETS_IDS[@]}"; do
        pod_subnet_gw=$(openstack subnet show "${KURYR_POD_SUBNETS_IDS[$i]}" \
            -c gateway_ip -f value)
        if is_service_enabled kuryr-kubernetes && [[ "$KURYR_SUBNET_DRIVER" == "namespace" ]]; then
            cidrs=$(openstack subnet pool show "${KURYR_SUBNETPOOLS_IDS[$i]}" -c prefixes -f value)
            subnetpool_cidr=$(python3 -c "print(${cidrs}[0])")
            sudo ip route add "$subnetpool_cidr" via "$pod_subnet_gw" dev "$ifname"
        else
            service_subnet_cidr=$(openstack --os-cloud devstack-admin \
                --os-region "$REGION_NAME" \
                subnet show "${KURYR_SERVICE_SUBNETS_IDS[$i]}" \
                -c cidr -f value)
            sudo ip route add "$service_subnet_cidr" via "$pod_subnet_gw" dev "$ifname"
        fi
    done

    if [ -n "$port_number" ]; then
        # If the openstack-INPUT chain doesn't exist we create the rule in
        # INPUT instead (for local development envs, since openstack-INPUT is
        # usually only present in the gates).
        if [[ "$KURYR_IPV6" == "False" || "$KURYR_DUAL_STACK" == "True" ]]; then
            sudo iptables -I openstack-INPUT 1 \
                -p tcp -s 0.0.0.0/0 -d 0.0.0.0/0 --dport $port_number -j ACCEPT || \
            sudo iptables -I INPUT 1 \
                -p tcp -m conntrack --ctstate NEW \
                -m tcp --dport "$port_number" \
                -m comment --comment "kuryr-devstack: Access to OpenShift API" -j ACCEPT
        fi
        if [[ "$KURYR_IPV6" == "True" || "$KURYR_DUAL_STACK" == "True" ]]; then
            sudo ip6tables -I openstack-INPUT 1 \
                -p tcp -s ::/0 -d ::/0 --dport $port_number -j ACCEPT || \
            sudo ip6tables -I INPUT 1 \
                -p tcp -m conntrack --ctstate NEW \
                -m tcp --dport "$port_number" \
                -m comment --comment "kuryr-devstack: Access to OpenShift API" -j ACCEPT
        fi
    fi
}

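# Typical call (sketch; the port is an example, real values come from the
# local.conf/job definition):
#
#   project_id=$(get_or_create_project "$KURYR_NEUTRON_DEFAULT_PROJECT" default)
#   ovs_bind_for_kubelet "$project_id" 6443
#
# The second argument is optional; when given, an ACCEPT rule for that TCP
# port is inserted so the K8s API stays reachable over the new interface.
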
# _allocation_range
# Description: Writes out tab separated usable ip range for a CIDR
# Params:
#   cidr - The cidr to get the range for
#   gateway_position - Whether to reserve at 'beginning' or at 'end'
function _allocation_range {
    python3 - <<EOF "$@"
import sys

from netaddr import IPNetwork


n = IPNetwork(str(sys.argv[1]))
gateway_position = sys.argv[2]

if gateway_position == 'beginning':
    beg_offset = 2
    end_offset = 2
elif gateway_position == 'end':
    beg_offset = 1
    end_offset = 3
else:
    raise ValueError('Disallowed gateway position %s' % gateway_position)

print("%s\\t%s" % (n[beg_offset], n[-end_offset]))
EOF
}

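# Example (hypothetical CIDR; output shown as a comment):
#
#   _allocation_range 10.0.0.0/24 end
#   # -> 10.0.0.1<TAB>10.0.0.253
#
# With 'end' the top of the range (10.0.0.254) is left free for the router
# gateway; with 'beginning' the first address (10.0.0.1) is reserved instead.
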
# create_k8s_icmp_sg_rules
# Description: Creates icmp sg rules for Kuryr-Kubernetes pods
# Params:
#   sg_id - Kuryr's security group id
#   direction - egress or ingress direction
function create_k8s_icmp_sg_rules {
    local sg_id=$1
    local direction="$2"
    local project_id

    project_id=$(get_or_create_project \
        "$KURYR_NEUTRON_DEFAULT_PROJECT" default)
    for ethertype in "${KURYR_ETHERTYPES[@]}"; do
        icmp_sg_rules=$(openstack --os-cloud devstack-admin \
            --os-region "$REGION_NAME" \
            security group rule create \
            --project "$project_id" \
            --protocol icmp \
            --ethertype "$ethertype" \
            --"$direction" "$sg_id")
    done
    die_if_not_set $LINENO icmp_sg_rules \
        "Failure creating icmp sg ${direction} rule for ${sg_id}"
}

# create_k8s_subnet
# Description: Creates a network and subnet for Kuryr-Kubernetes usage
# Params:
#   project_id - Kuryr's project uuid
#   net_id - ID of the network to create the subnet in
#   subnet_name - Name of the subnet to create
#   subnetpool_id - uuid of the subnet pool to use
#   router - name of the router to plug the subnet to
#   split_allocation - Whether to allocate on the whole subnet or only its
#                      latter half
#   ip_version - IPv4 or IPv6
function create_k8s_subnet {
    local project_id=$1
    local net_id="$2"
    local subnet_name="$3"
    local subnetpool_id="$4"
    local router="$5"
    local subnet_params="--project $project_id "
    local subnet_cidr
    local split_allocation

    split_allocation="${6:-False}"
    local ip_version="${7:-IPv4}"

    if [ "$ip_version" == "IPv4" ]; then
        subnet_params+="--ip-version 4 "
    else
        # NOTE(dulek): K8s API won't accept subnets bigger than 20 bits.
        #              And 20 will totally be fine for us.
        subnet_params+="--ip-version 6 --prefix-length 108 "
    fi
    subnet_params+="--no-dhcp --gateway none "
    subnet_params+="--subnet-pool $subnetpool_id "
    subnet_params+="--network $net_id $subnet_name"

    local subnet_id
    subnet_id=$(openstack --os-cloud devstack-admin \
        --os-region "$REGION_NAME" \
        subnet create $subnet_params \
        --project "$project_id" \
        -c id -f value)
    die_if_not_set $LINENO subnet_id \
        "Failure creating K8s ${subnet_name} IPv4 subnet for ${project_id}"

    subnet_cidr=$(openstack --os-cloud devstack-admin \
        --os-region "$REGION_NAME" \
        subnet show "$subnet_id" \
        -c cidr -f value)
    die_if_not_set $LINENO subnet_cidr \
        "Failure getting K8s ${subnet_name} IPv4 subnet for $project_id"

    # Since K8s has its own IPAM for services and allocates the first IP from
    # service subnet CIDR to Kubernetes apiserver, we'll always put the router
    # interface at the end of the range.
    local router_ip
    local allocation_start
    local allocation_end
    local allocation_subnet
    router_ip=$(_cidr_range "$subnet_cidr" | cut -f3)
    if [[ "$split_allocation" == "True" ]]; then
        allocation_subnet=$(split_subnet "$subnet_cidr" | cut -f2)
        allocation_start=$(_allocation_range "$allocation_subnet" end | cut -f1)
        allocation_end=$(_allocation_range "$allocation_subnet" end | cut -f2)
    else
        allocation_start=$(_allocation_range "$subnet_cidr" end | cut -f1)
        allocation_end=$(_allocation_range "$subnet_cidr" end | cut -f2)
    fi
    die_if_not_set $LINENO router_ip \
        "Failed to determine K8s ${subnet_name} subnet router IP"
    openstack --os-cloud devstack-admin \
        --os-region "$REGION_NAME" subnet set \
        --gateway "$router_ip" --no-allocation-pool "$subnet_id" \
        || die $LINENO "Failed to update K8s ${subnet_name} subnet"
    # Set a new allocation pool for the subnet so ports can be created again
    openstack --os-cloud devstack-admin \
        --os-region "$REGION_NAME" subnet set \
        --allocation-pool "start=${allocation_start},end=${allocation_end}" \
        "$subnet_id" || die $LINENO "Failed to update K8s ${subnet_name} subnet"
    openstack --os-cloud devstack-admin \
        --os-region "$REGION_NAME" \
        router add subnet "$router" "$subnet_id" \
        || die $LINENO \
        "Failed to enable routing for K8s ${subnet_name} subnet"
    echo "$subnet_id"
}

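# Example invocation (sketch; ids are placeholders, not real values):
#
#   subnet_id=$(create_k8s_subnet "$project_id" "$pod_net_id" \
#       k8s-pod-subnet-IPv4 "$subnetpool_id" "$router" "False" IPv4)
#
# The function echoes the new subnet id, moves the gateway to the top of the
# CIDR and rebuilds the allocation pool so the first addresses stay free for
# the Kubernetes service IPAM.
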
# build_kuryr_container_image
# Description: Generates a Kuryr controller or Kuryr CNI docker image in
#              the local docker registry as kuryr/controller:latest for
#              controller or kuryr/cni:latest for CNI.
function build_kuryr_container_image {
    local target=$1  # controller or cni
    local build_args
    local build_dir
    local tag="kuryr/${target}"

    build_dir="${DEST}/kuryr-kubernetes"
    pushd "$build_dir"

    if [[ "$KURYR_USE_LC" == "True" ]]; then
        build_args="--build-arg UPPER_CONSTRAINTS_FILE="`
            `"/opt/kuryr-kubernetes/lower-constraints.txt"
    fi

    if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
        # NOTE(gryf): for crio/podman we need to have the image tagged with
        # docker.io (or whatever registry), or we would need to set one up,
        # otherwise the default tag would be 'localhost/kuryr/*' instead of
        # 'kuryr/*' as in the docker case (which by default becomes
        # 'docker.io/kuryr/*' if no registry has been specified). Creating a
        # registry for just two images is a bit of overkill, hence the trick
        # with the docker.io tag, and the image pull policy set to "Never" in
        # the deployment definition, so that we make sure the images we built
        # are the ones being used.
        tag="docker.io/${tag}"
    fi
    container_runtime build -t "${tag}" -f "${target}.Dockerfile" \
        ${build_args} .
    popd
}

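# Usage sketch (both images are built the same way, only the target differs):
#
#   build_kuryr_container_image controller    # tags kuryr/controller
#   build_kuryr_container_image cni           # tags kuryr/cni
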
function indent {
    sed 's/^/    /';
}

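# Example: indent nests kuryr.conf under the "kuryr.conf: |" key of the
# ConfigMap generated below (assuming the four-space sed prefix above):
#
#   echo "[DEFAULT]" | indent
#   # -> "    [DEFAULT]"
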
function generate_kuryr_configmap {
    local output_dir
    local conf_path
    output_dir=$1
    conf_path=${2:-""}

    mkdir -p "$output_dir"
    rm -f "${output_dir}/config_map.yml"

    cat >> "${output_dir}/config_map.yml" << EOF
apiVersion: v1
kind: ConfigMap
metadata:
  name: kuryr-config
  namespace: kube-system
data:
  kuryr.conf: |
EOF

    indent < "${conf_path}" >> "${output_dir}/config_map.yml"
}

function generate_kuryr_certificates_secret {
    local output_dir
    local certs_bundle_path
    output_dir=$1
    certs_bundle_path=${2:-""}

    mkdir -p "$output_dir"
    rm -f "${output_dir}/certificates_secret.yml"

    CA_CERT=\"\"  # It's a "" string that will be inserted into yaml file.

    if [ "$certs_bundle_path" -a -f "$certs_bundle_path" ]; then
        CA_CERT=$(base64 -w0 < "$certs_bundle_path")
    fi

    cat >> "${output_dir}/certificates_secret.yml" << EOF
apiVersion: v1
kind: Secret
metadata:
  name: kuryr-certificates
  namespace: kube-system
type: Opaque
data:
  kuryr-ca-bundle.crt: $CA_CERT
EOF
}

# Generates kuryr-controller service account and kuryr-cni service account.
function generate_kuryr_service_account {
    output_dir=$1
    mkdir -p "$output_dir"
    rm -f "${output_dir}/controller_service_account.yml"
    rm -f "${output_dir}/cni_service_account.yml"
    cat >> "${output_dir}/controller_service_account.yml" << EOF
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kuryr-controller
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kuryr-controller
rules:
- apiGroups:
  - ""
  verbs: ["*"]
  resources:
  - endpoints
  - pods
  - services
  - services/status
  - namespaces
- apiGroups:
  - ""
  verbs: ["get", "list", "watch"]
  resources:
  - nodes
- apiGroups:
  - openstack.org
  verbs: ["*"]
  resources:
  - kuryrnetworks
  - kuryrnetworkpolicies
  - kuryrloadbalancers
  - kuryrports
- apiGroups: ["networking.k8s.io"]
  resources:
  - networkpolicies
  verbs:
  - get
  - list
  - watch
  - update
  - patch
- apiGroups: ["k8s.cni.cncf.io"]
  resources:
  - network-attachment-definitions
  verbs:
  - get
- apiGroups: ["", "events.k8s.io"]
  resources:
  - events
  verbs:
  - create
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kuryr-controller-global
subjects:
- kind: ServiceAccount
  name: kuryr-controller
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: kuryr-controller
  apiGroup: rbac.authorization.k8s.io
EOF

    cat >> "${output_dir}/cni_service_account.yml" << EOF
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kuryr-cni
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kuryr-cni
rules:
- apiGroups:
  - ""
  verbs: ["*"]
  resources:
  - pods
- apiGroups:
  - openstack.org
  verbs: ["*"]
  resources:
  - kuryrports
- apiGroups: ["", "events.k8s.io"]
  resources:
  - events
  verbs:
  - create
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kuryr-cni-global
subjects:
- kind: ServiceAccount
  name: kuryr-cni
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: kuryr-cni
  apiGroup: rbac.authorization.k8s.io
EOF
}

function generate_controller_deployment {
    output_dir=$1
    health_server_port=$2
    controller_ha=$3
    mkdir -p "$output_dir"
    rm -f "${output_dir}/controller_deployment.yml"
    cat >> "${output_dir}/controller_deployment.yml" << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    name: kuryr-controller
  name: kuryr-controller
  namespace: kube-system
spec:
  replicas: ${KURYR_CONTROLLER_REPLICAS:-1}
  selector:
    matchLabels:
      name: kuryr-controller
EOF

    # When running without HA we should make sure that we won't have more than
    # one kuryr-controller pod in the deployment.
    if [ "$controller_ha" == "False" ]; then
        cat >> "${output_dir}/controller_deployment.yml" << EOF
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
EOF
    fi

    cat >> "${output_dir}/controller_deployment.yml" << EOF
  template:
    metadata:
      labels:
        name: kuryr-controller
      name: kuryr-controller
    spec:
      serviceAccountName: kuryr-controller
      automountServiceAccountToken: true
      hostNetwork: true
      containers:
EOF

    if [ "$controller_ha" == "True" ]; then
        cat >> "${output_dir}/controller_deployment.yml" << EOF
      - image: gcr.io/google_containers/leader-elector:0.5
        name: leader-elector
        args:
        - "--election=kuryr-controller"
        - "--http=0.0.0.0:${KURYR_CONTROLLER_HA_PORT:-16401}"
        - "--election-namespace=kube-system"
        - "--ttl=5s"
        ports:
        - containerPort: ${KURYR_CONTROLLER_HA_PORT:-16401}
          protocol: TCP
EOF
    fi

    cat >> "${output_dir}/controller_deployment.yml" << EOF
      - image: kuryr/controller:latest
        imagePullPolicy: Never
        name: controller
        terminationMessagePath: "/dev/termination-log"
        volumeMounts:
        - name: config-volume
          mountPath: "/etc/kuryr"
        - name: certificates-volume
          mountPath: "/etc/ssl/certs"
          readOnly: true
        readinessProbe:
          httpGet:
            path: /ready
            port: ${health_server_port}
            scheme: HTTP
          timeoutSeconds: 5
        livenessProbe:
          httpGet:
            path: /alive
            port: ${health_server_port}
          initialDelaySeconds: 15
EOF

    cat >> "${output_dir}/controller_deployment.yml" << EOF
      volumes:
      - name: config-volume
        configMap:
          name: kuryr-config
      - name: certificates-volume
        secret:
          secretName: kuryr-certificates
      restartPolicy: Always
      tolerations:
      - key: "node-role.kubernetes.io/master"
        operator: "Exists"
        effect: "NoSchedule"
      - key: "node.kubernetes.io/not-ready"
        operator: "Exists"
        effect: "NoSchedule"
EOF
}

function generate_cni_daemon_set {
    output_dir=$1
    cni_health_server_port=$2
    local var_run=${VAR_RUN_PATH:-/var/run}
    mkdir -p "$output_dir"
    rm -f "${output_dir}/cni_ds.yml"
    cat >> "${output_dir}/cni_ds.yml" << EOF
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kuryr-cni-ds
  namespace: kube-system
  labels:
    tier: node
    app: kuryr-cni
spec:
  selector:
    matchLabels:
      app: kuryr-cni
  template:
    metadata:
      labels:
        tier: node
        app: kuryr-cni
    spec:
      hostNetwork: true
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      - key: "node.kubernetes.io/not-ready"
        operator: "Exists"
        effect: "NoSchedule"
      serviceAccountName: kuryr-cni
      containers:
      - name: kuryr-cni
        image: kuryr/cni:latest
        imagePullPolicy: Never
        command: [ "cni_ds_init" ]
        env:
        - name: KUBERNETES_NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: KURYR_CNI_POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        securityContext:
          privileged: true
        volumeMounts:
        - name: bin
          mountPath: /opt/cni/bin
        - name: net-conf
          mountPath: /etc/cni/net.d
        - name: config-volume
          mountPath: /etc/kuryr
EOF
    if [ "$CONTAINER_ENGINE" != "crio" ]; then
        cat >> "${output_dir}/cni_ds.yml" << EOF
        - name: proc
          mountPath: /host_proc
EOF
    fi
    cat >> "${output_dir}/cni_ds.yml" << EOF
        - name: var-pci
          mountPath: /var/pci_address
        - name: var-run
          mountPath: /var/run
          mountPropagation: HostToContainer
EOF
    # NOTE(gryf): assuming the --namespaces-dir parameter would not be used,
    # otherwise /var/run/$crio_netns_path is all wrong.
    if [ "$CONTAINER_ENGINE" = "crio" ] && \
            [ "${VAR_RUN_PATH}" != "/var/run" ]; then
        cat >> "${output_dir}/cni_ds.yml" << EOF
        - name: netns
          mountPath: /var/run/netns
          mountPropagation: HostToContainer
EOF
    fi
    cat >> "${output_dir}/cni_ds.yml" << EOF
        readinessProbe:
          httpGet:
            path: /ready
            port: ${cni_health_server_port}
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 10
        livenessProbe:
          httpGet:
            path: /alive
            port: ${cni_health_server_port}
          initialDelaySeconds: 60
      volumes:
      - name: bin
        hostPath:
          path: ${CNI_PLUGIN_DIR}
      - name: net-conf
        hostPath:
          path: ${CNI_CONF_DIR}
      - name: config-volume
        configMap:
          name: kuryr-config
      - name: var-run
        hostPath:
          path: ${var_run}
EOF
    if [[ "$CONTAINER_ENGINE" != "crio" ]]; then
        cat >> "${output_dir}/cni_ds.yml" << EOF
      - name: proc
        hostPath:
          path: /proc
EOF
    fi
    cat >> "${output_dir}/cni_ds.yml" << EOF
      - name: var-pci
        hostPath:
          path: /var/pci_address
EOF
    if [ "${CONTAINER_ENGINE}" = "crio" ] && \
            [ "${VAR_RUN_PATH}" != "/var/run" ]; then
        cat >> "${output_dir}/cni_ds.yml" << EOF
      - name: netns
        hostPath:
          path: /var/run/netns
EOF
    fi
}

# lb_state
# Description: Returns the state of the load balancer
# Params:
#   id - Id or name of the loadbalancer whose state needs to be retrieved.
function lb_state {
    local lb_id

    lb_id="$1"
    openstack loadbalancer show "$lb_id" | \
        awk '/provisioning_status/ {print $4}'
}

function _wait_for_lb {
    local lb_name
    local curr_time
    local time_diff
    local start_time

    lb_name="$1"
    timeout=${2:-$KURYR_WAIT_TIMEOUT}

    echo -n "Waiting for LB:$lb_name"
    start_time=$(date +%s)

    while [[ "$(lb_state "$lb_name")" != "ACTIVE" ]]; do
        echo -n "Waiting till LB=$lb_name is ACTIVE."
        curr_time=$(date +%s)
        time_diff=$((curr_time - start_time))
        [[ $time_diff -le $timeout ]] || die "Timed out waiting for $lb_name"
        sleep 5
    done
}

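# _wait_for_lb polls provisioning_status every 5 seconds until the load
# balancer reports ACTIVE or the timeout is exceeded, e.g. (timeout value is
# illustrative):
#
#   _wait_for_lb default/kubernetes 1200
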
# create_load_balancer
# Description: Creates an OpenStack Load Balancer with either neutron LBaaS
#              or Octavia
# Params:
#   lb_name: Name to give to the load balancer.
#   lb_vip_subnet: Id or name of the subnet where lb_vip should be allocated.
#   project_id: Id of the project where the load balancer should be
#               allocated.
#   lb_vip: Virtual IP to give to the load balancer - optional.
function create_load_balancer {
    local lb_name
    local lb_vip_subnet
    local lb_params
    local project_id

    lb_name="$1"
    lb_vip_subnet="$2"
    project_id="$3"

    lb_params=" --name $lb_name "
    if [ -z "$4" ]; then
        echo -n "create_load_balancer LB=$lb_name, lb_vip not provided."
    else
        lb_params+=" --vip-address $4"
    fi

    lb_params+=" --project ${project_id} --vip-subnet-id $lb_vip_subnet"
    openstack loadbalancer create $lb_params
}

# create_load_balancer_listener
# Description: Creates an OpenStack Load Balancer Listener for the specified
#              Load Balancer with either neutron LBaaS or Octavia
# Params:
#   name: Name to give to the load balancer listener.
#   protocol: Whether it is HTTP, HTTPS, TCP, etc.
#   port: The TCP port number to listen to.
#   data_timeouts: Octavia's timeouts for client and server inactivity.
#   lb: Id or name of the Load Balancer we want to add the Listener to.
#   project_id: Id of the project this listener belongs to.
function create_load_balancer_listener {
    local name
    local protocol
    local port
    local lb
    local data_timeouts
    local max_timeout
    local project_id

    name="$1"
    protocol="$2"
    port="$3"
    lb="$4"
    project_id="$5"
    data_timeouts="$6"

    max_timeout=1200
    # Octavia needs the LB to be active for the listener
    _wait_for_lb "$lb" "$max_timeout"

    openstack loadbalancer listener create --name "$name" \
        --protocol "$protocol" \
        --protocol-port "$port" \
        --timeout-client-data "$data_timeouts" \
        --timeout-member-data "$data_timeouts" \
        "$lb"
}

# create_load_balancer_pool
# Description: Creates an OpenStack Load Balancer Pool for the specified
#              Load Balancer listener with either neutron LBaaS or Octavia
# Params:
#   name: Name to give to the load balancer pool.
#   protocol: Whether it is HTTP, HTTPS, TCP, etc.
#   algorithm: Load Balancing algorithm to use.
#   listener: Id or name of the Load Balancer Listener we want to add the
#             pool to.
#   project_id: Id of the project this pool belongs to.
#   lb: Id or name of the Load Balancer we want to add the pool to
#       (optional).
function create_load_balancer_pool {
    local name
    local protocol
    local algorithm
    local listener
    local lb
    local project_id

    name="$1"
    protocol="$2"
    algorithm="$3"
    listener="$4"
    project_id="$5"
    lb="$6"

    # We must wait for the LB to be active before we can put a Pool for it
    _wait_for_lb "$lb"

    openstack loadbalancer pool create --name "$name" \
        --listener "$listener" \
        --protocol "$protocol" \
        --lb-algorithm "$algorithm"
}

# create_load_balancer_member
# Description: Creates an OpenStack load balancer pool member
# Params:
#   name: Name to give to the load balancer pool member.
#   address: IP address the member listens on.
#   port: Port number the pool member is listening on.
#   pool: Id or name of the Load Balancer pool this member belongs to.
#   subnet: Id or name of the subnet the member address belongs to.
#   lb: Id or name of the load balancer the member belongs to.
#   project_id: Id of the project this member belongs to.
function create_load_balancer_member {
    local name
    local address
    local port
    local pool
    local lb
    local project_id

    name="$1"
    address="$2"
    port="$3"
    pool="$4"
    lb="$5"
    project_id="$6"

    # We must wait for the pool creation update before we can add members
    _wait_for_lb "$lb"

    openstack loadbalancer member create --name "$name" \
        --address "$address" \
        --protocol-port "$port" \
        "$pool"
}

# split_subnet
# Description: Splits a subnet in two subnets that constitute its halves
# Params:
#   cidr: Subnet CIDR to split
# Returns: tab separated CIDRs of the two halves.
function split_subnet {
    # precondition: The passed cidr must have a prefix length <= 30
    python3 - <<EOF "$@"
import sys

from netaddr import IPNetwork


n = IPNetwork(str(sys.argv[1]))
first, last = n.subnet(n.prefixlen+1)

print("%s\\t%s" % (first, last))
EOF
}

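# Example (hypothetical CIDR; output shown as a comment):
#
#   split_subnet 10.0.0.0/24
#   # -> 10.0.0.0/25<TAB>10.0.0.128/25
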
# cleanup_kuryr_devstack_iptables
# Description: Finds all the iptables rules we set and deletes them
function cleanup_kuryr_devstack_iptables {
    local chains

    chains=( INPUT FORWARD OUTPUT )
    for chain in "${chains[@]}"; do
        sudo iptables -n -L "$chain" -v --line-numbers | \
            awk -v chain="$chain" \
                '/kuryr-devstack/ {print "sudo iptables -D " chain " " $1}' | \
            tac | bash /dev/stdin
    done
}

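# The awk/tac/bash pipeline above turns every rule whose comment contains
# "kuryr-devstack" into a delete command and runs the deletes bottom-up so
# that the remaining rule numbers stay valid; a match on rule 7 of INPUT
# produces, for example:
#
#   sudo iptables -D INPUT 7
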
function build_install_kuryr_cni {
    pushd "${KURYR_HOME}/kuryr_cni" || exit 1
    hack/build-go.sh
    sudo install -o "$STACK_USER" -m 0555 -D bin/kuryr-cni \
        "${CNI_PLUGIN_DIR}/kuryr-cni"
    popd
}

function create_kuryr_account {
    create_service_user "kuryr" "admin"
    get_or_create_service "kuryr-kubernetes" "kuryr-kubernetes" \
        "Kuryr-Kubernetes Service"
}

function _create_kuryr_cache_dir {
    # Create cache directory
    sudo install -d -o "$STACK_USER" "$KURYR_AUTH_CACHE_DIR"
    if [[ ! "$KURYR_AUTH_CACHE_DIR" == "" ]]; then
        rm -f "$KURYR_AUTH_CACHE_DIR"/*
    fi
}

function _create_kuryr_lock_dir {
    # Create lock directory
    sudo install -d -o "$STACK_USER" "$KURYR_LOCK_DIR"
}

function configure_kuryr {
    local dir

    if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
        # According to the documentation we need those kernel modules for
        # CRI-O. They might already be loaded by neutron, so don't fail on it.
        # https://kubernetes.io/docs/setup/production-environment/container-runtimes/#cri-o
        sudo modprobe overlay || true
        sudo modprobe br_netfilter || true
    fi

    sudo install -d -o "$STACK_USER" "$KURYR_CONFIG_DIR"
    "${KURYR_HOME}/tools/generate_config_file_samples.sh"
    sudo install -o "$STACK_USER" -m 640 -D \
        "${KURYR_HOME}/etc/kuryr.conf.sample" "$KURYR_CONFIG"

    iniset "$KURYR_CONFIG" kubernetes ssl_client_crt_file "$KURYR_K8S_API_CERT"
    iniset "$KURYR_CONFIG" kubernetes ssl_client_key_file "$KURYR_K8S_API_KEY"
    if [ "$KURYR_K8S_API_CACERT" ]; then
        iniset "$KURYR_CONFIG" kubernetes ssl_ca_crt_file "$KURYR_K8S_API_CACERT"
        iniset "$KURYR_CONFIG" kubernetes ssl_verify_server_crt True
    fi
    if [ "$KURYR_MULTI_VIF_DRIVER" ]; then
        iniset "$KURYR_CONFIG" kubernetes multi_vif_drivers "$KURYR_MULTI_VIF_DRIVER"
    fi
    # REVISIT(ivc): 'use_stderr' is required for the current CNI driver. Once
    # a daemon-based CNI driver is implemented, this could be removed.
    iniset "$KURYR_CONFIG" DEFAULT use_stderr true

    iniset "$KURYR_CONFIG" DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"

    iniset "$KURYR_CONFIG" kubernetes port_debug "$KURYR_PORT_DEBUG"

    iniset "$KURYR_CONFIG" kubernetes pod_subnets_driver "$KURYR_SUBNET_DRIVER"
    iniset "$KURYR_CONFIG" kubernetes pod_security_groups_driver "$KURYR_SG_DRIVER"
    iniset "$KURYR_CONFIG" kubernetes service_security_groups_driver "$KURYR_SG_DRIVER"
    iniset "$KURYR_CONFIG" kubernetes enabled_handlers "$KURYR_ENABLED_HANDLERS"

    # Let Kuryr retry connections to K8s API for 20 minutes.
    iniset "$KURYR_CONFIG" kubernetes watch_retry_timeout 1200

    if [[ "$KURYR_PROJECT_DRIVER" == "annotation" ]]; then
        iniset "$KURYR_CONFIG" kubernetes pod_project_driver annotation
        iniset "$KURYR_CONFIG" kubernetes service_project_driver annotation
        iniset "$KURYR_CONFIG" kubernetes namespace_project_driver annotation
        iniset "$KURYR_CONFIG" kubernetes network_policy_project_driver annotation
    fi

    if [ "${KURYR_CONT}" == "True" ]; then
        # This works around the issue of being unable to set oslo.privsep mode
        # to FORK in os-vif. When running in a container we drop the `sudo`
        # that was prefixed before the `privsep-helper` command. This lets us
        # run in envs without sudo and keep the same python environment as the
        # parent process.
        iniset "$KURYR_CONFIG" vif_plug_ovs_privileged helper_command privsep-helper
        iniset "$KURYR_CONFIG" vif_plug_linux_bridge_privileged helper_command privsep-helper

        if [ "${CONTAINER_ENGINE}" = "docker" ]; then
            # When running kuryr-daemon or CNI in a container we need to set
            # up some configs.
            iniset "$KURYR_CONFIG" cni_daemon docker_mode True
            iniset "$KURYR_CONFIG" cni_daemon netns_proc_dir "/host_proc"
        fi
    else
        iniset "$KURYR_CONFIG" oslo_concurrency lock_path "$KURYR_LOCK_DIR"
        _create_kuryr_lock_dir
        iniset "$KURYR_CONFIG" cni_health_server cg_path \
            "/system.slice/system-devstack.slice/devstack@kuryr-daemon.service"
    fi

    _create_kuryr_cache_dir

    # Neutron API server & Neutron plugin
    if is_service_enabled kuryr-kubernetes; then
        configure_auth_token_middleware "$KURYR_CONFIG" kuryr \
            "$KURYR_AUTH_CACHE_DIR" neutron
        iniset "$KURYR_CONFIG" kubernetes pod_vif_driver "$KURYR_POD_VIF_DRIVER"
        if [ "$KURYR_USE_PORTS_POOLS" ]; then
            iniset "$KURYR_CONFIG" kubernetes vif_pool_driver "$KURYR_VIF_POOL_DRIVER"
            iniset "$KURYR_CONFIG" vif_pool ports_pool_min "$KURYR_VIF_POOL_MIN"
            iniset "$KURYR_CONFIG" vif_pool ports_pool_max "$KURYR_VIF_POOL_MAX"
            iniset "$KURYR_CONFIG" vif_pool ports_pool_batch "$KURYR_VIF_POOL_BATCH"
            iniset "$KURYR_CONFIG" vif_pool ports_pool_update_frequency "$KURYR_VIF_POOL_UPDATE_FREQ"
            if [ "$KURYR_VIF_POOL_MANAGER" ]; then
                iniset "$KURYR_CONFIG" kubernetes enable_manager "$KURYR_VIF_POOL_MANAGER"

                dir=`iniget "$KURYR_CONFIG" vif_pool manager_sock_file`
                if [[ -z $dir ]]; then
                    dir="/run/kuryr/kuryr_manage.sock"
                fi
                dir=`dirname $dir`
                sudo mkdir -p $dir
            fi
        fi
    fi
}

function copy_kuryr_certs {
    # Copy the kubelet client key and make the stack user its owner
    sudo cp /etc/kubernetes/pki/apiserver-kubelet-client.key \
        /etc/kubernetes/pki/kuryr-client.key
    sudo chown $(whoami) /etc/kubernetes/pki/kuryr-client.key
}

function _generate_containerized_kuryr_resources {
    if [[ $KURYR_CONTROLLER_REPLICAS -eq 1 ]]; then
        KURYR_CONTROLLER_HA="False"
    else
        KURYR_CONTROLLER_HA="True"
    fi

    # Containerized deployment will use tokens provided by k8s itself.
    inicomment "$KURYR_CONFIG" kubernetes ssl_client_crt_file
    inicomment "$KURYR_CONFIG" kubernetes ssl_client_key_file

    iniset "$KURYR_CONFIG" kubernetes controller_ha ${KURYR_CONTROLLER_HA}
    iniset "$KURYR_CONFIG" kubernetes controller_ha_port ${KURYR_CONTROLLER_HA_PORT}

    # NOTE(dulek): In the container the CA bundle will be mounted in a
    #              standard directory.
    iniset "$KURYR_CONFIG" neutron cafile /etc/ssl/certs/kuryr-ca-bundle.crt

    # Generate kuryr resources in k8s formats.
    local output_dir="${DATA_DIR}/kuryr-kubernetes"
    generate_kuryr_configmap $output_dir $KURYR_CONFIG
    generate_kuryr_certificates_secret $output_dir $SSL_BUNDLE_FILE
    generate_kuryr_service_account $output_dir
    generate_controller_deployment $output_dir $KURYR_HEALTH_SERVER_PORT $KURYR_CONTROLLER_HA
    generate_cni_daemon_set $output_dir $KURYR_CNI_HEALTH_SERVER_PORT
}

function run_containerized_kuryr_resources {
    local k8s_data_dir="${DATA_DIR}/kuryr-kubernetes"
    kubectl create -f \
        "${k8s_data_dir}/config_map.yml" \
        || die $LINENO "Failed to create kuryr-kubernetes ConfigMap."
    kubectl create -f \
        "${k8s_data_dir}/certificates_secret.yml" \
        || die $LINENO "Failed to create kuryr-kubernetes certificates Secret."
    kubectl create -f \
        "${k8s_data_dir}/controller_service_account.yml" \
        || die $LINENO "Failed to create kuryr-controller ServiceAccount."
    kubectl create -f \
        "${k8s_data_dir}/cni_service_account.yml" \
        || die $LINENO "Failed to create kuryr-cni ServiceAccount."
    kubectl create -f \
        "${k8s_data_dir}/controller_deployment.yml" \
        || die $LINENO "Failed to create kuryr-kubernetes Deployment."
    kubectl create -f \
        "${k8s_data_dir}/cni_ds.yml" \
        || die $LINENO "Failed to create kuryr-kubernetes CNI DaemonSet."
}

function _cidr_range {
    python3 - <<EOF "$1"
import sys
from netaddr import IPAddress, IPNetwork
n = IPNetwork(sys.argv[1])
print("%s\\t%s\\t%s" % (IPAddress(n.first + 1), IPAddress(n.first + 2), IPAddress(n.last - 1)))
EOF
}

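# Example (hypothetical CIDR): the three tab-separated values are the first,
# second and last usable addresses; callers use the first as the cluster IP
# of the K8s API service and the last as the subnet's router IP.
#
#   _cidr_range 10.0.0.0/24
#   # -> 10.0.0.1<TAB>10.0.0.2<TAB>10.0.0.254
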
function copy_tempest_kubeconfig {
    local tempest_home

    tempest_home='/home/tempest'
    if [ -d "$tempest_home" ]; then
        sudo cp -r "${HOME}/.kube" "$tempest_home"
        sudo chown -R tempest "${tempest_home}/.kube"
    fi
}

function create_lb_for_services {
    # This allows pods that need access to the kubernetes API (like the
    # containerized kuryr controller or kube-dns) to talk to the K8s API
    # service.
    local api_port=6443
    local service_cidr
    local kubelet_iface_ip
    local lb_name
    local use_octavia
    local project_id
    local fixed_ips
    local address

    project_id=$(get_or_create_project \
        "$KURYR_NEUTRON_DEFAULT_PROJECT" default)
    lb_name='default/kubernetes'
    # TODO(dulek): We only look at the first service subnet because the
    #              kubernetes API service is only IPv4 in 1.20. It might be
    #              dual stack in the future.
    service_cidr=$(openstack --os-cloud devstack-admin \
        --os-region "$REGION_NAME" \
        subnet show "${KURYR_SERVICE_SUBNETS_IDS[0]}" \
        -c cidr -f value)

    fixed_ips=$(openstack port show kubelet-"${HOSTNAME}" -c fixed_ips -f value)
    kubelet_iface_ip=$(python3 -c "print(${fixed_ips}[0]['ip_address'])")

    k8s_api_clusterip=$(_cidr_range "$service_cidr" | cut -f1)

    echo "***********************************************************************"
    echo "lbname: $lb_name subnet ${KURYR_SERVICE_SUBNETS_IDS[0]} pid: $project_id api-cluster: $k8s_api_clusterip"
    echo "***********************************************************************"
    create_load_balancer "$lb_name" "${KURYR_SERVICE_SUBNETS_IDS[0]}" \
        "$project_id" "$k8s_api_clusterip"
    create_load_balancer_listener default/kubernetes:${KURYR_K8S_API_LB_PORT} HTTPS ${KURYR_K8S_API_LB_PORT} "$lb_name" "$project_id" 3600000
    create_load_balancer_pool default/kubernetes:${KURYR_K8S_API_LB_PORT} HTTPS ROUND_ROBIN \
        default/kubernetes:${KURYR_K8S_API_LB_PORT} "$project_id" "$lb_name"

    if [[ "${KURYR_OVS_BM}" == "True" ]]; then
        address=${kubelet_iface_ip}
    else
        address="${HOST_IP}"
    fi

    # Regardless of the octavia mode, the k8s API will be behind an L3 mode
    # amphora driver loadbalancer
    create_load_balancer_member "$(hostname)" "$address" "$api_port" \
        default/kubernetes:${KURYR_K8S_API_LB_PORT} "$lb_name" "$project_id"
}

function _configure_neutron_defaults {
    local project_id
    local sg_ids
    local router
    local router_id
    local ext_svc_net_id
    local addrs_prefix
    local subnetpool_name

    project_id=$(get_or_create_project \
        "$KURYR_NEUTRON_DEFAULT_PROJECT" default)
    ext_svc_net_id="$(openstack network show -c id -f value \
        "${KURYR_NEUTRON_DEFAULT_EXT_SVC_NET}")"

    # If a subnetpool is not passed, we get the one created in devstack's
    # Neutron module

    export KURYR_SUBNETPOOLS_IDS=()
    export KURYR_ETHERTYPES=()
    if [[ "$KURYR_IPV6" == "False" || "$KURYR_DUAL_STACK" == "True" ]]; then
        export KURYR_ETHERTYPE=IPv4
        KURYR_ETHERTYPES+=("IPv4")
        KURYR_SUBNETPOOLS_IDS+=(${KURYR_NEUTRON_DEFAULT_SUBNETPOOL_ID:-${SUBNETPOOL_V4_ID}})
    fi
    if [[ "$KURYR_IPV6" == "True" || "$KURYR_DUAL_STACK" == "True" ]]; then
        export KURYR_ETHERTYPE=IPv6
        KURYR_ETHERTYPES+=("IPv6")
        # NOTE(gryf): To not clash with subnets created by DevStack for IPv6,
        # we create another subnetpool just for kuryr subnets.
        # SUBNETPOOL_KURYR_V6_ID will be used in function configure_kuryr in
        # case of the namespace kuryr subnet driver.
        # This is not required for IPv4, because DevStack only adds a
        # conflicting route for IPv6. On DevStack that route makes the public
        # IPv6 network accessible from the host, which has no counterpart in
        # the IPv4 net, because floating IPs are used instead.
        IPV6_ID=$(uuidgen | sed s/-//g | cut -c 23- | \
            sed -e "s/\(..\)\(....\)\(....\)/\1:\2:\3/")
        addrs_prefix="fd${IPV6_ID}::/56"
        subnetpool_name=${SUBNETPOOL_KURYR_NAME_V6}
        KURYR_SUBNETPOOLS_IDS+=($(openstack \
            --os-cloud devstack-admin \
            --os-region "${REGION_NAME}" \
            subnet pool create "${subnetpool_name}" \
            --default-prefix-length "${SUBNETPOOL_SIZE_V6}" \
            --pool-prefix "${addrs_prefix}" \
            --share -f value -c id))
    fi

    router=${KURYR_NEUTRON_DEFAULT_ROUTER:-$Q_ROUTER_NAME}
    if [ "$router" != "$Q_ROUTER_NAME" ]; then
        openstack --os-cloud devstack-admin --os-region "$REGION_NAME" \
            router create --project "$project_id" "$router"
        openstack --os-cloud devstack-admin --os-region "$REGION_NAME" \
            router set --external-gateway "$ext_svc_net_id" "$router"
    fi
    router_id="$(openstack router show -c id -f value "$router")"

    pod_net_id=$(openstack --os-cloud devstack-admin \
        --os-region "$REGION_NAME" \
        network create --project "$project_id" \
        "$KURYR_NEUTRON_DEFAULT_POD_NET" \
        -c id -f value)
    service_net_id=$(openstack --os-cloud devstack-admin \
        --os-region "$REGION_NAME" \
        network create --project "$project_id" \
        "$KURYR_NEUTRON_DEFAULT_SERVICE_NET" \
        -c id -f value)

    export KURYR_POD_SUBNETS_IDS=()
    export KURYR_SERVICE_SUBNETS_IDS=()
    for i in "${!KURYR_SUBNETPOOLS_IDS[@]}"; do
        KURYR_POD_SUBNETS_IDS+=($(create_k8s_subnet "$project_id" \
            "$pod_net_id" \
            "${KURYR_NEUTRON_DEFAULT_POD_SUBNET}-${KURYR_ETHERTYPES[$i]}" \
            "${KURYR_SUBNETPOOLS_IDS[$i]}" \
            "$router" "False" ${KURYR_ETHERTYPES[$i]}))

        KURYR_SERVICE_SUBNETS_IDS+=($(create_k8s_subnet "$project_id" \
            "$service_net_id" \
            "${KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET}-${KURYR_ETHERTYPES[$i]}" \
            "${KURYR_SUBNETPOOLS_IDS[$i]}" \
            "$router" "True" ${KURYR_ETHERTYPES[$i]}))
    done

    sg_ids=()
    if [[ "$KURYR_SG_DRIVER" == "default" ]]; then
        sg_ids+=($(echo $(openstack security group list \
            --project "$project_id" -c ID -f value) | tr ' ' ','))
    fi

    # In order for the ports to allow service traffic under Octavia L3 mode,
    # it is necessary for the service subnet to be allowed into the ports'
    # security groups. If L3 is used, then the pods created will include it.
    # Otherwise it will just be used by the kubelet port used for the K8s API
    # load balancer.
    local service_pod_access_sg_id
    service_pod_access_sg_id=$(openstack --os-cloud devstack-admin \
        --os-region "$REGION_NAME" \
        security group create --project "$project_id" \
        service_pod_access -f value -c id)

    for i in "${!KURYR_SERVICE_SUBNETS_IDS[@]}"; do
        local service_cidr
        service_cidr=$(openstack --os-cloud devstack-admin \
            --os-region "$REGION_NAME" subnet show \
            "${KURYR_SERVICE_SUBNETS_IDS[$i]}" -f value -c cidr)
        openstack --os-cloud devstack-admin --os-region "$REGION_NAME" \
            security group rule create --project "$project_id" \
            --description "k8s service subnet allowed" \
            --remote-ip "$service_cidr" --ethertype "${KURYR_ETHERTYPES[$i]}" --protocol tcp \
            "$service_pod_access_sg_id"
        # Since Octavia also supports UDP load balancing, we need to allow
        # udp traffic as well
        openstack --os-cloud devstack-admin --os-region "$REGION_NAME" \
            security group rule create --project "$project_id" \
            --description "k8s service subnet UDP allowed" \
            --remote-ip "$service_cidr" --ethertype "${KURYR_ETHERTYPES[$i]}" --protocol udp \
            "$service_pod_access_sg_id"
        # Octavia supports SCTP load balancing, so we need to also allow SCTP
        # traffic
        openstack --os-cloud devstack-admin --os-region "$REGION_NAME" \
            security group rule create --project "$project_id" \
            --description "k8s service subnet SCTP allowed" \
            --remote-ip "$service_cidr" --ethertype "${KURYR_ETHERTYPES[$i]}" --protocol sctp \
            "$service_pod_access_sg_id"
    done

    if [[ "$KURYR_K8S_OCTAVIA_MEMBER_MODE" == "L3" ]]; then
        sg_ids+=(${service_pod_access_sg_id})
    elif [[ "$KURYR_K8S_OCTAVIA_MEMBER_MODE" == "L2" ]]; then
        # In case the member connectivity is L2, Octavia by default uses the
        # admin 'default' sg to create a port for the amphora load balancer
        # at the member ports subnet. Thus we need to allow L2 communication
        # between the member ports and the octavia ports by allowing all
        # access from the pod subnet range to the ports in that subnet, and
        # include it into $sg_ids
        local octavia_pod_access_sg_id
        octavia_pod_access_sg_id=$(openstack --os-cloud devstack-admin \
            --os-region "$REGION_NAME" \
            security group create --project "$project_id" \
            octavia_pod_access -f value -c id)
        for i in "${!KURYR_POD_SUBNETS_IDS[@]}"; do
            local pod_cidr
            pod_cidr=$(openstack --os-cloud devstack-admin \
                --os-region "$REGION_NAME" subnet show \
                "${KURYR_POD_SUBNETS_IDS[$i]}" -f value -c cidr)
            openstack --os-cloud devstack-admin --os-region "$REGION_NAME" \
                security group rule create --project "$project_id" \
                --description "k8s pod subnet allowed from k8s-pod-subnet" \
                --remote-ip "$pod_cidr" --ethertype "${KURYR_ETHERTYPES[$i]}" --protocol tcp \
                "$octavia_pod_access_sg_id"
            # Since Octavia also supports UDP load balancing, we need to allow
            # udp traffic as well
            openstack --os-cloud devstack-admin --os-region "$REGION_NAME" \
                security group rule create --project "$project_id" \
                --description "k8s pod subnet allowed from k8s-pod-subnet" \
                --remote-ip "$pod_cidr" --ethertype "${KURYR_ETHERTYPES[$i]}" --protocol udp \
                "$octavia_pod_access_sg_id"
            # Octavia supports SCTP load balancing, so we need to also allow
            # SCTP traffic
            openstack --os-cloud devstack-admin --os-region "$REGION_NAME" \
                security group rule create --project "$project_id" \
                --description "k8s pod subnet allowed from k8s-pod-subnet" \
                --remote-ip "$pod_cidr" --ethertype "${KURYR_ETHERTYPES[$i]}" --protocol sctp \
                "$octavia_pod_access_sg_id"
        done
        sg_ids+=(${octavia_pod_access_sg_id})
    fi

iniset "$KURYR_CONFIG" neutron_defaults project "$project_id"
|
|
iniset "$KURYR_CONFIG" neutron_defaults pod_subnet "${KURYR_POD_SUBNETS_IDS[0]}"
|
|
iniset "$KURYR_CONFIG" neutron_defaults pod_subnets $(IFS=, ; echo "${KURYR_POD_SUBNETS_IDS[*]}")
|
|
iniset "$KURYR_CONFIG" neutron_defaults service_subnet "${KURYR_SERVICE_SUBNETS_IDS[0]}"
|
|
iniset "$KURYR_CONFIG" neutron_defaults service_subnets $(IFS=, ; echo "${KURYR_SERVICE_SUBNETS_IDS[*]}")
|
|
if [ "$KURYR_SUBNET_DRIVER" == "namespace" ]; then
|
|
iniset "$KURYR_CONFIG" namespace_subnet pod_subnet_pool "${KURYR_SUBNETPOOLS_IDS[0]}"
|
|
iniset "$KURYR_CONFIG" namespace_subnet pod_subnet_pools $(IFS=, ; echo "${KURYR_SUBNETPOOLS_IDS[*]}")
|
|
iniset "$KURYR_CONFIG" namespace_subnet pod_router "$router_id"
|
|
fi
|
|
if [[ "$KURYR_SG_DRIVER" == "policy" ]]; then
|
|
# NOTE(dulek): Using the default DevStack's SG is not enough to match
|
|
# the NP specification. We need to open ingress to everywhere, so we
|
|
# create allow-all group.
|
|
allow_all_sg_id=$(openstack --os-cloud devstack-admin \
|
|
--os-region "$REGION_NAME" \
|
|
security group create --project "$project_id" \
|
|
allow-all -f value -c id)
|
|
for ethertype in ${KURYR_ETHERTYPES[@]}; do
|
|
openstack --os-cloud devstack-admin --os-region "$REGION_NAME" \
|
|
security group rule create --project "$project_id" \
|
|
--description "allow all ingress traffic" \
|
|
--ethertype "$ethertype" --ingress --protocol any \
|
|
"$allow_all_sg_id"
|
|
done
|
|
sg_ids+=(${allow_all_sg_id})
|
|
fi
|
|
iniset "$KURYR_CONFIG" neutron_defaults pod_security_groups $(IFS=, ; echo "${sg_ids[*]}")
|
|
|
|
if [[ "$KURYR_SG_DRIVER" == "policy" ]]; then
|
|
# NOTE(ltomasbo): As more security groups and rules are created, there
|
|
# is a need to increase the quota for it
|
|
openstack --os-cloud devstack-admin --os-region "$REGION_NAME" \
|
|
quota set --secgroups 100 --secgroup-rules 300 "$project_id"
|
|
fi
|
|
|
|
# NOTE(dulek): DevStack's admin default for SG's and instances is 10, this
|
|
# is too little for our tests with Octavia configured to use
|
|
# amphora.
|
|
openstack --os-cloud devstack-admin --os-region "$REGION_NAME" \
|
|
quota set --secgroups 100 --secgroup-rules 300 --instances 100 admin
|
|
|
|
if [ -n "$OVS_BRIDGE" ]; then
|
|
iniset "$KURYR_CONFIG" neutron_defaults ovs_bridge "$OVS_BRIDGE"
|
|
fi
|
|
iniset "$KURYR_CONFIG" neutron_defaults external_svc_net "$ext_svc_net_id"
|
|
iniset "$KURYR_CONFIG" octavia_defaults member_mode "$KURYR_K8S_OCTAVIA_MEMBER_MODE"
|
|
iniset "$KURYR_CONFIG" octavia_defaults enforce_sg_rules "$KURYR_ENFORCE_SG_RULES"
|
|
iniset "$KURYR_CONFIG" octavia_defaults lb_algorithm "$KURYR_LB_ALGORITHM"
|
|
iniset "$KURYR_CONFIG" octavia_defaults timeout_client_data "$KURYR_TIMEOUT_CLIENT_DATA"
|
|
iniset "$KURYR_CONFIG" octavia_defaults timeout_member_data "$KURYR_TIMEOUT_MEMBER_DATA"
|
|
# Octavia takes a very long time to start the LB in the gate. We need
|
|
# to tweak the timeout for the LB creation. Let's be generous and give
|
|
# it up to 20 minutes.
|
|
# FIXME(dulek): This might be removed when bug 1753653 is fixed and
|
|
# Kuryr restarts waiting for LB on timeouts.
|
|
iniset "$KURYR_CONFIG" neutron_defaults lbaas_activation_timeout 1200
|
|
iniset "$KURYR_CONFIG" kubernetes endpoints_driver_octavia_provider "$KURYR_EP_DRIVER_OCTAVIA_PROVIDER"
|
|
}
|
|
|
|
function configure_k8s_pod_sg_rules {
    local project_id
    local sg_id

    project_id=$(get_or_create_project \
        "$KURYR_NEUTRON_DEFAULT_PROJECT" default)
    sg_id=$(openstack --os-cloud devstack-admin \
        --os-region "$REGION_NAME" \
        security group list \
        --project "$project_id" -c ID -c Name -f value | \
        awk '{if ($2=="default") print $1}')
    create_k8s_icmp_sg_rules "$sg_id" ingress
}

function prepare_kubernetes_files {
    mkdir -p "${KURYR_KUBERNETES_DATA_DIR}"
    # Copy certs for Kuryr services to use
    sudo install -m 644 /etc/kubernetes/pki/apiserver.crt \
        "${KURYR_KUBERNETES_DATA_DIR}/kuryr.crt"
    sudo install -m 644 /etc/kubernetes/pki/apiserver.key \
        "${KURYR_KUBERNETES_DATA_DIR}/kuryr.key"
    sudo install -m 644 /etc/kubernetes/pki/ca.crt \
        "${KURYR_KUBERNETES_DATA_DIR}/kuryr-ca.crt"
}

function wait_for {
    local name
    local url
    local cacert_path
    local start_time
    local curr_time
    local time_diff

    name="$1"
    url="$2"
    cacert_path=${3:-}
    timeout=${4:-$KURYR_WAIT_TIMEOUT}

    echo -n "Waiting for $name to respond"

    extra_flags=${cacert_path:+"--cacert ${cacert_path}"}

    start_time=$(date +%s)
    until curl -o /dev/null -s "$extra_flags" "$url"; do
        echo -n "."
        curr_time=$(date +%s)
        time_diff=$((curr_time - start_time))
        [[ $time_diff -le $timeout ]] || die "Timed out waiting for $name"
        sleep 1
    done
    echo ""
}

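# Usage sketch (URL and timeout are illustrative):
#
#   wait_for "Kubernetes API" "${KURYR_K8S_API_URL}/healthz" "" 600
#
# The optional third argument is a CA bundle handed to curl via --cacert and
# the fourth overrides KURYR_WAIT_TIMEOUT.
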
function _wait_for_ok_ready {
    local name
    local start_time
    local curr_time
    local time_diff

    name="$1"
    timeout=${2:-$KURYR_WAIT_TIMEOUT}

    start_time=$(date +%s)
    echo -n "Waiting for ${name} to be ready"
    until [[ "$(kubectl get --raw='/readyz')" == "ok" ]]; do
        echo -n "."
        curr_time=$(date +%s)
        time_diff=$((curr_time - start_time))
        [[ $time_diff -le $timeout ]] || die "Timed out waiting for $name"
        sleep 1
    done
    echo ""
}

function prepare_kubeconfig {
    kubectl config set-cluster devstack-cluster \
        --server="${KURYR_K8S_API_URL}" --certificate-authority
    kubectl config set-credentials stack
    kubectl config set-context devstack --cluster=devstack-cluster --user=stack
    kubectl config use-context devstack
}

function prepare_kubelet {
    local kubelet_plugin_dir="/etc/cni/net.d/"
    sudo install -o "$STACK_USER" -m 0664 -D \
        "${KURYR_HOME}${kubelet_plugin_dir}/10-kuryr.conflist" \
        "${CNI_CONF_DIR}/10-kuryr.conflist"
}

function run_kuryr_kubernetes {
    local controller_bin

    _wait_for_ok_ready "kubernetes" 1200

    controller_bin=$(which kuryr-k8s-controller)
    run_process kuryr-kubernetes "$controller_bin --config-file $KURYR_CONFIG"
}

function configure_overcloud_vm_k8s_svc_sg {
    local dst_port
    local project_id
    local security_group

    if is_service_enabled octavia; then
        dst_port=${KURYR_K8S_API_LB_PORT}
    else
        dst_port=${KURYR_K8S_API_PORT}
    fi

    project_id=$(get_or_create_project \
        "$KURYR_NEUTRON_DEFAULT_PROJECT" default)
    security_group=$(openstack security group list \
        --project "$project_id" -c ID -c Name -f value | \
        awk '{if ($2=="default") print $1}')
    for ethertype in "${KURYR_ETHERTYPES[@]}"; do
        openstack --os-cloud devstack-admin --os-region "$REGION_NAME" \
            security group rule create --project "$project_id" \
            --dst-port "$dst_port" --ethertype "$ethertype" "$security_group"
    done
    openstack port set "$KURYR_OVERCLOUD_VM_PORT" --security-group service_pod_access
}

function run_kuryr_daemon {
    local daemon_bin
    daemon_bin=$(which kuryr-daemon)
    run_process kuryr-daemon \
        "$daemon_bin --config-file $KURYR_CONFIG" root root
}

function update_tempest_conf_file {
    if [[ "$KURYR_USE_PORT_POOLS" == "True" ]]; then
        iniset "$TEMPEST_CONFIG" kuryr_kubernetes port_pool_enabled True
    fi
    if [[ "${KURYR_CONT}" == "True" ]]; then
        iniset "$TEMPEST_CONFIG" kuryr_kubernetes containerized True
    fi
    if [[ "$KURYR_SUBNET_DRIVER" == "namespace" ]]; then
        iniset "$TEMPEST_CONFIG" kuryr_kubernetes subnet_per_namespace True
        iniset "$TEMPEST_CONFIG" kuryr_kubernetes kuryrnetworks True
        iniset "$TEMPEST_CONFIG" kuryr_kubernetes trigger_namespace_upon_pod True
    fi
    if [[ "$KURYR_K8S_SERIAL_TESTS" == "True" ]]; then
        iniset "$TEMPEST_CONFIG" kuryr_kubernetes run_tests_serial True
    fi
    if [[ "$KURYR_MULTI_VIF_DRIVER" == "npwg_multiple_interfaces" ]]; then
        iniset "$TEMPEST_CONFIG" kuryr_kubernetes npwg_multi_vif_enabled True
    fi
    if [[ "$KURYR_ENABLED_HANDLERS" =~ .*policy.* ]]; then
        iniset "$TEMPEST_CONFIG" kuryr_kubernetes network_policy_enabled True
        iniset "$TEMPEST_CONFIG" kuryr_kubernetes new_kuryrnetworkpolicy_crd True
    fi
    # NOTE(yboaron): Services with protocol UDP are supported in Kuryr
    #                starting from the Stein release
    iniset "$TEMPEST_CONFIG" kuryr_kubernetes test_udp_services True
    if [[ "$KURYR_CONTROLLER_HA" == "True" ]]; then
        iniset "$TEMPEST_CONFIG" kuryr_kubernetes ap_ha True
    fi
    if [[ "$KURYR_K8S_MULTI_WORKER_TESTS" == "True" ]]; then
        iniset "$TEMPEST_CONFIG" kuryr_kubernetes multi_worker_setup True
    fi
    if [[ "$KURYR_K8S_CLOUD_PROVIDER" == "True" ]]; then
        iniset "$TEMPEST_CONFIG" kuryr_kubernetes cloud_provider True
    fi
    if [[ "$KURYR_CONFIGMAP_MODIFIABLE" == "True" ]]; then
        iniset "$TEMPEST_CONFIG" kuryr_kubernetes configmap_modifiable True
    fi
    if [[ "$KURYR_IPV6" == "True" || "$KURYR_DUAL_STACK" == "True" ]]; then
        iniset "$TEMPEST_CONFIG" kuryr_kubernetes ipv6 True
    fi
    # NOTE(digitalsimboja): Reconciliation tests create and delete LBs,
    # so only enable them for OVN as it's faster when creating LBs
    if [[ "$KURYR_EP_DRIVER_OCTAVIA_PROVIDER" == "ovn" ]]; then
        iniset "$TEMPEST_CONFIG" kuryr_kubernetes enable_reconciliation True
    fi
    if [[ "$KURYR_PROJECT_DRIVER" == "annotation" ]]; then
        iniset "$TEMPEST_CONFIG" kuryr_kubernetes annotation_project_driver True
    fi
    iniset "$TEMPEST_CONFIG" kuryr_kubernetes enable_listener_reconciliation True
    iniset "$TEMPEST_CONFIG" kuryr_kubernetes validate_crd True
    iniset "$TEMPEST_CONFIG" kuryr_kubernetes kuryrports True
    iniset "$TEMPEST_CONFIG" kuryr_kubernetes kuryrloadbalancers True
    iniset "$TEMPEST_CONFIG" kuryr_kubernetes test_services_without_selector True
    iniset "$TEMPEST_CONFIG" kuryr_kubernetes test_sctp_services True
    iniset "$TEMPEST_CONFIG" kuryr_kubernetes test_configurable_listener_timeouts True
    if [[ "$KURYR_SUPPORT_POD_SECURITY" == "True" ]]; then
        iniset "$TEMPEST_CONFIG" kuryr_kubernetes set_pod_security_context True
    fi
}

function configure_neutron_defaults {
    local k8s_api_clusterip
    local service_cidr

    if [ "${KURYR_CONF_NEUTRON}" == "False" ]; then
        return
    fi

    if is_service_enabled kuryr-kubernetes; then
        _configure_neutron_defaults
    fi

    if [ "${KURYR_CONT}" == "False" ]; then
        KURYR_K8S_API_ROOT=${KURYR_K8S_API_URL}
        iniset "$KURYR_CONFIG" kubernetes api_root "${KURYR_K8S_API_ROOT}"
        iniset "$KURYR_CONFIG" kubernetes token_file '""'
    else
        iniset "$KURYR_CONFIG" kubernetes api_root '""'
    fi
}

function uninstall_kuryr_cni {
    sudo rm "${CNI_PLUGIN_DIR}/kuryr-cni"
    if [ -z "$(ls -A ${CNI_PLUGIN_DIR})" ]; then
        sudo rm -fr "${CNI_PLUGIN_DIR}"
    fi
}

function rm_kuryr_conf {
    sudo rm -fr /etc/kuryr
}