devstack: Use separate nets and project for kuryr

Up until now we were reusing the demo project and the devstack-created
neutron 'private' subnet for pods. With this patch we move to having our
own:

- k8s project
- pod net and subnet
- service net and subnet

This patch also makes sure that the k8s subnets keep their allocation
pools intact so that ports can be created on them (something the
neutron-lbaasv2 haproxy driver somehow did not need, but Octavia
definitely does).
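
For illustration (the CIDR here is hypothetical), with a 10.0.5.0/24
service subnet the end state set up by the plugin is roughly:

    router/gateway interface:  10.0.5.254  (last usable address)
    allocation pool:           10.0.5.1 - 10.0.5.253

so the router does not collide with the first address of the range, which
Kubernetes' own IPAM hands to the apiserver, while Octavia can still
create VIP and member ports from the pool.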

Partially-Implements: blueprint octavia-support
Change-Id: If5a49bc2010ea59965c5ee9570fb5d131cdf4089
Signed-off-by: Antoni Segura Puimedon <antonisp@celebdor.com>

@@ -144,3 +144,104 @@ function run_etcd_legacy {
--initial-cluster "devstack=$KURYR_ETCD_ADVERTISE_PEER_URL" \
--initial-cluster-state new
}
# _allocation_range
# Description: Writes out the tab-separated usable IP range for a CIDR
# Params:
# cidr - The CIDR to get the range for
# gateway_position - Whether the gateway is reserved at the 'beginning' or at the 'end'
function _allocation_range {
python - <<EOF "$@"
import sys
from ipaddress import ip_network
import six
n = ip_network(six.text_type(sys.argv[1]))
gateway_position = sys.argv[2]
if gateway_position == 'beginning':
    beg_offset = 2
    end_offset = 2
elif gateway_position == 'end':
    beg_offset = 1
    end_offset = 3
else:
    raise ValueError('Disallowed gateway position %s' % gateway_position)
print("%s\\t%s" % (n[beg_offset], n[-end_offset]))
EOF
}
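# Example (the CIDR is hypothetical): for 10.0.5.0/24 the function prints
#   _allocation_range 10.0.5.0/24 end        ->  "10.0.5.1<tab>10.0.5.253"
#   _allocation_range 10.0.5.0/24 beginning  ->  "10.0.5.2<tab>10.0.5.254"
# i.e. the network, gateway and broadcast addresses stay out of the pool.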
# create_k8s_subnet
# Description: Creates a network and subnet for Kuryr-Kubernetes usage
# Params:
# project_id - Kuryr's project uuid
# net_name - Name of the network to create
# subnet_name - Name of the subnet to create
# subnetpool_id - uuid of the subnet pool to use
# router - name of the router to plug the subnet into
function create_k8s_subnet {
# REVISIT(apuimedo): add support for IPv6
local project_id=$1
local net_name="$2"
local subnet_name="$3"
local subnetpool_id="$4"
local router="$5"
local subnet_params="--project $project_id "
local subnet_cidr
subnet_params+="--ip-version 4 "
subnet_params+="--no-dhcp --gateway none "
subnet_params+="--subnet-pool $subnetpool_id "
local net_id
net_id=$(openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" \
network create --project "$project_id" \
"$net_name" \
-c id -f value)
subnet_params+="--network $net_id $subnet_name"
local subnet_id
subnet_id=$(openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" \
subnet create $subnet_params \
-c id -f value)
die_if_not_set $LINENO subnet_id \
"Failure creating K8s ${subnet_name} IPv4 subnet for ${project_id}"
subnet_cidr=$(openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" \
subnet show "$subnet_id" \
-c cidr -f value)
die_if_not_set $LINENO subnet_cidr \
"Failure getting K8s ${subnet_name} IPv4 subnet for $project_id"
# Since K8s has its own IPAM for services and allocates the first IP from
# service subnet CIDR to Kubernetes apiserver, we'll always put the router
# interface at the end of the range.
local router_ip
local allocation_start
local allocation_end
router_ip=$(_cidr_range "$subnet_cidr" | cut -f2)
allocation_start=$(_allocation_range "$subnet_cidr" end | cut -f1)
allocation_end=$(_allocation_range "$subnet_cidr" end | cut -f2)
die_if_not_set $LINENO router_ip \
"Failed to determine K8s ${subnet_name} subnet router IP"
openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" subnet set \
--gateway "$router_ip" --no-allocation-pool "$subnet_id" \
|| die $LINENO "Failed to update K8s ${subnet_name} subnet"
# Set a new allocation pool for the subnet so ports can be created again
openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" subnet set \
--allocation-pool "start=${allocation_start},end=${allocation_end}" \
"$subnet_id" || die $LINENO "Failed to update K8s ${subnet_name} subnet"
openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" \
router add subnet "$router" "$subnet_id" \
|| die $LINENO \
"Failed to enable routing for K8s ${subnet_name} subnet"
}
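# Usage sketch (this mirrors the call made from configure_neutron_defaults
# further below; the variables are the ones devstack and this plugin
# already define):
#   create_k8s_subnet "$project_id" \
#       "$KURYR_NEUTRON_DEFAULT_POD_NET" \
#       "$KURYR_NEUTRON_DEFAULT_POD_SUBNET" \
#       "$SUBNETPOOL_V4_ID" \
#       "$Q_ROUTER_NAME"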

@@ -152,6 +152,12 @@ function create_k8s_api_service {
neutron lbaas-loadbalancer-create --name "$lb_name" \
--vip-address "$k8s_api_clusterip" \
"$KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET"
# Octavia needs the LB to be ACTIVE before the listener can be created
while [[ "$(_lb_state $lb_name)" != "ACTIVE" ]]; do
sleep 1
done
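# (_lb_state is assumed here to print the LB's provisioning_status, e.g.
#  something along the lines of
#  "neutron lbaas-loadbalancer-show $1 -c provisioning_status -f value")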
neutron lbaas-listener-create --loadbalancer "$lb_name" \
--name default/kubernetes:443 \
--protocol HTTPS \
@@ -177,76 +183,39 @@ function create_k8s_api_service {
default/kubernetes:443
}
function create_k8s_service_subnet {
# REVISIT(ivc): add support for IPv6
# REVISIT(apuimedo): Move this into a tool that can be used on deployments
# and make use of it here.
local project_id=$1
local subnet_params="--project $project_id "
if [ -z $SUBNETPOOL_V4_ID ]; then
local service_cidr=$KURYR_K8S_CLUSTER_IP_RANGE
fi
subnet_params+="--ip-version 4 "
subnet_params+="--no-dhcp --gateway none "
subnet_params+="${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} "
subnet_params+="${service_cidr:+--subnet-range $service_cidr} "
subnet_params+="--network $NET_ID $KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET"
local subnet_id
subnet_id=$(openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" \
subnet create $subnet_params \
-c id -f value)
die_if_not_set $LINENO subnet_id \
"Failure creating K8s service IPv4 subnet for $project_id"
service_cidr=$(openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" \
subnet show $subnet_id \
-c cidr -f value)
die_if_not_set $LINENO service_cidr \
"Failure creating K8s service IPv4 subnet for $project_id"
# REVISIT(ivc): consider adding a note to 'settings'
# KURYR_K8S_CLUSTER_IP_RANGE from 'settings' is only used if no
# SUBNETPOOL_V4_ID is defined and otherwise it is rewritten with a
# generated CIDR from SUBNETPOOL_V4_ID.
KURYR_K8S_CLUSTER_IP_RANGE=$service_cidr
# REVISIT(ivc): look for a better solution to deal with K8s IPAM
# K8s has its own IPAM for services. It also allocates the first IP from
# service subnet CIDR to Kubernetes apiserver.
# To deal with it we set gateway's IP to the last IP from subnet's
# IP range and Kuryr's K8s service handler will ignore services with
# gateway's IP.
local router_ip=$(_cidr_range "$service_cidr" | cut -f2)
die_if_not_set $LINENO router_ip \
"Failed to determine K8s service subnet router IP"
openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" subnet set \
--gateway "$router_ip" \
--no-allocation-pool \
$subnet_id \
|| die $LINENO "Failed to update K8s service subnet"
openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" \
router add subnet $ROUTER_ID $subnet_id \
|| die $LINENO "Failed to enable routing for K8s service subnet"
KURYR_K8S_SERVICE_SUBNET_ID=$subnet_id
}
function configure_neutron_defaults {
local project_id=$(get_or_create_project \
local project_id
local pod_subnet_id
local sg_ids
local service_subnet_id
local subnetpool_id
local router
# If a subnetpool is not passed, we get the one created in devstack's
# Neutron module
subnetpool_id=${KURYR_NEUTRON_DEFAULT_SUBNETPOOL_ID:-${SUBNETPOOL_V4_ID}}
router=${KURYR_NEUTRON_DEFAULT_ROUTER:-$Q_ROUTER_NAME}
project_id=$(get_or_create_project \
"$KURYR_NEUTRON_DEFAULT_PROJECT" default)
local pod_subnet_id=$(neutron subnet-show -c id -f value \
"$KURYR_NEUTRON_DEFAULT_POD_SUBNET")
local sg_ids=$(echo $(neutron security-group-list \
create_k8s_subnet "$project_id" \
"$KURYR_NEUTRON_DEFAULT_POD_NET" \
"$KURYR_NEUTRON_DEFAULT_POD_SUBNET" \
"$subnetpool_id" \
"$router"
pod_subnet_id="$(neutron subnet-show -c id -f value \
"${KURYR_NEUTRON_DEFAULT_POD_SUBNET}")"
sg_ids=$(echo $(neutron security-group-list \
--project-id "$project_id" -c id -f value) | tr ' ' ',')
create_k8s_service_subnet $project_id
local service_subnet_id=$KURYR_K8S_SERVICE_SUBNET_ID
create_k8s_subnet "$project_id" \
"$KURYR_NEUTRON_DEFAULT_SERVICE_NET" \
"$KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET" \
"$subnetpool_id" \
"$router"
service_subnet_id="$(neutron subnet-show -c id -f value \
"${KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET}")"
iniset "$KURYR_CONFIG" neutron_defaults project "$project_id"
iniset "$KURYR_CONFIG" neutron_defaults pod_subnet "$pod_subnet_id"
@@ -373,15 +342,10 @@ function run_k8s_api {
# Runs Hyperkube's Kubernetes API Server
wait_for "etcd" "${KURYR_ETCD_ADVERTISE_CLIENT_URL}/v2/machines"
KURYR_CONFIGURE_NEUTRON_DEFAULTS=$(trueorfalse True KURYR_CONFIGURE_NEUTRON_DEFAULTS)
if [ "$KURYR_CONFIGURE_NEUTRON_DEFAULTS" == "True" ]; then
cluster_ip_range="${KURYR_K8S_CLUSTER_IP_RANGE}"
else
cluster_ip_range=$(openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" \
subnet show "$KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET" \
-c cidr -f value)
fi
cluster_ip_range=$(openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" \
subnet show "$KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET" \
-c cidr -f value)
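# (cluster_ip_range, read from the service subnet above, is presumably what
#  gets passed to the apiserver as --service-cluster-ip-range further down)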
run_container kubernetes-api \
--net host \

@@ -10,9 +10,13 @@ KURYR_DOCKER_ENGINE_SOCKET_FILE=${KURYR_DOCKER_ENGINE_SOCKET_FILE:-/var/run/dock
# Neutron defaults
KURYR_CONFIGURE_NEUTRON_DEFAULTS=${KURYR_CONFIGURE_NEUTRON_DEFAULTS:-True}
KURYR_NEUTRON_DEFAULT_PROJECT=demo
KURYR_NEUTRON_DEFAULT_POD_SUBNET=${PRIVATE_SUBNET_NAME}
KURYR_NEUTRON_DEFAULT_PROJECT=${KURYR_NEUTRON_DEFAULT_PROJECT:-k8s}
KURYR_NEUTRON_DEFAULT_POD_NET=${KURYR_NEUTRON_DEFAULT_POD_NET:-k8s-pod-net}
KURYR_NEUTRON_DEFAULT_SERVICE_NET=${KURYR_NEUTRON_DEFAULT_SERVICE_NET:-k8s-service-net}
KURYR_NEUTRON_DEFAULT_POD_SUBNET=${KURYR_NEUTRON_DEFAULT_POD_SUBNET:-k8s-pod-subnet}
KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET=${KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET:-k8s-service-subnet}
KURYR_NEUTRON_DEFAULT_SUBNETPOOL_ID=${KURYR_NEUTRON_DEFAULT_SUBNETPOOL_ID:-}
KURYR_NEUTRON_DEFAULT_ROUTER=${KURYR_NEUTRON_DEFAULT_ROUTER:-}
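# Example local.conf (localrc) override for the defaults above; the
# subnetpool id and router name are placeholders:
#   KURYR_NEUTRON_DEFAULT_PROJECT=k8s
#   KURYR_NEUTRON_DEFAULT_SUBNETPOOL_ID=<existing subnetpool uuid>
#   KURYR_NEUTRON_DEFAULT_ROUTER=<existing router name>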
# Etcd
ETCD_PORT=${ETCD_PORT:-2379}
@@ -31,7 +35,6 @@ KURYR_HYPERKUBE_DATA_DIR=${KURYR_HYPERKUBE_DATA_DIR:-${DATA_DIR}/hyperkube}
KURYR_HYPERKUBE_BINARY=${KURYR_HYPERKUBE_BINARY:-/usr/local/bin/hyperkube}
# Kubernetes
KURYR_K8S_CLUSTER_IP_RANGE=${KURYR_K8S_CLUSTER_IP_RANGE:-10.20.0.0/24}
KURYR_K8S_API_PORT=${KURYR_K8S_API_PORT:-8080}
KURYR_K8S_API_URL=${KURYR_K8S_API_URL:-http://${HOST_IP}:${KURYR_K8S_API_PORT}}
KURYR_K8S_API_CERT=${KURYR_K8S_API_CERT:-}