Add devstack plugin and scripts

Change-Id: I99c43b9033ac92284c03686a22cf9952f8bf20a1
Lingxian Kong 2017-08-25 10:15:10 +12:00
parent e1a475708a
commit 3760a84a5c
41 changed files with 2321 additions and 2 deletions

141
devstack/plugin.sh Executable file

@ -0,0 +1,141 @@
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set -o xtrace
function install_qinling {
git_clone $QINLING_REPO $QINLING_DIR $QINLING_BRANCH
setup_develop $QINLING_DIR
}
function install_qinlingclient {
if use_library_from_git "python-qinlingclient"; then
git_clone $QINLINGCLIENT_REPO $QINLINGCLIENT_DIR $QINLINGCLIENT_BRANCH
setup_develop $QINLINGCLIENT_DIR
else
pip_install python-qinlingclient
fi
}
function create_qinling_accounts {
create_service_user "qinling" "admin"
local qinling_service=$(get_or_create_service "qinling" "function" "Function Service")
qinling_api_url="$QINLING_SERVICE_PROTOCOL://$QINLING_SERVICE_HOST:$QINLING_SERVICE_PORT/v1"
get_or_create_endpoint $qinling_service \
"$REGION_NAME" \
"$qinling_api_url" \
"$qinling_api_url" \
"$qinling_api_url"
# Add the 'service' role to the 'qinling' user on the 'demo' project
get_or_add_user_project_role "service" "qinling" "demo"
}
function mkdir_chown_stack {
if [[ ! -d "$1" ]]; then
sudo mkdir -p "$1"
fi
sudo chown $STACK_USER "$1"
}
function configure_qinling {
mkdir_chown_stack "$QINLING_AUTH_CACHE_DIR"
rm -f "$QINLING_AUTH_CACHE_DIR"/*
mkdir_chown_stack "$QINLING_CONF_DIR"
rm -f "$QINLING_CONF_DIR"/*
mkdir_chown_stack "$QINLING_FUNCTION_STORAGE_DIR"
rm -f "$QINLING_FUNCTION_STORAGE_DIR"/*
# Generate Qinling configuration file and configure common parameters.
oslo-config-generator --config-file $QINLING_DIR/tools/config/config-generator.qinling.conf --output-file $QINLING_CONF_FILE
iniset $QINLING_CONF_FILE DEFAULT debug $QINLING_DEBUG
iniset $QINLING_CONF_FILE DEFAULT server all
iniset $QINLING_CONF_FILE storage file_system_dir $QINLING_FUNCTION_STORAGE_DIR
# Setup keystone_authtoken section
configure_auth_token_middleware $QINLING_CONF_FILE qinling $QINLING_AUTH_CACHE_DIR
iniset $QINLING_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_AUTH_URI_V3
# Setup RabbitMQ credentials
iniset_rpc_backend qinling $QINLING_CONF_FILE
# Configure the database.
iniset $QINLING_CONF_FILE database connection `database_connection_url qinling`
}
function init_qinling {
# (re)create qinling database
recreate_database qinling utf8
$QINLING_BIN_DIR/qinling-db-manage --config-file $QINLING_CONF_FILE upgrade head
}
function start_qinling {
run_process qinling-engine "$QINLING_BIN_DIR/qinling-server --server engine --config-file $QINLING_CONF_FILE"
run_process qinling-api "$QINLING_BIN_DIR/qinling-server --server api --config-file $QINLING_CONF_FILE"
}
function stop_qinling {
local serv
for serv in qinling-api qinling-engine; do
stop_process $serv
done
}
function cleanup_qinling {
sudo rm -rf $QINLING_AUTH_CACHE_DIR
sudo rm -rf $QINLING_CONF_DIR
}
# check for service enabled
if is_service_enabled qinling; then
if [[ "$1" == "stack" && "$2" == "install" ]]; then
# Perform installation of service source
echo_summary "Installing qinling"
install_qinling
echo_summary "Installing qinlingclient"
install_qinlingclient
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
# Configure after the other layer 1 and 2 services have been configured
echo_summary "Configuring qinling"
if is_service_enabled key; then
create_qinling_accounts
fi
configure_qinling
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
# Initialize and start the qinling service
echo_summary "Initializing qinling"
init_qinling
start_qinling
fi
if [[ "$1" == "unstack" ]]; then
echo_summary "Shutting down qingling"
stop_qingling
fi
if [[ "$1" == "clean" ]]; then
echo_summary "Cleaning qingling"
cleanup_qingling
fi
fi
# Restore xtrace
$XTRACE
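For orientation, the sections that configure_qinling writes into qinling.conf look roughly like the sketch below; the concrete values (debug flag, storage path, Keystone URL, database connection string) depend on the local DevStack environment and are shown only as illustrative placeholders, not as output of this change.

# Illustrative qinling.conf produced by configure_qinling (placeholder values)
[DEFAULT]
debug = True
server = all

[storage]
file_system_dir = /opt/qinling/function/packages

[keystone_authtoken]
auth_uri = http://127.0.0.1/identity/v3

[database]
connection = mysql+pymysql://root:secret@127.0.0.1/qinling?charset=utf8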

23
devstack/settings Normal file

@ -0,0 +1,23 @@
# Devstack settings
enable_service qinling qinling-api qinling-engine
# Set up default repos
QINLING_DIR=$DEST/qinling
QINLING_BIN_DIR=$(get_python_exec_prefix)
QINLING_REPO=${QINLING_REPO:-${GIT_BASE}/openstack/qinling.git}
QINLING_BRANCH=${QINLING_BRANCH:-master}
QINLINGCLIENT_DIR=$DEST/python-qinlingclient
QINLINGCLIENT_REPO=${QINLINGCLIENT_REPO:-${GIT_BASE}/openstack/python-qinlingclient.git}
QINLINGCLIENT_BRANCH=${QINLINGCLIENT_BRANCH:-master}
QINLING_SERVICE_HOST=${QINLING_SERVICE_HOST:-$SERVICE_HOST}
QINLING_SERVICE_PORT=${QINLING_SERVICE_PORT:-7070}
QINLING_SERVICE_PROTOCOL=${QINLING_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
QINLING_DEBUG=${QINLING_DEBUG:-True}
QINLING_CONF_DIR=${QINLING_CONF_DIR:-/etc/qinling}
QINLING_CONF_FILE=${QINLING_CONF_DIR}/qinling.conf
QINLING_AUTH_CACHE_DIR=${QINLING_AUTH_CACHE_DIR:-/var/cache/qinling}
QINLING_FUNCTION_STORAGE_DIR=${QINLING_FUNCTION_STORAGE_DIR:-/opt/qinling/function/packages}
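With these defaults in place, enabling the plugin from a DevStack local.conf would look roughly like the sketch below; this assumes the standard enable_plugin mechanism, and the repository URL and overrides are illustrative rather than part of this change.

# local.conf sketch (illustrative)
[[local|localrc]]
enable_plugin qinling https://git.openstack.org/openstack/qinling
# Optional overrides of the variables defined above
QINLING_SERVICE_PORT=7070
QINLING_DEBUG=True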


@ -130,6 +130,7 @@ kubernetes_opts = [
),
cfg.IPOpt(
'qinling_service_address',
default='127.0.0.1',
help='Qinling API service ip address.'
),
cfg.IntOpt(


@ -64,8 +64,14 @@ class NotAllowedException(QinlingException):
message = "Operation not allowed"
class ConflictException(QinlingException):
http_code = 409
message = ("The request could not be completed due to a conflict with the "
"current state of the target resource")
class RuntimeNotAvailableException(QinlingException):
-http_code = 403
+http_code = 409
message = "Runtime not available"


@ -113,7 +113,7 @@ class TestRuntimeController(base.APITest):
expect_errors=True
)
-self.assertEqual(403, resp.status_int)
+self.assertEqual(409, resp.status_int)
@mock.patch('qinling.rpc.EngineClient.update_runtime')
def test_put_image(self, mock_update_runtime):


@ -0,0 +1,6 @@
===============================================
Tempest Integration of Qinling
===============================================
This directory contains Tempest tests to cover the Qinling project.


@ -0,0 +1,17 @@
# Copyright 2015
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
if __name__ == '__main__':
pass


@ -0,0 +1,35 @@
# Copyright 2015
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from tempest.test_discover import plugins
class QinlingTempestPlugin(plugins.TempestPlugin):
def load_tests(self):
base_path = os.path.split(os.path.dirname(
os.path.abspath(__file__))
)[0]
test_dir = "qinling_tempest_plugin/tests"
full_test_dir = os.path.join(base_path, test_dir)
return full_test_dir, base_path
def register_opts(self, conf):
pass
def get_opt_lists(self):
pass


@ -0,0 +1,34 @@
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This script is executed inside post_test_hook function in devstack gate.
set -ex
sudo chmod -R a+rw /opt/stack/
(cd $BASE/new/tempest/; sudo virtualenv .venv)
source $BASE/new/tempest/.venv/bin/activate
(cd $BASE/new/tempest/; sudo pip install -r requirements.txt -r test-requirements.txt)
sudo pip install nose
sudo pip install numpy
sudo cp $BASE/new/tempest/etc/logging.conf.sample $BASE/new/tempest/etc/logging.conf
(cd $BASE/new/qinling/; sudo pip install -r requirements.txt -r test-requirements.txt)
(cd $BASE/new/qinling/; sudo python setup.py install)
export TOX_TESTENV_PASSENV=ZUUL_PROJECT
(cd $BASE/new/tempest/; sudo -E testr init)
(cd $BASE/new/tempest/; sudo -E tox -eall-plugin qinling)


@ -0,0 +1,24 @@
#!/usr/bin/env bash
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This script is executed inside pre_test_hook function in devstack gate.
set -x
export localconf=$BASE/new/devstack/local.conf
export QINLING_CONF=/etc/qinling/qinling.conf
echo -e '[[post-config|$QINLING_CONF]]\n[kubernetes]\n' >> $localconf
echo -e 'qinling_service_address=${DEFAULT_HOST_IP}\n' >> $localconf
echo -e 'kube_host=${K8S_SERVICE_IP}:8001\n' >> $localconf
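Because the echo commands use single quotes, the variable references are written into local.conf literally; the appended section has roughly the shape sketched below, and how $QINLING_CONF, ${DEFAULT_HOST_IP} and ${K8S_SERVICE_IP} are resolved afterwards is left to the DevStack/devstack-gate machinery rather than to this hook.

[[post-config|$QINLING_CONF]]
[kubernetes]
qinling_service_address=${DEFAULT_HOST_IP}
kube_host=${K8S_SERVICE_IP}:8001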


@ -36,6 +36,9 @@ qinling.orchestrator =
oslo.config.opts =
qinling.config = qinling.config:list_opts
tempest.test_plugins =
qinling_test = qinling_tempest_plugin.plugin:QinlingTempestPlugin
[build_sphinx]
all-files = 1
warning-is-error = 1

97
tools/gate/dump_logs.sh Executable file

@ -0,0 +1,97 @@
#!/bin/bash
set +xe
# If we can't find kubectl, fail immediately: the failure most likely happened before
# Kubernetes was set up (e.g. the whitespace linter failed), so there are no logs to collect.
if ! type "kubectl" &> /dev/null; then
exit $1
fi
echo "Capturing logs from environment."
mkdir -p ${LOGS_DIR}/k8s/etc
sudo cp -a /etc/kubernetes ${LOGS_DIR}/k8s/etc
sudo chmod 777 --recursive ${LOGS_DIR}/*
mkdir -p ${LOGS_DIR}/k8s
for OBJECT_TYPE in nodes \
namespace \
storageclass; do
kubectl get ${OBJECT_TYPE} -o yaml > ${LOGS_DIR}/k8s/${OBJECT_TYPE}.yaml
done
kubectl describe nodes > ${LOGS_DIR}/k8s/nodes.txt
for OBJECT_TYPE in svc \
pods \
jobs \
deployments \
daemonsets \
statefulsets \
configmaps \
secrets; do
kubectl get --all-namespaces ${OBJECT_TYPE} -o yaml > \
${LOGS_DIR}/k8s/${OBJECT_TYPE}.yaml
done
mkdir -p ${LOGS_DIR}/k8s/pods
kubectl get pods -a --all-namespaces -o json | jq -r \
'.items[].metadata | .namespace + " " + .name' | while read line; do
NAMESPACE=$(echo $line | awk '{print $1}')
NAME=$(echo $line | awk '{print $2}')
kubectl get --namespace $NAMESPACE pod $NAME -o json | jq -r \
'.spec.containers[].name' | while read line; do
CONTAINER=$(echo $line | awk '{print $1}')
kubectl logs $NAME --namespace $NAMESPACE -c $CONTAINER > \
${LOGS_DIR}/k8s/pods/$NAMESPACE-$NAME-$CONTAINER.txt
done
done
mkdir -p ${LOGS_DIR}/k8s/svc
kubectl get svc -o json --all-namespaces | jq -r \
'.items[].metadata | .namespace + " " + .name' | while read line; do
NAMESPACE=$(echo $line | awk '{print $1}')
NAME=$(echo $line | awk '{print $2}')
kubectl describe svc $NAME --namespace $NAMESPACE > \
${LOGS_DIR}/k8s/svc/$NAMESPACE-$NAME.txt
done
mkdir -p ${LOGS_DIR}/k8s/pvc
kubectl get pvc -o json --all-namespaces | jq -r \
'.items[].metadata | .namespace + " " + .name' | while read line; do
NAMESPACE=$(echo $line | awk '{print $1}')
NAME=$(echo $line | awk '{print $2}')
kubectl describe pvc $NAME --namespace $NAMESPACE > \
${LOGS_DIR}/k8s/pvc/$NAMESPACE-$NAME.txt
done
mkdir -p ${LOGS_DIR}/k8s/rbac
for OBJECT_TYPE in clusterroles \
roles \
clusterrolebindings \
rolebindings; do
kubectl get ${OBJECT_TYPE} -o yaml > ${LOGS_DIR}/k8s/rbac/${OBJECT_TYPE}.yaml
done
mkdir -p ${LOGS_DIR}/k8s/descriptions
for NAMESPACE in $(kubectl get namespaces -o name | awk -F '/' '{ print $NF }') ; do
for OBJECT in $(kubectl get all --show-all -n $NAMESPACE -o name) ; do
OBJECT_TYPE=$(echo $OBJECT | awk -F '/' '{ print $1 }')
OBJECT_NAME=$(echo $OBJECT | awk -F '/' '{ print $2 }')
mkdir -p ${LOGS_DIR}/k8s/descriptions/${NAMESPACE}/${OBJECT_TYPE}
kubectl describe -n $NAMESPACE $OBJECT > ${LOGS_DIR}/k8s/descriptions/${NAMESPACE}/$OBJECT_TYPE/$OBJECT_NAME.txt
done
done
NODE_NAME=$(hostname)
mkdir -p ${LOGS_DIR}/nodes/${NODE_NAME}
echo "${NODE_NAME}" > ${LOGS_DIR}/nodes/master.txt
sudo docker logs kubelet 2> ${LOGS_DIR}/nodes/${NODE_NAME}/kubelet.txt
sudo docker logs kubeadm-aio > ${LOGS_DIR}/nodes/${NODE_NAME}/kubeadm-aio.txt 2>&1
sudo docker images --digests --no-trunc --all > ${LOGS_DIR}/nodes/${NODE_NAME}/images.txt
sudo du -h --max-depth=1 /var/lib/docker | sort -hr > ${LOGS_DIR}/nodes/${NODE_NAME}/docker-size.txt
sudo iptables-save > ${LOGS_DIR}/nodes/${NODE_NAME}/iptables.txt
sudo ip a > ${LOGS_DIR}/nodes/${NODE_NAME}/ip.txt
sudo route -n > ${LOGS_DIR}/nodes/${NODE_NAME}/routes.txt
sudo arp -a > ${LOGS_DIR}/nodes/${NODE_NAME}/arp.txt
cat /etc/resolv.conf > ${LOGS_DIR}/nodes/${NODE_NAME}/resolv.conf
sudo lshw > ${LOGS_DIR}/nodes/${NODE_NAME}/hardware.txt
exit $1

141
tools/gate/funcs/common.sh Normal file

@ -0,0 +1,141 @@
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
function base_install {
if [ "x$HOST_OS" == "xubuntu" ]; then
sudo apt-get update -y
sudo apt-get install -y --no-install-recommends \
iproute2 \
iptables \
ipcalc \
nmap \
lshw
elif [ "x$HOST_OS" == "xcentos" ]; then
sudo yum install -y \
epel-release
# ipcalc is in the initscripts package
sudo yum install -y \
iproute \
iptables \
initscripts \
nmap \
lshw
elif [ "x$HOST_OS" == "xfedora" ]; then
sudo dnf install -y \
iproute \
iptables \
ipcalc \
nmap \
lshw
fi
}
function loopback_support_install {
if [ "x$HOST_OS" == "xubuntu" ]; then
sudo apt-get update -y
sudo apt-get install -y --no-install-recommends \
targetcli \
open-iscsi \
lshw
sudo systemctl restart iscsid
elif [ "x$HOST_OS" == "xcentos" ]; then
sudo yum install -y \
targetcli \
iscsi-initiator-utils \
lshw
elif [ "x$HOST_OS" == "xfedora" ]; then
sudo dnf install -y \
targetcli \
iscsi-initiator-utils \
lshw
fi
}
function loopback_setup {
sudo mkdir -p ${LOOPBACK_DIR}
for ((LOOPBACK_DEV=1;LOOPBACK_DEV<=${LOOPBACK_DEVS};LOOPBACK_DEV++)); do
if [ "x$HOST_OS" == "xubuntu" ]; then
sudo targetcli backstores/fileio create loopback-${LOOPBACK_DEV} ${LOOPBACK_DIR}/fileio-${LOOPBACK_DEV} ${LOOPBACK_SIZE}
else
sudo targetcli backstores/fileio create loopback-${LOOPBACK_DEV} ${LOOPBACK_DIR}/fileio-${LOOPBACK_DEV} ${LOOPBACK_SIZE} write_back=false
fi
done
sudo targetcli iscsi/ create iqn.2016-01.com.example:target
if ! [ "x$HOST_OS" == "xubuntu" ]; then
sudo targetcli iscsi/iqn.2016-01.com.example:target/tpg1/portals delete 0.0.0.0 3260
sudo targetcli iscsi/iqn.2016-01.com.example:target/tpg1/portals create 127.0.0.1 3260
else
#NOTE (Portdirect): Frustratingly it appears that Ubuntu's targetcli won't
# let you bind to localhost.
sudo targetcli iscsi/iqn.2016-01.com.example:target/tpg1/portals create 0.0.0.0 3260
fi
for ((LOOPBACK_DEV=1;LOOPBACK_DEV<=${LOOPBACK_DEVS};LOOPBACK_DEV++)); do
sudo targetcli iscsi/iqn.2016-01.com.example:target/tpg1/luns/ create /backstores/fileio/loopback-${LOOPBACK_DEV}
done
sudo targetcli iscsi/iqn.2016-01.com.example:target/tpg1/acls/ create $(sudo cat /etc/iscsi/initiatorname.iscsi | awk -F '=' '/^InitiatorName/ { print $NF}')
if [ "x$HOST_OS" == "xubuntu" ]; then
sudo targetcli iscsi/iqn.2016-01.com.example:target/tpg1 set attribute authentication=0
fi
sudo iscsiadm --mode discovery --type sendtargets --portal 127.0.0.1
sudo iscsiadm -m node -T iqn.2016-01.com.example:target -p 127.0.0.1:3260 -l
# Display disks
sudo lshw -class disk
}
function ceph_support_install {
if [ "x$HOST_OS" == "xubuntu" ]; then
sudo apt-get update -y
sudo apt-get install -y --no-install-recommends -qq \
ceph-common
elif [ "x$HOST_OS" == "xcentos" ]; then
sudo yum install -y \
ceph
elif [ "x$HOST_OS" == "xfedora" ]; then
sudo dnf install -y \
ceph
fi
sudo modprobe rbd
}
function nfs_support_install {
if [ "x$HOST_OS" == "xubuntu" ]; then
sudo apt-get update -y
sudo apt-get install -y --no-install-recommends -qq \
nfs-common
elif [ "x$HOST_OS" == "xcentos" ]; then
sudo yum install -y \
nfs-utils
elif [ "x$HOST_OS" == "xfedora" ]; then
sudo dnf install -y \
nfs-utils
fi
}
function gate_base_setup {
# Install base requirements
base_install
# Install and setup iscsi loopback devices if required.
if [ "x$LOOPBACK_CREATE" == "xtrue" ]; then
loopback_support_install
loopback_setup
fi
# Install support packages for pvc backends
if [ "x$PVC_BACKEND" == "xceph" ]; then
ceph_support_install
elif [ "x$PVC_BACKEND" == "xnfs" ]; then
nfs_support_install
fi
}

150
tools/gate/funcs/kube.sh Executable file

@ -0,0 +1,150 @@
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
function kube_wait_for_pods {
# From Kolla-Kubernetes, original authors Kevin Fox & Serguei Bezverkhi
# Default wait timeout is 180 seconds
set +x
end=$(date +%s)
if ! [ -z $2 ]; then
end=$((end + $2))
else
end=$((end + 180))
fi
while true; do
kubectl get pods --namespace=$1 -o json | jq -r \
'.items[].status.phase' | grep Pending > /dev/null && \
PENDING=True || PENDING=False
query='.items[]|select(.status.phase=="Running")'
query="$query|.status.containerStatuses[].ready"
kubectl get pods --namespace=$1 -o json | jq -r "$query" | \
grep false > /dev/null && READY="False" || READY="True"
kubectl get jobs -o json --namespace=$1 | jq -r \
'.items[] | .spec.completions == .status.succeeded' | \
grep false > /dev/null && JOBR="False" || JOBR="True"
[ $PENDING == "False" -a $READY == "True" -a $JOBR == "True" ] && \
break || true
sleep 1
now=$(date +%s)
[ $now -gt $end ] && echo containers failed to start. && \
kubectl get pods --namespace $1 -o wide && exit -1
done
set -x
}
function kube_wait_for_nodes {
# Default wait timeout is 180 seconds
set +x
end=$(date +%s)
if ! [ -z $2 ]; then
end=$((end + $2))
else
end=$((end + 180))
fi
while true; do
NUMBER_OF_NODES_EXPECTED=$1
NUMBER_OF_NODES=$(kubectl get nodes --no-headers -o name | wc -l)
[ $NUMBER_OF_NODES -eq $NUMBER_OF_NODES_EXPECTED ] && \
NODES_ONLINE="True" || NODES_ONLINE="False"
while read SUB_NODE; do
echo $SUB_NODE | grep -q ^Ready && NODES_READY="True" || NODES_READY="False"
done < <(kubectl get nodes --no-headers | awk '{ print $2 }')
[ $NODES_ONLINE == "True" -a $NODES_READY == "True" ] && \
break || true
sleep 5
now=$(date +%s)
[ $now -gt $end ] && echo "Nodes Failed to be ready in time." && \
kubectl get nodes -o wide && exit -1
done
set -x
}
function kubeadm_aio_reqs_install {
if [ "x$HOST_OS" == "xubuntu" ]; then
sudo apt-get update -y
sudo apt-get install -y --no-install-recommends -qq \
docker.io \
jq
elif [ "x$HOST_OS" == "xcentos" ]; then
sudo yum install -y \
epel-release
sudo yum install -y \
docker-latest \
jq
sudo cp -f /usr/lib/systemd/system/docker-latest.service /etc/systemd/system/docker.service
sudo sed -i "s|/var/lib/docker-latest|/var/lib/docker|g" /etc/systemd/system/docker.service
sudo sed -i 's/^OPTIONS/#OPTIONS/g' /etc/sysconfig/docker-latest
sudo sed -i "s|^MountFlags=slave|MountFlags=share|g" /etc/systemd/system/docker.service
sudo sed -i "/--seccomp-profile/,+1 d" /etc/systemd/system/docker.service
echo "DOCKER_STORAGE_OPTIONS=--storage-driver=overlay" | sudo tee /etc/sysconfig/docker-latest-storage
sudo setenforce 0 || true
sudo systemctl daemon-reload
sudo systemctl restart docker
elif [ "x$HOST_OS" == "xfedora" ]; then
sudo dnf install -y \
docker-latest \
jq
sudo cp -f /usr/lib/systemd/system/docker-latest.service /etc/systemd/system/docker.service
sudo sed -i "s|/var/lib/docker-latest|/var/lib/docker|g" /etc/systemd/system/docker.service
echo "DOCKER_STORAGE_OPTIONS=--storage-driver=overlay2" | sudo tee /etc/sysconfig/docker-latest-storage
sudo setenforce 0 || true
sudo systemctl daemon-reload
sudo systemctl restart docker
fi
if CURRENT_KUBECTL_LOC=$(type -p kubectl); then
CURRENT_KUBECTL_VERSION=$(${CURRENT_KUBECTL_LOC} version --client --short | awk '{ print $NF }' | awk -F '+' '{ print $1 }')
fi
[ "x$KUBE_VERSION" == "x$CURRENT_KUBECTL_VERSION" ] || ( \
TMP_DIR=$(mktemp -d)
curl -sSL https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/bin/linux/amd64/kubectl -o ${TMP_DIR}/kubectl
chmod +x ${TMP_DIR}/kubectl
sudo mv ${TMP_DIR}/kubectl /usr/local/bin/kubectl
rm -rf ${TMP_DIR} )
}
function kubeadm_aio_build {
sudo docker build --pull -t ${KUBEADM_IMAGE} tools/kubeadm-aio
}
function kubeadm_aio_launch {
${WORK_DIR}/tools/kubeadm-aio/kubeadm-aio-launcher.sh
mkdir -p ${HOME}/.kube
cat ${KUBECONFIG} > ${HOME}/.kube/config
kube_wait_for_pods kube-system 240
kube_wait_for_pods default 240
}
function kubeadm_aio_clean {
sudo docker rm -f kubeadm-aio || true
sudo docker rm -f kubelet || true
sudo docker ps -aq | xargs -r -l1 -P16 sudo docker rm -f
sudo rm -rfv \
/etc/cni/net.d \
/etc/kubernetes \
/var/lib/etcd \
/var/etcd \
/var/lib/kubelet/* \
/run/openvswitch \
/var/lib/nova \
${HOME}/.kubeadm-aio/admin.conf \
/var/lib/openstack-helm \
/var/lib/nfs-provisioner || true
}
function ceph_kube_controller_manager_replace {
sudo docker pull ${CEPH_KUBE_CONTROLLER_MANAGER_IMAGE}
sudo docker tag ${CEPH_KUBE_CONTROLLER_MANAGER_IMAGE} ${BASE_KUBE_CONTROLLER_MANAGER_IMAGE}
}

87
tools/gate/funcs/network.sh Executable file

@ -0,0 +1,87 @@
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
function net_default_iface {
sudo ip -4 route list 0/0 | awk '{ print $5; exit }'
}
function net_default_host_addr {
sudo ip addr | awk "/inet / && /$(net_default_iface)/{print \$2; exit }"
}
function net_default_host_ip {
echo $(net_default_host_addr) | awk -F '/' '{ print $1; exit }'
}
function net_resolv_pre_kube {
sudo cp -f /etc/resolv.conf /etc/resolv-pre-kube.conf
sudo rm -f /etc/resolv.conf
cat << EOF | sudo tee /etc/resolv.conf
nameserver ${UPSTREAM_DNS}
EOF
}
function net_resolv_post_kube {
sudo cp -f /etc/resolv-pre-kube.conf /etc/resolv.conf
}
function net_hosts_pre_kube {
sudo cp -f /etc/hosts /etc/hosts-pre-kube
sudo sed -i "/$(hostname)/d" /etc/hosts
# The var will be used in qinling pre_test_hook.sh
export DEFAULT_HOST_IP=$(net_default_host_ip)
echo "${DEFAULT_HOST_IP} $(hostname)" | sudo tee -a /etc/hosts
}
function net_hosts_post_kube {
sudo cp -f /etc/hosts-pre-kube /etc/hosts
}
function find_subnet_range {
if [ "x$HOST_OS" == "xubuntu" ]; then
ipcalc $(net_default_host_addr) | awk '/^Network/ { print $2 }'
else
eval $(ipcalc --network --prefix $(net_default_host_addr))
echo "$NETWORK/$PREFIX"
fi
}
function find_multi_subnet_range {
: ${PRIMARY_NODE_IP:="$(cat /etc/nodepool/primary_node | tail -1)"}
: ${SUB_NODE_IPS:="$(cat /etc/nodepool/sub_nodes)"}
NODE_IPS="${PRIMARY_NODE_IP} ${SUB_NODE_IPS}"
NODE_IP_UNSORTED=$(mktemp --suffix=.txt)
for NODE_IP in $NODE_IPS; do
echo $NODE_IP >> ${NODE_IP_UNSORTED}
done
NODE_IP_SORTED=$(mktemp --suffix=.txt)
sort -V ${NODE_IP_UNSORTED} > ${NODE_IP_SORTED}
rm -f ${NODE_IP_UNSORTED}
FIRST_IP_SUBNET=$(ipcalc "$(head -n 1 ${NODE_IP_SORTED})/24" | awk '/^Network/ { print $2 }')
LAST_IP_SUBNET=$(ipcalc "$(tail -n 1 ${NODE_IP_SORTED})/24" | awk '/^Network/ { print $2 }')
rm -f ${NODE_IP_SORTED}
function ip_diff {
echo $(($(echo $LAST_IP_SUBNET | awk -F '.' "{ print \$$1}") - $(echo $FIRST_IP_SUBNET | awk -F '.' "{ print \$$1}")))
}
for X in {1..4}; do
if ! [ "$(ip_diff $X)" -eq "0" ]; then
SUBMASK=$(((($X - 1 )) * 8))
break
elif [ $X -eq "4" ]; then
SUBMASK=24
fi
done
echo ${FIRST_IP_SUBNET%/*}/${SUBMASK}
}

25
tools/gate/kubeadm_aio.sh Executable file

@ -0,0 +1,25 @@
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
: ${WORK_DIR:="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"}
source ${WORK_DIR}/tools/gate/vars.sh
source ${WORK_DIR}/tools/gate/funcs/network.sh
source ${WORK_DIR}/tools/gate/funcs/kube.sh
kubeadm_aio_reqs_install
# Re-use the docker image pre-built by openstack-helm team.
sudo docker pull ${KUBEADM_IMAGE} || kubeadm_aio_build
kubeadm_aio_launch

38
tools/gate/setup_gate.sh Executable file

@ -0,0 +1,38 @@
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
export WORK_DIR=$(pwd)
source ${WORK_DIR}/tools/gate/vars.sh
source ${WORK_DIR}/tools/gate/funcs/common.sh
source ${WORK_DIR}/tools/gate/funcs/network.sh
# Setup the logging location: by default use the working dir as the root.
rm -rf ${LOGS_DIR} || true
mkdir -p ${LOGS_DIR}
function dump_logs () {
${WORK_DIR}/tools/gate/dump_logs.sh
}
trap 'dump_logs "$?"' ERR
# Do the basic node setup for running the gate
gate_base_setup
# We set up the pre-Kubernetes network configuration here, to enable cluster restarts on
# development machines
net_resolv_pre_kube
net_hosts_pre_kube
# Setup the K8s Cluster
bash ${WORK_DIR}/tools/gate/kubeadm_aio.sh

50
tools/gate/vars.sh Executable file

@ -0,0 +1,50 @@
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Set work dir if not already done
: ${WORK_DIR:="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"}
# Set logs directory
export LOGS_DIR=${LOGS_DIR:-"${WORK_DIR}/logs"}
# Get Host OS
source /etc/os-release
export HOST_OS=${HOST_OS:="${ID}"}
# Set versions of K8s to use
export KUBE_VERSION=${KUBE_VERSION:-"v1.6.8"}
# Set K8s-AIO options
export KUBECONFIG=${KUBECONFIG:="${HOME}/.kubeadm-aio/admin.conf"}
export KUBEADM_IMAGE=${KUBEADM_IMAGE:="openstackhelm/kubeadm-aio:${KUBE_VERSION}"}
# Set K8s network options
export CNI_POD_CIDR=${CNI_POD_CIDR:="192.168.0.0/16"}
export KUBE_CNI=${KUBE_CNI:="calico"}
# Set PVC Backend
export PVC_BACKEND=${PVC_BACKEND:-"ceph"}
# Set Upstream DNS
export UPSTREAM_DNS=${UPSTREAM_DNS:-"8.8.8.8"}
# Set gate script timeouts
export SERVICE_LAUNCH_TIMEOUT=${SERVICE_LAUNCH_TIMEOUT:="600"}
export SERVICE_TEST_TIMEOUT=${SERVICE_TEST_TIMEOUT:="600"}
# Setup Loopback device options
export LOOPBACK_CREATE=${LOOPBACK_CREATE:="false"}
export LOOPBACK_DEVS=${LOOPBACK_DEVS:="3"}
export LOOPBACK_SIZE=${LOOPBACK_SIZE:="500M"}
export LOOPBACK_DIR=${LOOPBACK_DIR:="/var/lib/iscsi-loopback"}


@ -0,0 +1,88 @@
FROM ubuntu:16.04
MAINTAINER pete.birley@att.com
ENV KUBE_VERSION=v1.6.8 \
CNI_VERSION=v0.6.0-rc2 \
container="docker" \
DEBIAN_FRONTEND="noninteractive"
RUN set -x \
&& TMP_DIR=$(mktemp --directory) \
&& cd ${TMP_DIR} \
&& apt-get update \
&& apt-get install -y --no-install-recommends \
apt-transport-https \
ca-certificates \
curl \
dbus \
make \
git \
vim \
jq \
# Add Kubernetes repo
&& curl -sSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - \
&& echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list \
&& apt-get update \
&& apt-get install -y --no-install-recommends \
docker.io \
iptables \
kubectl \
kubelet \
kubernetes-cni \
# Install Kubeadm without running postinstall script as it expects systemd to be running.
&& apt-get download kubeadm \
&& dpkg --unpack kubeadm*.deb \
&& mv /var/lib/dpkg/info/kubeadm.postinst /opt/kubeadm.postinst \
&& dpkg --configure kubeadm \
&& apt-get install -yf kubeadm \
&& mkdir -p /etc/kubernetes/manifests \
# Install kubectl:
&& curl -sSL https://dl.k8s.io/${KUBE_VERSION}/kubernetes-client-linux-amd64.tar.gz | tar -zxv --strip-components=1 \
&& mv ${TMP_DIR}/client/bin/kubectl /usr/bin/kubectl \
&& chmod +x /usr/bin/kubectl \
# Install kubelet & kubeadm binaries:
# (portdirect) We do things in this weird way to let us use the deps and systemd
# units from the packages in the .deb repo.
&& curl -sSL https://dl.k8s.io/${KUBE_VERSION}/kubernetes-server-linux-amd64.tar.gz | tar -zxv --strip-components=1 \
&& mv ${TMP_DIR}/server/bin/kubelet /usr/bin/kubelet \
&& chmod +x /usr/bin/kubelet \
&& mv ${TMP_DIR}/server/bin/kubeadm /usr/bin/kubeadm \
&& chmod +x /usr/bin/kubeadm \
# Install CNI:
&& CNI_BIN_DIR=/opt/cni/bin \
&& mkdir -p ${CNI_BIN_DIR} \
&& cd ${CNI_BIN_DIR} \
&& curl -sSL https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-amd64-$CNI_VERSION.tgz | tar -zxv --strip-components=1 \
&& cd ${TMP_DIR} \
# Move kubelet binary as we will run containerised
&& mv /usr/bin/kubelet /usr/bin/kubelet-real \
# Install utils for PVC provisioners
&& apt-get install -y --no-install-recommends \
nfs-common \
ceph-common \
kmod \
# Tweak Systemd units and targets for running in a container
&& find /lib/systemd/system/sysinit.target.wants/ ! -name 'systemd-tmpfiles-setup.service' -type l -exec rm -fv {} + \
&& rm -fv \
/lib/systemd/system/multi-user.target.wants/* \
/etc/systemd/system/*.wants/* \
/lib/systemd/system/local-fs.target.wants/* \
/lib/systemd/system/sockets.target.wants/*udev* \
/lib/systemd/system/sockets.target.wants/*initctl* \
/lib/systemd/system/basic.target.wants/* \
# Clean up apt cache
&& rm -rf /var/lib/apt/lists/* \
# Clean up tmp dir
&& cd / \
&& rm -rf ${TMP_DIR}
# Load assets into place, setup startup target & units
COPY ./assets/ /
RUN set -x \
&& ln -s /usr/lib/systemd/system/container-up.target /etc/systemd/system/default.target \
&& mkdir -p /etc/systemd/system/container-up.target.wants \
&& ln -s /usr/lib/systemd/system/kubeadm-aio.service /etc/systemd/system/container-up.target.wants/kubeadm-aio.service
VOLUME /sys/fs/cgroup
CMD /kubeadm-aio


@ -0,0 +1,110 @@
Kubeadm AIO Container
=====================
This container builds a small AIO Kubeadm based Kubernetes deployment
for Development and Gating use.
Instructions
------------
OS Specific Host setup:
~~~~~~~~~~~~~~~~~~~~~~~
Ubuntu:
^^^^^^^
From a freshly provisioned Ubuntu 16.04 LTS host run:
.. code:: bash
sudo apt-get update -y
sudo apt-get install -y \
docker.io \
nfs-common \
git \
make
OS Independent Host setup:
~~~~~~~~~~~~~~~~~~~~~~~~~~
You should install the ``kubectl`` and ``helm`` binaries:
.. code:: bash
KUBE_VERSION=v1.6.8
HELM_VERSION=v2.5.1
TMP_DIR=$(mktemp -d)
curl -sSL https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/bin/linux/amd64/kubectl -o ${TMP_DIR}/kubectl
chmod +x ${TMP_DIR}/kubectl
sudo mv ${TMP_DIR}/kubectl /usr/local/bin/kubectl
curl -sSL https://storage.googleapis.com/kubernetes-helm/helm-${HELM_VERSION}-linux-amd64.tar.gz | tar -zxv --strip-components=1 -C ${TMP_DIR}
sudo mv ${TMP_DIR}/helm /usr/local/bin/helm
rm -rf ${TMP_DIR}
And clone the OpenStack-Helm repo:
.. code:: bash
git clone https://git.openstack.org/openstack/openstack-helm
Build the AIO environment (optional)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A known good image is published to dockerhub on a fairly regular basis, but if
you wish to build your own image, from the root directory of the OpenStack-Helm
repo run:
.. code:: bash
export KUBEADM_IMAGE=openstackhelm/kubeadm-aio:v1.6.8
sudo docker build --pull -t ${KUBEADM_IMAGE} tools/kubeadm-aio
Deploy the AIO environment
~~~~~~~~~~~~~~~~~~~~~~~~~~
To launch the environment run:
.. code:: bash
export KUBEADM_IMAGE=openstackhelm/kubeadm-aio:v1.6.8
export KUBE_VERSION=v1.6.8
./tools/kubeadm-aio/kubeadm-aio-launcher.sh
export KUBECONFIG=${HOME}/.kubeadm-aio/admin.conf
Once this has run without errors, you should have a single-node Kubernetes
environment running, with Helm, Calico, and the appropriate RBAC rules and node
labels in place for development.
Prior to launching you can also optionally set the following environment
variables to control aspects of the CNI used:
.. code:: bash
export KUBE_CNI=calico # or "canal" "weave" "flannel"
export CNI_POD_CIDR=192.168.0.0/16
If you wish to use this environment as the primary Kubernetes environment on
your host you may run the following, but note that this will wipe any previous
client configuration you may have.
.. code:: bash
mkdir -p ${HOME}/.kube
cat ${HOME}/.kubeadm-aio/admin.conf > ${HOME}/.kube/config
If you wish to create dummy network devices for Neutron to manage, there
is a helper script that can set them up for you:
.. code:: bash
sudo docker exec kubelet /usr/bin/openstack-helm-aio-network-prep
Logs
~~~~
You can get the logs from your ``kubeadm-aio`` container by running:
.. code:: bash
sudo docker logs -f kubeadm-aio


@ -0,0 +1,2 @@
KUBE_CNI=calico
CNI_POD_CIDR=192.168.0.0/16


@ -0,0 +1,3 @@
# If KUBE_ROLE is set to 'master', kubeadm-aio will set this node up to be a master
# node; otherwise, if set to 'worker', it will join an existing cluster.
KUBE_ROLE=master


@ -0,0 +1,3 @@
# If KUBE_VERSION is set to 'default', kubeadm will use the default version of K8s;
# otherwise the version specified here will be used.
KUBE_VERSION=default


@ -0,0 +1 @@
KUBEADM_JOIN_ARGS="no_command_supplied"


@ -0,0 +1,4 @@
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
apiServerExtraArgs:
runtime-config: "batch/v2alpha1=true"


@ -0,0 +1,3 @@
# If KUBE_BIND_DEV is set to 'autodetect', we will use kubeadm's autodetect logic;
# otherwise the device specified here is used to find the IP address to bind to.
KUBE_BIND_DEV=autodetect


@ -0,0 +1,3 @@
# If KUBELET_CONTAINER is set to 'this_one', we will not attempt to launch a new
# container for the kubelet process; otherwise the image tag specified here is used.
KUBELET_CONTAINER=this_one


@ -0,0 +1,54 @@
#!/bin/bash
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
echo 'Checking cgroups'
if ls -dZ /sys/fs/cgroup | grep -q :svirt_sandbox_file_t: ; then
echo 'Invocation error: use -v /sys/fs/cgroup:/sys/fs/cgroup:ro parameter to docker run.'
exit 1
fi
echo 'Setting up K8s version to deploy'
: ${KUBE_VERSION:="default"}
sed -i "s|KUBE_VERSION=.*|KUBE_VERSION=${KUBE_VERSION}|g" /etc/kube-version
echo 'Setting up device to use for kube-api'
: ${KUBE_BIND_DEV:="autodetect"}
sed -i "s|KUBE_BIND_DEV=.*|KUBE_BIND_DEV=${KUBE_BIND_DEV}|g" /etc/kubeapi-device
echo 'Setting up container image to use for kubelet'
: ${KUBELET_CONTAINER:="this_one"}
sed -i "s|KUBELET_CONTAINER=.*|KUBELET_CONTAINER=${KUBELET_CONTAINER}|g" /etc/kubelet-container
echo 'Setting whether this node is a master or worker K8s node'
: ${KUBE_ROLE:="master"}
sed -i "s|KUBE_ROLE=.*|KUBE_ROLE=${KUBE_ROLE}|g" /etc/kube-role
echo 'Setting any kubeadm join commands'
: ${KUBEADM_JOIN_ARGS:="no_command_supplied"}
sed -i "s|KUBEADM_JOIN_ARGS=.*|KUBEADM_JOIN_ARGS=\"${KUBEADM_JOIN_ARGS}\"|g" /etc/kubeadm-join-command-args
echo 'Setting CNI pod CIDR'
: ${CNI_POD_CIDR:="192.168.0.0/16"}
sed -i "s|192.168.0.0/16|${CNI_POD_CIDR}|g" /opt/cni-manifests/*.yaml
sed -i "s|CNI_POD_CIDR=.*|CNI_POD_CIDR=\"${CNI_POD_CIDR}\"|g" /etc/kube-cni
echo 'Setting CNI '
: ${KUBE_CNI:="calico"}
sed -i "s|KUBE_CNI=.*|KUBE_CNI=\"${KUBE_CNI}\"|g" /etc/kube-cni
echo 'Starting Systemd'
exec /bin/systemd --system


@ -0,0 +1,365 @@
# Calico Version v2.1.4
# http://docs.projectcalico.org/v2.1/releases#v2.1.4
# This manifest includes the following component versions:
# calico/node:v1.1.3
# calico/cni:v1.7.0
# calico/kube-policy-controller:v0.5.4
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# The location of your etcd cluster. This uses the Service clusterIP
# defined below.
etcd_endpoints: "http://10.96.232.136:6666"
# Configure the Calico backend to use.
calico_backend: "bird"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "k8s-pod-network",
"type": "calico",
"etcd_endpoints": "__ETCD_ENDPOINTS__",
"log_level": "info",
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s",
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
}
}
---
# This manifest installs the Calico etcd on the kubeadm master. This uses a DaemonSet
# to force it to run on the master even when the master isn't schedulable, and uses
# nodeSelector to ensure it only runs on the master.
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: calico-etcd
namespace: kube-system
labels:
k8s-app: calico-etcd
spec:
template:
metadata:
labels:
k8s-app: calico-etcd
annotations:
# Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
# reserves resources for critical add-on pods so that they can be rescheduled after
# a failure. This annotation works in tandem with the toleration below.
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
# Only run this pod on the master.
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
# Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
# This, along with the annotation above marks this pod as a critical add-on.
- key: CriticalAddonsOnly
operator: Exists
nodeSelector:
node-role.kubernetes.io/master: ""
hostNetwork: true
containers:
- name: calico-etcd
image: gcr.io/google_containers/etcd:2.2.1
env:
- name: CALICO_ETCD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
command: ["/bin/sh","-c"]
args: ["/usr/local/bin/etcd --name=calico --data-dir=/var/etcd/calico-data --advertise-client-urls=http://$CALICO_ETCD_IP:6666 --listen-client-urls=http://0.0.0.0:6666 --listen-peer-urls=http://0.0.0.0:6667"]
volumeMounts:
- name: var-etcd
mountPath: /var/etcd
volumes:
- name: var-etcd
hostPath:
path: /var/etcd
---
# This manifest installs the Service which gets traffic to the Calico
# etcd.
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: calico-etcd
name: calico-etcd
namespace: kube-system
spec:
# Select the calico-etcd pod running on the master.
selector:
k8s-app: calico-etcd
# This ClusterIP needs to be known in advance, since we cannot rely
# on DNS to get access to etcd.
clusterIP: 10.96.232.136
ports:
- port: 6666
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
template:
metadata:
labels:
k8s-app: calico-node
annotations:
# Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
# reserves resources for critical add-on pods so that they can be rescheduled after
# a failure. This annotation works in tandem with the toleration below.
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
hostNetwork: true
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
# Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
# This, along with the annotation above marks this pod as a critical add-on.
- key: CriticalAddonsOnly
operator: Exists
serviceAccountName: calico-cni-plugin
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v1.1.3
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Enable BGP. Disable to enforce policy only.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Configure the IP Pool from which Pod IPs will be chosen.
- name: CALICO_IPV4POOL_CIDR
value: "192.168.0.0/16"
- name: CALICO_IPV4POOL_IPIP
value: "always"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
value: "info"
# Auto-detect the BGP IP address.
- name: IP
value: ""
securityContext:
privileged: true
resources:
requests:
cpu: 250m
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.7.0
command: ["/install-cni.sh"]
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
---
# This manifest deploys the Calico policy controller on Kubernetes.
# See https://github.com/projectcalico/k8s-policy
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy
spec:
# The policy controller can only have a single active instance.
replicas: 1
strategy:
type: Recreate
template:
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy-controller
annotations:
# Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
# reserves resources for critical add-on pods so that they can be rescheduled after
# a failure. This annotation works in tandem with the toleration below.
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
# The policy controller must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
# Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
# This, along with the annotation above marks this pod as a critical add-on.
- key: CriticalAddonsOnly
operator: Exists
serviceAccountName: calico-policy-controller
containers:
- name: calico-policy-controller
image: quay.io/calico/kube-policy-controller:v0.5.4
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# The location of the Kubernetes API. Use the default Kubernetes
# service for API access.
- name: K8S_API
value: "https://kubernetes.default:443"
# Since we're running in the host namespace and might not have KubeDNS
# access, configure the container's /etc/hosts to resolve
# kubernetes.default to the correct service clusterIP.
- name: CONFIGURE_ETC_HOSTS
value: "true"
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico-cni-plugin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-cni-plugin
subjects:
- kind: ServiceAccount
name: calico-cni-plugin
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-cni-plugin
namespace: kube-system
rules:
- apiGroups: [""]
resources:
- pods
- nodes
verbs:
- get
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-cni-plugin
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico-policy-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-policy-controller
subjects:
- kind: ServiceAccount
name: calico-policy-controller
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-policy-controller
namespace: kube-system
rules:
- apiGroups:
- ""
- extensions
resources:
- pods
- namespaces
- networkpolicies
verbs:
- watch
- list
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-policy-controller
namespace: kube-system


@ -0,0 +1,329 @@
# Calico Roles
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: canal
namespace: kube-system
rules:
- apiGroups: [""]
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups: [""]
resources:
- pods/status
verbs:
- update
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- update
- watch
- apiGroups: ["extensions"]
resources:
- thirdpartyresources
verbs:
- create
- get
- list
- watch
- apiGroups: ["extensions"]
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups: ["projectcalico.org"]
resources:
- globalconfigs
verbs:
- create
- get
- list
- update
- watch
- apiGroups: ["projectcalico.org"]
resources:
- ippools
verbs:
- create
- delete
- get
- list
- update
- watch
---
# Flannel roles
# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: canal
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: canal
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
# This ConfigMap can be used to configure a self-hosted Canal installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: canal-config
namespace: kube-system
data:
# The interface used by canal for host <-> host communication.
# If left blank, then the interface is chosen using the node's
# default route.
canal_iface: ""
# Whether or not to masquerade traffic to destinations not within
# the pod network.
masquerade: "true"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "k8s-pod-network",
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"hostname": "__KUBERNETES_NODE_NAME__",
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
}
# Flannel network configuration. Mounted into the flannel container.
net-conf.json: |
{
"Network": "192.168.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: canal
namespace: kube-system
labels:
k8s-app: canal
spec:
selector:
matchLabels:
k8s-app: canal
template:
metadata:
labels:
k8s-app: canal
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
hostNetwork: true
serviceAccountName: canal
tolerations:
# Allow the pod to run on the master. This is required for
# the master to communicate with pods.
- key: node-role.kubernetes.io/master
effect: NoSchedule
# Mark the pod as a critical add-on for rescheduling.
- key: "CriticalAddonsOnly"
operator: "Exists"
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v1.2.1
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Enable felix logging.
- name: FELIX_LOGSEVERITYSYS
value: "info"
# Period, in seconds, at which felix re-applies all iptables state
- name: FELIX_IPTABLESREFRESHINTERVAL
value: "60"
# Disable IPV6 support in Felix.
- name: FELIX_IPV6SUPPORT
value: "false"
# Don't enable BGP.
- name: CALICO_NETWORKING_BACKEND
value: "none"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
- name: WAIT_FOR_DATASTORE
value: "true"
# No IP address needed.
- name: IP
value: ""
- name: HOSTNAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.8.3
command: ["/install-cni.sh"]
env:
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: canal-config
key: cni_network_config
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
# This container runs flannel using the kube-subnet-mgr backend
# for allocating subnets.
- name: kube-flannel
image: quay.io/coreos/flannel:v0.8.0
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLANNELD_IFACE
valueFrom:
configMapKeyRef:
name: canal-config
key: canal_iface
- name: FLANNELD_IP_MASQ
valueFrom:
configMapKeyRef:
name: canal-config
key: masquerade
volumeMounts:
- name: run
mountPath: /run
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used by flannel.
- name: run
hostPath:
path: /run
- name: flannel-cfg
configMap:
name: canal-config
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: canal
namespace: kube-system


@ -0,0 +1,94 @@
#https://raw.githubusercontent.com/coreos/flannel/v0.8.0/Documentation/kube-flannel.yml
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"type": "flannel",
"delegate": {
"isDefaultGateway": true
}
}
net-conf.json: |
{
"Network": "192.168.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-system
labels:
tier: node
app: flannel
spec:
template:
metadata:
labels:
tier: node
app: flannel
spec:
hostNetwork: true
nodeSelector:
beta.kubernetes.io/arch: amd64
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
serviceAccountName: flannel
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.8.0-amd64
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: install-cni
image: quay.io/coreos/flannel:v0.8.0-amd64
command: [ "/bin/sh", "-c", "set -e -x; cp -f /etc/kube-flannel/cni-conf.json /etc/cni/net.d/10-flannel.conf; while true; do sleep 3600; done" ]
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg


@ -0,0 +1,187 @@
# curl --location "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')&env.WEAVE_MTU=1337&env.IPALLOC_RANGE=192.168.0.0/16
apiVersion: v1
kind: List
items:
- apiVersion: v1
kind: ServiceAccount
metadata:
name: weave-net
annotations:
cloud.weave.works/launcher-info: |-
{
"server-version": "master-c3b4969",
"original-request": {
"url": "/k8s/v1.6/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiI2IiwgR2l0VmVyc2lvbjoidjEuNi43IiwgR2l0Q29tbWl0OiIwOTUxMzZjMzA3OGNjZjg4N2I5MDM0YjdjZTU5OGEwYTFmYWZmNzY5IiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAxNy0wNy0wNVQxNjo1MTo1NloiLCBHb1ZlcnNpb246ImdvMS43LjYiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjYiLCBHaXRWZXJzaW9uOiJ2MS42LjciLCBHaXRDb21taXQ6IjA5NTEzNmMzMDc4Y2NmODg3YjkwMzRiN2NlNTk4YTBhMWZhZmY3NjkiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE3LTA3LTA1VDE2OjQwOjQyWiIsIEdvVmVyc2lvbjoiZ28xLjcuNiIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==&env.WEAVE_MTU=1337&env.IPALLOC_RANGE=192.168.0.0/16",
"date": "Sun Jul 30 2017 02:48:47 GMT+0000 (UTC)"
},
"email-address": "support@weave.works"
}
labels:
name: weave-net
namespace: kube-system
- apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: weave-net
annotations:
cloud.weave.works/launcher-info: |-
{
"server-version": "master-c3b4969",
"original-request": {
"url": "/k8s/v1.6/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiI2IiwgR2l0VmVyc2lvbjoidjEuNi43IiwgR2l0Q29tbWl0OiIwOTUxMzZjMzA3OGNjZjg4N2I5MDM0YjdjZTU5OGEwYTFmYWZmNzY5IiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAxNy0wNy0wNVQxNjo1MTo1NloiLCBHb1ZlcnNpb246ImdvMS43LjYiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjYiLCBHaXRWZXJzaW9uOiJ2MS42LjciLCBHaXRDb21taXQ6IjA5NTEzNmMzMDc4Y2NmODg3YjkwMzRiN2NlNTk4YTBhMWZhZmY3NjkiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE3LTA3LTA1VDE2OjQwOjQyWiIsIEdvVmVyc2lvbjoiZ28xLjcuNiIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==&env.WEAVE_MTU=1337&env.IPALLOC_RANGE=192.168.0.0/16",
"date": "Sun Jul 30 2017 02:48:47 GMT+0000 (UTC)"
},
"email-address": "support@weave.works"
}
labels:
name: weave-net
rules:
- apiGroups:
- ''
resources:
- pods
- namespaces
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: weave-net
annotations:
cloud.weave.works/launcher-info: |-
{
"server-version": "master-c3b4969",
"original-request": {
"url": "/k8s/v1.6/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiI2IiwgR2l0VmVyc2lvbjoidjEuNi43IiwgR2l0Q29tbWl0OiIwOTUxMzZjMzA3OGNjZjg4N2I5MDM0YjdjZTU5OGEwYTFmYWZmNzY5IiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAxNy0wNy0wNVQxNjo1MTo1NloiLCBHb1ZlcnNpb246ImdvMS43LjYiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjYiLCBHaXRWZXJzaW9uOiJ2MS42LjciLCBHaXRDb21taXQ6IjA5NTEzNmMzMDc4Y2NmODg3YjkwMzRiN2NlNTk4YTBhMWZhZmY3NjkiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE3LTA3LTA1VDE2OjQwOjQyWiIsIEdvVmVyc2lvbjoiZ28xLjcuNiIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==&env.WEAVE_MTU=1337&env.IPALLOC_RANGE=192.168.0.0/16",
"date": "Sun Jul 30 2017 02:48:47 GMT+0000 (UTC)"
},
"email-address": "support@weave.works"
}
labels:
name: weave-net
roleRef:
kind: ClusterRole
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-net
namespace: kube-system
- apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: weave-net
annotations:
cloud.weave.works/launcher-info: |-
{
"server-version": "master-c3b4969",
"original-request": {
"url": "/k8s/v1.6/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiI2IiwgR2l0VmVyc2lvbjoidjEuNi43IiwgR2l0Q29tbWl0OiIwOTUxMzZjMzA3OGNjZjg4N2I5MDM0YjdjZTU5OGEwYTFmYWZmNzY5IiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAxNy0wNy0wNVQxNjo1MTo1NloiLCBHb1ZlcnNpb246ImdvMS43LjYiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjYiLCBHaXRWZXJzaW9uOiJ2MS42LjciLCBHaXRDb21taXQ6IjA5NTEzNmMzMDc4Y2NmODg3YjkwMzRiN2NlNTk4YTBhMWZhZmY3NjkiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE3LTA3LTA1VDE2OjQwOjQyWiIsIEdvVmVyc2lvbjoiZ28xLjcuNiIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==&env.WEAVE_MTU=1337&env.IPALLOC_RANGE=192.168.0.0/16",
"date": "Sun Jul 30 2017 02:48:47 GMT+0000 (UTC)"
},
"email-address": "support@weave.works"
}
labels:
name: weave-net
namespace: kube-system
spec:
template:
metadata:
labels:
name: weave-net
spec:
containers:
- name: weave
command:
- /home/weave/launch.sh
env:
- name: WEAVE_MTU
value: '1337'
- name: IPALLOC_RANGE
value: 192.168.0.0/16
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: 'weaveworks/weave-kube:2.0.1'
imagePullPolicy: Always
livenessProbe:
httpGet:
host: 127.0.0.1
path: /status
port: 6784
initialDelaySeconds: 30
resources:
requests:
cpu: 10m
securityContext:
privileged: true
volumeMounts:
- name: weavedb
mountPath: /weavedb
- name: cni-bin
mountPath: /host/opt
- name: cni-bin2
mountPath: /host/home
- name: cni-conf
mountPath: /host/etc
- name: dbus
mountPath: /host/var/lib/dbus
- name: lib-modules
mountPath: /lib/modules
- name: weave-npc
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: 'weaveworks/weave-npc:2.0.1'
imagePullPolicy: Always
resources:
requests:
cpu: 10m
securityContext:
privileged: true
hostNetwork: true
hostPID: true
restartPolicy: Always
securityContext:
seLinuxOptions: {}
serviceAccountName: weave-net
tolerations:
- effect: NoSchedule
operator: Exists
volumes:
- name: weavedb
hostPath:
path: /var/lib/weave
- name: cni-bin
hostPath:
path: /opt
- name: cni-bin2
hostPath:
path: /home
- name: cni-conf
hostPath:
path: /etc
- name: dbus
hostPath:
path: /var/lib/dbus
- name: lib-modules
hostPath:
path: /lib/modules
updateStrategy:
type: RollingUpdate

View File

@ -0,0 +1,73 @@
kind: Service
apiVersion: v1
metadata:
name: nfs-provisioner
labels:
app: nfs-provisioner
spec:
ports:
- name: nfs
port: 2049
- name: mountd
port: 20048
- name: rpcbind
port: 111
- name: rpcbind-udp
port: 111
protocol: UDP
selector:
app: nfs-provisioner
---
kind: Deployment
apiVersion: apps/v1beta1
metadata:
name: nfs-provisioner
spec:
replicas: 1
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-provisioner
spec:
containers:
- name: nfs-provisioner
image: quay.io/kubernetes_incubator/nfs-provisioner:v1.0.7
ports:
- name: nfs
containerPort: 2049
- name: mountd
containerPort: 20048
- name: rpcbind
containerPort: 111
- name: rpcbind-udp
containerPort: 111
protocol: UDP
securityContext:
capabilities:
add:
- DAC_READ_SEARCH
- SYS_RESOURCE
args:
- "-provisioner=example.com/nfs"
- "-grace-period=10"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: SERVICE_NAME
value: nfs-provisioner
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: export-volume
mountPath: /export
volumes:
- name: export-volume
hostPath:
path: /var/lib/nfs-provisioner

View File

@ -0,0 +1,5 @@
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: general
provisioner: example.com/nfs
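# Illustrative only (not part of this manifest): a workload could request a
# dynamically provisioned volume from this class with a claim along the
# lines of the following; the claim name and size below are assumptions.
# ---
# kind: PersistentVolumeClaim
# apiVersion: v1
# metadata:
#   name: example-claim
# spec:
#   storageClassName: general
#   accessModes:
#     - ReadWriteOnce
#   resources:
#     requests:
#       storage: 1Gi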

View File

@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1alpha1
kind: ClusterRoleBinding
metadata:
name: cluster-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: Group
name: system:masters
- kind: Group
name: system:authenticated
- kind: Group
name: system:unauthenticated
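# NOTE: binding cluster-admin to system:authenticated and
# system:unauthenticated effectively disables RBAC authorization; this is
# only suitable for a throwaway development/gate cluster.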

View File

@ -0,0 +1,102 @@
#!/bin/bash
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
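# NOTE: KUBEADM_IMAGE and KUBE_VERSION are expected to be set in the
# environment by the caller; they are passed to the kubeadm-aio container
# below and are not defaulted here.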
# Setup shared mounts for kubelet
sudo mkdir -p /var/lib/kubelet
sudo mount --bind /var/lib/kubelet /var/lib/kubelet
sudo mount --make-shared /var/lib/kubelet
# Cleanup any old deployment
sudo docker rm -f kubeadm-aio || true
sudo docker rm -f kubelet || true
sudo docker ps -aq | xargs -r -l1 -P16 sudo docker rm -f
sudo rm -rfv \
/etc/cni/net.d \
/etc/kubernetes \
/var/lib/etcd \
/var/etcd \
/var/lib/kubelet/* \
/run/openvswitch \
${HOME}/.kubeadm-aio/admin.conf \
/var/lib/nfs-provisioner || true
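# Default CNI plugin and pod network CIDR; both can be overridden from the
# environment before running this script.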
: ${KUBE_CNI:="calico"}
: ${CNI_POD_CIDR:="192.168.0.0/16"}
# Launch Container, refer to:
# https://docs.docker.com/engine/reference/run/
sudo docker run \
-dt \
--name=kubeadm-aio \
--net=host \
--security-opt=seccomp:unconfined \
--cap-add=SYS_ADMIN \
--tmpfs=/run \
--tmpfs=/run/lock \
--volume=/etc/machine-id:/etc/machine-id:ro \
--volume=${HOME}:${HOME}:rw \
--volume=${HOME}/.kubeadm-aio:/root:rw \
--volume=/etc/kubernetes:/etc/kubernetes:rw \
--volume=/sys/fs/cgroup:/sys/fs/cgroup:ro \
--volume=/var/run/docker.sock:/run/docker.sock \
--env KUBELET_CONTAINER=${KUBEADM_IMAGE} \
--env KUBE_VERSION=${KUBE_VERSION} \
--env KUBE_CNI=${KUBE_CNI} \
--env CNI_POD_CIDR=${CNI_POD_CIDR} \
${KUBEADM_IMAGE}
echo "Waiting for kubeconfig"
set +x
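# Poll for up to 240 seconds for kubeadm to write admin.conf into the shared
# ${HOME}/.kubeadm-aio mount, dumping the container logs on timeout.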
end=$(($(date +%s) + 240))
READY="False"
while true; do
if [ -f "${HOME}/.kubeadm-aio/admin.conf" ]; then
READY="True"
fi
[ "$READY" == "True" ] && break || true
sleep 1
now=$(date +%s)
[ $now -gt $end ] && \
echo "KubeADM did not generate kubectl config in time" && \
sudo docker logs kubeadm-aio && exit 1
done
set -x
# Set perms of kubeconfig and set env-var
sudo chown $(id -u):$(id -g) ${HOME}/.kubeadm-aio/admin.conf
export KUBECONFIG=${HOME}/.kubeadm-aio/admin.conf
echo "Waiting for node to be ready before continuing"
set +x
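# Poll for up to 240 seconds until the first node reports Ready, dumping the
# container logs on timeout.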
end=$(($(date +%s) + 240))
READY="False"
while true; do
READY=$(kubectl get nodes --no-headers=true | awk '{ print $2 }' | head -1)
[ "$READY" == "Ready" ] && break || true
sleep 1
now=$(date +%s)
[ $now -gt $end ] && \
echo "Kube node did not register as ready in time" && \
sudo docker logs kubeadm-aio && exit 1
done
set -x
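# Default persistent volume backend; when set to "nfs" the aio container is
# asked to set up the NFS provisioner.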
: ${PVC_BACKEND:="nfs"}
if [ "$PVC_BACKEND" == "nfs" ]; then
# Deploy NFS provisioner into environment
sudo docker exec kubeadm-aio openstack-helm-nfs-prep
fi