Merge "devstack: add openshift support"
This commit is contained in:
commit
7b25f43dc1
|
@ -518,3 +518,154 @@ spec:
|
|||
path: /etc
|
||||
EOF
|
||||
}
|
||||
|
||||
# install_openshift_binary
# Description: Fetches the configured binary release of OpenShift and
# installs it in the system
function install_openshift_binary {
    local bin

    mkdir -p "$OPENSHIFT_HOME"
    wget "$OPENSHIFT_BINARY_URL" -O "${OPENSHIFT_HOME}/openshift.tar.gz"
    # --strip 1 drops the release's top-level directory so the binaries land
    # directly in $OPENSHIFT_HOME
    tar xzvf "${OPENSHIFT_HOME}/openshift.tar.gz" --strip 1 -C "$OPENSHIFT_HOME"

    # Make openshift run from its untarred directory
    cat << EOF | sudo tee /usr/local/bin/openshift
#!/bin/bash
cd ${OPENSHIFT_HOME}
exec ./openshift "\$@"
EOF
    sudo chmod a+x /usr/local/bin/openshift

    # Make the client tools (oc, kubectl, oadm) easily available, each
    # pre-wired with the devstack-generated CA bundle and admin kubeconfig.
    # The heredoc delimiter is unquoted on purpose: the *_DIR/_HOME paths are
    # expanded now, while \$@ stays literal for the generated wrapper.
    for bin in oc kubectl oadm; do
        cat << EOF | sudo tee "/usr/local/bin/${bin}"
#!/bin/bash
CURL_CA_BUNDLE=${OPENSHIFT_DATA_DIR}/ca.crt \
KUBECONFIG=${OPENSHIFT_DATA_DIR}/admin.kubeconfig \
${OPENSHIFT_HOME}/${bin} "\$@"
EOF
        sudo chmod a+x "/usr/local/bin/${bin}"
    done
}
|
||||
|
||||
# run_openshift_master
# Description: Starts the openshift master
#
# Reads globals: STACK_USER, OPENSHIFT_DATA_DIR, OPENSHIFT_HOME,
#   OPENSHIFT_API_URL, REGION_NAME, KURYR_NEUTRON_DEFAULT_POD_SUBNET,
#   KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET, KURYR_ETCD_ADVERTISE_CLIENT_URL,
#   KURYR_CONFIG, USE_SYSTEMD
# Side effects: writes the master config and certs under $OPENSHIFT_DATA_DIR,
#   rewrites the kubernetes section of $KURYR_CONFIG, and starts the
#   openshift-master process via devstack's run_process.
function run_openshift_master {
    local cmd
    local pod_subnet_cidr
    local service_subnet_cidr

    # Data dir must be owned by the stack user so config generation below
    # (run unprivileged) can write into it.
    sudo install -d -o "$STACK_USER" "$OPENSHIFT_DATA_DIR"

    # Look up the CIDRs of the Neutron subnets that back the pod and
    # service networks; OpenShift must be told to use the same ranges.
    pod_subnet_cidr=$(openstack --os-cloud devstack-admin \
            --os-region "$REGION_NAME" \
            subnet show "$KURYR_NEUTRON_DEFAULT_POD_SUBNET" \
            -c cidr -f value)
    service_subnet_cidr=$(openstack --os-cloud devstack-admin \
            --os-region "$REGION_NAME" \
            subnet show "$KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET" \
            -c cidr -f value)

    # Generate master config
    # --write-config makes this invocation only emit config + certs into
    # $OPENSHIFT_DATA_DIR; it does not start the server.
    "${OPENSHIFT_HOME}/openshift" start master \
        "--etcd=${KURYR_ETCD_ADVERTISE_CLIENT_URL}" \
        "--network-cidr=${pod_subnet_cidr}" \
        "--portal-net=${service_subnet_cidr}" \
        "--listen=${OPENSHIFT_API_URL}" \
        "--master=${OPENSHIFT_API_URL}" \
        "--write-config=${OPENSHIFT_DATA_DIR}"

    # Reconfigure Kuryr-Kubernetes to use the certs generated
    iniset "$KURYR_CONFIG" kubernetes api_root "$OPENSHIFT_API_URL"
    iniset "$KURYR_CONFIG" kubernetes ssl_client_crt_file "${OPENSHIFT_DATA_DIR}/admin.crt"
    iniset "$KURYR_CONFIG" kubernetes ssl_client_key_file "${OPENSHIFT_DATA_DIR}/admin.key"
    iniset "$KURYR_CONFIG" kubernetes ssl_ca_crt_file "${OPENSHIFT_DATA_DIR}/ca.crt"

    # The config generation above may have been done as root via sudo-less
    # openshift; make sure the stack user owns everything it produced.
    sudo chown "${STACK_USER}:${STACK_USER}" -R "$OPENSHIFT_DATA_DIR"

    # Generate kubelet kubeconfig
    # Uses the kubelet-client cert pair emitted by --write-config so the
    # node (kubelet) can authenticate against this master.
    "${OPENSHIFT_HOME}/oadm" create-kubeconfig \
        "--client-key=${OPENSHIFT_DATA_DIR}/master.kubelet-client.key" \
        "--client-certificate=${OPENSHIFT_DATA_DIR}/master.kubelet-client.crt" \
        "--certificate-authority=${OPENSHIFT_DATA_DIR}/ca.crt" \
        "--master=${OPENSHIFT_API_URL}" \
        "--kubeconfig=${OPENSHIFT_DATA_DIR}/master.kubelet-client.kubeconfig"

    # The actual long-running master, driven by the config generated above.
    cmd="/usr/local/bin/openshift start master \
        --config=${OPENSHIFT_DATA_DIR}/master-config.yaml"

    # The master stores its state in etcd; don't start until etcd answers.
    wait_for "etcd" "${KURYR_ETCD_ADVERTISE_CLIENT_URL}/v2/machines"

    if [[ "$USE_SYSTEMD" = "True" ]]; then
        # If systemd is being used, proceed as normal
        run_process openshift-master "$cmd" root root
    else
        # If screen is being used, there is a possibility that the devstack
        # environment is on a stable branch. Older versions of run_process have
        # a different signature. Sudo is used as a workaround that works in
        # both older and newer versions of devstack.
        run_process openshift-master "sudo $cmd"
    fi
}
|
||||
|
||||
# make_admin_cluster_admin
# Description: Gives the system:admin permissions over the cluster
function make_admin_cluster_admin {
    local master_kubeconfig="${OPENSHIFT_DATA_DIR}/openshift-master.kubeconfig"

    # Block until the API server answers over TLS before touching policy.
    wait_for "OpenShift API Server" "$OPENSHIFT_API_URL" \
        "${OPENSHIFT_DATA_DIR}/ca.crt"

    # Bind the cluster-admin role to the 'admin' user using the master's
    # own kubeconfig credentials.
    /usr/local/bin/oadm policy add-cluster-role-to-user cluster-admin admin \
        "--config=${master_kubeconfig}"
}
|
||||
|
||||
# run_openshift_node
# Description: Starts the openshift node
#
# Reads globals: OPENSHIFT_CNI_BINARY_URL, CNI_BIN_DIR, CNI_CONF_DIR,
#   OPENSHIFT_DATA_DIR, OPENSHIFT_HOME, USE_SYSTEMD
# Side effects: installs the CNI loopback plugin, repoints /etc/cni/net.d
#   and /opt/cni/bin at the devstack CNI dirs, and starts the
#   openshift-node process via devstack's run_process.
function run_openshift_node {
    # Named 'cmd' for consistency with run_openshift_master (the previous
    # name 'command' also shadowed the shell builtin's name).
    local cmd

    # Install required CNI loopback driver.
    # NOTE(review): a curl failure here is masked by the pipe — tar would
    # just extract nothing; consider checking PIPESTATUS.
    curl -L "$OPENSHIFT_CNI_BINARY_URL" | sudo tar -C "$CNI_BIN_DIR" -xzvf - ./loopback

    cmd="/usr/local/bin/openshift start node \
        --kubeconfig=${OPENSHIFT_DATA_DIR}/master.kubelet-client.kubeconfig \
        --enable=kubelet,plugins \
        --network-plugin=cni"

    # Link master config necessary for bootstrapping
    # TODO: This needs to be generated so we don't depend on it on multinode
    mkdir -p "${OPENSHIFT_HOME}/openshift.local.config"
    ln -fs "${OPENSHIFT_DATA_DIR}" "${OPENSHIFT_HOME}/openshift.local.config/master"

    # Link stack CNI to location expected by openshift node
    sudo mkdir -p /etc/cni
    sudo rm -fr /etc/cni/net.d
    sudo rm -fr /opt/cni/bin
    sudo ln -fs "${CNI_CONF_DIR}" /etc/cni/net.d
    sudo mkdir -p /opt/cni
    sudo ln -fs "${CNI_BIN_DIR}" /opt/cni/bin

    if [[ "$USE_SYSTEMD" = "True" ]]; then
        # If systemd is being used, proceed as normal
        run_process openshift-node "$cmd" root root
    else
        # If screen is being used, there is a possibility that the devstack
        # environment is on a stable branch. Older versions of run_process have
        # a different signature. Sudo is used as a workaround that works in
        # both older and newer versions of devstack.
        run_process openshift-node "sudo $cmd"
    fi
}
|
||||
|
|
|
@ -0,0 +1,220 @@
|
|||
[[local|localrc]]
|
||||
|
||||
enable_plugin kuryr-kubernetes \
|
||||
https://git.openstack.org/openstack/kuryr-kubernetes
|
||||
|
||||
# If you do not want stacking to clone new versions of the enabled services,
|
||||
# like for example when you did local modifications and need to ./unstack.sh
|
||||
# and ./stack.sh again, uncomment the following
|
||||
# RECLONE="no"
|
||||
|
||||
# Log settings for better readability
|
||||
LOGFILE=devstack.log
|
||||
LOG_COLOR=False
|
||||
# If you want the screen tabs logged in a specific location, you can use:
|
||||
# SCREEN_LOGDIR="${HOME}/devstack_logs"
|
||||
|
||||
|
||||
# Credentials
|
||||
ADMIN_PASSWORD=pass
|
||||
DATABASE_PASSWORD=pass
|
||||
RABBIT_PASSWORD=pass
|
||||
SERVICE_PASSWORD=pass
|
||||
SERVICE_TOKEN=pass
|
||||
# Enable Keystone v3
|
||||
IDENTITY_API_VERSION=3
|
||||
|
||||
# In favor of speed and being lightweight, we will be explicit in regards to
|
||||
# which services we enable
|
||||
ENABLED_SERVICES=""
|
||||
|
||||
# Neutron services
|
||||
enable_service neutron
|
||||
enable_service q-agt
|
||||
enable_service q-dhcp
|
||||
enable_service q-l3
|
||||
enable_service q-svc
|
||||
|
||||
# OCTAVIA
|
||||
KURYR_K8S_LBAAS_USE_OCTAVIA=True
|
||||
# Uncomment it to use L2 communication between loadbalancer and member pods
|
||||
# KURYR_K8S_OCTAVIA_MEMBER_MODE=L2
|
||||
|
||||
if [[ "$KURYR_K8S_LBAAS_USE_OCTAVIA" == "True" ]]; then
|
||||
# Octavia LBaaSv2
|
||||
LIBS_FROM_GIT+=python-octaviaclient
|
||||
enable_plugin octavia https://git.openstack.org/openstack/octavia
|
||||
enable_service octavia
|
||||
enable_service o-api
|
||||
enable_service o-cw
|
||||
enable_service o-hm
|
||||
enable_service o-hk
|
||||
## Octavia Deps
|
||||
### Image
|
||||
### Barbican
|
||||
enable_plugin barbican https://git.openstack.org/openstack/barbican
|
||||
### Nova
|
||||
enable_service n-api
|
||||
enable_service n-api-meta
|
||||
enable_service n-cpu
|
||||
enable_service n-cond
|
||||
enable_service n-sch
|
||||
enable_service placement-api
|
||||
enable_service placement-client
|
||||
### Glance
|
||||
enable_service g-api
|
||||
enable_service g-reg
|
||||
### Neutron-lbaas
|
||||
#### In case Octavia is older than Pike, neutron-lbaas is needed
|
||||
enable_plugin neutron-lbaas \
|
||||
git://git.openstack.org/openstack/neutron-lbaas
|
||||
enable_service q-lbaasv2
|
||||
else
|
||||
# LBaaSv2 service and Haproxy agent
|
||||
enable_plugin neutron-lbaas \
|
||||
git://git.openstack.org/openstack/neutron-lbaas
|
||||
enable_service q-lbaasv2
|
||||
fi
|
||||
|
||||
|
||||
# Keystone
|
||||
enable_service key
|
||||
|
||||
# dependencies
|
||||
enable_service mysql
|
||||
enable_service rabbit
|
||||
|
||||
# By default use all the services from the kuryr-kubernetes plugin
|
||||
|
||||
# Docker
|
||||
# ======
|
||||
# If you already have docker configured, running and with its socket writable
|
||||
# by the stack user, you can omit the following line.
|
||||
enable_plugin devstack-plugin-container https://git.openstack.org/openstack/devstack-plugin-container
|
||||
|
||||
# Etcd
|
||||
# ====
|
||||
# The default is for devstack to run etcd for you.
|
||||
enable_service etcd3
|
||||
|
||||
# You can also run the deprecated etcd containerized and select the image and
|
||||
# version of it by commenting the etcd3 service enablement and uncommenting
|
||||
#
|
||||
# enable legacy_etcd
|
||||
#
|
||||
# You can also modify the following defaults.
|
||||
# KURYR_ETCD_IMAGE="quay.io/coreos/etcd"
|
||||
# KURYR_ETCD_VERSION="v3.0.8"
|
||||
#
|
||||
# You can select the listening and advertising client and peering Etcd
|
||||
# addresses by uncommenting and changing from the following defaults:
|
||||
# KURYR_ETCD_ADVERTISE_CLIENT_URL=http://my_host_ip:2379
|
||||
# KURYR_ETCD_ADVERTISE_PEER_URL=http://my_host_ip:2380
|
||||
# KURYR_ETCD_LISTEN_CLIENT_URL=http://0.0.0.0:2379
|
||||
# KURYR_ETCD_LISTEN_PEER_URL=http://0.0.0.0:2380
|
||||
#
|
||||
# If you already have an etcd cluster configured and running, you can just
|
||||
# comment out the lines enabling legacy_etcd and etcd3
|
||||
# then uncomment and set the following line:
|
||||
# KURYR_ETCD_CLIENT_URL="http://etcd_ip:etcd_client_port"
|
||||
|
||||
# OpenShift
|
||||
# ==========
|
||||
#
|
||||
# OpenShift is run from the binaries contained in a binary release tarball
|
||||
enable_service openshift-master
|
||||
enable_service openshift-node
|
||||
|
||||
# OpenShift node uses systemd as its cgroup driver. Thus we need Docker to
|
||||
# use the same.
|
||||
DOCKER_CGROUP_DRIVER="systemd"
|
||||
|
||||
# We default to the 3.6 release, but you should be able to replace with other
|
||||
# releases by redefining the following
|
||||
# OPENSHIFT_BINARY_URL=https://github.com/openshift/origin/releases/download/v3.6.0/openshift-origin-server-v3.6.0-c4dd4cf-linux-64bit.tar.gz
|
||||
#
|
||||
# If you want to test with a different range for the Cluster IPs uncomment and
|
||||
# set the following ENV var to a different CIDR
|
||||
# KURYR_K8S_CLUSTER_IP_RANGE="10.0.0.0/24"
|
||||
#
|
||||
# If, however, you are reusing an existing deployment, you should uncomment and
|
||||
# set an ENV var so that the Kubelet devstack runs can find the API server:
|
||||
# OPENSHIFT_API_URL="http (or https, if OpenShift is SSL/TLS enabled)://openshift_api_ip:openshift_api_port"
|
||||
#
|
||||
# Since OpenShift defaults to its API server being 'https' enabled, set path of
|
||||
# the ssl cert files if you are reusing an environment, otherwise devstack will
|
||||
# do it for you.
|
||||
# KURYR_K8S_API_CERT="/etc/origin/master/kuryr.crt"
|
||||
# KURYR_K8S_API_KEY="/etc/origin/master/kuryr.key"
|
||||
# KURYR_K8S_API_CACERT="/etc/origin/master/ca.crt"
|
||||
|
||||
# Kuryr watcher
|
||||
# =============
|
||||
#
|
||||
# Just like the Kubelet, you'll want to have the watcher enabled. It is the
|
||||
# part of the codebase that connects to the Kubernetes API server to read the
|
||||
# resource events and convert them to Neutron actions
|
||||
enable_service kuryr-kubernetes
|
||||
|
||||
# Containerized Kuryr
|
||||
# ===================
|
||||
#
|
||||
# Kuryr can be installed on Kubernetes as a pair of Deployment
|
||||
# (kuryr-controller) and DaemonSet (kuryr-cni). If you want DevStack to deploy
|
||||
# Kuryr services as pods on Kubernetes uncomment next line.
|
||||
# KURYR_K8S_CONTAINERIZED_DEPLOYMENT=True
|
||||
|
||||
# Kuryr POD VIF Driver
|
||||
# ====================
|
||||
#
|
||||
# Set up the VIF Driver to be used. The default one is the neutron-vif, but if
|
||||
# a nested deployment is desired, the corresponding driver needs to be set,
|
||||
# e.g.: nested-vlan or nested-macvlan
|
||||
# KURYR_POD_VIF_DRIVER=neutron-vif
|
||||
|
||||
# Kuryr Ports Pools
|
||||
# =================
|
||||
#
|
||||
# To speed up containers boot time the kuryr ports pool driver can be enabled
|
||||
# by uncommenting the next line, so that neutron port resources are precreated
|
||||
# and ready to be used by the pods when needed
|
||||
# KURYR_USE_PORTS_POOLS=True
|
||||
#
|
||||
# By default the pool driver is noop, i.e., there is no pool. If pool
|
||||
# optimizations are to be used, you need to set it to 'neutron' for the
|
||||
# baremetal case, or to 'nested' for the nested case
|
||||
# KURYR_VIF_POOL_DRIVER=noop
|
||||
#
|
||||
# There are extra configuration options for the pools that can be set to decide
|
||||
# on the minimum number of ports that should be ready to use at each pool, the
|
||||
# maximum (0 to unset), and the batch size for the repopulation actions, i.e.,
|
||||
# the number of neutron ports to create in bulk operations. Finally, the update
|
||||
# frequency between actions over the pool can be set too
|
||||
# KURYR_VIF_POOL_MIN=5
|
||||
# KURYR_VIF_POOL_MAX=0
|
||||
# KURYR_VIF_POOL_BATCH=10
|
||||
# KURYR_VIF_POOL_UPDATE_FREQ=20
|
||||
|
||||
# Kuryr VIF Pool Manager
|
||||
# ======================
|
||||
#
|
||||
# Uncomment the next line to enable the pool manager. Note it requires the
|
||||
# nested-vlan pod vif driver, as well as the ports pool being enabled and
|
||||
# configured with the nested driver
|
||||
# KURYR_VIF_POOL_MANAGER=True
|
||||
|
||||
# Increase Octavia amphorae timeout so that the first LB amphora has time to
|
||||
# build and boot
|
||||
if [[ "$KURYR_K8S_LBAAS_USE_OCTAVIA" == "True" ]]; then
|
||||
IMAGE_URLS+=",http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img"
|
||||
else
|
||||
NEUTRON_LBAAS_SERVICE_PROVIDERV2="LOADBALANCERV2:Haproxy:neutron_lbaas.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default"
|
||||
fi
|
||||
|
||||
[[post-config|$OCTAVIA_CONF]]
|
||||
[controller_worker]
|
||||
amp_active_retries=9999
|
||||
|
||||
[[post-config|/$Q_PLUGIN_CONF_FILE]]
|
||||
[securitygroup]
|
||||
firewall_driver = openvswitch
|
|
@ -203,10 +203,17 @@ function create_k8s_api_service {
|
|||
while [[ "$(_lb_state $lb_name)" != "ACTIVE" ]]; do
|
||||
sleep 1
|
||||
done
|
||||
neutron lbaas-member-create --subnet public-subnet \
|
||||
--address "${HOST_IP}" \
|
||||
--protocol-port 6443 \
|
||||
default/kubernetes:443
|
||||
if is_service_enabled openshift-master; then
|
||||
neutron lbaas-member-create --subnet public-subnet \
|
||||
--address "${HOST_IP}" \
|
||||
--protocol-port 8443 \
|
||||
default/kubernetes:443
|
||||
else
|
||||
neutron lbaas-member-create --subnet public-subnet \
|
||||
--address "${HOST_IP}" \
|
||||
--protocol-port 6443 \
|
||||
default/kubernetes:443
|
||||
fi
|
||||
}
|
||||
|
||||
function configure_neutron_defaults {
|
||||
|
@ -355,12 +362,21 @@ function prepare_kubernetes_files {
|
|||
function wait_for {
|
||||
local name
|
||||
local url
|
||||
local cacert_path
|
||||
local flags
|
||||
name="$1"
|
||||
url="$2"
|
||||
cacert_path=${3:-}
|
||||
|
||||
echo -n "Waiting for $name to respond"
|
||||
|
||||
until curl -o /dev/null -sIf "$url"; do
|
||||
if [ $# == 3 ]; then
|
||||
extra_flags="--cacert ${cacert_path}"
|
||||
else
|
||||
extra_flags=""
|
||||
fi
|
||||
|
||||
until curl -o /dev/null -sIf $extra_flags "$url"; do
|
||||
echo -n "."
|
||||
sleep 1
|
||||
done
|
||||
|
@ -511,7 +527,13 @@ function run_k8s_kubelet {
|
|||
|
||||
function run_kuryr_kubernetes {
|
||||
local python_bin=$(which python)
|
||||
wait_for "Kubernetes API Server" "$KURYR_K8S_API_URL"
|
||||
|
||||
if is_service_enabled openshift-master; then
|
||||
wait_for "OpenShift API Server" "$OPENSHIFT_API_URL" \
|
||||
"${OPENSHIFT_DATA_DIR}/ca.crt"
|
||||
else
|
||||
wait_for "Kubernetes API Server" "$KURYR_K8S_API_URL"
|
||||
fi
|
||||
run_process kuryr-kubernetes \
|
||||
"$python_bin ${KURYR_HOME}/scripts/run_server.py \
|
||||
--config-file $KURYR_CONFIG"
|
||||
|
@ -523,7 +545,7 @@ source $DEST/kuryr-kubernetes/devstack/lib/kuryr_kubernetes
|
|||
# main loop
|
||||
if [[ "$1" == "stack" && "$2" == "install" ]]; then
|
||||
setup_develop "$KURYR_HOME"
|
||||
if is_service_enabled kubelet; then
|
||||
if is_service_enabled kubelet || is_service_enabled openshift-node; then
|
||||
KURYR_K8S_CONTAINERIZED_DEPLOYMENT=$(trueorfalse False KURYR_K8S_CONTAINERIZED_DEPLOYMENT)
|
||||
if [ "$KURYR_K8S_CONTAINERIZED_DEPLOYMENT" == "False" ]; then
|
||||
install_kuryr_cni
|
||||
|
@ -566,8 +588,29 @@ if [[ "$1" == "stack" && "$2" == "extra" ]]; then
|
|||
run_etcd_legacy
|
||||
fi
|
||||
|
||||
get_container "$KURYR_HYPERKUBE_IMAGE" "$KURYR_HYPERKUBE_VERSION"
|
||||
prepare_kubernetes_files
|
||||
# FIXME(apuimedo): Allow running only openshift node for multinode devstack
|
||||
# We are missing generating a node config so that it does not need to
|
||||
# bootstrap from the master config.
|
||||
if is_service_enabled openshift-master || is_service_enabled openshift-node; then
|
||||
install_openshift_binary
|
||||
fi
|
||||
if is_service_enabled openshift-master; then
|
||||
run_openshift_master
|
||||
make_admin_cluster_admin
|
||||
fi
|
||||
if is_service_enabled openshift-node; then
|
||||
prepare_kubelet
|
||||
run_openshift_node
|
||||
fi
|
||||
|
||||
if is_service_enabled kubernetes-api \
|
||||
|| is_service_enabled kubernetes-controller-manager \
|
||||
|| is_service_enabled kubernetes-scheduler \
|
||||
|| is_service_enabled kubelet; then
|
||||
get_container "$KURYR_HYPERKUBE_IMAGE" "$KURYR_HYPERKUBE_VERSION"
|
||||
prepare_kubernetes_files
|
||||
fi
|
||||
|
||||
if is_service_enabled kubernetes-api; then
|
||||
run_k8s_api
|
||||
fi
|
||||
|
@ -635,6 +678,12 @@ if [[ "$1" == "unstack" ]]; then
|
|||
if is_service_enabled kubernetes-api; then
|
||||
stop_container kubernetes-api
|
||||
fi
|
||||
if is_service_enabled openshift-master; then
|
||||
stop_process openshift-master
|
||||
fi
|
||||
if is_service_enabled openshift-node; then
|
||||
stop_process openshift-node
|
||||
fi
|
||||
if is_service_enabled legacy_etcd; then
|
||||
stop_container etcd
|
||||
fi
|
||||
|
|
|
@ -43,6 +43,13 @@ KURYR_K8S_API_KEY=${KURYR_K8S_API_KEY:-}
|
|||
KURYR_K8S_API_CACERT=${KURYR_K8S_API_CACERT:-}
|
||||
KURYR_PORT_DEBUG=${KURYR_PORT_DEBUG:-True}
|
||||
|
||||
# OpenShift
|
||||
OPENSHIFT_BINARY_URL=${OPENSHIFT_BINARY_URL:-https://github.com/openshift/origin/releases/download/v3.6.0/openshift-origin-server-v3.6.0-c4dd4cf-linux-64bit.tar.gz}
|
||||
OPENSHIFT_HOME=${OPENSHIFT_HOME:-$DEST/openshift}
|
||||
OPENSHIFT_DATA_DIR=${OPENSHIFT_DATA_DIR:-${DATA_DIR}/openshift}
|
||||
OPENSHIFT_API_URL=${OPENSHIFT_API_URL:-https://${HOST_IP}:8443}
|
||||
OPENSHIFT_CNI_BINARY_URL=${OPENSHIFT_CNI_BINARY_URL:-https://github.com/containernetworking/cni/releases/download/v0.5.2/cni-v0.5.2.tgz}
|
||||
|
||||
# Octavia
|
||||
KURYR_K8S_LBAAS_USE_OCTAVIA=${KURYR_K8S_LBAAS_USE_OCTAVIA:-True}
|
||||
KURYR_K8S_OCTAVIA_MEMBER_MODE=${KURYR_K8S_OCTAVIA_MEMBER_MODE:-L3}
|
||||
|
|
Loading…
Reference in New Issue