Gate: Update multinode scripts

This PS updates the multinode deploy scripts to show output in the
gates, stop proxying the kubedns service, and use a pvc to back mariadb.

Change-Id: I78caf3f15e4c5ca33eaa1e592e8df958f13be90b
This commit is contained in:
portdirect 2018-02-08 11:18:07 -05:00
parent 61eb170f48
commit b97d6ffc61
16 changed files with 107 additions and 95 deletions

View File

@@ -17,9 +17,8 @@
set -xe
#NOTE: Deploy command
helm install ./memcached \
--namespace=openstack \
--name=memcached
helm upgrade --install memcached ./memcached \
--namespace=openstack
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack

View File

@@ -19,7 +19,8 @@ set -xe
sudo -H -E pip install python-openstackclient python-heatclient
sudo -H mkdir -p /etc/openstack
cat << EOF | sudo -H tee -a /etc/openstack/clouds.yaml
sudo -H chown -R $(id -un): /etc/openstack
tee /etc/openstack/clouds.yaml << EOF
clouds:
openstack_helm:
region_name: RegionOne
@@ -32,7 +33,6 @@ clouds:
user_domain_name: 'default'
auth_url: 'http://keystone.openstack.svc.cluster.local/v3'
EOF
sudo -H chown -R $(id -un): /etc/openstack
#NOTE: Build charts
make all

View File

@@ -17,21 +17,30 @@
set -xe
#NOTE: Deploy global ingress
helm install ./ingress \
tee /tmp/ingress-kube-system.yaml << EOF
pod:
replicas:
error_page: 2
deployment:
mode: cluster
type: DaemonSet
network:
host_namespace: true
EOF
helm upgrade --install ingress-kube-system ./ingress \
--namespace=kube-system \
--name=ingress-kube-system \
--set pod.replicas.error_page=2 \
--set deployment.mode=cluster \
--set deployment.type=DaemonSet \
--set network.host_namespace=true \
--set conf.services.udp.53='kube-system/kube-dns:53'
--values=/tmp/ingress-kube-system.yaml
#NOTE: Deploy namespace ingress
helm install ./ingress \
tee /tmp/ingress-openstack.yaml << EOF
pod:
replicas:
ingress: 2
error_page: 2
EOF
helm upgrade --install ingress-openstack ./ingress \
--namespace=openstack \
--name=ingress-openstack \
--set pod.replicas.ingress=2 \
--set pod.replicas.error_page=2
--values=/tmp/ingress-openstack.yaml
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh kube-system

View File

@@ -18,9 +18,10 @@ set -xe
#NOTE: Deploy command
uuidgen > /tmp/ceph-fs-uuid.txt
CEPH_PUBLIC_NETWORK=$(./tools/deployment/multinode/kube-node-subnet.sh)
CEPH_CLUSTER_NETWORK=$(./tools/deployment/multinode/kube-node-subnet.sh)
cat > /tmp/ceph.yaml <<EOF
CEPH_PUBLIC_NETWORK="$(./tools/deployment/multinode/kube-node-subnet.sh)"
CEPH_CLUSTER_NETWORK="$(./tools/deployment/multinode/kube-node-subnet.sh)"
CEPH_FS_ID="$(cat /tmp/ceph-fs-uuid.txt)"
tee /tmp/ceph.yaml << EOF
endpoints:
identity:
namespace: openstack
@@ -43,13 +44,12 @@ bootstrap:
conf:
config:
global:
fsid: "$(cat /tmp/ceph-fs-uuid.txt)"
fsid: ${CEPH_FS_ID}
rgw_ks:
enabled: true
EOF
helm install ./ceph \
helm upgrade --install ceph ./ceph \
--namespace=ceph \
--name=ceph \
--values=/tmp/ceph.yaml
#NOTE: Wait for deploy

View File

@@ -17,9 +17,10 @@
set -xe
#NOTE: Deploy command
CEPH_PUBLIC_NETWORK=$(./tools/deployment/multinode/kube-node-subnet.sh)
CEPH_CLUSTER_NETWORK=$(./tools/deployment/multinode/kube-node-subnet.sh)
cat > /tmp/ceph-openstack-config.yaml <<EOF
CEPH_PUBLIC_NETWORK="$(./tools/deployment/multinode/kube-node-subnet.sh)"
CEPH_CLUSTER_NETWORK="$(./tools/deployment/multinode/kube-node-subnet.sh)"
CEPH_FS_ID="$(cat /tmp/ceph-fs-uuid.txt)"
tee /tmp/ceph-openstack-config.yaml <<EOF
endpoints:
identity:
namespace: openstack
@@ -42,17 +43,16 @@ bootstrap:
conf:
config:
global:
fsid: "$(cat /tmp/ceph-fs-uuid.txt)"
fsid: ${CEPH_FS_ID}
rgw_ks:
enabled: true
EOF
helm install ./ceph \
helm upgrade --install ceph-openstack-config ./ceph \
--namespace=openstack \
--name=ceph-openstack-config \
--values=/tmp/ceph-openstack-config.yaml
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
#NOTE: Validate Deployment info
helm status ceph
helm status ceph-openstack-config

View File

@@ -17,10 +17,8 @@
set -xe
#NOTE: Deploy command
helm install ./mariadb \
--namespace=openstack \
--name=mariadb \
--set volume.enabled=false
helm upgrade --install mariadb ./mariadb \
--namespace=openstack
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack

View File

@@ -17,9 +17,8 @@
set -xe
#NOTE: Deploy command
helm install ./rabbitmq \
--namespace=openstack \
--name=rabbitmq
helm upgrade --install rabbitmq ./rabbitmq \
--namespace=openstack
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack

View File

@@ -17,9 +17,8 @@
set -xe
#NOTE: Deploy command
helm install ./keystone \
helm upgrade --install keystone ./keystone \
--namespace=openstack \
--name=keystone \
--set pod.replicas.api=2
#NOTE: Wait for deploy

View File

@@ -17,9 +17,10 @@
set -xe
#NOTE: Deploy command
CEPH_PUBLIC_NETWORK=$(./tools/deployment/multinode/kube-node-subnet.sh)
CEPH_CLUSTER_NETWORK=$(./tools/deployment/multinode/kube-node-subnet.sh)
cat > /tmp/radosgw-openstack.yaml <<EOF
CEPH_PUBLIC_NETWORK="$(./tools/deployment/multinode/kube-node-subnet.sh)"
CEPH_CLUSTER_NETWORK="$(./tools/deployment/multinode/kube-node-subnet.sh)"
CEPH_FS_ID="$(cat /tmp/ceph-fs-uuid.txt)"
tee /tmp/radosgw-openstack.yaml <<EOF
endpoints:
identity:
namespace: openstack
@@ -42,13 +43,12 @@ bootstrap:
conf:
config:
global:
fsid: "$(cat /tmp/ceph-fs-uuid.txt)"
fsid: ${CEPH_FS_ID}
rgw_ks:
enabled: true
EOF
helm install ./ceph \
helm upgrade --install radosgw-openstack ./ceph \
--namespace=openstack \
--name=radosgw-openstack \
--values=/tmp/radosgw-openstack.yaml
#NOTE: Wait for deploy

View File

@@ -18,12 +18,16 @@ set -xe
#NOTE: Deploy command
GLANCE_BACKEND="radosgw" # NOTE(portdirect), this could be: radosgw, rbd, swift or pvc
helm install ./glance \
tee /tmp/glance.yaml << EOF
storage: ${GLANCE_BACKEND}
pod:
replicas:
api: 2
registry: 2
EOF
helm upgrade --install glance ./glance \
--namespace=openstack \
--name=glance \
--set pod.replicas.api=2 \
--set pod.replicas.registry=2 \
--set storage=${GLANCE_BACKEND}
--values=/tmp/glance.yaml
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack

View File

@@ -16,14 +16,21 @@
set -xe
#NOTE: Deploy command
helm install ./cinder \
tee /tmp/cinder.yaml << EOF
pod:
replicas:
api: 2
volume: 1
scheduler: 1
backup: 1
conf:
cinder:
DEFAULT:
backup_driver: cinder.backup.drivers.swift
EOF
helm upgrade --install cinder ./cinder \
--namespace=openstack \
--name=cinder \
--set pod.replicas.api=2 \
--set pod.replicas.volume=1 \
--set pod.replicas.scheduler=1 \
--set pod.replicas.backup=1 \
--set conf.cinder.DEFAULT.backup_driver=cinder.backup.drivers.swift
--values=/tmp/cinder.yaml
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack

View File

@@ -16,9 +16,8 @@
set -xe
#NOTE: Deploy command
helm install ./openvswitch \
--namespace=openstack \
--name=openvswitch
helm upgrade --install openvswitch ./openvswitch \
--namespace=openstack
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack

View File

@@ -16,9 +16,8 @@
set -xe
#NOTE: Deploy command
helm install ./libvirt \
--namespace=openstack \
--name=libvirt
helm upgrade --install libvirt ./libvirt \
--namespace=openstack
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack

View File

@@ -16,34 +16,31 @@
set -xe
#NOTE: Deploy nova
tee /tmp/nova.yaml << EOF
labels:
api_metadata:
node_selector_key: openstack-helm-node-class
node_selector_value: primary
pod:
replicas:
api_metadata: 1
placement: 2
osapi: 2
conductor: 2
consoleauth: 2
scheduler: 1
novncproxy: 1
EOF
if [ "x$(systemd-detect-virt)" == "xnone" ]; then
echo 'OSH is not being deployed in virtualized environment'
helm install ./nova \
helm upgrade --install nova ./nova \
--namespace=openstack \
--name=nova \
--set pod.replicas.api_metadata=1 \
--set pod.replicas.placement=2 \
--set pod.replicas.osapi=2 \
--set pod.replicas.conductor=2 \
--set pod.replicas.consoleauth=2 \
--set pod.replicas.scheduler=2 \
--set pod.replicas.novncproxy=1 \
--set labels.api_metadata.node_selector_key=openstack-helm-node-class \
--set labels.api_metadata.node_selector_value=primary
--values=/tmp/nova.yaml
else
echo 'OSH is being deployed in virtualized environment, using qemu for nova'
helm install ./nova \
helm upgrade --install nova ./nova \
--namespace=openstack \
--name=nova \
--set pod.replicas.api_metadata=1 \
--set pod.replicas.placement=2 \
--set pod.replicas.osapi=2 \
--set pod.replicas.conductor=2 \
--set pod.replicas.consoleauth=2 \
--set pod.replicas.scheduler=2 \
--set pod.replicas.novncproxy=1 \
--set labels.api_metadata.node_selector_key=openstack-helm-node-class \
--set labels.api_metadata.node_selector_value=primary \
--values=/tmp/nova.yaml \
--set conf.nova.libvirt.virt_type=qemu
fi
@@ -87,9 +84,8 @@ conf:
ovs:
bridge_mappings: public:br-ex
EOF
helm install ./neutron \
helm upgrade --install neutron ./neutron \
--namespace=openstack \
--name=neutron \
--values=/tmp/neutron.yaml
#NOTE: Wait for deploy

View File

@@ -16,13 +16,17 @@
set -xe
#NOTE: Deploy command
helm install ./heat \
tee /tmp/heat.yaml << EOF
pod:
replicas:
api: 2
cfn: 2
cloudwatch: 2
engine: 2
EOF
helm upgrade --install heat ./heat \
--namespace=openstack \
--name=heat \
--set pod.replicas.api=2 \
--set pod.replicas.cfn=2 \
--set pod.replicas.cloudwatch=2 \
--set pod.replicas.engine=2
--values=/tmp/heat.yaml
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack

View File

@@ -16,10 +16,9 @@
set -xe
#NOTE: Deploy command
helm install ./barbican \
helm upgrade --install barbican ./barbican \
--namespace=openstack \
--name=barbican \
--set pod.replicas.api=1
--set pod.replicas.api=2
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack