Merge "zuul.d: Adding multinode job"
This commit is contained in:
commit
6f9f7fb11c
1
tools/deployment/multinode/045-nfs-provisioner.sh
Symbolic link
1
tools/deployment/multinode/045-nfs-provisioner.sh
Symbolic link
@ -0,0 +1 @@
|
||||
../developer/nfs/040-nfs-provisioner.sh
|
@@ -19,11 +19,21 @@ tee /tmp/mariadb.yaml << EOF
pod:
  replicas:
    server: 3
    ingress: 3
    ingress: 2
EOF
: ${OSH_INFRA_PATH:="../openstack-helm-infra"}
helm upgrade --install mariadb ${OSH_INFRA_PATH}/mariadb \

export HELM_CHART_ROOT_PATH="${HELM_CHART_ROOT_PATH:="${OSH_INFRA_PATH:="../openstack-helm-infra"}"}"
: ${OSH_EXTRA_HELM_ARGS_MARIADB:="$(./tools/deployment/common/get-values-overrides.sh mariadb)"}

#NOTE: Lint and package chart
make -C ${HELM_CHART_ROOT_PATH} mariadb

#NOTE: Deploy command
: ${OSH_EXTRA_HELM_ARGS:=""}
helm upgrade --install mariadb ${HELM_CHART_ROOT_PATH}/mariadb \
  --namespace=openstack \
  --set volume.use_local_path_for_single_pod_cluster.enabled=true \
  --set volume.enabled=false \
  --values=/tmp/mariadb.yaml \
  ${OSH_EXTRA_HELM_ARGS} \
  ${OSH_EXTRA_HELM_ARGS_MARIADB}
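The hunk above scales MariaDB out for multinode by overriding pod.replicas via /tmp/mariadb.yaml. A minimal, hedged check that the overrides landed after the deploy, assuming the openstack-helm convention of labelling MariaDB resources with "application: mariadb" (not part of the commit):

# List MariaDB workloads in the openstack namespace with their replica counts
kubectl --namespace openstack get statefulsets,deployments \
  -l application=mariadb \
  -o custom-columns=NAME:.metadata.name,REPLICAS:.spec.replicas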
@@ -1,29 +0,0 @@
#!/bin/bash

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

set -xe

#NOTE: Deploy command
: ${OSH_INFRA_PATH:="../openstack-helm-infra"}
: ${OSH_EXTRA_HELM_ARGS:=""}
helm upgrade --install rabbitmq ${OSH_INFRA_PATH}/rabbitmq \
  --namespace=openstack \
  ${OSH_EXTRA_HELM_ARGS} \
  ${OSH_EXTRA_HELM_ARGS_RABBITMQ}

#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack

#NOTE: Validate Deployment info
helm status rabbitmq
tools/deployment/multinode/060-rabbitmq.sh (symbolic link, 1 line)
@@ -0,0 +1 @@
../component/common/rabbitmq.sh
@ -14,7 +14,15 @@
|
||||
|
||||
set -xe
|
||||
|
||||
#NOTE: Get the over-rides to use
|
||||
: ${OSH_EXTRA_HELM_ARGS_KEYSTONE:="$(./tools/deployment/common/get-values-overrides.sh keystone)"}
|
||||
: ${RUN_HELM_TESTS:="yes"}
|
||||
|
||||
#NOTE: Lint and package chart
|
||||
make keystone
|
||||
|
||||
#NOTE: Deploy command
|
||||
: ${OSH_EXTRA_HELM_ARGS:=""}
|
||||
helm upgrade --install keystone ./keystone \
|
||||
--namespace=openstack \
|
||||
--set pod.replicas.api=2 \
|
||||
@ -29,6 +37,7 @@ helm status keystone
|
||||
export OS_CLOUD=openstack_helm
|
||||
sleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx
|
||||
openstack endpoint list
|
||||
# Delete the test pod if it still exists
|
||||
kubectl delete pods -l application=keystone,release_group=keystone,component=test --namespace=openstack --ignore-not-found
|
||||
helm test keystone --timeout 900
|
||||
|
||||
if [ "x${RUN_HELM_TESTS}" != "xno" ]; then
|
||||
./tools/deployment/common/run-helm-tests.sh keystone
|
||||
fi
|
||||
|
@ -17,14 +17,22 @@
|
||||
|
||||
set -xe
|
||||
|
||||
#NOTE: Get the over-rides to use
|
||||
: ${OSH_EXTRA_HELM_ARGS_HORIZON:="$(./tools/deployment/common/get-values-overrides.sh horizon)"}
|
||||
|
||||
#NOTE: Lint and package chart
|
||||
make horizon
|
||||
|
||||
#NOTE: Deploy command
|
||||
tee /tmp/horizon.yaml <<EOF
|
||||
pod:
|
||||
replicas:
|
||||
server: 2
|
||||
EOF
|
||||
: ${OSH_EXTRA_HELM_ARGS:=""}
|
||||
helm upgrade --install horizon ./horizon \
|
||||
--namespace=openstack \
|
||||
--set manifests.network_policy=true \
|
||||
--values=/tmp/horizon.yaml \
|
||||
${OSH_EXTRA_HELM_ARGS} \
|
||||
${OSH_EXTRA_HELM_ARGS_HORIZON}
|
||||
|
||||
@ -33,3 +41,5 @@ helm upgrade --install horizon ./horizon \
|
||||
|
||||
#NOTE: Validate Deployment info
|
||||
helm status horizon
|
||||
|
||||
helm test horizon
|
||||
|
@ -14,7 +14,15 @@
|
||||
|
||||
set -xe
|
||||
|
||||
#NOTE: Get the over-rides to use
|
||||
export HELM_CHART_ROOT_PATH="${HELM_CHART_ROOT_PATH:="${OSH_INFRA_PATH:="../openstack-helm-infra"}"}"
|
||||
: ${OSH_EXTRA_HELM_ARGS_CEPH_RGW:="$(./tools/deployment/common/get-values-overrides.sh ceph-rgw)"}
|
||||
|
||||
#NOTE: Lint and package chart
|
||||
make -C ${HELM_CHART_ROOT_PATH} ceph-rgw
|
||||
|
||||
#NOTE: Deploy command
|
||||
: ${OSH_EXTRA_HELM_ARGS:=""}
|
||||
CEPH_PUBLIC_NETWORK="$(./tools/deployment/multinode/kube-node-subnet.sh)"
|
||||
CEPH_CLUSTER_NETWORK="$(./tools/deployment/multinode/kube-node-subnet.sh)"
|
||||
tee /tmp/radosgw-openstack.yaml <<EOF
|
||||
@ -35,39 +43,14 @@ bootstrap:
|
||||
conf:
|
||||
rgw_ks:
|
||||
enabled: true
|
||||
network_policy:
|
||||
ceph:
|
||||
ingress:
|
||||
- from:
|
||||
- podSelector:
|
||||
matchLabels:
|
||||
application: glance
|
||||
- podSelector:
|
||||
matchLabels:
|
||||
application: cinder
|
||||
- podSelector:
|
||||
matchLabels:
|
||||
application: libvirt
|
||||
- podSelector:
|
||||
matchLabels:
|
||||
application: nova
|
||||
- podSelector:
|
||||
matchLabels:
|
||||
application: ceph
|
||||
- podSelector:
|
||||
matchLabels:
|
||||
application: ingress
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 8088
|
||||
manifests:
|
||||
network_policy: true
|
||||
pod:
|
||||
replicas:
|
||||
rgw: 1
|
||||
EOF
|
||||
|
||||
: ${OSH_INFRA_PATH:="../openstack-helm-infra"}
|
||||
helm upgrade --install radosgw-openstack ${OSH_INFRA_PATH}/ceph-rgw \
|
||||
--namespace=openstack \
|
||||
--set manifests.network_policy=true \
|
||||
--values=/tmp/radosgw-openstack.yaml \
|
||||
${OSH_EXTRA_HELM_ARGS} \
|
||||
${OSH_EXTRA_HELM_ARGS_HEAT}
|
||||
|
@ -14,10 +14,16 @@
|
||||
|
||||
set -xe
|
||||
|
||||
#NOTE: Get the over-rides to use
|
||||
: ${OSH_EXTRA_HELM_ARGS_GLANCE:="$(./tools/deployment/common/get-values-overrides.sh glance)"}
|
||||
: ${RUN_HELM_TESTS:="yes"}
|
||||
|
||||
#NOTE: Lint and package chart
|
||||
make glance
|
||||
|
||||
#NOTE: Deploy command
|
||||
: ${OSH_OPENSTACK_RELEASE:="newton"}
|
||||
#NOTE(portdirect), this could be: radosgw, rbd, swift or pvc
|
||||
: ${GLANCE_BACKEND:="swift"}
|
||||
: ${OSH_EXTRA_HELM_ARGS:=""}
|
||||
: ${GLANCE_BACKEND:="pvc"}
|
||||
tee /tmp/glance.yaml <<EOF
|
||||
storage: ${GLANCE_BACKEND}
|
||||
pod:
|
||||
@ -25,25 +31,11 @@ pod:
|
||||
api: 2
|
||||
registry: 2
|
||||
EOF
|
||||
if [ "x${OSH_OPENSTACK_RELEASE}" == "xnewton" ]; then
|
||||
# NOTE(portdirect): glance APIv1 is required for heat in Newton
|
||||
tee -a /tmp/glance.yaml <<EOF
|
||||
conf:
|
||||
glance:
|
||||
DEFAULT:
|
||||
enable_v1_api: true
|
||||
enable_v2_registry: true
|
||||
manifests:
|
||||
deployment_registry: true
|
||||
ingress_registry: true
|
||||
pdb_registry: true
|
||||
service_ingress_registry: true
|
||||
EOF
|
||||
fi
|
||||
|
||||
helm upgrade --install glance ./glance \
|
||||
--namespace=openstack \
|
||||
--values=/tmp/glance.yaml \
|
||||
${OSH_EXTRA_HELM_ARGS} \
|
||||
${OSH_EXTRA_HELM_ARGS:=} \
|
||||
${OSH_EXTRA_HELM_ARGS_GLANCE}
|
||||
|
||||
#NOTE: Wait for deploy
|
||||
@ -56,6 +48,9 @@ openstack service list
|
||||
sleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx
|
||||
openstack image list
|
||||
openstack image show 'Cirros 0.3.5 64-bit'
|
||||
# Delete the test pod if it still exists
|
||||
kubectl delete pods -l application=glance,release_group=glance,component=test --namespace=openstack --ignore-not-found
|
||||
helm test glance --timeout 900
|
||||
|
||||
if [ "x${RUN_HELM_TESTS}" == "xno" ]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
./tools/deployment/common/run-helm-tests.sh glance
|
||||
|
@ -13,18 +13,55 @@
|
||||
# under the License.
|
||||
set -xe
|
||||
|
||||
#NOTE: Get the over-rides to use
|
||||
: ${OSH_EXTRA_HELM_ARGS_CINDER:="$(./tools/deployment/common/get-values-overrides.sh cinder)"}
|
||||
|
||||
#NOTE: Lint and package chart
|
||||
make cinder
|
||||
|
||||
#NOTE: Deploy command
|
||||
tee /tmp/cinder.yaml << EOF
|
||||
conf:
|
||||
ceph:
|
||||
pools:
|
||||
backup:
|
||||
replication: 1
|
||||
crush_rule: same_host
|
||||
chunk_size: 8
|
||||
app_name: cinder-backup
|
||||
# default pool used by rbd1 backend
|
||||
cinder.volumes:
|
||||
replication: 1
|
||||
crush_rule: same_host
|
||||
chunk_size: 8
|
||||
app_name: cinder-volume
|
||||
# secondary pool used by rbd2 backend
|
||||
cinder.volumes.gold:
|
||||
replication: 1
|
||||
crush_rule: same_host
|
||||
chunk_size: 8
|
||||
app_name: cinder-volume
|
||||
backends:
|
||||
# add an extra storage backend same values as rbd1 (see
|
||||
# cinder/values.yaml) except for volume_backend_name and rbd_pool
|
||||
rbd2:
|
||||
volume_driver: cinder.volume.drivers.rbd.RBDDriver
|
||||
volume_backend_name: rbd2
|
||||
rbd_pool: cinder.volumes.gold
|
||||
rbd_ceph_conf: "/etc/ceph/ceph.conf"
|
||||
rbd_flatten_volume_from_snapshot: false
|
||||
report_discard_supported: true
|
||||
rbd_max_clone_depth: 5
|
||||
rbd_store_chunk_size: 4
|
||||
rados_connect_timeout: -1
|
||||
rbd_user: cinder
|
||||
rbd_secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
|
||||
pod:
|
||||
replicas:
|
||||
api: 2
|
||||
volume: 1
|
||||
scheduler: 1
|
||||
backup: 1
|
||||
conf:
|
||||
cinder:
|
||||
DEFAULT:
|
||||
backup_driver: cinder.backup.drivers.swift
|
||||
EOF
|
||||
helm upgrade --install cinder ./cinder \
|
||||
--namespace=openstack \
|
||||
@ -40,6 +77,8 @@ export OS_CLOUD=openstack_helm
|
||||
openstack service list
|
||||
sleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx
|
||||
openstack volume type list
|
||||
openstack volume type list --default
|
||||
|
||||
# Delete the test pod if it still exists
|
||||
kubectl delete pods -l application=cinder,release_group=cinder,component=test --namespace=openstack --ignore-not-found
|
||||
helm test cinder --timeout 900
|
||||
|
@ -13,8 +13,14 @@
|
||||
# under the License.
|
||||
set -xe
|
||||
|
||||
#NOTE: Deploy command
|
||||
#NOTE: Get the over-rides to use
|
||||
: ${OSH_INFRA_PATH:="../openstack-helm-infra"}
|
||||
: ${OSH_EXTRA_HELM_ARGS_OPENVSWITCH:="$(./tools/deployment/common/get-values-overrides.sh openvswitch)"}
|
||||
|
||||
#NOTE: Lint and package chart
|
||||
make -C ${OSH_INFRA_PATH} openvswitch
|
||||
|
||||
#NOTE: Deploy command
|
||||
helm upgrade --install openvswitch ${OSH_INFRA_PATH}/openvswitch \
|
||||
--namespace=openstack \
|
||||
${OSH_EXTRA_HELM_ARGS} \
|
||||
|
@ -13,10 +13,24 @@
|
||||
# under the License.
|
||||
set -xe
|
||||
|
||||
#NOTE: Deploy libvirt
|
||||
export OS_CLOUD=openstack_helm
|
||||
CEPH_ENABLED=false
|
||||
if openstack service list -f value -c Type | grep -q "^volume" && \
|
||||
openstack volume type list -f value -c Name | grep -q "rbd"; then
|
||||
CEPH_ENABLED=true
|
||||
fi
|
||||
|
||||
#NOTE: Get the over-rides to use
|
||||
: ${OSH_INFRA_PATH:="../openstack-helm-infra"}
|
||||
: ${OSH_EXTRA_HELM_ARGS_LIBVIRT:="$(./tools/deployment/common/get-values-overrides.sh libvirt)"}
|
||||
|
||||
#NOTE: Lint and package chart
|
||||
make -C ${OSH_INFRA_PATH} libvirt
|
||||
|
||||
#NOTE: Deploy libvirt
|
||||
helm upgrade --install libvirt ${OSH_INFRA_PATH}/libvirt \
|
||||
--namespace=openstack \
|
||||
--set conf.ceph.enabled=${CEPH_ENABLED} \
|
||||
${OSH_EXTRA_HELM_ARGS} \
|
||||
${OSH_EXTRA_HELM_ARGS_LIBVIRT}
|
||||
|
||||
|
@ -13,120 +13,112 @@
|
||||
# under the License.
|
||||
set -xe
|
||||
|
||||
#NOTE: Deploy nova
|
||||
tee /tmp/nova.yaml << EOF
|
||||
labels:
|
||||
api_metadata:
|
||||
node_selector_key: openstack-helm-node-class
|
||||
node_selector_value: primary
|
||||
: ${RUN_HELM_TESTS:="yes"}
|
||||
|
||||
export OS_CLOUD=openstack_helm
|
||||
CEPH_ENABLED=false
|
||||
if openstack service list -f value -c Type | grep -q "^volume" && \
|
||||
openstack volume type list -f value -c Name | grep -q "rbd"; then
|
||||
CEPH_ENABLED=true
|
||||
fi
|
||||
|
||||
#NOTE: Get the overrides to use for placement, should placement be deployed.
|
||||
case "${OPENSTACK_RELEASE}" in
|
||||
"queens")
|
||||
DEPLOY_SEPARATE_PLACEMENT="no"
|
||||
;;
|
||||
"rocky")
|
||||
DEPLOY_SEPARATE_PLACEMENT="no"
|
||||
;;
|
||||
"stein")
|
||||
DEPLOY_SEPARATE_PLACEMENT="yes"
|
||||
;;
|
||||
*)
|
||||
DEPLOY_SEPARATE_PLACEMENT="yes"
|
||||
;;
|
||||
esac
|
||||
|
||||
if [[ "${DEPLOY_SEPARATE_PLACEMENT}" == "yes" ]]; then
|
||||
# Get overrides
|
||||
: ${OSH_EXTRA_HELM_ARGS_PLACEMENT:="$(./tools/deployment/common/get-values-overrides.sh placement)"}
|
||||
|
||||
# Lint and package
|
||||
make placement
|
||||
|
||||
tee /tmp/placement.yaml << EOF
|
||||
pod:
|
||||
replicas:
|
||||
api: 2
|
||||
EOF
|
||||
# Deploy
|
||||
helm upgrade --install placement ./placement \
|
||||
--namespace=openstack \
|
||||
--values=/tmp/placement.yaml \
|
||||
${OSH_EXTRA_HELM_ARGS:=} \
|
||||
${OSH_EXTRA_HELM_ARGS_PLACEMENT}
|
||||
fi
|
||||
|
||||
#NOTE: Get the over-rides to use
|
||||
: ${OSH_EXTRA_HELM_ARGS_NOVA:="$(./tools/deployment/common/get-values-overrides.sh nova)"}
|
||||
|
||||
# TODO: Revert this reasoning when gates are pointing to a more up-to-date
# openstack release. When doing so, we should revert the default
# values of the nova chart to NOT use placement by default, and
# have an ocata/pike/queens/rocky/stein override to enable placement in the nova chart deploy
|
||||
|
||||
if [[ "${DEPLOY_SEPARATE_PLACEMENT}" == "yes" ]]; then
|
||||
OSH_EXTRA_HELM_ARGS_NOVA="${OSH_EXTRA_HELM_ARGS_NOVA} --values=./nova/values_overrides/train-disable-nova-placement.yaml"
|
||||
fi
|
||||
|
||||
#NOTE: Lint and package chart
|
||||
make nova
|
||||
|
||||
#NOTE: Deploy nova
|
||||
tee /tmp/nova.yaml << EOF
|
||||
pod:
|
||||
replicas:
|
||||
api_metadata: 1
|
||||
placement: 2
|
||||
osapi: 2
|
||||
conductor: 2
|
||||
consoleauth: 2
|
||||
scheduler: 1
|
||||
novncproxy: 1
|
||||
EOF
|
||||
if [[ "${DEPLOY_SEPARATE_PLACEMENT}" == "no" ]]; then
|
||||
echo " placement: 2" >> /tmp/nova.yaml
|
||||
fi
|
||||
|
||||
function kvm_check () {
|
||||
POD_NAME="tmp-$(cat /dev/urandom | env LC_CTYPE=C tr -dc a-z | head -c 5; echo)"
|
||||
cat <<EOF | kubectl apply -f - 1>&2;
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: ${POD_NAME}
|
||||
spec:
|
||||
hostPID: true
|
||||
restartPolicy: Never
|
||||
containers:
|
||||
- name: util
|
||||
securityContext:
|
||||
privileged: true
|
||||
image: docker.io/busybox:latest
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
- |
|
||||
nsenter -t1 -m -u -n -i -- sh -c "kvm-ok >/dev/null && echo yes || echo no"
|
||||
EOF
|
||||
end=$(($(date +%s) + 900))
|
||||
until kubectl get pod/${POD_NAME} -o go-template='{{.status.phase}}' | grep -q Succeeded; do
|
||||
now=$(date +%s)
|
||||
[ $now -gt $end ] && echo containers failed to start. && \
|
||||
kubectl get pod/${POD_NAME} -o wide && exit 1
|
||||
done
|
||||
kubectl logs pod/${POD_NAME}
|
||||
kubectl delete pod/${POD_NAME} 1>&2;
|
||||
}
|
||||
|
||||
if [ "x$(kvm_check)" == "xyes" ]; then
|
||||
#NOTE: Deploy nova
|
||||
: ${OSH_EXTRA_HELM_ARGS:=""}
|
||||
if [ "x$(systemd-detect-virt)" == "xnone" ]; then
|
||||
echo 'OSH is not being deployed in virtualized environment'
|
||||
helm upgrade --install nova ./nova \
|
||||
--namespace=openstack \
|
||||
--values=/tmp/nova.yaml \
|
||||
${OSH_EXTRA_HELM_ARGS} \
|
||||
--set bootstrap.wait_for_computes.enabled=true \
|
||||
--set conf.ceph.enabled=${CEPH_ENABLED} \
|
||||
${OSH_EXTRA_HELM_ARGS:=} \
|
||||
${OSH_EXTRA_HELM_ARGS_NOVA}
|
||||
else
|
||||
echo 'OSH is being deployed in virtualized environment, using qemu for nova'
|
||||
helm upgrade --install nova ./nova \
|
||||
--namespace=openstack \
|
||||
--values=/tmp/nova.yaml \
|
||||
--set bootstrap.wait_for_computes.enabled=true \
|
||||
--set conf.ceph.enabled=${CEPH_ENABLED} \
|
||||
--set conf.nova.libvirt.virt_type=qemu \
|
||||
--set conf.nova.libvirt.cpu_mode=none \
|
||||
${OSH_EXTRA_HELM_ARGS} \
|
||||
${OSH_EXTRA_HELM_ARGS:=} \
|
||||
${OSH_EXTRA_HELM_ARGS_NOVA}
|
||||
fi
|
||||
|
||||
#NOTE: Deploy neutron, for simplicity we will assume the default route device
|
||||
# should be used for tunnels
|
||||
function network_tunnel_dev () {
|
||||
POD_NAME="tmp-$(cat /dev/urandom | env LC_CTYPE=C tr -dc a-z | head -c 5; echo)"
|
||||
cat <<EOF | kubectl apply -f - 1>&2;
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: ${POD_NAME}
|
||||
spec:
|
||||
hostNetwork: true
|
||||
restartPolicy: Never
|
||||
containers:
|
||||
- name: util
|
||||
image: docker.io/busybox:latest
|
||||
command:
|
||||
- 'ip'
|
||||
- '-4'
|
||||
- 'route'
|
||||
- 'list'
|
||||
- '0/0'
|
||||
EOF
|
||||
end=$(($(date +%s) + 900))
|
||||
until kubectl get pod/${POD_NAME} -o go-template='{{.status.phase}}' | grep -q Succeeded; do
|
||||
now=$(date +%s)
|
||||
[ $now -gt $end ] && echo containers failed to start. && \
|
||||
kubectl get pod/${POD_NAME} -o wide && exit 1
|
||||
done
|
||||
kubectl logs pod/${POD_NAME} | awk '{ print $5; exit }'
|
||||
kubectl delete pod/${POD_NAME} 1>&2;
|
||||
}
|
||||
#NOTE: Get the over-rides to use
|
||||
: ${OSH_EXTRA_HELM_ARGS_NEUTRON:="$(./tools/deployment/common/get-values-overrides.sh neutron)"}
|
||||
|
||||
#NOTE: Lint and package chart
|
||||
make neutron
|
||||
|
||||
NETWORK_TUNNEL_DEV="$(network_tunnel_dev)"
|
||||
tee /tmp/neutron.yaml << EOF
|
||||
network:
|
||||
interface:
|
||||
tunnel: "${NETWORK_TUNNEL_DEV}"
|
||||
labels:
|
||||
agent:
|
||||
dhcp:
|
||||
node_selector_key: openstack-helm-node-class
|
||||
node_selector_value: primary
|
||||
l3:
|
||||
node_selector_key: openstack-helm-node-class
|
||||
node_selector_value: primary
|
||||
metadata:
|
||||
node_selector_key: openstack-helm-node-class
|
||||
node_selector_value: primary
|
||||
tunnel: docker0
|
||||
pod:
|
||||
replicas:
|
||||
server: 2
|
||||
@ -146,15 +138,11 @@ conf:
|
||||
tunnel_types: vxlan
|
||||
ovs:
|
||||
bridge_mappings: public:br-ex
|
||||
linuxbridge_agent:
|
||||
linux_bridge:
|
||||
bridge_mappings: public:br-ex
|
||||
EOF
|
||||
|
||||
if [ -n "$OSH_OPENSTACK_RELEASE" ]; then
|
||||
if [ -e "./neutron/values_overrides/${OSH_OPENSTACK_RELEASE}.yaml" ] ; then
|
||||
echo "Adding release overrides for ${OSH_OPENSTACK_RELEASE}"
|
||||
OSH_RELEASE_OVERRIDES_NEUTRON="--values=./neutron/values_overrides/${OSH_OPENSTACK_RELEASE}.yaml"
|
||||
fi
|
||||
fi
|
||||
|
||||
helm upgrade --install neutron ./neutron \
|
||||
--namespace=openstack \
|
||||
--values=/tmp/neutron.yaml \
|
||||
@ -162,6 +150,10 @@ helm upgrade --install neutron ./neutron \
|
||||
${OSH_EXTRA_HELM_ARGS} \
|
||||
${OSH_EXTRA_HELM_ARGS_NEUTRON}
|
||||
|
||||
# If the compute kit is installed using Tungsten Fabric, it will come alive once Tungsten Fabric becomes active.
|
||||
if [[ "$FEATURE_GATES" =~ (,|^)tf(,|$) ]]; then
|
||||
exit 0
|
||||
fi
|
||||
#NOTE: Wait for deploy
|
||||
./tools/deployment/common/wait-for-pods.sh openstack
|
||||
|
||||
@ -171,10 +163,11 @@ openstack service list
|
||||
sleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx
|
||||
openstack compute service list
|
||||
openstack network agent list
|
||||
# Delete the test pods if they still exist
|
||||
kubectl delete pods -l application=nova,release_group=nova,component=test --namespace=openstack --ignore-not-found
|
||||
kubectl delete pods -l application=neutron,release_group=neutron,component=test --namespace=openstack --ignore-not-found
|
||||
openstack hypervisor list
|
||||
|
||||
timeout=${OSH_TEST_TIMEOUT:-900}
|
||||
helm test nova --timeout $timeout
|
||||
helm test neutron --timeout $timeout
|
||||
if [ "x${RUN_HELM_TESTS}" == "xno" ]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
./tools/deployment/common/run-helm-tests.sh nova
|
||||
./tools/deployment/common/run-helm-tests.sh neutron
|
||||
|
@ -13,7 +13,12 @@
|
||||
# under the License.
|
||||
set -xe
|
||||
|
||||
#NOTE: Deploy command
|
||||
#NOTE: Get the over-rides to use
|
||||
: ${OSH_EXTRA_HELM_ARGS_HEAT:="$(./tools/deployment/common/get-values-overrides.sh heat)"}
|
||||
|
||||
#NOTE: Lint and package chart
|
||||
make heat
|
||||
|
||||
tee /tmp/heat.yaml << EOF
|
||||
pod:
|
||||
replicas:
|
||||
@ -22,6 +27,9 @@ pod:
|
||||
cloudwatch: 2
|
||||
engine: 2
|
||||
EOF
|
||||
|
||||
#NOTE: Deploy command
|
||||
: ${OSH_EXTRA_HELM_ARGS:=""}
|
||||
helm upgrade --install heat ./heat \
|
||||
--namespace=openstack \
|
||||
--values=/tmp/heat.yaml \
|
||||
@ -34,8 +42,7 @@ helm upgrade --install heat ./heat \
|
||||
#NOTE: Validate Deployment info
|
||||
export OS_CLOUD=openstack_helm
|
||||
openstack service list
|
||||
openstack endpoint list
|
||||
sleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx
|
||||
openstack orchestration service list
|
||||
# Delete the test pod if it still exists
|
||||
kubectl delete pods -l application=heat,release_group=heat,component=test --namespace=openstack --ignore-not-found
|
||||
helm test heat --timeout 900
|
||||
|
||||
openstack --os-interface internal orchestration service list
|
||||
|
tools/deployment/multinode/800-setup-gateway.sh (executable file, 50 lines)
@@ -0,0 +1,50 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
set -xe
|
||||
|
||||
# Assign IP address to br-ex
|
||||
: ${OSH_EXT_SUBNET:="172.24.4.0/24"}
|
||||
: ${OSH_BR_EX_ADDR:="172.24.4.1/24"}
|
||||
sudo ip addr add ${OSH_BR_EX_ADDR} dev br-ex
|
||||
sudo ip link set br-ex up
|
||||
|
||||
: ${DNSMASQ_IMAGE:=docker.io/openstackhelm/neutron:ocata}
|
||||
|
||||
# NOTE(portdirect): With Docker >= 1.13.1 the default FORWARD chain policy is
|
||||
# configured to DROP, for the l3 agent to function as expected and for
|
||||
# VMs to reach the outside world correctly this needs to be set to ACCEPT.
|
||||
sudo iptables -P FORWARD ACCEPT
|
||||
|
||||
# Setup masquerading on default route dev to public subnet by searching for the
|
||||
# interface with default routing, if multiple default routes exist then select
|
||||
# the one with the lowest metric.
|
||||
DEFAULT_ROUTE_DEV=$(route -n | awk '/^0.0.0.0/ { print $5 " " $NF }' | sort | awk '{ print $NF; exit }')
|
||||
sudo iptables -t nat -A POSTROUTING -o ${DEFAULT_ROUTE_DEV} -s ${OSH_EXT_SUBNET} -j MASQUERADE
|
||||
|
||||
# NOTE(portdirect): Setup DNS for public endpoints
|
||||
sudo docker run -d \
|
||||
--name br-ex-dns-server \
|
||||
--net host \
|
||||
--cap-add=NET_ADMIN \
|
||||
--volume /etc/kubernetes/kubelet-resolv.conf:/etc/kubernetes/kubelet-resolv.conf:ro \
|
||||
--entrypoint dnsmasq \
|
||||
${DNSMASQ_IMAGE} \
|
||||
--keep-in-foreground \
|
||||
--no-hosts \
|
||||
--bind-interfaces \
|
||||
--resolv-file=/etc/kubernetes/kubelet-resolv.conf \
|
||||
--address="/svc.cluster.local/${OSH_BR_EX_ADDR%/*}" \
|
||||
--listen-address="${OSH_BR_EX_ADDR%/*}"
|
||||
sleep 1
|
||||
sudo docker top br-ex-dns-server
|
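800-setup-gateway.sh above gives the node a path from the Neutron flat network to the outside world: it addresses br-ex, opens the FORWARD chain, masquerades the public subnet out the default-route device, and runs dnsmasq bound to br-ex for cluster DNS. A hedged sanity-check sketch, assuming the default 172.24.4.0/24 addressing (not part of the commit):

# br-ex should carry the gateway address
ip addr show br-ex | grep 172.24.4.1/24
# FORWARD policy must be ACCEPT for VM traffic to pass
sudo iptables -S FORWARD | grep -- '-P FORWARD ACCEPT'
# a MASQUERADE rule for the public subnet should exist
sudo iptables -t nat -S POSTROUTING | grep -- '-s 172.24.4.0/24'
# dnsmasq should answer cluster-local names at the bridge address
nslookup keystone.openstack.svc.cluster.local 172.24.4.1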
tools/deployment/multinode/900-use-it.sh (executable file, 136 lines)
@@ -0,0 +1,136 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
set -xe
|
||||
|
||||
export OS_CLOUD=openstack_helm
|
||||
|
||||
: ${OSH_EXT_NET_NAME:="public"}
|
||||
: ${OSH_EXT_SUBNET_NAME:="public-subnet"}
|
||||
: ${OSH_EXT_SUBNET:="172.24.4.0/24"}
|
||||
: ${OSH_BR_EX_ADDR:="172.24.4.1/24"}
|
||||
openstack stack create --wait \
|
||||
--parameter network_name=${OSH_EXT_NET_NAME} \
|
||||
--parameter physical_network_name=public \
|
||||
--parameter subnet_name=${OSH_EXT_SUBNET_NAME} \
|
||||
--parameter subnet_cidr=${OSH_EXT_SUBNET} \
|
||||
--parameter subnet_gateway=${OSH_BR_EX_ADDR%/*} \
|
||||
-t ./tools/gate/files/heat-public-net-deployment.yaml \
|
||||
heat-public-net-deployment
|
||||
|
||||
: ${OSH_PRIVATE_SUBNET_POOL:="10.0.0.0/8"}
|
||||
: ${OSH_PRIVATE_SUBNET_POOL_NAME:="shared-default-subnetpool"}
|
||||
: ${OSH_PRIVATE_SUBNET_POOL_DEF_PREFIX:="24"}
|
||||
openstack stack create --wait \
|
||||
--parameter subnet_pool_name=${OSH_PRIVATE_SUBNET_POOL_NAME} \
|
||||
--parameter subnet_pool_prefixes=${OSH_PRIVATE_SUBNET_POOL} \
|
||||
--parameter subnet_pool_default_prefix_length=${OSH_PRIVATE_SUBNET_POOL_DEF_PREFIX} \
|
||||
-t ./tools/gate/files/heat-subnet-pool-deployment.yaml \
|
||||
heat-subnet-pool-deployment
|
||||
|
||||
: ${OSH_EXT_NET_NAME:="public"}
|
||||
: ${OSH_VM_KEY_STACK:="heat-vm-key"}
|
||||
: ${OSH_PRIVATE_SUBNET:="10.0.0.0/24"}
|
||||
# NOTE(portdirect): We do this fancy, and seemingly pointless, footwork to get
|
||||
# the full image name for the cirros Image without having to be explicit.
|
||||
IMAGE_NAME=$(openstack image show -f value -c name \
|
||||
$(openstack image list -f csv | awk -F ',' '{ print $2 "," $1 }' | \
|
||||
grep "^\"Cirros" | head -1 | awk -F ',' '{ print $2 }' | tr -d '"'))
|
||||
|
||||
# Setup SSH Keypair in Nova
|
||||
mkdir -p ${HOME}/.ssh
|
||||
openstack keypair create --private-key ${HOME}/.ssh/osh_key ${OSH_VM_KEY_STACK}
|
||||
chmod 600 ${HOME}/.ssh/osh_key
|
||||
|
||||
openstack stack create --wait \
|
||||
--parameter public_net=${OSH_EXT_NET_NAME} \
|
||||
--parameter image="${IMAGE_NAME}" \
|
||||
--parameter ssh_key=${OSH_VM_KEY_STACK} \
|
||||
--parameter cidr=${OSH_PRIVATE_SUBNET} \
|
||||
--parameter dns_nameserver=${OSH_BR_EX_ADDR%/*} \
|
||||
-t ./tools/gate/files/heat-basic-vm-deployment.yaml \
|
||||
heat-basic-vm-deployment
|
||||
|
||||
if ! openstack server list -c Status -f value | grep -q "ACTIVE"; then
|
||||
echo "VM is not active"
|
||||
openstack server list --long
|
||||
exit -1
|
||||
fi
|
||||
|
||||
# The following checks the connectivity of the VM created.
# The networking for multinode needs to be enhanced to be able to run the
# following code. Hence this code is commented out.
|
||||
#
|
||||
# FLOATING_IP=$(openstack stack output show \
|
||||
# heat-basic-vm-deployment \
|
||||
# floating_ip \
|
||||
# -f value -c output_value)
|
||||
#
|
||||
# function wait_for_ssh_port {
|
||||
# # Default wait timeout is 300 seconds
|
||||
# set +x
|
||||
# end=$(date +%s)
|
||||
# if ! [ -z $2 ]; then
|
||||
# end=$((end + $2))
|
||||
# else
|
||||
# end=$((end + 300))
|
||||
# fi
|
||||
# while true; do
|
||||
# # Use Nmap as its the same on Ubuntu and RHEL family distros
|
||||
# nmap -Pn -p22 $1 | awk '$1 ~ /22/ {print $2}' | grep -q 'open' && \
|
||||
# break || true
|
||||
# sleep 1
|
||||
# now=$(date +%s)
|
||||
# [ $now -gt $end ] && echo "Could not connect to $1 port 22 in time" && exit -1
|
||||
# done
|
||||
# set -x
|
||||
# }
|
||||
# wait_for_ssh_port $FLOATING_IP
|
||||
#
|
||||
# # SSH into the VM and check it can reach the outside world
|
||||
# ssh-keyscan "$FLOATING_IP" >> ~/.ssh/known_hosts
|
||||
# ssh -i ${HOME}/.ssh/osh_key cirros@${FLOATING_IP} ping -q -c 1 -W 2 ${OSH_BR_EX_ADDR%/*}
|
||||
#
|
||||
# # Check the VM can reach the metadata server
|
||||
# ssh -i ${HOME}/.ssh/osh_key cirros@${FLOATING_IP} curl --verbose --connect-timeout 5 169.254.169.254
|
||||
#
|
||||
# # Check the VM can reach the keystone server
|
||||
# ssh -i ${HOME}/.ssh/osh_key cirros@${FLOATING_IP} curl --verbose --connect-timeout 5 keystone.openstack.svc.cluster.local
|
||||
#
|
||||
# # Check to see if cinder has been deployed, if it has then perform a volume attach.
|
||||
# if openstack service list -f value -c Type | grep -q "^volume"; then
|
||||
# INSTANCE_ID=$(openstack stack output show \
|
||||
# heat-basic-vm-deployment \
|
||||
# instance_uuid \
|
||||
# -f value -c output_value)
|
||||
#
|
||||
# # Get the devices that are present on the instance
|
||||
# DEVS_PRE_ATTACH=$(mktemp)
|
||||
# ssh -i ${HOME}/.ssh/osh_key cirros@${FLOATING_IP} lsblk > ${DEVS_PRE_ATTACH}
|
||||
#
|
||||
# # Create and attach a block device to the instance
|
||||
# openstack stack create --wait \
|
||||
# --parameter instance_uuid=${INSTANCE_ID} \
|
||||
# -t ./tools/gate/files/heat-vm-volume-attach.yaml \
|
||||
# heat-vm-volume-attach
|
||||
#
|
||||
# # Get the devices that are present on the instance
|
||||
# DEVS_POST_ATTACH=$(mktemp)
|
||||
# ssh -i ${HOME}/.ssh/osh_key cirros@${FLOATING_IP} lsblk > ${DEVS_POST_ATTACH}
|
||||
#
|
||||
# # Check that we have the expected number of extra devices on the instance post attach
|
||||
# if ! [ "$(comm -13 ${DEVS_PRE_ATTACH} ${DEVS_POST_ATTACH} | wc -l)" -eq "1" ]; then
|
||||
# echo "Volume not successfully attached"
|
||||
# exit 1
|
||||
# fi
|
||||
# fi
|
tools/gate/playbooks/multinode-base.yaml (normal file, 30 lines)
@@ -0,0 +1,30 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
---
|
||||
- hosts: all
|
||||
tasks:
|
||||
- name: Ensure pip
|
||||
include_role:
|
||||
name: ensure-pip
|
||||
- name: Override images
|
||||
include_role:
|
||||
name: override-images
|
||||
when: buildset_registry is defined
|
||||
- name: Use docker mirror
|
||||
include_role:
|
||||
name: use-docker-mirror
|
||||
- name: "creating directory for run artifacts"
|
||||
file:
|
||||
path: "/tmp/artifacts"
|
||||
state: directory
|
||||
...
|
tools/gate/playbooks/multinode-deploy-cinder.yaml (normal file, 80 lines)
@@ -0,0 +1,80 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
- hosts: primary
|
||||
vars_files:
|
||||
- vars.yaml
|
||||
tasks:
|
||||
- name: Setup OS and K8s Clients
|
||||
environment: "{{ multinode_env }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/010-setup-client.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy Ingress
|
||||
environment: "{{ multinode_env }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/component/common/ingress.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- hosts: all
|
||||
tasks:
|
||||
- name: Create loopback devices for CEPH on all nodes
|
||||
shell: |
|
||||
set -xe;
|
||||
pwd;
|
||||
./tools/deployment/common/setup-ceph-loopback-device.sh --ceph-osd-data /dev/loop0 --ceph-osd-dbwal /dev/loop1
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- hosts: primary
|
||||
vars_files:
|
||||
- vars.yaml
|
||||
tasks:
|
||||
- name: Deploy Ceph
|
||||
environment: "{{ multinode_env }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/030-ceph.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Setup openstack namespace for ceph
|
||||
environment: "{{ multinode_env }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/040-ceph-ns-activate.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy MariaDB RabbitMQ and Memcached
|
||||
environment: "{{ multinode_env }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/050-mariadb.sh
|
||||
./tools/deployment/multinode/060-rabbitmq.sh
|
||||
./tools/deployment/multinode/070-memcached.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy Keystone
|
||||
environment: "{{ multinode_env }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/080-keystone.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy Cinder
|
||||
environment: "{{ multinode_env }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/110-cinder.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
tools/gate/playbooks/multinode-deploy-compute-kit.yaml (normal file, 84 lines)
@@ -0,0 +1,84 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
- hosts: primary
|
||||
vars_files:
|
||||
- vars.yaml
|
||||
tasks:
|
||||
- name: Setup OS and K8s Clients
|
||||
environment: "{{ multinode_env }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/010-setup-client.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy Ingress
|
||||
environment: "{{ multinode_env }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/component/common/ingress.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy MariaDB RabbitMQ and Memcached
|
||||
environment: "{{ multinode_env }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/050-mariadb.sh
|
||||
./tools/deployment/multinode/060-rabbitmq.sh
|
||||
./tools/deployment/multinode/070-memcached.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy NFS and Keystone
|
||||
environment: "{{ multinode_env }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/045-nfs-provisioner.sh
|
||||
./tools/deployment/multinode/080-keystone.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy Glance Heat and OpenVswitch
|
||||
environment: "{{ multinode_env }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/100-glance.sh
|
||||
./tools/deployment/multinode/150-heat.sh
|
||||
./tools/deployment/multinode/120-openvswitch.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy Libvirt
|
||||
environment: "{{ multinode_env }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/130-libvirt.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy ComputeKit
|
||||
environment: "{{ multinode_env }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/140-compute-kit.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy Horizon
|
||||
environment: "{{ multinode_env }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/085-horizon.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Exercise the Cloud
|
||||
environment: "{{ multinode_env }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/900-use-it.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
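The playbooks above are normally invoked by Zuul, which supplies the inventory and the zuul.* variables. A rough local-invocation sketch, assuming an Ansible inventory defining a "primary" host and that Zuul-provided variables referenced in vars.yaml (for example zuul_site_mirror_fqdn) are supplied by hand; the inventory path and mirror hostname below are placeholders (not part of the commit):

# Run the compute-kit playbook against a hand-rolled inventory
ansible-playbook -i /path/to/inventory.ini \
  -e zuul_osh_relative_path=$(pwd) \
  -e zuul_site_mirror_fqdn=mirror.example.com \
  tools/gate/playbooks/multinode-deploy-compute-kit.yaml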
@ -1,282 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
- hosts: primary
|
||||
tasks:
|
||||
- name: Setup OS and K8s Clients
|
||||
environment:
|
||||
OSH_OPENSTACK_RELEASE: "{{ osh_openstack_release }}"
|
||||
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
|
||||
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
|
||||
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/010-setup-client.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy Ingress
|
||||
environment:
|
||||
OSH_DEPLOY_MULTINODE: True
|
||||
OSH_OPENSTACK_RELEASE: "{{ osh_openstack_release }}"
|
||||
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
|
||||
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
|
||||
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/component/common/ingress.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy Ceph
|
||||
environment:
|
||||
OSH_OPENSTACK_RELEASE: "{{ osh_openstack_release }}"
|
||||
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
|
||||
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
|
||||
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/030-ceph.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Setup openstack namespace for ceph
|
||||
environment:
|
||||
OSH_OPENSTACK_RELEASE: "{{ osh_openstack_release }}"
|
||||
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
|
||||
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
|
||||
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/040-ceph-ns-activate.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy MariaDB
|
||||
environment:
|
||||
OSH_OPENSTACK_RELEASE: "{{ osh_openstack_release }}"
|
||||
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
|
||||
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
|
||||
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/050-mariadb.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy RabbitMQ
|
||||
environment:
|
||||
OSH_OPENSTACK_RELEASE: "{{ osh_openstack_release }}"
|
||||
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
|
||||
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
|
||||
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/060-rabbitmq.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy Memcached
|
||||
environment:
|
||||
OSH_OPENSTACK_RELEASE: "{{ osh_openstack_release }}"
|
||||
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
|
||||
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
|
||||
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/070-memcached.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy Keystone
|
||||
environment:
|
||||
OSH_OPENSTACK_RELEASE: "{{ osh_openstack_release }}"
|
||||
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
|
||||
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
|
||||
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/080-keystone.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy Keystone RadosGW endpoints and user
|
||||
environment:
|
||||
OSH_OPENSTACK_RELEASE: "{{ osh_openstack_release }}"
|
||||
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
|
||||
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
|
||||
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/090-ceph-radosgateway.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy Glance
|
||||
environment:
|
||||
OSH_OPENSTACK_RELEASE: "{{ osh_openstack_release }}"
|
||||
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
|
||||
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
|
||||
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/100-glance.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy Cinder
|
||||
environment:
|
||||
OSH_OPENSTACK_RELEASE: "{{ osh_openstack_release }}"
|
||||
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
|
||||
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
|
||||
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/110-cinder.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy OpenVswitch
|
||||
environment:
|
||||
OSH_OPENSTACK_RELEASE: "{{ osh_openstack_release }}"
|
||||
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
|
||||
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
|
||||
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/120-openvswitch.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy Libvirt
|
||||
environment:
|
||||
OSH_OPENSTACK_RELEASE: "{{ osh_openstack_release }}"
|
||||
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
|
||||
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
|
||||
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/130-libvirt.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy ComputeKit
|
||||
environment:
|
||||
OSH_OPENSTACK_RELEASE: "{{ osh_openstack_release }}"
|
||||
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
|
||||
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
|
||||
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/140-compute-kit.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy Heat
|
||||
environment:
|
||||
OSH_OPENSTACK_RELEASE: "{{ osh_openstack_release }}"
|
||||
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
|
||||
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
|
||||
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/150-heat.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy Barbican
|
||||
environment:
|
||||
OSH_OPENSTACK_RELEASE: "{{ osh_openstack_release }}"
|
||||
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
|
||||
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
|
||||
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/160-barbican.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy Senlin
|
||||
environment:
|
||||
OSH_OPENSTACK_RELEASE: "{{ osh_openstack_release }}"
|
||||
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
|
||||
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
|
||||
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/170-senlin.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy Mistral
|
||||
environment:
|
||||
OSH_OPENSTACK_RELEASE: "{{ osh_openstack_release }}"
|
||||
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
|
||||
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
|
||||
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/180-mistral.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy Magnum
|
||||
environment:
|
||||
OSH_OPENSTACK_RELEASE: "{{ osh_openstack_release }}"
|
||||
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
|
||||
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
|
||||
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/190-magnum.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy Congress
|
||||
environment:
|
||||
OSH_OPENSTACK_RELEASE: "{{ osh_openstack_release }}"
|
||||
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
|
||||
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
|
||||
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/200-congress.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
- name: Deploy Postgresql
|
||||
environment:
|
||||
OSH_OPENSTACK_RELEASE: "{{ osh_openstack_release }}"
|
||||
OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
|
||||
OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
|
||||
zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
|
||||
shell: |
|
||||
set -xe;
|
||||
./tools/deployment/multinode/210-postgresql.sh
|
||||
args:
|
||||
chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
# TODO(srwilkers): Disable these charts until we can determine periodic job
|
||||
# failures
|
||||
#
|
||||
# - name: Deploy Gnocchi
|
||||
# environment:
|
||||
# OSH_OPENSTACK_RELEASE: "{{ osh_openstack_release }}"
|
||||
# OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
|
||||
# OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
|
||||
# zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
|
||||
# shell: |
|
||||
# set -xe;
|
||||
# ./tools/deployment/multinode/220-gnocchi.sh
|
||||
# args:
|
||||
# chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
# - name: Deploy MongoDB
|
||||
# environment:
|
||||
# OSH_OPENSTACK_RELEASE: "{{ osh_openstack_release }}"
|
||||
# OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
|
||||
# OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
|
||||
# zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
|
||||
# shell: |
|
||||
# set -xe;
|
||||
# ./tools/deployment/multinode/230-mongodb.sh
|
||||
# args:
|
||||
# chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
||||
# - name: Deploy Ceilometer
|
||||
# environment:
|
||||
# OSH_OPENSTACK_RELEASE: "{{ osh_openstack_release }}"
|
||||
# OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
|
||||
# OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
|
||||
# zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
|
||||
# shell: |
|
||||
# set -xe;
|
||||
# ./tools/deployment/multinode/240-ceilometer.sh
|
||||
# args:
|
||||
# chdir: "{{ zuul_osh_relative_path | default(zuul.project.src_dir) }}"
|
@@ -10,4 +10,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.

null: null
multinode_env:
  OSH_DEPLOY_MULTINODE: True
  OPENSTACK_RELEASE: "{{ osh_params.openstack_release | default('') }}"
  CONTAINER_DISTRO_NAME: "{{ osh_params.container_distro_name | default('') }}"
  CONTAINER_DISTRO_VERSION: "{{ osh_params.container_distro_version | default('') }}"
  FEATURE_GATES: "{{ osh_params.feature_gates | default('') }}"
  OSH_EXTRA_HELM_ARGS: "{{ zuul_osh_extra_helm_args_relative_path | default('') }}"
  OSH_INFRA_PATH: "{{ zuul_osh_infra_relative_path | default('') }}"
  zuul_site_mirror_fqdn: "{{ zuul_site_mirror_fqdn }}"
@ -723,3 +723,41 @@
|
||||
parent: openstack-helm-multinode-temp
|
||||
nodeset: openstack-helm-five-node-ubuntu
|
||||
run: tools/gate/playbooks/multinode-tempest-deploy.yaml
|
||||
|
||||
- job:
|
||||
timeout: 10800
|
||||
name: openstack-helm-multinode-compute-kit-train-ubuntu_bionic
|
||||
parent: openstack-helm-chart-deploy
|
||||
nodeset: openstack-helm-five-node-ubuntu
|
||||
vars:
|
||||
zuul_osh_infra_relative_path: ../openstack-helm-infra/
|
||||
osh_params:
|
||||
openstack_release: train
|
||||
container_distro_name: ubuntu
|
||||
container_distro_version: bionic
|
||||
pre-run:
|
||||
- tools/gate/playbooks/multinode-base.yaml
|
||||
- tools/gate/playbooks/osh-infra-upgrade-host.yaml
|
||||
- tools/gate/playbooks/osh-infra-deploy-docker.yaml
|
||||
- tools/gate/playbooks/osh-infra-build.yaml
|
||||
- tools/gate/playbooks/osh-infra-deploy-k8s.yaml
|
||||
run: tools/gate/playbooks/multinode-deploy-compute-kit.yaml
|
||||
|
||||
- job:
|
||||
timeout: 9600
|
||||
name: openstack-helm-multinode-cinder-train-ubuntu_bionic
|
||||
parent: openstack-helm-chart-deploy
|
||||
nodeset: openstack-helm-five-node-ubuntu
|
||||
vars:
|
||||
zuul_osh_infra_relative_path: ../openstack-helm-infra/
|
||||
osh_params:
|
||||
openstack_release: train
|
||||
container_distro_name: ubuntu
|
||||
container_distro_version: bionic
|
||||
pre-run:
|
||||
- tools/gate/playbooks/multinode-base.yaml
|
||||
- tools/gate/playbooks/osh-infra-upgrade-host.yaml
|
||||
- tools/gate/playbooks/osh-infra-deploy-docker.yaml
|
||||
- tools/gate/playbooks/osh-infra-build.yaml
|
||||
- tools/gate/playbooks/osh-infra-deploy-k8s.yaml
|
||||
run: tools/gate/playbooks/multinode-deploy-cinder.yaml
|
||||
|
@@ -41,6 +41,8 @@
- openstack-helm-cinder-stein-ubuntu_bionic
- openstack-helm-compute-kit-stein-ubuntu_bionic
- openstack-helm-horizon-stein-ubuntu_bionic
- openstack-helm-multinode-compute-kit-train-ubuntu_bionic
- openstack-helm-multinode-cinder-train-ubuntu_bionic
post:
  jobs:
    - publish-openstack-helm-charts
@@ -66,6 +68,8 @@
- openstack-helm-netpol-compute-kit-train
- openstack-helm-netpol-compute-kit-ussuri
- openstack-helm-netpol-cinder
- openstack-helm-multinode-compute-kit-train-ubuntu_bionic
- openstack-helm-multinode-cinder-train-ubuntu_bionic
# NOTE(srwilkers): Disabling the following jobs until
# issues with the kubeadm-aio based deployments are addressed
# - openstack-helm-multinode-temp-ubuntu