(gating) Add shellcheck linter for multinode

- Add a shellcheck linter for the scripts in the multinode
  framework

- Update all scripting to comply with shellcheck

- Move linting job to Ubuntu Bionic as the multinode gate now
  requires Bionic versions of libvirt

Change-Id: Ibee645331421e1e6cecd4e3daa8e9c321dce5523
This commit is contained in:
Nishant Kumar 2019-10-11 17:01:43 +00:00
parent 0d8e68c17f
commit 2b67ffaefb
38 changed files with 246 additions and 229 deletions

View File

@ -27,10 +27,16 @@
jobs:
- airship-in-a-bottle-upload-git-mirror
- nodeset:
name: airship-integration-single-node
nodes:
- name: primary
label: ubuntu-bionic
- job:
name: airship-in-a-bottle-linter
run: tools/gate/playbooks/zuul-linter.yaml
nodeset: openstack-helm-single-node
nodeset: airship-integration-single-node
- job:
name: airship-in-a-bottle-upload-git-mirror

View File

@ -15,6 +15,16 @@
- hosts: primary
tasks:
- name: Execute a Whitespace Linter check
command: find . -not -path "*/\.*" -not -path "*/doc/build/*" -not -name "*.tgz" -type f -exec egrep -l " +$" {} \;
command: find . -not -path "*/\.*" -not -path "*/doc/build/*" -not -name "*.tgz" -type f -exec egrep -ln " +$" {} \;
register: result
failed_when: result.stdout != ""
failed_when: result.stdout != ""
- name: Install shellcheck
apt:
name: "shellcheck"
become: true
- name: Execute Shellcheck Against Framework Scripts
command: find ./tools -type f -name '*.sh' -exec shellcheck -e SC1090 {} \;
args:
chdir: "{{ zuul.project.src_dir }}"
register: result
failed_when: result.stdout != ""

View File

@ -20,14 +20,14 @@ export CLUSTER_TYPE="${CLUSTER_TYPE:="node,clusterrole,clusterrolebinding,storag
export PARALLELISM_FACTOR="${PARALLELISM_FACTOR:=2}"
function list_objects () {
printf ${CLUSTER_TYPE} | xargs -d ',' -I {} -P1 -n1 bash -c 'echo "$@"' _ {}
printf "%s" ${CLUSTER_TYPE} | xargs -d ',' -I {} -P1 -n1 bash -c 'echo "$@"' _ {}
}
export -f list_objects
function name_objects () {
export OBJECT=$1
kubectl get ${OBJECT} -o name | xargs -L1 -I {} -P1 -n1 bash -c 'echo "${OBJECT} ${1#*/}"' _ {}
kubectl get "${OBJECT}" -o name | xargs -L1 -I {} -P1 -n1 bash -c "echo ${OBJECT} ${1#*/}" _ {}
}
export -f name_objects
@ -39,9 +39,9 @@ function get_objects () {
echo "${OBJECT}/${NAME}"
export BASE_DIR="${BASE_DIR:="/tmp"}"
DIR="${BASE_DIR}/objects/cluster/${OBJECT}"
mkdir -p ${DIR}
kubectl get ${OBJECT} ${NAME} -o yaml > "${DIR}/${NAME}.yaml"
kubectl describe ${OBJECT} ${NAME} > "${DIR}/${NAME}.txt"
mkdir -p "${DIR}"
kubectl get "${OBJECT}" "${NAME}" -o yaml > "${DIR}/${NAME}.yaml"
kubectl describe "${OBJECT}" "${NAME}" > "${DIR}/${NAME}.txt"
}
export -f get_objects

View File

@ -1,5 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -16,10 +15,10 @@
set -e
SCRIPT_DIR=$(realpath $(dirname $0))
WORKSPACE=$(realpath ${SCRIPT_DIR}/../../..)
GATE_UTILS=${WORKSPACE}/multi_nodes_gate/airship_gate/lib/all.sh
SCRIPT_DIR="$(realpath "$(dirname "$0")")"
WORKSPACE="$(realpath "${SCRIPT_DIR}/../../..")"
GATE_UTILS="${WORKSPACE}/multi_nodes_gate/airship_gate/lib/all.sh"
source ${GATE_UTILS}
source "${GATE_UTILS}"
drydock_cmd $@
drydock_cmd "$@"

View File

@ -25,7 +25,7 @@ function get_namespaces () {
function list_namespaced_objects () {
export NAMESPACE=$1
printf ${OBJECT_TYPE} | xargs -d ',' -I {} -P1 -n1 bash -c 'echo "${NAMESPACE} $@"' _ {}
printf "%s" "${OBJECT_TYPE}" | xargs -d ',' -I {} -P1 -n1 bash -c "echo ${NAMESPACE} ${1#*/}" _ {}
}
export -f list_namespaced_objects
@ -34,7 +34,7 @@ function name_objects () {
input=($1)
export NAMESPACE=${input[0]}
export OBJECT=${input[1]}
kubectl get -n ${NAMESPACE} ${OBJECT} -o name | xargs -L1 -I {} -P1 -n1 bash -c 'echo "${NAMESPACE} ${OBJECT} $@"' _ {}
kubectl get -n "${NAMESPACE}" "${OBJECT}" -o name | xargs -L1 -I {} -P1 -n1 bash -c "echo ${NAMESPACE} ${OBJECT} ${1#*/}" _ {}
}
export -f name_objects
@ -47,19 +47,19 @@ function get_objects () {
echo "${NAMESPACE}/${OBJECT}/${NAME}"
export BASE_DIR="${BASE_DIR:="/tmp"}"
DIR="${BASE_DIR}/namespaces/${NAMESPACE}/${OBJECT}"
mkdir -p ${DIR}
kubectl get -n ${NAMESPACE} ${OBJECT} ${NAME} -o yaml > "${DIR}/${NAME}.yaml"
kubectl describe -n ${NAMESPACE} ${OBJECT} ${NAME} > "${DIR}/${NAME}.txt"
mkdir -p "${DIR}"
kubectl get -n "${NAMESPACE}" "${OBJECT}" "${NAME}" -o yaml > "${DIR}/${NAME}.yaml"
kubectl describe -n "${NAMESPACE}" "${OBJECT}" "${NAME}" > "${DIR}/${NAME}.txt"
LOG_DIR="${BASE_DIR}/pod-logs"
mkdir -p ${LOG_DIR}
if [ ${OBJECT_TYPE} = "pods" ]; then
POD_DIR="${LOG_DIR}/${NAME}"
mkdir -p ${POD_DIR}
mkdir -p "${POD_DIR}"
CONTAINERS=$(kubectl get pod "${NAME}" -n "${NAMESPACE}" -o json | jq -r '.spec.containers[].name')
for CONTAINER in ${CONTAINERS}; do
kubectl logs -n ${NAMESPACE} ${NAME} -c ${CONTAINER} > "${POD_DIR}/${CONTAINER}.txt"
kubectl logs -n "${NAMESPACE}" "${NAME}" -c "${CONTAINER}" > "${POD_DIR}/${CONTAINER}.txt"
done
fi
}

View File

@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -15,10 +15,10 @@
set -e
SCRIPT_DIR=$(realpath $(dirname $0))
WORKSPACE=$(realpath ${SCRIPT_DIR}/../../..)
GATE_UTILS=${WORKSPACE}/multi_nodes_gate/airship_gate/lib/all.sh
SCRIPT_DIR="$(realpath "$(dirname "$0")")"
WORKSPACE="$(realpath "${SCRIPT_DIR}/../../..")"
GATE_UTILS="${WORKSPACE}/multi_nodes_gate/airship_gate/lib/all.sh"
source ${GATE_UTILS}
source "${GATE_UTILS}"
exec rsync -e "ssh -F ${SSH_CONFIG_DIR}/config" $@
exec rsync -e "ssh -F ${SSH_CONFIG_DIR}/config" "$@"

View File

@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -15,10 +15,10 @@
set -e
SCRIPT_DIR=$(realpath $(dirname $0))
WORKSPACE=$(realpath ${SCRIPT_DIR}/../../..)
GATE_UTILS=${WORKSPACE}/multi_nodes_gate/airship_gate/lib/all.sh
SCRIPT_DIR="$(realpath "$(dirname "$0")")"
WORKSPACE="$(realpath "${SCRIPT_DIR}/../../..")"
GATE_UTILS="${WORKSPACE}/multi_nodes_gate/airship_gate/lib/all.sh"
source ${GATE_UTILS}
source "${GATE_UTILS}"
exec scp -F ${SSH_CONFIG_DIR}/config $@
exec scp -F "${SSH_CONFIG_DIR}/config" "$@"

View File

@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -15,10 +15,10 @@
set -x
SCRIPT_DIR=$(realpath $(dirname $0))
WORKSPACE=$(realpath ${SCRIPT_DIR}/../../..)
GATE_UTILS=${WORKSPACE}/multi_nodes_gate/airship_gate/lib/all.sh
SCRIPT_DIR="$(realpath "$(dirname "$0")")"
WORKSPACE="$(realpath "${SCRIPT_DIR}/../../..")"
GATE_UTILS="${WORKSPACE}/multi_nodes_gate/airship_gate/lib/all.sh"
source ${GATE_UTILS}
source "${GATE_UTILS}"
shipyard_cmd $@
shipyard_cmd "$@"

View File

@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -15,10 +15,10 @@
set -e
SCRIPT_DIR=$(realpath $(dirname $0))
WORKSPACE=$(realpath ${SCRIPT_DIR}/../../..)
GATE_UTILS=${WORKSPACE}/multi_nodes_gate/airship_gate/lib/all.sh
SCRIPT_DIR="$(realpath "$(dirname "$0")")"
WORKSPACE="$(realpath "${SCRIPT_DIR}/../../..")"
GATE_UTILS="${WORKSPACE}/multi_nodes_gate/airship_gate/lib/all.sh"
source ${GATE_UTILS}
source "${GATE_UTILS}"
exec ssh -F ${SSH_CONFIG_DIR}/config $@
exec ssh -F "${SSH_CONFIG_DIR}/config" "$@"

View File

@ -20,64 +20,64 @@ shipard_cmd_stdout() {
install_ingress_ca
ssh_cmd "${BUILD_NAME}" \
docker run -t --network=host \
--dns ${dns_server} \
--dns "${dns_server}" \
-v "${BUILD_WORK_DIR}:/work" \
-e OS_AUTH_URL=${AIRSHIP_KEYSTONE_URL} \
-e OS_AUTH_URL="${AIRSHIP_KEYSTONE_URL}" \
-e OS_USERNAME=shipyard \
-e OS_USER_DOMAIN_NAME=default \
-e OS_PASSWORD="${SHIPYARD_PASSWORD}" \
-e OS_PROJECT_DOMAIN_NAME=default \
-e OS_PROJECT_NAME=service \
-e REQUESTS_CA_BUNDLE=/work/ingress_ca.pem \
--entrypoint /usr/local/bin/shipyard "${IMAGE_SHIPYARD_CLI}" $* 2>&1
--entrypoint /usr/local/bin/shipyard "${IMAGE_SHIPYARD_CLI}" "$@" 2>&1
}
shipyard_cmd() {
if [[ ! -z "${LOG_FILE}" ]]
then
set -o pipefail
shipard_cmd_stdout $* | tee -a "${LOG_FILE}"
shipard_cmd_stdout "$@" | tee -a "${LOG_FILE}"
set +o pipefail
else
shipard_cmd_stdout $*
shipard_cmd_stdout "$@"
fi
}
drydock_cmd_stdout() {
dns_netspec="$(config_netspec_for_role "dns")"
dns_server=$(config_vm_net_ip "${BUILD_NAME}" "$dns_netspec")
dns_server="$(config_vm_net_ip "${BUILD_NAME}" "$dns_netspec")"
install_ingress_ca
ssh_cmd "${BUILD_NAME}" \
docker run -t --network=host \
--dns ${dns_server} \
--dns "${dns_server}" \
-v "${BUILD_WORK_DIR}:/work" \
-e DD_URL=http://drydock-api.ucp.svc.cluster.local:9000 \
-e OS_AUTH_URL=${AIRSHIP_KEYSTONE_URL} \
-e OS_AUTH_URL="${AIRSHIP_KEYSTONE_URL}" \
-e OS_USERNAME=shipyard \
-e OS_USER_DOMAIN_NAME=default \
-e OS_PASSWORD="${SHIPYARD_PASSWORD}" \
-e OS_PROJECT_DOMAIN_NAME=default \
-e OS_PROJECT_NAME=service \
-e REQUESTS_CA_BUNDLE=/work/ingress_ca.pem \
--entrypoint /usr/local/bin/drydock "${IMAGE_DRYDOCK_CLI}" $* 2>&1
--entrypoint /usr/local/bin/drydock "${IMAGE_DRYDOCK_CLI}" "$@" 2>&1
}
drydock_cmd() {
if [[ ! -z "${LOG_FILE}" ]]
then
set -o pipefail
drydock_cmd_stdout $* | tee -a "${LOG_FILE}"
drydock_cmd_stdout "$@" | tee -a "${LOG_FILE}"
set +o pipefail
else
drydock_cmd_stdout $*
drydock_cmd_stdout "$@"
fi
}
# Create a shipyard action
# and poll until completion
shipyard_action_wait() {
action=$1
timeout=${2:-3600}
poll_time=${3:-60}
action="$1"
timeout="${2:-3600}"
poll_time="${3:-60}"
if [[ $action == "update_site" ]]
then
@ -149,7 +149,7 @@ collect_ssh_key() {
return 0
fi
cat << EOF > ${GATE_DEPOT}/airship_ubuntu_ssh_key.yaml
cat << EOF > "${GATE_DEPOT}/airship_ubuntu_ssh_key.yaml"
---
schema: deckhand/Certificate/v1
metadata:
@ -161,6 +161,6 @@ metadata:
storagePolicy: cleartext
data: |-
EOF
cat ${SSH_CONFIG_DIR}/id_rsa.pub | sed -e 's/^/ /' >> ${GATE_DEPOT}/airship_ubuntu_ssh_key.yaml
sed -e 's/^/ /' >> "${GATE_DEPOT}/airship_ubuntu_ssh_key.yaml" < "${SSH_CONFIG_DIR}/id_rsa.pub"
}

View File

@ -1,7 +1,10 @@
#!/bin/bash
set -e
LIB_DIR=$(realpath "$(dirname "${BASH_SOURCE}")")
REPO_ROOT=$(realpath "$(dirname "${BASH_SOURCE}")/../../../..")
LIB_DIR=$(realpath "$(dirname "${BASH_SOURCE[0]}")")
export LIB_DIR
REPO_ROOT=$(realpath "$(dirname "${BASH_SOURCE[0]}")/../../../..")
export REPO_ROOT
source "$LIB_DIR"/config.sh
source "$LIB_DIR"/const.sh

View File

@ -1,13 +1,16 @@
#!/bin/bash
QUAGGA_DAEMONS="${TEMP_DIR}/daemons"
QUAGGA_DEBIAN_CONF="${TEMP_DIR}/debian.conf"
QUAGGA_BGPD_CONF="${TEMP_DIR}/bgpd.conf"
bgp_router_config() {
quagga_as_number=$(config_bgp_as "quagga_as")
calico_as_number=$(config_bgp_as "calico_as")
quagga_as_number="$(config_bgp_as "quagga_as")"
calico_as_number="$(config_bgp_as "calico_as")"
bgp_net="$(config_netspec_for_role "bgp")"
quagga_ip=$(config_vm_net_ip "build" "$bgp_net")
quagga_ip="$(config_vm_net_ip "build" "$bgp_net")"
# shellcheck disable=SC2016
QUAGGA_AS=${quagga_as_number} CALICO_AS=${calico_as_number} QUAGGA_IP=${quagga_ip} envsubst '${QUAGGA_AS} ${CALICO_AS} ${QUAGGA_IP}' < "${TEMPLATE_DIR}/bgpd_conf.sub" > "${QUAGGA_BGPD_CONF}"
cp "${TEMPLATE_DIR}/daemons.sub" "${QUAGGA_DAEMONS}"
@ -20,9 +23,9 @@ bgp_router_start() {
nodename=$1
remote_work_dir="/var/tmp/quagga"
remote_daemons_file="${remote_work_dir}/$(basename $QUAGGA_DAEMONS)"
remote_debian_conf_file="${remote_work_dir}/$(basename $QUAGGA_DEBIAN_CONF)"
remote_bgpd_conf_file="${remote_work_dir}/$(basename $QUAGGA_BGPD_CONF)"
remote_daemons_file="${remote_work_dir}/$(basename "$QUAGGA_DAEMONS")"
remote_debian_conf_file="${remote_work_dir}/$(basename "$QUAGGA_DEBIAN_CONF")"
remote_bgpd_conf_file="${remote_work_dir}/$(basename "$QUAGGA_BGPD_CONF")"
ssh_cmd "${nodename}" mkdir -p "${remote_work_dir}"
@ -30,5 +33,5 @@ bgp_router_start() {
rsync_cmd "$QUAGGA_DEBIAN_CONF" "${nodename}:${remote_debian_conf_file}"
rsync_cmd "$QUAGGA_BGPD_CONF" "${nodename}:${remote_bgpd_conf_file}"
ssh_cmd "${nodename}" docker run -ti -d --net=host --privileged -v /var/tmp/quagga:/etc/quagga --restart always --name Quagga $IMAGE_QUAGGA
ssh_cmd "${nodename}" docker run -ti -d --net=host --privileged -v /var/tmp/quagga:/etc/quagga --restart always --name Quagga "$IMAGE_QUAGGA"
}

View File

@ -1,4 +1,5 @@
#!/usr/bin/env bash
#!/bin/bash
#
# Copyright 2019 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -28,7 +29,7 @@ manifests_lookup(){
local allow_fail="$6"
FAIL=false
RESULT=`python3 -c "
RESULT=$(python3 -c "
import yaml,sys
y = yaml.load_all(open('$file'))
for x in y:
@ -50,7 +51,7 @@ for x in y:
print(x$key_path)
break
else:
sys.exit(1)" 2>&1` || FAIL=true
sys.exit(1)" 2>&1) || FAIL=true
if [[ $FAIL = true ]] && [[ $allow_fail != true ]]; then
echo "Lookup failed for schema '$schema', metadata.name '$mdata_name', key path '$key_path'"
@ -63,7 +64,8 @@ install_file(){
local path="$1"
local content="$2"
local permissions="$3"
local dirname=$(dirname "$path")
local dirname
dirname=$(dirname "$path")
if [[ ! -d $dirname ]]; then
mkdir -p "$dirname"
@ -72,9 +74,9 @@ install_file(){
if [[ ! -f $path ]] || [ "$(cat "$path")" != "$content" ]; then
echo "$content" > "$path"
chmod "$permissions" "$path"
FILE_UPDATED=true
export FILE_UPDATED=true
else
FILE_UPDATED=false
export FILE_UPDATED=false
fi
}
@ -90,7 +92,7 @@ fi
if ([[ -z $1 ]] && [[ -z $RENDERED ]]) || [[ $1 =~ .*[hH][eE][lL][pP].* ]]; then
echo "Missing required script argument"
echo "Usage: ./$(basename $BASH_SOURCE) /path/to/rendered/site/manifest.yaml"
echo "Usage: ./$(basename "${BASH_SOURCE[0]}") /path/to/rendered/site/manifest.yaml"
exit 1
fi
@ -106,8 +108,8 @@ fi
echo "Using rendered manifests file '$rendered_file'"
# env vars which can be set if you want to disable
: ${DISABLE_SECCOMP_PROFILE:=}
: ${DISABLE_APPARMOR_PROFILES:=}
: "${DISABLE_SECCOMP_PROFILE:=}"
: "${DISABLE_APPARMOR_PROFILES:=}"
###############################################################################
@ -146,8 +148,8 @@ if [[ ! $DISABLE_APPARMOR_PROFILES ]]; then
if [[ -n "$RESULT" ]] && [[ $RESULT -gt 0 ]]; then
# Fetch apparmor profile data
LAST=$(( $RESULT - 1 ))
for i in `seq 0 $LAST`; do
LAST=$(( RESULT - 1 ))
for i in $(seq 0 $LAST); do
manifests_lookup "$rendered_file" "drydock/BootAction/v1" \
"apparmor-profiles" "['data']['assets'][$i]['path']"

View File

@ -457,7 +457,7 @@ join_array() {
besteffort() {
set +e
$@
"$@"
set -e
}
@ -467,7 +467,7 @@ get_namekey() {
key=$(cat "$NAMEKEY_FILE")
else
key=$(openssl rand -hex 4)
echo -n "$key" > $NAMEKEY_FILE
echo -n "$key" > "$NAMEKEY_FILE"
fi
echo -n "$key"

View File

@ -1,3 +1,4 @@
#!/bin/bash
export GENESIS_NAME=n0
export BUILD_NAME=build
export SSH_CONFIG_DIR=${WORKSPACE}/multi_nodes_gate/airship_gate/config-ssh

View File

@ -1,3 +1,4 @@
#!/bin/bash
docker_ps() {
VIA="${1}"
ssh_cmd "${VIA}" docker ps -a

View File

@ -1,34 +1,39 @@
#!/bin/bash
DNS_ZONE_FILE="${TEMP_DIR}/ingress.dns"
COREFILE="${TEMP_DIR}/ingress.corefile"
ingress_dns_config() {
ingress_domain=$(config_ingress_domain)
ingress_domain="$(config_ingress_domain)"
INGRESS_DOMAIN=${ingress_domain} envsubst '${INGRESS_DOMAIN}' < "${TEMPLATE_DIR}/ingress_header.sub" > "${DNS_ZONE_FILE}"
#shellcheck disable=SC2016
INGRESS_DOMAIN="${ingress_domain}" envsubst '${INGRESS_DOMAIN}' < "${TEMPLATE_DIR}/ingress_header.sub" > "${DNS_ZONE_FILE}"
read -a ingress_ip_list <<< $(config_ingress_ips)
read -r -a ingress_ip_list <<< "$(config_ingress_ips)"
for ip in "${ingress_ip_list[@]}"
do
read -a ip_entries <<< $(config_ingress_entries $ip)
# TODO(sthussey) shift config_ingress_entries to printf w/ quotes
# shellcheck disable=SC2046
read -r -a ip_entries <<< $(config_ingress_entries "$ip")
for entry in "${ip_entries[@]}"
do
HOSTNAME=${entry} HOSTIP=${ip} envsubst < "${TEMPLATE_DIR}/ingress_entry.sub" >> "${DNS_ZONE_FILE}"
HOSTNAME="${entry}" HOSTIP="${ip}" envsubst < "${TEMPLATE_DIR}/ingress_entry.sub" >> "${DNS_ZONE_FILE}"
done
done
DNS_DOMAIN=${ingress_domain} ZONE_FILE=$(basename $DNS_ZONE_FILE) DNS_SERVERS="$UPSTREAM_DNS" envsubst < "${TEMPLATE_DIR}/ingress_corefile.sub" > "${COREFILE}"
DNS_DOMAIN="${ingress_domain}" ZONE_FILE="$(basename "$DNS_ZONE_FILE")" DNS_SERVERS="$UPSTREAM_DNS" envsubst < "${TEMPLATE_DIR}/ingress_corefile.sub" > "${COREFILE}"
}
ingress_dns_start() {
# nodename where DNS should run
nodename=$1
nodename="$1"
remote_work_dir="/var/tmp/coredns"
remote_zone_file="${remote_work_dir}/$(basename $DNS_ZONE_FILE)"
remote_corefile="${remote_work_dir}/$(basename $COREFILE)"
remote_zone_file="${remote_work_dir}/$(basename "$DNS_ZONE_FILE")"
remote_corefile="${remote_work_dir}/$(basename "$COREFILE")"
ssh_cmd "${nodename}" mkdir -p "${remote_work_dir}"
rsync_cmd "$DNS_ZONE_FILE" "${nodename}:${remote_zone_file}"
rsync_cmd "$COREFILE" "${nodename}:${remote_corefile}"
ssh_cmd "${nodename}" docker run -d -v /var/tmp/coredns:/data -w /data --network host --restart always -P $IMAGE_COREDNS -conf $(basename $remote_corefile)
ssh_cmd "${nodename}" docker run -d -v /var/tmp/coredns:/data -w /data --network host --restart always -P "$IMAGE_COREDNS" -conf "$(basename "$remote_corefile")"
}

View File

@ -1,3 +1,5 @@
#!/bin/bash
kubectl_apply() {
VIA=${1}
FILE=${2}

View File

@ -1,3 +1,4 @@
#!/bin/bash
if [[ -v GATE_COLOR && ${GATE_COLOR} = "1" ]]; then
C_CLEAR="\e[0m"
C_ERROR="\e[38;5;160m"

View File

@ -1,3 +1,5 @@
#!/bin/bash
nginx_down() {
REGISTRY_ID=$(docker ps -qa -f name=promenade-nginx)
if [ "x${REGISTRY_ID}" != "x" ]; then
@ -22,7 +24,7 @@ nginx_cache_and_replace_tar_urls() {
TAR_NUM=0
mkdir -p "${NGINX_DIR}"
for file in "$@"; do
grep -Po "^ +tar_url: \K.+$" "${file}" | while read tar_url ; do
grep -Po "^ +tar_url: \K.+$" "${file}" | while read -r tar_url ; do
# NOTE(mark-burnet): Does not yet ignore repeated files.
DEST_PATH="${NGINX_DIR}/cached-tar-${TAR_NUM}.tgz"
log "Caching ${tar_url} in file: ${DEST_PATH}"

View File

@ -1,3 +1,5 @@
#!/bin/bash
promenade_health_check() {
VIA=${1}
log "Checking Promenade API health"

View File

@ -1,3 +1,5 @@
#!/bin/bash
registry_down() {
REGISTRY_ID=$(docker ps -qa -f name=registry)
if [[ ! -z ${REGISTRY_ID} ]]; then
@ -7,7 +9,7 @@ registry_down() {
}
registry_list_images() {
FILES=($(find ${DEFINITION_DEPOT} -type f -name '*.yaml'))
FILES=($(find "${DEFINITION_DEPOT}" -type f -name '*.yaml'))
HOSTNAME_REGEX='[a-zA-Z0-9][a-zA-Z0-9_-]{0,62}'
DOMAIN_NAME_REGEX="${HOSTNAME_REGEX}(\.${HOSTNAME_REGEX})*"

View File

@ -1,8 +1,11 @@
#!/bin/bash
rsync_cmd() {
rsync -e "ssh -F ${SSH_CONFIG_DIR}/config" "${@}"
}
ssh_cmd_raw() {
# shellcheck disable=SC2068
ssh -F "${SSH_CONFIG_DIR}/config" $@
}
@ -11,8 +14,10 @@ ssh_cmd() {
shift
args=$(shell-quote -- "${@}")
if [[ -v GATE_DEBUG && ${GATE_DEBUG} = "1" ]]; then
# shellcheck disable=SC2029
ssh -F "${SSH_CONFIG_DIR}/config" -v "${HOST}" "${args}"
else
# shellcheck disable=SC2029
ssh -F "${SSH_CONFIG_DIR}/config" "${HOST}" "${args}"
fi
}
@ -28,9 +33,9 @@ ssh_config_declare() {
env -i \
"SSH_CONFIG_DIR=${SSH_CONFIG_DIR}" \
"SSH_NODE_HOSTNAME=${n}" \
"SSH_NODE_IP=$(config_vm_net_ip ${n} "$ssh_net")" \
"SSH_NODE_IP=$(config_vm_net_ip "${n}" "$ssh_net")" \
envsubst < "${TEMPLATE_DIR}/ssh-config-node.sub" >> "${SSH_CONFIG_DIR}/config"
if [[ "$(config_vm_bootstrap ${n})" == "true" ]]
if [[ "$(config_vm_bootstrap "${n}")" == "true" ]]
then
echo " User root" >> "${SSH_CONFIG_DIR}/config"
else

View File

@ -184,8 +184,10 @@ iso_gen() {
export NAME
export SSH_PUBLIC_KEY
export NTP_POOLS="$(join_array ',' "$NTP_POOLS")"
export NTP_SERVERS="$(join_array ',' "$NTP_SERVERS")"
NTP_POOLS="$(join_array ',' "$NTP_POOLS")"
export NTP_POOLS
NTP_SERVERS="$(join_array ',' "$NTP_SERVERS")"
export NTP_SERVERS
envsubst < "${TEMPLATE_DIR}/user-data.sub" > user-data
fs_header="false"
@ -199,11 +201,7 @@ iso_gen() {
echo "fs_header:" >> user-data
fs_header="true"
fi
export FS_TYPE=$(config_format_type "$disk_format")
export DISK_DEVICE="$disk"
envsubst < "${TEMPLATE_DIR}/disk-data.sub" >> user-data
unset FS_TYPE
unset DISK_DEVICE
FS_TYPE="$(config_format_type "$disk_format")" DISK_DEVICE="$disk" envsubst < "${TEMPLATE_DIR}/disk-data.sub" >> user-data
fi
done
@ -221,11 +219,7 @@ iso_gen() {
mount_header="true"
fi
export MOUNTPOINT=$(config_format_mount "$disk_format")
export DISK_DEVICE="$disk"
envsubst < "${TEMPLATE_DIR}/mount-data.sub" >> user-data
unset MOUNTPOINT
unset DISK_DEVICE
MOUNTPOINT="$(config_format_mount "$disk_format")" DISK_DEVICE="$disk" envsubst < "${TEMPLATE_DIR}/mount-data.sub" >> user-data
fi
done
@ -288,6 +282,7 @@ nets_clean() {
for iface in $(ip -oneline l show type vlan | grep "$netname" | awk -F ' ' '{print $2}' | tr -d ':' | awk -F '@' '{print $1}')
do
# shellcheck disable=SC2024
sudo ip l del dev "$iface" &>> "$LOG_FILE"
done
virsh net-destroy "$netname" &>> "${LOG_FILE}"
@ -303,11 +298,11 @@ net_create() {
if [[ $(config_net_is_layer3 "$net") == "true" ]]; then
net_template="${TEMPLATE_DIR}/l3network-definition.sub"
NETNAME="${virsh_netname}" NETIP="$(config_net_selfip "$netname")" NETMASK="$(cidr_to_netmask $(config_net_cidr "$netname"))" NETMAC="$(config_net_mac "$netname")" envsubst < "$net_template" > ${TEMP_DIR}/net-${netname}.xml
NETNAME="${virsh_netname}" NETIP="$(config_net_selfip "$netname")" NETMASK="$(cidr_to_netmask "$(config_net_cidr "$netname")")" NETMAC="$(config_net_mac "$netname")" envsubst < "$net_template" > "${TEMP_DIR}/net-${netname}.xml"
else
net_template="${TEMPLATE_DIR}/l2network-definition.sub"
NETNAME="${virsh_netname}" envsubst < "$net_template" > ${TEMP_DIR}/net-${netname}.xml
NETNAME="${virsh_netname}" envsubst < "$net_template" > "${TEMP_DIR}/net-${netname}.xml"
fi
log Creating network "${namekey}"_"${netname}"
@ -499,12 +494,13 @@ vm_create() {
wait
log Creating VM "${NAME}" and bootstrapping the boot drive
# shellcheck disable=SC2086
virt-install \
--name "${NAME}" \
--os-variant ubuntu16.04 \
--virt-type kvm \
--cpu ${VIRSH_CPU_OPTS} \
--serial file,path=${TEMP_DIR}/console/${NAME}.log \
--cpu "${VIRSH_CPU_OPTS}" \
--serial "file,path=${TEMP_DIR}/console/${NAME}.log" \
--graphics none \
--noautoconsole \
$NETWORK_OPTS \
@ -520,13 +516,14 @@ vm_create() {
else
log Creating VM "${NAME}"
# shellcheck disable=SC2086
virt-install \
--name "${NAME}" \
--os-variant ubuntu16.04 \
--virt-type kvm \
--cpu ${VIRSH_CPU_OPTS} \
--cpu "${VIRSH_CPU_OPTS}" \
--graphics none \
--serial file,path=${TEMP_DIR}/console/${NAME}.log \
--serial file,path="${TEMP_DIR}/console/${NAME}.log" \
--noautoconsole \
$NETWORK_OPTS \
--vcpus "$(config_vm_vcpus "${NAME}")" \
@ -607,7 +604,7 @@ get_libvirt_group() {
make_virtmgr_account() {
for libvirt_group in $(get_libvirt_group)
do
if [[ -z "$(grep -oE '^virtmgr:' /etc/passwd)" ]]
if ! grep -qE '^virtmgr:' /etc/passwd
then
sudo useradd -m -s /bin/sh -g "${libvirt_group}" virtmgr
else
@ -628,17 +625,19 @@ gen_libvirt_key() {
sudo cp "${GATE_SSH_KEY}.pub" ~virtmgr/.ssh/airship_gate.pub
else
log "Generating new SSH keypair for virtmgr"
#shellcheck disable=SC2024
sudo ssh-keygen -N '' -b 2048 -t rsa -f ~virtmgr/.ssh/airship_gate &>> "${LOG_FILE}"
fi
}
# Install private key into site definition
install_libvirt_key() {
export PUB_KEY=$(sudo cat ~virtmgr/.ssh/airship_gate.pub)
PUB_KEY=$(sudo cat ~virtmgr/.ssh/airship_gate.pub)
export PUB_KEY
mkdir -p ${TEMP_DIR}/tmp
envsubst < "${TEMPLATE_DIR}/authorized_keys.sub" > ${TEMP_DIR}/tmp/virtmgr.authorized_keys
sudo cp ${TEMP_DIR}/tmp/virtmgr.authorized_keys ~virtmgr/.ssh/authorized_keys
mkdir -p "${TEMP_DIR}/tmp"
envsubst < "${TEMPLATE_DIR}/authorized_keys.sub" > "${TEMP_DIR}/tmp/virtmgr.authorized_keys"
sudo cp "${TEMP_DIR}/tmp/virtmgr.authorized_keys" ~virtmgr/.ssh/authorized_keys
sudo chown -R virtmgr ~virtmgr/.ssh
sudo chmod 700 ~virtmgr/.ssh
sudo chmod 600 ~virtmgr/.ssh/authorized_keys
@ -649,7 +648,7 @@ install_libvirt_key() {
fi
mkdir -p "${GATE_DEPOT}"
cat << EOF > ${GATE_DEPOT}/airship_drydock_kvm_ssh_key.yaml
cat << EOF > "${GATE_DEPOT}/airship_drydock_kvm_ssh_key.yaml"
---
schema: deckhand/CertificateKey/v1
metadata:
@ -661,5 +660,5 @@ metadata:
storagePolicy: cleartext
data: |-
EOF
sudo cat ~virtmgr/.ssh/airship_gate | sed -e 's/^/ /' >> ${GATE_DEPOT}/airship_drydock_kvm_ssh_key.yaml
sudo cat ~virtmgr/.ssh/airship_gate | sed -e 's/^/ /' >> "${GATE_DEPOT}/airship_drydock_kvm_ssh_key.yaml"
}

View File

@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -17,7 +17,7 @@ set -e
source "${GATE_UTILS}"
BGP_ROUTER=$1
BGP_ROUTER="$1"
bgp_router_config
bgp_router_start ${BGP_ROUTER}
bgp_router_start "${BGP_ROUTER}"

View File

@ -1,19 +1,19 @@
#!/usr/bin/env bash
#!/bin/bash
set -e
source "${GATE_UTILS}"
mkdir -p ${SCRIPT_DEPOT}
chmod 777 ${SCRIPT_DEPOT}
mkdir -p "${SCRIPT_DEPOT}"
chmod 777 "${SCRIPT_DEPOT}"
DOCKER_RUN_OPTS=("-e PROMENADE_DEBUG=${PROMENADE_DEBUG}")
DOCKER_RUN_OPTS=("-e" "PROMENADE_DEBUG=${PROMENADE_DEBUG}")
for v in HTTPS_PROXY HTTP_PROXY NO_PROXY https_proxy http_proxy no_proxy
do
if [[ -v "${v}" ]]
then
DOCKER_RUN_OPTS+=(" -e ${v}=${!v}")
DOCKER_RUN_OPTS+=("-e" "${v}=${!v}")
fi
done
@ -25,18 +25,18 @@ then
KEYS_PATH=""
fi
PROMENADE_TMP_LOCAL="$(basename $PROMENADE_TMP_LOCAL)"
PROMENADE_TMP_LOCAL="$(basename "$PROMENADE_TMP_LOCAL")"
PROMENADE_TMP="${TEMP_DIR}/${PROMENADE_TMP_LOCAL}"
mkdir -p $PROMENADE_TMP
chmod 777 $PROMENADE_TMP
mkdir -p "$PROMENADE_TMP"
chmod 777 "$PROMENADE_TMP"
log Prepare hyperkube
docker run --rm -t \
--network host \
-v "${PROMENADE_TMP}:/tmp/${PROMENADE_TMP_LOCAL}" \
${DOCKER_RUN_OPTS[*]} \
"${DOCKER_RUN_OPTS[@]}" \
"${IMAGE_HYPERKUBE}" \
cp /hyperkube /tmp/${PROMENADE_TMP_LOCAL}
cp /hyperkube "/tmp/${PROMENADE_TMP_LOCAL}"
log Building scripts
docker run --rm -t \
@ -48,10 +48,10 @@ docker run --rm -t \
-v "${SCRIPT_DEPOT}:/scripts" \
-v "${PROMENADE_TMP}:/tmp/${PROMENADE_TMP_LOCAL}" \
-e "PROMENADE_ENCRYPTION_KEY=${PROMENADE_ENCRYPTION_KEY}" \
${DOCKER_RUN_OPTS[*]} \
"${DOCKER_RUN_OPTS[@]}" \
"${IMAGE_PROMENADE_CLI}" \
promenade \
build-all \
--validators \
-o /scripts \
/config/*.yaml ${CERTS_PATH} ${KEYS_PATH}
/config/*.yaml "${CERTS_PATH}" "${KEYS_PATH}"

View File

@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -22,7 +22,7 @@ source "${GATE_UTILS}"
# registry_up
# Create temp_dir structure
mkdir -p ${TEMP_DIR}/console
mkdir -p "${TEMP_DIR}/console"
# SSH setup
ssh_setup_declare

View File

@ -1,32 +1,11 @@
#!/usr/bin/env bash
#!/bin/bash
set -e
source "${GATE_UTILS}"
IS_UPDATE=0
DO_EXCLUDE=0
EXCLUDE_PATTERNS=()
while getopts "ux:" opt; do
case "${opt}" in
u)
IS_UPDATE=1
;;
x)
DO_EXCLUDE=1
EXCLUDE_PATTERNS+=("${OPTARG}")
;;
*)
echo "Unknown option"
exit 1
;;
esac
done
shift $((OPTIND-1))
DESIGN_FILES=($(find "${DEFINITION_DEPOT}" -name '*.yaml' | xargs -n 1 basename | xargs -n 1 printf "/tmp/design/%s\n"))
GATE_FILES=($(find "${GATE_DEPOT}" -name '*.yaml' | xargs -n 1 basename | xargs -n 1 printf "/tmp/gate/%s\n"))
DESIGN_FILES=($(find "${DEFINITION_DEPOT}" -name '*.yaml' -print0 | xargs -0 -n 1 basename | xargs -n 1 printf "/tmp/design/%s\n"))
GATE_FILES=($(find "${GATE_DEPOT}" -name '*.yaml' -print0 | xargs -0 -n 1 basename | xargs -n 1 printf "/tmp/gate/%s\n"))
mkdir -p "${CERT_DEPOT}"
chmod 777 "${CERT_DEPOT}"

View File

@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -17,7 +17,7 @@ set -e
source "${GATE_UTILS}"
DNS_SERVER=$1
DNS_SERVER="$1"
ingress_dns_config
ingress_dns_start ${DNS_SERVER}
ingress_dns_start "${DNS_SERVER}"

View File

@ -21,29 +21,23 @@ mkdir -p "${DEFINITION_DEPOT}"
chmod 777 "${DEFINITION_DEPOT}"
render_pegleg_cli() {
cli_string="pegleg -v site"
cli_string=("pegleg" "-v" "site")
if [[ "${GERRIT_SSH_USER}" ]]
then
cli_string+=" -u ${GERRIT_SSH_USER}"
cli_string+=("-u" "${GERRIT_SSH_USER}")
fi
if [[ "${GERRIT_SSH_KEY}" ]]
then
cli_string+=" -k /workspace/${GERRIT_SSH_KEY}"
cli_string+=("-k" "/workspace/${GERRIT_SSH_KEY}")
fi
primary_repo=$(config_pegleg_primary_repo)
primary_repo="$(config_pegleg_primary_repo)"
if [[ -d "${REPO_ROOT}/${primary_repo}" ]]
then
# NOTE: to get the latest pegleg collect to work:
# the airship-in-a-bottle repo has versions (v1.0demo, v1.0dev) within global,
# and that is preventing pegleg from collecting documents.
# It complains about duplicate data.
$(find ${REPO_ROOT}/${primary_repo} -name "v1.0dev" -type d \
-exec rm -r {} +)
cli_string="${cli_string} -r /workspace/${primary_repo}"
cli_string+=("-r" "/workspace/${primary_repo}")
else
log "${primary_repo} not a valid primary repository"
return 1
@ -55,18 +49,20 @@ render_pegleg_cli() {
then
for r in ${aux_repos[*]}
do
cli_string="${cli_string} -e ${r}=/workspace/${r}"
cli_string+=("-e" "${r}=/workspace/${r}")
done
fi
cli_string="${cli_string} collect -s /collect"
cli_string+=("collect" "-s" "/collect")
cli_string="${cli_string} $(config_pegleg_sitename)"
cli_string+=("$(config_pegleg_sitename)")
echo ${cli_string}
printf " %s " "${cli_string[@]}"
}
collect_design_docs() {
# shellcheck disable=SC2091
# shellcheck disable=SC2046
docker run \
--rm -t \
--network host \

View File

@ -21,29 +21,23 @@ mkdir -p "${RENDERED_DEPOT}"
chmod 777 "${RENDERED_DEPOT}"
render_pegleg_cli() {
cli_string="pegleg -v site"
cli_string=("pegleg" "-v" "site")
if [[ "${GERRIT_SSH_USER}" ]]
then
cli_string+=" -u ${GERRIT_SSH_USER}"
cli_string+=("-u" "${GERRIT_SSH_USER}")
fi
if [[ "${GERRIT_SSH_KEY}" ]]
then
cli_string+=" -k /workspace/${GERRIT_SSH_KEY}"
cli_string+=("-k" "/workspace/${GERRIT_SSH_KEY}")
fi
primary_repo=$(config_pegleg_primary_repo)
primary_repo="$(config_pegleg_primary_repo)"
if [[ -d "${REPO_ROOT}/${primary_repo}" ]]
then
# NOTE: to get the latest pegleg collect to work:
# the airship-in-a-bottle repo has versions (v1.0demo, v1.0dev) within global,
# and that is preventing pegleg from collecting documents.
# It complains about duplicate data.
$(find ${REPO_ROOT}/${primary_repo} -name "v1.0dev" -type d \
-exec rm -r {} +)
cli_string="${cli_string} -r /workspace/${primary_repo}"
cli_string+=("-r" "/workspace/${primary_repo}")
else
log "${primary_repo} not a valid primary repository"
return 1
@ -55,18 +49,20 @@ render_pegleg_cli() {
then
for r in ${aux_repos[*]}
do
cli_string="${cli_string} -e ${r}=/workspace/${r}"
cli_string+=("-e" "${r}=/workspace/${r}")
done
fi
cli_string="${cli_string} render -o /collect/rendered.yaml"
cli_string+=("render" "-o" "/collect/rendered.yaml")
cli_string="${cli_string} $(config_pegleg_sitename)"
cli_string+=("$(config_pegleg_sitename)")
echo ${cli_string}
printf " %s " "${cli_string[@]}"
}
collect_rendered_doc() {
# shellcheck disable=SC2091
# shellcheck disable=SC2046
docker run \
--rm -t \
--network host \

View File

@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -29,9 +29,9 @@ while getopts "og" opt; do
o)
OMIT_CERTS=1
;;
g)
g)
OMIT_GATE=1
;;
;;
*)
echo "Unknown option"
exit 1
@ -57,7 +57,7 @@ ssh_cmd "${BUILD_NAME}" mkdir -p "${BUILD_WORK_DIR}/site"
rsync_cmd "${DEFINITION_DEPOT}"/*.yaml "${BUILD_NAME}:${BUILD_WORK_DIR}/site/"
sleep 120
check_configdocs_result "$(shipyard_cmd create configdocs design --directory=${BUILD_WORK_DIR}/site --replace)"
check_configdocs_result "$(shipyard_cmd create configdocs design "--directory=${BUILD_WORK_DIR}/site" --replace)"
# Skip certs/gate if already part of site manifests
if [[ -n "${USE_EXISTING_SECRETS}" ]]
@ -70,14 +70,14 @@ if [[ "${OMIT_CERTS}" == "0" ]]
then
ssh_cmd "${BUILD_NAME}" mkdir -p "${BUILD_WORK_DIR}/certs"
rsync_cmd "${CERT_DEPOT}"/*.yaml "${BUILD_NAME}:${BUILD_WORK_DIR}/certs/"
check_configdocs_result "$(shipyard_cmd create configdocs certs --directory=${BUILD_WORK_DIR}/certs --append)"
check_configdocs_result "$(shipyard_cmd create configdocs certs "--directory=${BUILD_WORK_DIR}/certs" --append)"
fi
if [[ "${OMIT_GATE}" == "0" ]]
then
ssh_cmd "${BUILD_NAME}" mkdir -p "${BUILD_WORK_DIR}/gate"
rsync_cmd "${GATE_DEPOT}"/*.yaml "${BUILD_NAME}:${BUILD_WORK_DIR}/gate/"
check_configdocs_result "$(shipyard_cmd create configdocs gate --directory=${BUILD_WORK_DIR}/gate --append)"
check_configdocs_result "$(shipyard_cmd create configdocs gate "--directory=${BUILD_WORK_DIR}/gate" --append)"
fi
check_configdocs_result "$(shipyard_cmd commit configdocs)"

View File

@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -12,7 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [[ -n $GATE_DEBUG && $GATE_DEBUG = "1" ]]; then
if [[ -n "$GATE_DEBUG" && "$GATE_DEBUG" = "1" ]]; then
set -x
fi
@ -22,9 +22,9 @@ set -e
function upload_script() {
source "$GATE_UTILS"
BASENAME=$(basename $BASH_SOURCE)
BASENAME="$(basename "${BASH_SOURCE[0]}")"
# Copies script to genesis VM
rsync_cmd "$BASH_SOURCE" "$GENESIS_NAME:/root/airship/"
rsync_cmd "${BASH_SOURCE[0]}" "$GENESIS_NAME:/root/airship/"
set -o pipefail
ssh_cmd_raw "$GENESIS_NAME" "KUBECONFIG=${KUBECONFIG} GATE_DEBUG=${GATE_DEBUG} NUM_NODES=$1 /root/airship/${BASENAME}" 2>&1 | tee -a "$LOG_FILE"
set +o pipefail
@ -38,7 +38,7 @@ function kubectl_retry() {
cnt=$((cnt+1))
if [[ "$ret" -ne "0" ]]; then
if [[ "$cnt" -lt "$MAX_TRIES" ]]; then
sleep $PAUSE
sleep "$PAUSE"
else
return 1
fi
@ -51,12 +51,12 @@ function kubectl_retry() {
function check_kube_nodes() {
try=0
while true; do
nodes_list=$(kubectl_retry get nodes --no-headers) || true
ret=$?
try=$((try+1))
nodes_list="$(kubectl_retry get nodes --no-headers)" || true
ret="$?"
try="$((try+1))"
if [ "$ret" -ne "0" ]; then
if [[ "$try" -lt "$MAX_TRIES" ]]; then
sleep $PAUSE
sleep "$PAUSE"
else
echo -e "Can't get nodes"
return 1
@ -81,9 +81,9 @@ function check_kube_components() {
res=$(kubectl_retry get cs -o jsonpath="{.items[*].conditions[?(@.type == \"Healthy\")].status}") || true
try=$((try+1))
if $(echo $res | grep -q False); then
if echo "$res" | grep -q False; then
if [[ "$try" -lt "$MAX_TRIES" ]]; then
sleep $PAUSE
sleep "$PAUSE"
else
echo "Error: kubernetes components are not working properly"
kubectl_retry get cs
@ -96,7 +96,7 @@ function check_kube_components() {
}
if [[ -n "$GATE_UTILS" ]]; then
upload_script $NUM_NODES
upload_script "$NUM_NODES"
else
set +e
KUBECONFIG="${KUBECONFIG:-/etc/kubernetes/admin/kubeconfig.yaml}"
@ -108,8 +108,7 @@ set +e
echo "Error: ${KUBECTL} not found"
exit 1
fi
exit_code=0
check_kube_nodes $NUM_NODES
check_kube_nodes "$NUM_NODES"
nodes_status=$?
check_kube_components
components_status=$?

View File

@ -58,7 +58,7 @@ jq -cr '.stages | .[]' "${GATE_MANIFEST}" > "${STAGES}"
# the read below, since we will be calling SSH, which will consume the
# remaining data on STDIN.
exec 3< "$STAGES"
while read -u 3 stage; do
while read -r -u 3 stage; do
NAME=$(echo "${stage}" | jq -r .name)
STAGE_SCRIPT="$(echo "${stage}" | jq -r .script)"
STAGE_CMD=""

View File

@ -22,5 +22,5 @@ docker run -d \
-e REGISTRY_HTTP_ADDR=0.0.0.0:5000 \
--restart=always \
--name registry \
-v $REGISTRY_DATA_DIR:/var/lib/registry \
-v "$REGISTRY_DATA_DIR:/var/lib/registry" \
registry:2

View File

@ -15,15 +15,15 @@
set -ex
IMAGES_FILE=$(dirname $0)/IMAGES
IMAGES_FILE="$(dirname "$0")/IMAGES"
IFS=,
grep -v '^#.*' $IMAGES_FILE | while read src tag dst; do
echo src=$src tag=$tag dst=$dst
sudo docker pull $src:$tag
grep -v '^#.*' "$IMAGES_FILE" | while read -r src tag dst; do
echo "src=$src tag=$tag dst=$dst"
sudo docker pull "$src:$tag"
full_dst=localhost:5000/$dst:$tag
sudo docker tag $src:$tag $full_dst
full_dst="localhost:5000/$dst:$tag"
sudo docker tag "$src:$tag" "$full_dst"
sudo docker push $full_dst
sudo docker push "$full_dst"
done

View File

@ -40,3 +40,5 @@ sudo docker run -t --rm --net=host
-v /:/host:rshared
EndOfCommand
)
export base_docker_command

View File

@ -21,8 +21,10 @@ OPENSTACK_CLI_IMAGE="${OPENSTACK_CLI_IMAGE:-docker.io/openstackhelm/heat:ocata}"
# Get the path of the directory where the script is located
# Source Base Docker Command
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd ${DIR} && source openstack_cli_docker_base_command.sh
DIR="$(dirname "${BASH_SOURCE[0]}")"
# shellcheck disable=SC1091
cd "${DIR}" && source openstack_cli_docker_base_command.sh
# Execute OpenStack CLI
${base_docker_command} ${OPENSTACK_CLI_IMAGE} ${COMMAND} $@
# shellcheck disable=SC2154
${base_docker_command} "${OPENSTACK_CLI_IMAGE}" "${COMMAND}" "$@"