tools/stx-init-env

#!/bin/bash
if [[ "$0" != "${BASH_SOURCE[0]}" ]] ; then
echo >&2 "Don't source this file, execute it instead, eg: ${BASH_SOURCE[0]} --help"
return 1
fi
usage() {
cat <<END
Usage: $0 OPTIONS
Initialize StarlingX build environment & (re-)start builder pods

  -R,--restart-minikube
                  restart minikube profile before starting pods

  --rebuild[=IMG,...]
                  build specified pod images instead of downloading them

  --cache         when rebuilding, allow docker to use its filesystem cache;
                  when pulling, skip images that already exist locally
                  CAUTION: this option may not pick up all the changes to
                  docker source files and is meant for debugging the build
                  scripts

  --dockerhub-login
                  execute "docker login" prior to building or pulling builder
                  images

  --no-start      refresh builder images, but don't (re-)start pods

ENVIRONMENT RESET OPTIONS
=========================

  -y,--assumeyes
                  assume "yes" for all questions

  -D,--delete-minikube-profile
                  delete minikube profile and exit
                  This will also delete any builder images. Following this
                  command you have to re-run this script (possibly with
                  --rebuild).

  --nuke          DEPRECATED: same as --delete-minikube-profile

  --reset         delete chroots and restart the environment

  --reset-hard    delete env containers, minikube profile and all generated
                  content, including the workspace directory, compiled DEBs,
                  ISO, OSTree, chroots, aptly repositories, docker FS layers
                  and build logs. The "downloads" directory and stx.conf are
                  kept. Following this action you must re-run this script
                  (possibly with --rebuild) to start minikube and the pods
                  again, followed by 'downloader', 'build-pkgs' etc.
END
}
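# Typical invocations, for illustration (assumes the script is run from the
# stx-tools checkout):
#   ./stx-init-env                   # pull pre-built builder images and start the pods
#   ./stx-init-env --rebuild         # build all builder images locally instead of pulling
#   ./stx-init-env -R                # restart the minikube profile, then start the pods
#   ./stx-init-env --reset-hard -y   # wipe generated content; re-run this script afterwards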
source "$(dirname "$0")"/import-stx || exit 1
PROGNAME=$(basename "$0") || exit 1
STX_TOOLS_DIR="$(readlink -v -e "$(dirname "$0")")" || exit 1
MINIKUBE=minikube
HELM=helm
DOCKER=docker
PYTHON3=python3
KUBECTL=kubectl
DOCKER_PREFIX=${STX_PREBUILT_BUILDER_IMAGE_PREFIX:-'starlingx/'}
DOCKER_IMAGES="stx-builder stx-pkgbuilder stx-lat-tool stx-aptly"
DOCKER_TAG="$STX_PREBUILT_BUILDER_IMAGE_TAG"
DOCKERHUB_LOGIN=0
BUILD_DOCKER=0
DELETE_MINIKUBE_PROFILE=0
RESTART_MINIKUBE=0
CLEAN_CONFIG=0
USE_DOCKER_CACHE=0
START_PODS=1
RESET_SOFT=0
RESET_HARD=0
ASSUME_YES=0
COREUTILS_DOCKER_IMAGE="debian:bookworm-20240130-slim"
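# Minimal image used by safe_rm() below when STX_RM_METHOD=docker is selected
# instead of sudo for root-owned deletions.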
info() {
local tty_on tty_off
if [[ -t 2 ]] ; then
tty_on=$'\033[0;36m'
tty_off=$'\033[0m'
fi
echo >&2 "${tty_on}$*${tty_off}"
}
notice() {
local tty_on tty_off
if [[ -t 2 ]] ; then
tty_on=$'\033[1;36m'
tty_off=$'\033[0m'
fi
echo >&2 "${tty_on}$*${tty_off}"
}
warn() {
local tty_on tty_off
if [[ -t 2 ]] ; then
tty_on=$'\033[33m'
tty_off=$'\033[0m'
fi
echo >&2 "${tty_on}WARNING: $*${tty_off}"
}
error() {
local tty_on tty_off
if [[ -t 2 ]] ; then
tty_on=$'\033[31m'
tty_off=$'\033[0m'
fi
echo >&2 "${tty_on}ERROR: $*${tty_off}"
}
die() {
error "$@"
exit 1
}
# Usage: confirm "ACTION DESCRIPTION"
confirm() {
local continue_yn="Continue (yes/no)? "
if [[ "$ASSUME_YES" -eq 1 ]] ; then
echo "$1"
echo "${continue_yn}yes"
return 0
fi
if [[ ! -t 0 ]] ; then
echo "$1"
die "Won't read from non-terminal"
fi
local answer
echo "$1"
while true ; do
read -e -r -p "$continue_yn" answer || exit 1
if [[ "$answer" == "yes" ]] ; then
return 0
elif [[ "$answer" == "no" ]] ; then
return 1
else
echo >&2 "Please type \`yes' or \`no'"
echo >&2
fi
done
}
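# Illustrative example: prompt before a destructive step; aborts unless the
# user (or --assumeyes) answers "yes":
#   confirm "About to delete generated build artifacts" || exit 1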
# Usage: regex_quote "STR"
regex_quote() {
echo "$1" | sed -r 's/([$.(){}+*^[\])/\\\1/g'
}
# Usage: regex_match "STR" "PYTHON_STYLE_REGEX"...
regex_match() {
local str="$1" ; shift || :
python3 -c "\
import re,sys;
str = sys.argv[1]
exprlist = sys.argv[2:]
for expr in exprlist:
    #print (\"========= [%s] [%s]\" % (str, expr))
    if re.match(expr, str):
        sys.exit(0)
sys.exit(1)
" "$str" "$@"
}
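# Illustrative example: returns 0 (success) because the path matches the
# second pattern:
#   regex_match "/home/jenkins/src" '^/tmp(/.*)?$' '^/home/.*$'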
# Usage: starts_with "STR" "PREFIX"
starts_with() {
local str="$1"
local prefix="$2"
if [[ "${str#$prefix}" == "$str" ]] ; then
return 1
fi
return 0
}
minikube_profile_is_started() {
minikube status -p "$MINIKUBENAME" >/dev/null 2>&1
}
minikube_profile_exists() {
local script=$(cat <<'END'
import json,sys
data = json.load (sys.stdin)
if 'valid' not in data or 'invalid' not in data:
    sys.exit(1)
for x in data['valid']+data['invalid']:
    if x['Name'] == sys.argv[1]:
        sys.exit(0)
sys.exit(1)
END
)
$MINIKUBE profile list -l -o json | $PYTHON3 -c "$script" "$MINIKUBENAME"
}
minikube_profile_start() {
notice "Starting minikube profile \`$MINIKUBENAME'"
$MINIKUBE start --driver=docker -p $MINIKUBENAME \
--cpus=$STX_BUILD_CPUS \
--memory=$MINIKUBEMEMORY \
--mount=true \
--mount-string="$STX_BUILD_HOME:/workspace" \
|| exit 1
}
minikube_profile_stop() {
if minikube_profile_is_started ; then
notice "Stopping minikube profile \`$MINIKUBENAME'"
$MINIKUBE stop -p $MINIKUBENAME
if minikube_profile_is_started ; then
echo >&2 "minikube container $MINIKUBENAME exist!"
echo >&2 "And the command 'minikube -p $MINIKUBENAME stop' failed. The reason may be"
echo >&2 "the current MINIKUBE_HOME/HOME is not the same as the $MINIKUBENAME"
echo >&2 "Please change the MINIKUBE_HOME/HOME directory to the previous value"
echo >&2 "then re-execute this script"
exit 1
fi
fi
}
stx_is_started() {
stx control is-started >/dev/null 2>&1
}
stx_stop() {
stx control stop --wait || exit 1
}
stx_start() {
stx config --upgrade || exit 1
stx control start --wait || exit 1
}
#
# Blacklist for root-owned deletions.
# A multi-line string, one Python regex per line, leading/trailing
# spaces and comments will be stripped.
#
if [[ -z "$STX_RM_BLACKLIST" ]] ; then
USER_REGEX="$(regex_quote "$USER")" || exit 1
HOME_REGEX="$(regex_quote "$HOME")" || exit 1
STX_RM_BLACKLIST='
^/$
^/bin(/.*)?$
^/boot(/.*)?$
^/dev(/.*)?$
^/etc(/.*)?$
^/export(/.*)?$
^/home$ # deny "/home"
^/home/'"$USER_REGEX"'$ # deny "/home/$USER"
^/home/(?!'"$USER_REGEX"'(/.*)?$) # deny "/home/SOME_USER_OTHER_THAN_CURRENT"
^'"$HOME_REGEX"'$
^/import(/.*)?$
^/localdisk$
^/localdisk/designer$
^/localdisk/designer/'"$USER_REGEX"'$
^/localdisk/designer/(?!'"$USER_REGEX"'(/.*)?$)
^/localdisk/loadbuild$
^/localdisk/loadbuild/'"$USER_REGEX"'$
^/localdisk/loadbuild/(?!'"$USER_REGEX"'(/.*)?$)
^/folk(/.*)?$
^/lib[^/]*(/.*)?$
^/media(/.*)?$
^/mnt(/.*)?$
^/opt(/.*)?$
^/proc(/.*)?$
^/root(/.*)?$
^/run(/.*)?$
^/sbin(/.*)?$
^/snap(/.*)?$
^/srv(/.*)?$
^/starlingx(/.*)?$
^/sys(/.*)?$
^/tmp(/.*)?$
^/usr(/.*)?$
^/var(/.*)?$
'
fi
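# The default blacklist above can be overridden by exporting STX_RM_BLACKLIST
# before running this script, one Python-style regex per line, e.g.
# (illustrative):
#   export STX_RM_BLACKLIST='
#   ^/$
#   ^/home(/.*)?$
#   ^/usr(/.*)?$
#   '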
# Usage: safe_rm PATHs...
#
# Delete PATHs as root user, by default via "sudo"; or else
# via "docker run [...]". Bail out on blacklisted paths.
#
safe_rm() {
local build_home
build_home="$(readlink -v -e "$STX_BUILD_HOME")" || exit 1
local build_home_quoted
build_home_quoted="$(regex_quote "$build_home")"
# Compile blacklist from $STX_RM_BLACKLIST + current $STX_BUILD_HOME
local -a re_list
readarray -t re_list < <(echo "$STX_RM_BLACKLIST" | sed -r -e 's/\s#.*//g' -e 's/^\s+//' -e 's/\s+$//' -e '/^\s*$/d') || exit 1
re_list+=("^$build_home_quoted$")
# Validate inputs
local -a paths_to_delete
local path basename dirname
local canon_dirname canon_path canon_path_expr
for path in "$@" ; do
# Resolve paths before checking against blacklist. We want to resolve
# them similarly to how "rm -rf" would, ie:
#
# - recursively resolve symlinks leading up to the leaf (basename) of
# the target path
# - do not resolve the leaf; if it happens to be a symlink, just delete
# the symlink
#
# special case 1: never remove anything that ends with "." or ".."
#
# special case 2: if path ends with a slash, the leaf must exist and be a
# directory or a symlink to one; otherwise we skip it:
# - real dir: remove recursively
# - symlink to a dir: remove target's children only
# - anything else: skip
#
# don't remove "." or ".."
if [[ "$path" =~ (^|/)[.][.]?$ ]] ; then
error "refusing to remove \".\" or \"..\" directory"
exit 1
fi
# path doesn't end with "/": resolve parents, but not the leaf
if [[ ! "$path" =~ /$ ]] ; then
basename="$(basename "$path")"
[[ -n "$basename" ]] || continue
dirname="$(dirname "$path")"
[[ -n "$dirname" ]] || continue
canon_dirname="$(realpath -q -e "$dirname" || true)"
[[ -n "$canon_dirname" ]] || continue
canon_path="$canon_dirname/$basename"
# ie path exists or is a broken symlink
[[ -e "$canon_path" || -L "$canon_path" ]] || continue
canon_path_expr="$canon_path" # argument to "rm"
# path ends with "/": only makes sense for dirs or symlinks to dirs
else
# Try to resolve the entire path, including the leaf.
# If leaf is a legit symlink, "rm" would follow it, so we do the same
canon_path="$(realpath -q -m "$path" || true)"
[[ -d "$canon_path" ]] || continue
canon_path_expr="$canon_path/" # argument to "rm" must preserve trailing /
fi
# Make sure it's a subdirectory of $STX_BUILD_HOME
if ! starts_with "$canon_path" "$build_home/" ; then
error "Attempted to delete unsafe path \`$canon_path', expecting a subdirectory of \`$STX_BUILD_HOME'"
exit 1
fi
# Check it against black list
if regex_match "$canon_path" "${re_list[@]}" ; then
die "Attempted to delete blacklisted path \`$canon_path'"
fi
# ok to delete
paths_to_delete+=("$canon_path_expr")
done
# Delete them
local -a rm_cmd
for path in "${paths_to_delete[@]}" ; do
#confirm "Deleting \`$path'"$'' || continue
# Delete via docker or sudo
if [[ "$STX_RM_METHOD" == "docker" ]] ; then
local tty_opt=
if [[ -t 0 ]] ; then
tty_opt="-t"
fi
rm_cmd=(docker run -i $tty_opt --rm --mount "type=bind,src=$build_home,dst=$build_home" $COREUTILS_DOCKER_IMAGE rm -rf --one-file-system "$path")
else
rm_cmd=(sudo rm -rf --one-file-system "$path")
fi
echo "running: ${rm_cmd[*]}" >&2
"${rm_cmd[@]}" || exit 1
done
}
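# Illustrative example: both arguments must resolve to subdirectories of
# $STX_BUILD_HOME and must not match the blacklist above, otherwise the
# script bails out:
#   safe_rm "$STX_BUILD_HOME/aptly" "$STX_BUILD_HOME/docker"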
init_stx_mirror_url() {
if [[ -z "$STX_MIRROR_URL" && -f "$STX_TOOLS_DIR/stx.conf" ]] ; then
STX_MIRROR_URL=$(sed -n -r 's/^\s*stx_mirror_url\s*=\s*//gp' "$STX_TOOLS_DIR/stx.conf" | sed "s/[\"']//g")
fi
}
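# The mirror URL is read from stx.conf only when STX_MIRROR_URL is not already
# set in the environment; the relevant stx.conf line looks like this
# (hypothetical URL):
#   stx_mirror_url = "https://mirror.example.com/starlingx"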
cmdline_error() {
if [[ -n "$1" ]] ; then
echo "error: $1" >&2
fi
echo "Type \`$0 --help' for more info." >&2
exit 1
}
# process command line
temp=$(getopt -o hRyD --long help,clean,restart-minikube,rebuild::,cache,delete-minikube-profile,nuke,reset,reset-hard,assumeyes,dockerhub-login,no-start -n "$PROGNAME" -- "$@") || cmdline_error
eval set -- "$temp"
while true ; do
case "$1" in
-h|--help)
usage
exit 0
;;
-R|--restart-minikube)
RESTART_MINIKUBE=1
shift
;;
--clean)
CLEAN_CONFIG=1
shift
;;
--rebuild)
if [[ -n "$2" ]] ; then
for img in $(echo "$2" | sed 's/,,*/ /g') ; do
img_ok=no
for known_img in $DOCKER_IMAGES ; do
if [[ "$img" == "$known_img" || "stx-$img" == "$known_img" ]] ; then
BUILD_DOCKER_IMAGES+="$known_img "
img_ok=yes
break
fi
done
if [[ $img_ok != yes ]] ; then
cmdline_error "invalid image \"$img\""
fi
done
else
BUILD_DOCKER_IMAGES="$DOCKER_IMAGES"
fi
shift 2
;;
--cache)
USE_DOCKER_CACHE=1
shift
;;
-y|--assumeyes)
ASSUME_YES=1
shift
;;
--nuke)
warn "--nuke is deprecated, use --delete-minikube-profile instead"
DELETE_MINIKUBE_PROFILE=1
shift
;;
-D|--delete-minikube-profile)
DELETE_MINIKUBE_PROFILE=1
shift
;;
--reset)
RESET_SOFT=1
shift
;;
--reset-hard)
RESET_HARD=1
shift
;;
--dockerhub-login)
DOCKERHUB_LOGIN=1
shift
;;
--no-start)
START_PODS=0
shift
;;
--)
shift
break
;;
-?*)
cmdline_error
;;
*)
break
;;
esac
done
[[ "$#" -le 0 ]] || cmdline_error "too many arguments"
# make sure required programs are installed
if [ "$STX_PLATFORM" = "minikube" ]; then
if ! command -v "$MINIKUBE" &> /dev/null; then
echo >&2 "Command $MINIKUBE could not be found."
echo >&2 "Please install it as https://minikube.sigs.k8s.io/docs/start/"
echo ""
exit 1
fi
fi
if [ "$STX_PLATFORM" = "kubernetes" ]; then
if ! command -v "$KUBECTL" &> /dev/null; then
echo >&2 "Command $KUBECTL could not be found."
echo >&2 "Please install and configure kubectl."
echo ""
exit 1
fi
fi
if ! command -v "$HELM" &> /dev/null; then
echo >&2 "Command $HELM could not be found."
echo >&2 "Please install it as https://helm.sh/"
echo ""
exit 1
fi
if ! command -v "$DOCKER" &> /dev/null; then
echo >&2 "Command $DOCKER could not be found. Please install it."
echo >&2 ""
exit 1
fi
# Delete minikube profile/cluster. This will also delete the locally-built
# or downloaded builder pods.
if [[ $DELETE_MINIKUBE_PROFILE -eq 1 ]] ; then
if [[ "$STX_PLATFORM" != "minikube" ]] ; then
notice "--delete-minikube-profile is not supported for Kubernetes platform"
elif minikube_profile_exists ; then
notice "Deleting minikube profile \`$MINIKUBENAME'"
$MINIKUBE delete -p "$MINIKUBENAME" || exit 1
else
notice "Please check your minikube profile MINIKUBENAME: \`$MINIKUBENAME'."
notice "It doesn't exist or it existed but not for your MINIKUBE_HOME: \`$MINIKUBE_HOME'."
notice "Please re-export the correct project variable pairs!!!"
fi
exit 0
fi
# clean the configuration and configmap data
if [[ $CLEAN_CONFIG -eq 1 ]] ; then
if stx_is_started ; then
notice "Please firstly stop the helm project with 'stx control stop' command."
notice "Then execute this cleanup operation again."
exit 1
fi
notice "Clean the config file and configmap data for builder|pkgbuilder container."
# copy a fresh config file
rm -f "$STX_TOOLS_DIR/stx.conf"
cp "$STX_TOOLS_DIR/stx.conf.sample" "$STX_TOOLS_DIR/stx.conf"
rm -f "$STX_TOOLS_DIR"/stx/lib/stx/__pycache__/*
rm -f "$STX_TOOLS_DIR"/stx/stx-build-tools-chart/stx-builder/Chart.lock
rm -f "$STX_TOOLS_DIR"/stx/stx-build-tools-chart/stx-builder/charts/*
rm -f "$STX_TOOLS_DIR"/stx/stx-build-tools-chart/stx-builder/configmap/stx-localrc
rm -f "$STX_TOOLS_DIR"/stx/stx-build-tools-chart/stx-builder/dependency_chart/stx-pkgbuilder/configmap/stx-localrc
exit 0
fi
# --reset-hard: stop pods, delete pod state and minikube profile
if [[ $RESET_HARD -eq 1 ]] ; then
# "stx" tool can't work without stx.conf
if [[ ! -f "$STX_TOOLS_DIR/stx.conf" ]] ; then
error "$STX_TOOLS_DIR/stx.conf: file not found"
exit 1
fi
confirm "\
This will delete env containers, minikube profile and all generated
content, including the workspace directory, generated DEBs, ISO,
OSTree, chroots, aptly repositories, docker FS layers and build logs.
The 'downloads' directory and stx.conf are kept.
Following this action you must re-run this script (possibly with
--rebuild) to start minikube and the pods again, followed by
'downloader', 'build-pkgs' etc.
" || exit 1
# Deleting minikube profile also deletes env pods within it
if [[ "$STX_PLATFORM" = "minikube" ]] ; then
if minikube_profile_exists ; then
notice "Deleting minikube profile \`$MINIKUBENAME'"
$MINIKUBE delete -p "$MINIKUBENAME" || exit 1
fi
else
# stop & delete env pods
if stx_is_started ; then
info "stopping env pods"
stx_stop || exit 1
fi
fi
notice "deleting generated files"
safe_rm "$STX_BUILD_HOME/localdisk/pkgbuilder" \
"$STX_BUILD_HOME/docker" \
"$STX_BUILD_HOME/aptly" \
"$STX_BUILD_HOME/localdisk/loadbuild"/*/*/* \
"$STX_BUILD_HOME/localdisk"/*.log \
"$STX_BUILD_HOME/localdisk"/*.yaml \
"$STX_BUILD_HOME/localdisk"/log \
"$STX_BUILD_HOME/localdisk"/CERTS \
"$STX_BUILD_HOME/localdisk"/channel \
"$STX_BUILD_HOME/localdisk"/deploy \
"$STX_BUILD_HOME/localdisk"/workdir \
"$STX_BUILD_HOME/localdisk"/sub_workdir \
|| exit 1
notice "please use \`$0' to start the environment again"
exit 0
fi
# --reset: delete chroots + restart pods
if [[ $RESET_SOFT -eq 1 ]] ; then
# "stx" tool can't work without stx.conf
if [[ ! -f "$STX_TOOLS_DIR/stx.conf" ]] ; then
error "$STX_TOOLS_DIR/stx.conf: file not found"
exit 1
fi
# Caveat: we have to have minikube started in order to re-start
# env pods (below), otherwise the old/dormant instances
# of the pods may get re-activated later when the user starts
# minikube manually. In this case those may be outdated due
# to changes in stx.conf.
if [[ "$STX_PLATFORM" = "minikube" ]] && ! minikube_profile_is_started ; then
error "minikube profile \`$MINIKUBENAME' is not running, please start it first"
exit 1
fi
# stop env pods
want_stx_start=0
if stx_is_started ; then
want_stx_start=1
notice "stopping env pods"
stx_stop || exit 1
fi
# clean up
notice "deleting chroots"
safe_rm "$STX_BUILD_HOME/localdisk/pkgbuilder"
# start the pods again
if [[ $want_stx_start -eq 1 ]] ; then
notice "starting env pods"
stx_start || exit 1
fi
exit 0
fi
# Make sure $STX_BUILD_HOME exists
if [[ ! -d "$STX_BUILD_HOME" ]] ; then
echo >&2 "The directory $STX_BUILD_HOME doesn't exist, please create it with the command:"
echo >&2 ""
echo >&2 " mkdir -p $STX_BUILD_HOME"
echo >&2 ""
echo >&2 "Then execute this script again!"
exit 1
fi
# Make sure the mirror directory exists. If we don't create it here, it would
# later be created from inside the build containers with root ownership.
if [[ ! -d "$STX_BUILD_HOME/mirrors/starlingx" ]] ; then
mkdir -p "$STX_BUILD_HOME/mirrors/starlingx" || exit 1
fi
# Login to docker hub if necessary
if [[ "$DOCKERHUB_LOGIN" -eq 1 ]] ; then
# This will save docker hub credentials in $HOME/.docker/config.json. If docker hub
# credentials in that file are missing or invalid, this will ask for the username
# and password on the current TTY and save them in the json file. If credentials
# are already in the file, it will reuse them. In any case, this will instruct
# the host's docker daemon to obtain an authentication token for all subsequent
# interactions with docker hub.
info "authenticating host docker daemon with docker hub"
docker login || exit 1
fi
if [ "$STX_PLATFORM" = "minikube" ]; then
# Stop minikube if necessary
WANT_START_MINIKUBE=0
if [[ $RESTART_MINIKUBE -eq 1 ]] ; then
minikube_profile_stop
WANT_START_MINIKUBE=1
elif ! minikube_profile_is_started ; then
WANT_START_MINIKUBE=1
fi
# Start minikube
if [[ $WANT_START_MINIKUBE -eq 1 ]] ; then
minikube_profile_start
fi
# Record the project environment variables
echo "The last minikube profile startup date: `date`" > "$STX_TOOLS_DIR"/minikube_history.log
echo "MINIKUBE_HOME: $MINIKUBE_HOME" >> "$STX_TOOLS_DIR"/minikube_history.log
echo "MINIKUBENAME: $MINIKUBENAME" >> "$STX_TOOLS_DIR"/minikube_history.log
echo "STX_BUILD_HOME: $STX_BUILD_HOME" >> "$STX_TOOLS_DIR"/minikube_history.log
# Import minikube's docker environment. This points docker CLI to minikube's
# embedded docker daemon.
eval $(minikube -p $MINIKUBENAME docker-env)
# Ask minikube's docker daemon to obtain docker hub's access token
if [[ "$DOCKERHUB_LOGIN" -eq 1 ]] ; then
info "authenticating minikube's docker daemon with docker hub"
docker login || exit 1
fi
elif [[ $RESTART_MINIKUBE -eq 1 ]] ; then
warn "--restart-minikube is only supported on minikube platform -- ignoring"
fi
if [[ -n "${BUILD_DOCKER_IMAGES}" ]] ; then
notice "Building docker images"
declare -a docker_build_args
if [[ "$USE_DOCKER_CACHE" != "1" ]] ; then
docker_build_args+=("--no-cache")
fi
for img in $BUILD_DOCKER_IMAGES; do
extra_build_args=()
if grep -q -E '^\s*ARG\s+STX_MIRROR_URL\s*=' "$STX_TOOLS_DIR/"stx/dockerfiles/$img.Dockerfile ; then
init_stx_mirror_url
if [[ -n "$STX_MIRROR_URL" ]] ; then
extra_build_args+=("--build-arg" "STX_MIRROR_URL=$STX_MIRROR_URL")
fi
fi
docker build "${docker_build_args[@]}" "${extra_build_args[@]}" -t $img:$DOCKER_TAG_LOCAL -f "$STX_TOOLS_DIR/"stx/dockerfiles/$img.Dockerfile "$STX_TOOLS_DIR" || exit 1
info "built image $img:$DOCKER_TAG_LOCAL"
done
fi
# Pull images that we didn't rebuild
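# With --cache, images already present locally under the $DOCKER_TAG_LOCAL tag
# are skipped as well.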
PULL_DOCKER_IMAGES=$(
for img in ${DOCKER_IMAGES} ; do
built=no
for build_img in ${BUILD_DOCKER_IMAGES} ; do
if [[ "$img" == "$build_img" ]] ; then
built=yes
break
fi
done
if [[ "$built" != "yes" ]] && \
{ [[ "$USE_DOCKER_CACHE" != 1 ]] || ! docker image inspect ${img}:${DOCKER_TAG_LOCAL} >/dev/null 2>&1 ; } ; then
echo "$img"
fi
done
)
if [[ -n "$PULL_DOCKER_IMAGES" ]] ; then
notice "Pulling docker images: "$PULL_DOCKER_IMAGES
for img in $PULL_DOCKER_IMAGES; do
docker pull ${DOCKER_PREFIX}${img}:${DOCKER_TAG} || exit 1
docker tag ${DOCKER_PREFIX}${img}:${DOCKER_TAG} ${img}:${DOCKER_TAG_LOCAL} || exit 1
info "created image ${img}:${DOCKER_TAG_LOCAL} from pre-built ${DOCKER_PREFIX}${img}:${DOCKER_TAG}"
done
fi
# Restart pods
if [[ $START_PODS -eq 1 ]] ; then
if stx_is_started ; then
stx_stop || exit 1
fi
notice "starting env pods"
stx_start || exit 1
fi