# bash
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
LOADBUILD_ROOTS="/localdisk/loadbuild:/home/localdisk/loadbuild"
DESIGNER_ROOTS="/localdisk/designer:/home/localdisk/designer"
source "${BASH_SOURCE[0]%/*}"/utils.sh || return 1
source "${BASH_SOURCE[0]%/*}"/log_utils.sh || return 1
# Top-level source directory of jenkins scripts repo
TOP_SRC_DIR=$(readlink -f "${BASH_SOURCE[0]%/*}"/../..)
# Library scripts dir
LIB_DIR="$TOP_SRC_DIR/lib"
# Scripts dir
SCRIPTS_DIR="$TOP_SRC_DIR/scripts"
# Templates directory
TEMPLATES_DIR="${SCRIPTS_DIR}/templates"
# Disable repo trace output
export REPO_TRACE=0
# When true produce less noise
#QUIET=false
# Python 3.x executable
: ${PYTHON3:=python3}
# docker images
SAFE_RSYNC_DOCKER_IMG="servercontainers/rsync:3.1.3"
COREUTILS_DOCKER_IMG="debian:bullseye-20220509"

notice() {
    ( set +x ; print_log -i --notice "$@" ; )
}

info() {
    ( set +x ; print_log -i --info "$@" ; )
}

error() {
    ( set +x ; print_log -i --error --location --dump-stack "$@" ; )
}

warn() {
    ( set +x ; print_log -i --warning --location --dump-stack "$@" ; )
}

die() {
    ( set +x ; print_log -i --error --location --dump-stack "$@" ; )
    exit 1
}

bail() {
    ( set +x ; print_log -i --notice "$@" ; )
    exit 0
}

trim() {
    echo "$@" | sed -r -e 's/^\s+//' -e 's/\s+$//'
}

shell_quote() {
    local str
    local arg
    local sep
    for arg in "$@" ; do
        str+=$sep
        str+=$(printf '%q' "$arg")
        sep=' '
    done
    echo "$str"
}
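
# Illustrative sketch (added example, never called by the build flow):
# shows how shell_quote output can be safely re-parsed; the path is made up.
__example_shell_quote() {
    local quoted
    quoted=$(shell_quote rm -rf "/tmp/dir with spaces")
    echo "$quoted"          # prints: rm -rf /tmp/dir\ with\ spaces
    eval "set -- $quoted"   # round-trips back into 3 distinct arguments
    echo "argc=$#"          # prints: argc=3
}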

maybe_run() {
    if $DRY_RUN ; then
        echo "running (dry run): $(shell_quote "$@")"
    else
        echo "running: $(shell_quote "$@")"
        "$@"
    fi
}

#
# Usage: declare_job_env NAME [DFLT]
#
# Make sure the specified env var is defined & non-empty,
# otherwise set it to a default value.
# Trim and export it in either case.
#
declare_job_env() {
    local var="$1"
    local dflt="$2"
    # trim it
    local val
    val="$(trim "${!var}")"
    # fall back to the default if empty
    if [[ -z "$val" ]] ; then
        val="$(trim "$dflt")"
    fi
    # assign the trimmed value and export it, as documented above
    declare -g -x "$var=$val"
}

#
# Usage: require_job_env NAME [DFLT]
#
# Same as declare_job_env, but fail & exit if the var is empty
#
require_job_env() {
    local var="$1" ; shift || :
    declare_job_env "$var" "$@"
    [[ -n "${!var}" ]] || die "required variable \"$var\" is not set"
}
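
# Illustrative sketch: typical parameter handling at the top of a job
# script. MY_OPTIONAL_FLAG is a made-up variable name; BUILD_HOME is real.
__example_job_env() {
    require_job_env BUILD_HOME               # dies if unset or empty
    declare_job_env MY_OPTIONAL_FLAG "false" # falls back to the default
    echo "building under $BUILD_HOME (flag=$MY_OPTIONAL_FLAG)"
}
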
#
# Usage: require_file FILENAME
#
# Make sure file exists and is readable; die otherwise
#
require_file() {
    : <"$1" || die "$1: couldn't open file for reading"
}

__set_common_vars() {
    require_job_env BUILD_HOME
    require_job_env TIMESTAMP
    declare_job_env PUBLISH_TIMESTAMP "$TIMESTAMP"
    declare_job_env DRY_RUN

    DOCKER_BASE_OS="debian"
    DOCKER_OS_LIST="debian distroless"

    # Set dry-run options
    if [[ "$DRY_RUN" != "false" ]] ; then
        DRY_RUN="true"
        DRY_RUN_ARG="--dry-run"
    else
        DRY_RUN="false"
        DRY_RUN_ARG=""
    fi

    export PATH="/usr/local/bin:$PATH"
}

__set_build_vars() {
    require_job_env BUILD_USER
    require_job_env PROJECT
    require_job_env BUILD_HOME
    require_job_env BUILD_OUTPUT_ROOT
    require_job_env BUILD_OUTPUT_ROOT_URL
    require_job_env TIMESTAMP
    require_job_env PUBLISH_ROOT
    require_job_env PUBLISH_ROOT_URL
    require_job_env PUBLISH_TIMESTAMP

    # Set a few additional globals
    REPO_ROOT_SUBDIR=localdisk/designer/$BUILD_USER/$PROJECT
    WORKSPACE_ROOT_SUBDIR=localdisk/loadbuild/$BUILD_USER/$PROJECT
    REPO_ROOT="$BUILD_HOME/repo"
    WORKSPACE_ROOT="$BUILD_HOME/workspace"
    USER_ID=$(id -u "$BUILD_USER") || exit 1
    BUILD_OUTPUT_HOME="$BUILD_OUTPUT_ROOT/$TIMESTAMP"
    BUILD_OUTPUT_HOME_URL="$BUILD_OUTPUT_ROOT_URL/$TIMESTAMP"

    # publish vars
    PUBLISH_DIR="${PUBLISH_ROOT}/${PUBLISH_TIMESTAMP}${PUBLISH_SUBDIR:+/$PUBLISH_SUBDIR}"
    PUBLISH_URL="${PUBLISH_ROOT_URL}/${PUBLISH_TIMESTAMP}${PUBLISH_SUBDIR:+/$PUBLISH_SUBDIR}"

    # parallel
    if [[ -n "$PARALLEL_CMD" && "${PARALLEL_CMD_JOBS:-0}" -gt 0 ]] ; then
        PARALLEL="$PARALLEL_CMD -j ${PARALLEL_CMD_JOBS}"
    else
        PARALLEL=
    fi

    # Validate & set defaults for ISO & secureboot options.
    # SIGN_ISO_FORMAL was spelled SIGN_ISO in the past.
    if [[ -n "$SIGN_ISO" ]] ; then
        warn "SIGN_ISO is deprecated, please use SIGN_ISO_FORMAL instead"
    fi
    if [[ -z "$SIGN_ISO_FORMAL" ]] ; then
        if [[ -n "$SIGN_ISO" ]] ; then
            SIGN_ISO_FORMAL="$SIGN_ISO"
        elif [[ -n "$SIGNING_SERVER" ]] ; then
            SIGN_ISO_FORMAL="true"
        else
            SIGN_ISO_FORMAL="false"
        fi
        warn "SIGN_ISO_FORMAL is missing, assuming \"$SIGN_ISO_FORMAL\""
    fi
    if [[ "$SIGN_ISO_FORMAL" != "true" && "$SIGN_ISO_FORMAL" != "false" ]] ; then
        die "SIGN_ISO_FORMAL must be \"true\" or \"false\""
    fi

    # SECUREBOOT_FORMAL
    if [[ -z "$SECUREBOOT_FORMAL" ]] ; then
        if [[ -n "$SIGNING_SERVER" ]] ; then
            SECUREBOOT_FORMAL="true"
        else
            SECUREBOOT_FORMAL="false"
        fi
        warn "SECUREBOOT_FORMAL is missing, assuming \"$SECUREBOOT_FORMAL\""
    elif [[ "$SECUREBOOT_FORMAL" != "true" && "$SECUREBOOT_FORMAL" != "false" ]] ; then
        die "SECUREBOOT_FORMAL must be \"true\" or \"false\""
    fi

    declare_job_env SIGN_MAX_ATTEMPTS 3
    declare_job_env SIGN_BACKOFF_DELAY 10
    declare_job_env DOCKER_BUILD_RETRY_COUNT 3
    declare_job_env DOCKER_BUILD_RETRY_DELAY 30
}
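
# Illustrative: with a hypothetical build.conf containing
#     BUILD_USER=jenkins
#     PROJECT=stx-debian
#     TIMESTAMP=2022-06-01_00-00-00
#     BUILD_HOME=/localdisk/designer/jenkins/stx-debian
#     BUILD_OUTPUT_ROOT=/localdisk/loadbuild/jenkins/stx-debian
# __set_build_vars would derive, among others:
#     REPO_ROOT=/localdisk/designer/jenkins/stx-debian/repo
#     WORKSPACE_ROOT=/localdisk/designer/jenkins/stx-debian/workspace
#     BUILD_OUTPUT_HOME=/localdisk/loadbuild/jenkins/stx-debian/2022-06-01_00-00-00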

__started_by_jenkins() {
    [[ -n "$JENKINS_HOME" ]]
}

#
# Usage: load_build_config
#
# Source $BUILD_HOME/build.conf and set a few common globals
#
load_build_config() {
    __set_common_vars || exit 1
    source "$BUILD_HOME/build.conf" || exit 1
    __set_build_vars || exit 1
}

#
# Usage: load_build_env
#
# Load $BUILD_HOME/build.conf and source stx tools env script
#
load_build_env() {
    __set_common_vars || exit 1
    require_file "$BUILD_HOME/build.conf" || exit 1
    source "$BUILD_HOME/source_me.sh" || exit 1
    __set_build_vars || exit 1
}

# Usage: stx_docker_cmd [--dry-run] SHELL_SNIPPET
stx_docker_cmd() {
    local dry_run=0
    if [[ "$1" == "--dry-run" ]] ; then
        dry_run=1
        shift
    fi
    if [[ "$QUIET" != "true" ]] ; then
        echo ">>> running builder pod command:" >&2
        echo "$1" | sed -r 's/^/\t/' >&2
    fi
    if [[ "$dry_run" -ne 1 ]] ; then
        local -a args
        if __started_by_jenkins ; then
            args+=("--no-tty")
        fi
        stx -d shell "${args[@]}" -c "$1"
    fi
}

# Usage: docker_login REGISTRY
# Log in to docker from within the builder pod
docker_login() {
    local reg="$1"
    local login_arg
    if [[ "$reg" != "docker.io" ]] ; then
        login_arg="$reg"
    fi
    stx_docker_cmd "docker login $login_arg </dev/null"
}

#
# Usage: parse_docker_registry REGISTRY[/NAMESPACE]
#
# Parse a registry name and print the registry and the namespace
# separated by a space. Print an error and return non-zero
# if the registry string is invalid.
#
# Examples:
#   parse_docker_registry foo                # ERROR
#   parse_docker_registry foo/bar            # ERROR
#   parse_docker_registry foo.com/bar///baz  # foo.com bar/baz
#
parse_docker_registry() {
    local spec="$1"
    local registry namespace
    # up to 1st slash
    registry="$(echo "$spec" | sed 's!/.*!!' || :)"
    # remove double-slashes & extract everything past the 1st slash
    namespace="$(echo "$spec" | sed -e 's!//*!/!g' | sed -n -e 's!^[^/]*/\(.*\)!\1!p' || :)"
    # registry must contain a dot or a colon to distinguish it from a local namespace
    if ! { echo "$registry" | grep -q -E "[.:]" ; } ||
       ! { echo "$registry" | grep -q -E "^[a-zA-Z0-9._-]+(:[0-9]{1,5})?$" ; } ; then
        error "invalid docker registry spec \"$spec\""
        return 1
    fi
    echo $registry $namespace
}

#
# Print directories that are safe to be mounted in a privileged container,
# "DIR" or "DIR ro", one per line:
#
#   /localdisk/designer/$USER ro                     # read-only
#   /localdisk/loadbuild/$USER ro                    # read-only
#   /localdisk/designer/$USER/$PROJECT               # read/write, ie BUILD_HOME
#   /localdisk/loadbuild/$USER/$PROJECT/$TIMESTAMP   # read/write, ie BUILD_OUTPUT_HOME
#
# With "--writeable-archive-root" the last entry above is replaced with
#
#   /localdisk/loadbuild/$USER/$PROJECT              # read/write, ie BUILD_OUTPUT_ROOT
#
# This is required in order to create hardlinks between files under
# different $TIMESTAMP's.
#
__get_safe_dirs() {
    require_env TIMESTAMP
    require_env USER
    local root norm_root
    local writeable_archive_root="no"
    if [[ "$1" == "--writeable-archive-root" ]] ; then
        writeable_archive_root="yes"
    fi
    # designer & loadbuild roots
    for root in ${DESIGNER_ROOTS/:/ } ${LOADBUILD_ROOTS/:/ } ; do
        norm_root="$(realpath -m -s "$root")" || return 1
        echo "$norm_root/$USER ro"
    done
    # current BUILD_HOME -- make sure it's under /localdisk/designer/$USER
    (
        local build_home
        local -a safe_rw_roots
        local build_home_ok=0
        safe_rw_roots=()
        build_home="$(realpath -m -s "$BUILD_HOME")" || return 1
        for root in ${DESIGNER_ROOTS/:/ } ; do
            norm_root="$(realpath -m -s "$root")" || return 1
            if starts_with "$build_home" "$norm_root/$USER/" ; then
                build_home_ok=1
            fi
            safe_rw_roots+=("$norm_root/$USER")
        done
        if [[ $build_home_ok -ne 1 ]] ; then
            echo >&2
            echo "ERROR: $BUILD_HOME: BUILD_HOME is invalid" >&2
            echo "ERROR: expecting a descendant of any of the following:" >&2
            for safe_rw_root in "${safe_rw_roots[@]}" ; do
                echo "    $safe_rw_root" >&2
            done
            error -i --dump-stack "invalid BUILD_HOME"
            return 1
        fi
        echo "$build_home"
    ) || return 1
    # current build dir under loadbuild
    # make sure it starts with /localdisk/loadbuild/$USER
    (
        local out_root
        local safe_rw_roots
        local out_root_ok=0
        safe_rw_roots=()
        out_root="$(realpath -m -s "$BUILD_OUTPUT_ROOT")" || return 1
        for root in ${LOADBUILD_ROOTS/:/ } ; do
            norm_root="$(realpath -m -s "$root")" || return 1
            if starts_with "$out_root" "$norm_root/$USER/" ; then
                out_root_ok=1
            fi
            safe_rw_roots+=("$norm_root/$USER")
        done
        if [[ $out_root_ok -ne 1 ]] ; then
            echo >&2
            echo "ERROR: $BUILD_OUTPUT_ROOT: BUILD_OUTPUT_ROOT is invalid" >&2
            echo "ERROR: expecting a descendant of any of the following:" >&2
            for safe_rw_root in "${safe_rw_roots[@]}" ; do
                echo "    $safe_rw_root" >&2
            done
            error -i --dump-stack "invalid BUILD_OUTPUT_ROOT"
            return 1
        fi
        if [[ "$writeable_archive_root" == "yes" ]] ; then
            echo "$out_root"
        else
            echo "$out_root/$TIMESTAMP"
        fi
    ) || return 1
}
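
# Illustrative sample output of __get_safe_dirs for a hypothetical
# USER=jenkins, PROJECT=stx-debian, TIMESTAMP=2022-06-01_00-00-00:
#     /localdisk/designer/jenkins ro
#     /home/localdisk/designer/jenkins ro
#     /localdisk/loadbuild/jenkins ro
#     /home/localdisk/loadbuild/jenkins ro
#     /localdisk/designer/jenkins/stx-debian
#     /localdisk/loadbuild/jenkins/stx-debian/2022-06-01_00-00-00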

#
# Usage: __ensure_host_path_readable_in_priv_container [--writeable-archive-root] PATHS...
#
# Make sure each host PATH can be read in a privileged container,
# ie anything under
#   /localdisk/designer/$USER
#   /localdisk/loadbuild/$USER
#
__ensure_host_path_readable_in_priv_container() {
    # safe roots
    local safe_roots_str
    local safe_dirs_args=()
    if [[ "$1" == "--writeable-archive-root" ]] ; then
        safe_dirs_args+=("$1")
        shift
    fi
    safe_roots_str="$(__get_safe_dirs "${safe_dirs_args[@]}" | sed -r 's/\s+ro$//' ; check_pipe_status)" || return 1
    local -a safe_roots
    readarray -t safe_roots <<<"$safe_roots_str" || return 1
    # check each path
    local path norm_path
    for path in "$@" ; do
        local path_ok=0
        norm_path="$(realpath -m -s "$path")" || return 1
        for safe_root in "${safe_roots[@]}" ; do
            if [[ "$safe_root" == "$norm_path" ]] || starts_with "$norm_path" "$safe_root/" ; then
                path_ok=1
                break
            fi
        done
        if [[ "$path_ok" != 1 ]] ; then
            echo "ERROR: $path: this directory can't be read in a privileged container" >&2
            echo "ERROR: expecting one of the following paths or their descendants:" >&2
            local safe_root
            for safe_root in "${safe_roots[@]}" ; do
                echo "    $safe_root" >&2
            done
            error -i --dump-stack "$path: attempted to read from an invalid path in a privileged container" >&2
            return 1
        fi
    done
}

#
# Usage: __ensure_host_path_writable_in_priv_container [--writeable-archive-root] PATHS...
#
# Make sure a host path is OK to write in a privileged container,
# ie any path under BUILD_OUTPUT_ROOT
#
__ensure_host_path_writable_in_priv_container() {
    # safe roots that don't end with " ro"
    local safe_roots_str
    local safe_dirs_args=()
    if [[ "$1" == "--writeable-archive-root" ]] ; then
        safe_dirs_args+=("$1")
        shift
    fi
    safe_roots_str="$(__get_safe_dirs "${safe_dirs_args[@]}" | grep -v -E '\s+ro$' ; check_pipe_status)" || return 1
    local -a safe_roots
    readarray -t safe_roots <<<"$safe_roots_str" || return 1
    # check each path
    local path norm_path
    for path in "$@" ; do
        local path_ok=0
        norm_path="$(realpath -m -s "$path")" || return 1
        for safe_root in "${safe_roots[@]}" ; do
            if [[ "$safe_root" == "$norm_path" ]] || starts_with "$norm_path" "$safe_root/" ; then
                path_ok=1
                break
            fi
        done
        if [[ "$path_ok" != 1 ]] ; then
            echo "ERROR: $path: this directory can't be written in a privileged container" >&2
            echo "ERROR: expecting one of the following paths or their descendants:" >&2
            local safe_root
            for safe_root in "${safe_roots[@]}" ; do
                echo "    $safe_root" >&2
            done
            error -i --dump-stack "$path: attempted to write to an invalid path in a privileged container" >&2
            return 1
        fi
    done
}
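
# Illustrative sketch: validate paths before touching them from a
# privileged container; the "aptly" subdirectory is a made-up example.
__example_priv_path_checks() {
    __ensure_host_path_readable_in_priv_container "$BUILD_HOME/repo" || return 1
    __ensure_host_path_writable_in_priv_container "$BUILD_OUTPUT_HOME/aptly" || return 1
}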

#
# Usage: safe_docker_run [--dry-run] [--writeable-archive-root] <DOCKER RUN OPTIONS>
#
safe_docker_run() {
    local dry_run=0
    local dry_run_prefix
    local -a safe_dirs_args=()
    while [[ "$#" -gt 0 ]] ; do
        if [[ "$1" == "--dry-run" ]] ; then
            dry_run=1
            dry_run_prefix="(dry_run) "
            shift || true
            continue
        fi
        if [[ "$1" == "--writeable-archive-root" ]] ; then
            safe_dirs_args+=("$1")
            shift || true
            continue
        fi
        break
    done
    # construct mount options
    local -a mount_opts
    local safe_dirs_str
    safe_dirs_str="$(__get_safe_dirs "${safe_dirs_args[@]}")" || return 1
    local dir flags
    while read -r dir flags ; do
        [[ -d "$dir" ]] || continue
        local mount_str="type=bind,src=$dir,dst=$dir"
        if [[ -n "$flags" ]] ; then
            mount_str+=",$flags"
        fi
        mount_opts+=("--mount" "$mount_str")
    done <<<"$safe_dirs_str"
    # other docker opts
    local docker_opts=("-i")
    if [[ -t 0 ]] ; then
        docker_opts+=("-t")
    fi
    local -a cmd=(docker run "${docker_opts[@]}" "${mount_opts[@]}" "$@")
    if [[ "$QUIET" != "true" ]] ; then
        info "${dry_run_prefix}running: $(shell_quote "${cmd[@]}")"
    fi
    if [[ $dry_run -ne 1 ]] ; then
        "${cmd[@]}"
    fi
}
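
# Illustrative sketch: list the build output as root in a throw-away
# container; the safe designer/loadbuild directories are bind-mounted
# automatically by safe_docker_run.
__example_safe_docker_run() {
    safe_docker_run --rm "$COREUTILS_DOCKER_IMG" ls -l "$BUILD_OUTPUT_HOME"
}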

#
# Copy directories as root user; similar to "cp -ar", except:
#
# if SRC_DIR ends with "/", its contents will be copied, rather
# than the directory itself
#
# Usage:
#   safe_copy_dir [--exclude PATTERN]
#                 [--include PATTERN]
#                 [--delete]
#                 [--chown USER:GROUP]
#                 [--writeable-archive-root]
#                 [--dry-run]
#                 [-v | --verbose]
#                 SRC_DIR... DST_DIR
#
safe_copy_dir() {
    local usage_msg="
Usage: ${FUNCNAME[0]} [OPTIONS...] SRC_DIR... DST_DIR
"
    # parse command line
    local opts
    local -a rsync_opts
    local -a safe_dirs_args
    local user_group
    local dry_run_arg=
    opts=$(getopt -n "${FUNCNAME[0]}" -o "v" -l exclude:,include:,delete,chown:,writeable-archive-root,dry-run,verbose -- "$@")
    [[ $? -eq 0 ]] || return 1
    eval set -- "${opts}"
    while true ; do
        case "$1" in
            --exclude)
                rsync_opts+=("--exclude" "$2")
                shift 2
                ;;
            --include)
                rsync_opts+=("--include" "$2")
                shift 2
                ;;
            --delete)
                rsync_opts+=("--delete-after")
                shift
                ;;
            --dry-run)
                dry_run_arg="--dry-run"
                shift
                ;;
            --chown)
                user_group="$2"
                shift 2
                ;;
            --writeable-archive-root)
                safe_dirs_args+=("$1")
                shift
                ;;
            -v | --verbose)
                rsync_opts+=("--verbose")
                shift
                ;;
            --)
                shift
                break
                ;;
            -*)
                error --epilog="$usage_msg" "invalid options"
                return 1
                ;;
            *)
                break
                ;;
        esac
    done
    if [[ "$#" -lt 2 ]] ; then
        error --epilog="$usage_msg" "invalid options"
        return 1
    fi
    local src_dirs_count; let src_dirs_count="$# - 1"
    local -a src_dirs=("${@:1:$src_dirs_count}")
    local dst_dir="${@:$#:1}"
    # make sure src dirs exist
    local dir
    for dir in "${src_dirs[@]}" ; do
        if [[ ! -d "$dir" ]] ; then
            error "$dir: does not exist or is not a directory"
            return 1
        fi
    done
    # make sure all dirs are readable
    __ensure_host_path_readable_in_priv_container "${safe_dirs_args[@]}" "$@" || return 1
    # if dst_dir exists, it must be writable
    if [[ -d "${dst_dir}" ]] ; then
        __ensure_host_path_writable_in_priv_container "${safe_dirs_args[@]}" "$dst_dir" || return 1
    # dst_dir doesn't exist, but there are multiple sources
    elif [[ "${#src_dirs[@]}" -gt 1 ]] ; then
        error "$dst_dir: does not exist or is not a directory"
        return 1
    # dst_dir doesn't exist, and there's one source: copy source to dst_dir's
    # parent, but rename it to basename(dst_dir). This is how "cp" behaves.
    else
        src_dirs=("${src_dirs[0]%/}/")
        __ensure_host_path_writable_in_priv_container "${safe_dirs_args[@]}" "$dst_dir" || return 1
    fi
    # --chown: resolve USER:GROUP to UID:GID
    if [[ -n "$user_group" ]] ; then
        local uid_gid
        uid_gid=$(
            gid_suffix=
            user="${user_group%%:*}"
            if echo "$user_group" | grep -q ":" ; then
                group="${user_group#*:}"
                if [[ -n "$group" ]] ; then
                    gid=$(getent group "$group" | awk -F ':' '{print $3}')
                    [[ -n "$gid" ]] || exit 1
                else
                    gid=$(id -g "$user") || exit 1
                fi
                gid_suffix=":$gid"
            fi
            uid=$(id -u "$user") || exit 1
            echo "${uid}${gid_suffix}"
        ) || {
            error "unable to resolve owner $user_group"
            return 1
        }
        rsync_opts+=("--chown" "$uid_gid")
    fi
    # run rsync in docker
    rsync_opts+=(--archive --devices --specials --hard-links --recursive --one-file-system)
    if ! safe_docker_run $dry_run_arg --rm "$SAFE_RSYNC_DOCKER_IMG" rsync "${rsync_opts[@]}" "${src_dirs[@]}" "${dst_dir%/}/" ; then
        error "failed to copy files"
        return 1
    fi
}
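
# Illustrative sketch: mirror an export tree into the archive, dropping
# stale files and fixing ownership; the subdirectory names are examples.
__example_safe_copy_dir() {
    safe_copy_dir --delete --chown "$BUILD_USER:" --exclude '*.tmp' \
        "$WORKSPACE_ROOT/export/" "$BUILD_OUTPUT_HOME/export"
}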

#
# Usage: safe_rm [OPTIONS...] PATHS...
#
safe_rm() {
    local usage_msg="
Usage: ${FUNCNAME[0]} [OPTIONS...] PATHS...
    --writeable-archive-root
    --dry-run
    -v,--verbose
"
    # parse command line
    local opts
    local -a safe_dirs_args
    local -a rm_opts
    local -a rm_cmd=("rm")
    opts=$(getopt -n "${FUNCNAME[0]}" -o "v" -l writeable-archive-root,dry-run,verbose -- "$@")
    [[ $? -eq 0 ]] || return 1
    eval set -- "${opts}"
    while true ; do
        case "$1" in
            --writeable-archive-root)
                safe_dirs_args+=("$1")
                shift
                ;;
            --dry-run)
                rm_cmd=("echo" "(dry run)" "rm")
                shift
                ;;
            -v | --verbose)
                rm_opts+=("--verbose")
                shift
                ;;
            --)
                shift
                break
                ;;
            -*)
                error --epilog="$usage_msg" "invalid options"
                return 1
                ;;
            *)
                break
                ;;
        esac
    done
    if [[ "$#" -lt 1 ]] ; then
        error --epilog="$usage_msg" "invalid options"
        return 1
    fi
    # make sure all paths are writeable
    __ensure_host_path_writable_in_priv_container "${safe_dirs_args[@]}" "$@" || return 1
    # run rm in docker
    rm_opts+=(--one-file-system --preserve-root --recursive --force)
    info "removing $*"
    if ! safe_docker_run --rm "$COREUTILS_DOCKER_IMG" "${rm_cmd[@]}" "${rm_opts[@]}" -- "$@" ; then
        error "failed to remove files"
        return 1
    fi
}
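
# Illustrative sketch: preview a root-owned tree removal, then delete it;
# the "tmp" subdirectory is a made-up example.
__example_safe_rm() {
    safe_rm --dry-run "$BUILD_OUTPUT_HOME/tmp" || return 1  # echoes the rm command
    safe_rm "$BUILD_OUTPUT_HOME/tmp"                        # actually deletes
}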

#
# Usage: safe_chown [OPTIONS...] USER[:GROUP] PATHS...
#
safe_chown() {
    local usage_msg="
Usage: ${FUNCNAME[0]} [OPTIONS...] USER[:GROUP] PATHS...
    --writeable-archive-root
    --dry-run
    -v,--verbose
    -R,--recursive
"
    # parse command line
    local opts
    local -a cmd_args
    local dry_run_arg
    local -a safe_dirs_args
    local -a cmd=("chown")
    opts=$(getopt -n "${FUNCNAME[0]}" -o "vR" -l dry-run,verbose,recursive,writeable-archive-root -- "$@")
    [[ $? -eq 0 ]] || return 1
    eval set -- "${opts}"
    while true ; do
        case "$1" in
            --dry-run)
                dry_run_arg="--dry-run"
                shift
                ;;
            -v | --verbose)
                cmd_args+=("--verbose")
                shift
                ;;
            -R | --recursive)
                cmd_args+=("--recursive")
                shift
                ;;
            --writeable-archive-root)
                safe_dirs_args+=("$1")
                shift
                ;;
            --)
                shift
                break
                ;;
            -*)
                error --epilog="$usage_msg" "invalid options"
                return 1
                ;;
            *)
                break
                ;;
        esac
    done
    if [[ "$#" -lt 2 ]] ; then
        error --epilog="$usage_msg" "invalid options"
        return 1
    fi
    local user_group="$1" ; shift
    __ensure_host_path_writable_in_priv_container "${safe_dirs_args[@]}" "$@" || return 1
    # resolve USER:GROUP to UID:GID
    local uid_gid
    uid_gid=$(
        gid_suffix=
        user="${user_group%%:*}"
        if echo "$user_group" | grep -q ":" ; then
            group="${user_group#*:}"
            if [[ -n "$group" ]] ; then
                gid=$(getent group "$group" | awk -F ':' '{print $3}')
                [[ -n "$gid" ]] || exit 1
            else
                gid=$(id -g "$user") || exit 1
            fi
            gid_suffix=":$gid"
        fi
        uid=$(id -u "$user") || exit 1
        echo "${uid}${gid_suffix}"
    ) || {
        error "unable to resolve owner $user_group"
        return 1
    }
    if ! safe_docker_run $dry_run_arg --rm "$COREUTILS_DOCKER_IMG" \
            "${cmd[@]}" "${cmd_args[@]}" -- "$uid_gid" "$@" ; then
        error "failed to change file ownership"
        return 1
    fi
}
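
# Illustrative sketch: hand a root-owned published tree back to the build
# user; "export" is a made-up subdirectory.
__example_safe_chown() {
    safe_chown --recursive "$BUILD_USER:" "$BUILD_OUTPUT_HOME/export"
}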

#
# Usage: make_deb_repo [--origin=ORIGIN] [--label=LABEL] DIR
#
make_deb_repo() {
    local origin
    local label
    while [[ "$#" -gt 0 ]] ; do
        case "$1" in
            --origin=*)
                origin="${1#--origin=}"
                shift
                ;;
            --label=*)
                label="${1#--label=}"
                shift
                ;;
            *)
                break
                ;;
        esac
    done
    local dir="$1"
    (
        set -e
        cd "$dir"
        rm -f Packages Packages.gz
        (
            set -e
            dpkg-scanpackages -t deb --multiversion .
            dpkg-scanpackages -t udeb --multiversion .
        ) >Packages
        gzip -c Packages >Packages.gz
        __print_deb_release "$origin" "$label" >Release.tmp
        mv -f Release.tmp Release
        rm -f Packages
    )
}
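
# Illustrative sketch: index a directory of .deb files and consume it as a
# flat apt repo; the path, origin and label are examples.
__example_make_deb_repo() {
    make_deb_repo --origin=stx --label="StarlingX snapshot" "$BUILD_OUTPUT_HOME/packages" || return 1
    # the result can then be used with an apt source line such as:
    #     deb [trusted=yes] file://$BUILD_OUTPUT_HOME/packages ./
}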

__print_deb_release_checksums() {
    local section="$1"
    local checksum_prog="$2"
    local body
    body="$(
        set -e
        for base in Packages ; do
            for file in "$base" "${base}.gz" "${base}.xz" "${base}.bz2" ; do
                if [[ -f "$file" ]] ; then
                    checksum=$($checksum_prog "$file" | awk '{print $1}' ; check_pipe_status) || exit 1
                    size=$(stat --format '%s' "$file") || exit 1
                    printf ' %s %16d %s\n' "$checksum" "$size" "$file"
                fi
            done
        done
    )" || return 1
    if [[ -n "$body" ]] ; then
        echo "${section}:"
        echo "${body}"
    fi
}

__print_deb_release() {
    local origin="$1"
    local label="$2"
    local now
    # Date: ...
    now="$(date --rfc-2822 --utc)" || return 1
    echo "Date: $now"
    # Origin: ...
    if [[ -n "$origin" ]] ; then
        echo "Origin: $origin"
    fi
    # Label: ...
    if [[ -n "$label" ]] ; then
        echo "Label: $label"
    fi
    # <checksums>
    __print_deb_release_checksums "MD5Sum" "md5sum" || return 1
    __print_deb_release_checksums "SHA1" "sha1sum" || return 1
    __print_deb_release_checksums "SHA256" "sha256sum" || return 1
    __print_deb_release_checksums "SHA512" "sha512sum" || return 1
}
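
# Illustrative sample of a generated Release file (values made up):
#     Date: Wed, 01 Jun 2022 00:00:00 +0000
#     Origin: stx
#     Label: StarlingX snapshot
#     MD5Sum:
#      d41d8cd98f00b204e9800998ecf8427e             1234 Packages.gz
#     SHA256:
#      e3b0c44298fc1c149afbf4c8996fb924...             1234 Packages.gz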
if [[ "${SHELL_XTRACE,,}" == "true" || "${SHELL_XTRACE}" == "1" ]] ; then
set -x
fi