Better safe_copy_dir & friends

This patch improves safe_copy_dir() and related functions:
* clean up & simplify the implementation
* path sanity checks no longer depend on $PROJECT
* safe_copy_dir(): --chown: resolve the user name to a UID on the host
* safe_copy_dir(): interpret dest_dir as the "cp" command does,
  but src_dir as "rsync" does (see the example below)
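
For example (a hypothetical sketch of the new semantics; the paths shown
are illustrative only, not the flags used by the build scripts):

    # SRC ends with "/": copy its contents, as "rsync" does
    safe_copy_dir /src/dir/ /dst/dir/

    # DST does not exist and there is a single SRC: create DST and copy
    # SRC's contents into it, the way "cp -r SRC DST" would
    safe_copy_dir /src/dir /dst/new-dir

    # --chown accepts USER[:GROUP]; the name is resolved to a numeric
    # UID[:GID] on the host before rsync runs inside the container
    safe_copy_dir --chown "$USER:" /src/dir/ /dst/dir/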

Story: 2010226
Task: 46386

Signed-off-by: Davlet Panech <davlet.panech@windriver.com>
Change-Id: I9428d9fceb50f78840fc9fb93e8a6a132425cddc
Davlet Panech 2022-08-24 21:21:40 -04:00
parent 838a7713b8
commit 93f0b873b6
5 changed files with 248 additions and 176 deletions


@@ -22,12 +22,10 @@ dir_is_empty() {
}
if ! dir_is_empty "$BUILD_HOME/workspace/helm-charts" ; then
my_user="$(id -u)"
my_group="$(id -g)"
if [[ ! -d "$BUILD_OUTPUT_HOME/workspace/helm-charts" ]] ; then
mkdir "$BUILD_OUTPUT_HOME/workspace/helm-charts"
fi
safe_copy_dir $DRY_RUN_ARG $VERBOSE_ARG --delete --chown $my_user:$my_group \
safe_copy_dir $DRY_RUN_ARG $VERBOSE_ARG --delete --chown "$USER:" \
"$BUILD_HOME/workspace/helm-charts/" \
"$BUILD_OUTPUT_HOME/workspace/helm-charts/"


@@ -18,7 +18,7 @@ LAT_SUBDIR="localdisk/deploy"
$BUILD_ISO || bail "BUILD_ISO=false, bailing out"
declare -a iso_files
declare -a chown_files
mkdir -p "${BUILD_OUTPUT_HOME}/localdisk"
src_dir="${BUILD_HOME}/${LAT_SUBDIR}"
@@ -27,12 +27,12 @@ if [[ -d "${src_dir}" ]] ; then
notice "archving $src_dir"
mkdir -p "$dst_dir"
safe_copy_dir $DRY_RUN_ARG $VERBOSE_ARG "${src_dir}/" "${dst_dir}/"
iso_files+=($(find "${dst_dir}" -mindepth 1 -maxdepth 1 -type f))
chown_files+=($(find "${dst_dir}" -mindepth 1 -maxdepth 1 -type f))
fi
if [[ "${#iso_files[@]}" -gt 0 ]] ; then
if [[ "${#chown_files[@]}" -gt 0 ]] ; then
notice "changing file ownership to $USER"
safe_chown $DRY_RUN_ARG $VERBOSE_ARG "$USER:" "${iso_files[@]}"
safe_chown $DRY_RUN_ARG $VERBOSE_ARG "$USER:" "${chown_files[@]}"
fi


@@ -18,10 +18,8 @@ load_build_env
$BUILD_PACKAGES || bail "BUILD_PACKAGES=false, skipping build"
if [[ -d "$BUILD_HOME/$WORKSPACE_ROOT_SUBDIR" ]] ; then
my_user="$(id -u)"
my_group="$(id -g)"
mkdir -p "$BUILD_OUTPUT_HOME/$WORKSPACE_ROOT_SUBDIR"
safe_copy_dir --chown "$my_user:$my_group" $DRY_RUN_ARG $VERBOSE_ARG \
safe_copy_dir --chown "$USER:" $DRY_RUN_ARG $VERBOSE_ARG \
"$BUILD_HOME/$WORKSPACE_ROOT_SUBDIR/" "$BUILD_OUTPUT_HOME/$WORKSPACE_ROOT_SUBDIR/"
ln -sfn "$WORKSPACE_ROOT_SUBDIR" "$BUILD_OUTPUT_HOME/workspace"
fi


@@ -6,8 +6,8 @@
# SPDX-License-Identifier: Apache-2.0
#
: ${LOADBUILD_ROOTS:="/localdisk/loadbuild:/home/localdisk/loadbuild"}
: ${DESIGNER_ROOTS:="/localdisk/designer:/home/localdisk/designer"}
LOADBUILD_ROOTS="/localdisk/loadbuild:/home/localdisk/loadbuild"
DESIGNER_ROOTS="/localdisk/designer:/home/localdisk/designer"
source "${BASH_SOURCE[0]%/*}"/utils.sh || return 1
source "${BASH_SOURCE[0]%/*}"/log_utils.sh || return 1
@@ -33,7 +33,6 @@ TEMPLATES_DIR="${SCRIPTS_DIR}/templates"
# docker images
SAFE_RSYNC_DOCKER_IMG="servercontainers/rsync:3.1.3"
COREUTILS_DOCKER_IMG="debian:bullseye-20220509"
APT_UTILS_DOCKER_IMG="debian:bullseye-20220509"
notice() {
( set +x ; print_log -i --notice "$@" ; )
@@ -65,18 +64,23 @@ trim() {
echo "$@" | sed -r -e 's/^\s+//' -e 's/\s+$//'
}
maybe_run() {
local cmd
local sep=''
shell_quote() {
local str
local arg
local sep
for arg in "$@" ; do
cmd+="$sep" ; sep=' '
cmd+="$(printf '%q' "$arg")"
str+=$sep
str+=$(printf '%q' "$arg")
sep=' '
done
echo "$str"
}
maybe_run() {
if $DRY_RUN ; then
echo "running (dry run): $cmd"
echo "running (dry run): $(shell_quote "$@")"
else
echo "running: $cmd"
echo "running: $(shell_quote "$@")"
"$@"
fi
}
@@ -263,112 +267,161 @@ parse_docker_registry() {
echo $registry $namespace
}
__get_protected_dirs() {
[[ -n "$USER" ]] || die "USER not set"
[[ -n "$PROJECT" ]] || die "PROJECT not set"
#
# Print directories that are safe to be mounted in a privileged container,
# "DIR" or "DIR ro", one per line:
# /localdisk/designer/$USER ro # read-only
# /localdisk/loadbuild/$USER ro # read-only
# /localdisk/designer/$USER/$PROJECT # read/write ie BUILD_HOME
# /localdisk/loadbuild/$USER/$PROJECT/$TIMESTAMP # read/write ie BUILD_OUTPUT_ROOT
#
__get_safe_dirs() {
require_env TIMESTAMP
require_env USER
local root norm_root
local dir
for dir in $(echo "$DESIGNER_ROOTS" "$LOADBUILD_ROOTS" | sed 's/:/ /g') ; do
if [[ -d "$dir" ]] ; then
echo "$dir:ro"
if [[ -d "$dir/$USER/$PROJECT" ]] ; then
echo "$dir/$USER/$PROJECT"
fi
fi
# designer & loadbuild roots
for root in ${DESIGNER_ROOTS/:/ } ${LOADBUILD_ROOTS/:/ } ; do
norm_root="$(realpath -m -s "$root")" || return 1
echo "$norm_root/$USER ro"
done
# current BUILD_HOME -- make sure its under /localdisk/designer/$USER
(
local build_home
local -a safe_rw_roots
local build_home_ok
safe_rw_roots=()
build_home="$(realpath -m -s "$BUILD_HOME")" || return 1
for root in ${DESIGNER_ROOTS/:/ } ; do
norm_root="$(realpath -m -s "$root")" || return 1
if starts_with "$build_home" "$norm_root/$USER/" ; then
build_home_ok=1
fi
safe_rw_roots+=("$norm_root/$USER")
done
if [[ $build_home_ok -ne 1 ]] ; then
echo >&2
echo "ERROR: $BUILD_HOME: BUILD_HOME is invalid" >&2
echo "ERROR: expecting a descendant of any of the following:" >&2
for safe_rw_root in "${safe_rw_roots[@]}" ; do
echo " $safe_rw_root" >&2
done
error -i --dump-stack "invalid BUILD_HOME"
return 1
fi
echo "$build_home"
)
# current build dir under loadbuild
# make sure it starts with /localdisk/loadbuild/$USER
(
local out_root
local safe_rw_roots
local out_root_ok=0
safe_rw_roots=()
out_root="$(realpath -m -s "$BUILD_OUTPUT_ROOT")" || return 1
for root in ${LOADBUILD_ROOTS/:/ } ; do
norm_root="$(realpath -m -s "$root")" || return 1
if starts_with "$out_root" "$norm_root/$USER/" ; then
out_root_ok=1
fi
safe_rw_roots+=("$norm_root/$USER")
done
if [[ $out_root_ok -ne 1 ]] ; then
echo >&2
echo "ERROR: $BUILD_OUTPUT_ROOT: BUILD_OUTPUT_ROOT is invalid" >&2
echo "ERROR: expecting a descendant of any of the following:" >&2
for safe_rw_root in "${safe_rw_roots[@]}" ; do
echo " $safe_rw_root" >&2
done
error -i --dump-stack "invalid BUILD_OUTPUT_ROOT"
return 1
fi
echo "$out_root/$TIMESTAMP"
) || return 1
}
#
# Usage: __ensure_dirs_within_protected_set PROTECTED_DIRS... -- DIRS...
# Make sure each DIR equals or starts with any of PROTECTED_DIRS
# Usage: __ensure_host_path_readable_in_priv_container PATHS...
#
__ensure_dirs_within_protected_set() {
local -a protected_dirs
while [[ "$#" -gt 0 && "$1" != "--" ]] ; do
protected_dirs+=("$1")
dir="$1"
shift
done
shift || true
# Make sure each host PATH can be read in a privileged container,
# ie anything under
# /localdisk/designer/$USER
# /localdisk/loadbuild/$USER
#
__ensure_host_path_readable_in_priv_container() {
# safe roots
local safe_roots_str
safe_roots_str="$(__get_safe_dirs | sed -r 's/\s+ro$//' ; check_pipe_status)" || return 1
local -a safe_roots
readarray -t safe_roots <<<"$safe_roots_str" || return 1
while [[ "$#" -gt 0 ]] ; do
local dir="$1" ; shift || true
if ! echo "$dir" | grep -q '^/' ; then
error -i "$dir: directories must be absolute"
return 1
fi
# check if $dir under any of $protected_dirs
local safe=0
local parent_dir
for protected_dir in "${protected_dirs[@]}" ; do
protected_dir="${protected_dir%%:*}"
if [[ "$dir" == "$protected_dir" || "${dir#$protected_dir/}" != "${dir}" ]] ; then
safe=1
# check each path
local path norm_path
for path in "$@" ; do
local path_ok=0
norm_path="$(realpath -m -s "$path")" || return 1
for safe_root in "${safe_roots[@]}" ; do
if [[ "$safe_root" == "$norm_path" ]] || starts_with "$norm_path" "$safe_root/" ; then
path_ok=1
break
fi
done
if [[ $safe != 1 ]] ; then
error -i "attempted to operate on an unsafe directory \"$dir\""
if [[ "$path_ok" != 1 ]] ; then
echo "error: $path: this directory can't be read in a privileged container" >&2
echo "error: expecting one of the followng paths or their descendants:" >&2
local safe_root
for safe_root in "${safe_roots[@]}" ; do
echo " $safe_root" >&2
done
error -i --dump-stack "$path: attempted to read from an invalid path in a privileged container" >&2
return 1
fi
done
}
#
# Usage: __ensure_dir_not_blacklisted_for_writing [--skip-missing] PATH...
# Usage: __ensure_host_path_writable_in_priv_container PATHS...
#
__ensure_dir_not_blacklisted_for_writing() {
local -a blacklist_dir_list=(
"/"
)
local -a blacklist_prefix_list=(
"/usr/"
"/etc/"
"/var/"
"/run/"
"/proc/"
"/sys/"
"/boot/"
"/dev/"
"/media/"
"/mnt/"
"/proc/"
"/net/"
"/sys/"
)
local skip_missing=0
if [[ "$1" == "--skip-missing" ]] ; then
skip_missing=1
shift
fi
local dir
for dir in "$@" ; do
local abs_dir
if ! abs_dir="$(readlink -f "$dir")" ; then
if [[ $skip_missing -eq 1 ]] ; then
continue
# Make sure a host path is OK to write in a privileged container,
# ie any path under BUILD_OUTPUT_ROOT
#
__ensure_host_path_writable_in_priv_container() {
# safe roots that don't end with " ro"
local safe_roots_str
safe_roots_str="$(__get_safe_dirs | grep -v -E '\s+ro$' ; check_pipe_status)" || return 1
local -a safe_roots
readarray -t safe_roots <<<"$safe_roots_str" || return 1
# check each path
local path norm_path
for path in "$@" ; do
local path_ok=0
norm_path="$(realpath -m -s "$path")" || return 1
for safe_root in "${safe_roots[@]}" ; do
if [[ "$safe_root" == "$norm_path" ]] || starts_with "$norm_path" "$safe_root/" ; then
path_ok=1
break
fi
error -i "$dir: does not exist or is not readable"
return 1
fi
#if [[ ! -w "$abs_dir" ]] ; then
# error -i "$dir: not writable"
# return 1
#fi
if in_list "$abs_dir" "${blacklist_dir_list}" || \
starts_with "$abs_dir" "${blacklist_prefix_list}" ; then
error -i "$dir: is blacklisted for writing"
done
if [[ "$path_ok" != 1 ]] ; then
echo "ERROR: $path: this directory can't be written in a privileged container" >&2
echo "ERROR: expecting one of the followng paths or their descendants:" >&2
local safe_root
for safe_root in "${safe_roots[@]}" ; do
echo " $safe_root" >&2
done
error -i --dump-stack "$path: attempted to write to an invalid path in a privileged container" >&2
return 1
fi
done
}
#
# Usage: __safe_docker_run [--dry-run] PROTECTED_DIRS... -- <DOCKER RUN OPTIONS>
# Usage: __safe_docker_run [--dry-run] <DOCKER RUN OPTIONS>
#
__safe_docker_run() {
local loc="${BASH_SOURCE[0]}(${BASH_LINENO[0]}): ${FUNCNAME[0]}: "
safe_docker_run() {
local dry_run=0
local dry_run_prefix
if [[ "$1" == "--dry-run" ]] ; then
@@ -379,54 +432,42 @@ __safe_docker_run() {
# construct mount options
local -a mount_opts
while [[ "$#" -gt 0 && "$1" != "--" ]] ; do
local dir="$1" ; shift
local extra_mount_str=""
if echo "$dir" | grep -q : ; then
local opt
local -a extra_mount_opts
for opt in $(echo "$dir" | sed -e 's/.*://' -e 's/,/ /g') ; do
if [[ "$opt" == "ro" ]] ; then
extra_mount_str+=",ro"
continue
fi
error -i "invalid mount option \"$opt\""
return 1
done
dir="${dir%%:*}"
local safe_dirs_str
safe_dirs_str="$(__get_safe_dirs)" || return 1
while read dir flags ; do
[[ -d "$dir" ]] || continue
local mount_str="type=bind,src=$dir,dst=$dir"
if [[ -n "$flags" ]] ; then
mount_str+=",$flags"
fi
mount_opts+=("--mount" "type=bind,src=$dir,dst=$dir""$extra_mount_str")
done
shift || true
mount_opts+=("--mount" "$mount_str")
done <<<"$safe_dirs_str"
# other docker opts
local docker_opts=("-i")
if [[ -t 0 ]] ; then
docker_opts+=("-t")
fi
local -a cmd=(docker run "${docker_opts[@]}" "${mount_opts[@]}" "$@")
if [[ "$QUIET" != "true" ]] ; then
echo ">>> ${dry_run_prefix}running: docker run ${mount_opts[@]} $@" >&2
info "${dry_run_prefix}running: $(shell_quote "${cmd[@]}")"
fi
if [[ $dry_run -ne 1 ]] ; then
local docker_opts=("-i")
if [[ -t 0 ]] ; then
docker_opts+=("-t")
fi
docker run "${docker_opts[@]}" "${mount_opts[@]}" "$@"
"${cmd[@]}"
fi
}
#
# Usage: safe_docker_run <DOCKER RUN OPTIONS>
# Run a docker container with safe/protected dirs mounted
# Copy directories as root user; similar to "cp -ar", except:
#
safe_docker_run() {
local -a protected_dirs
local protected_dirs_str
protected_dirs_str="$(__get_protected_dirs)" || return 1
readarray -t protected_dirs <<<"$(echo -n "$protected_dirs_str")" || return 1
__safe_docker_run "${protected_dirs[@]}" -- "$@"
}
# if SRC_DIR ends with "/", its contents will be copied, rather
# than the directory itself
#
# Usage:
# safe_copy_dir [--exclude PATTERN ...]
# [--include PATTERN ...]
# safe_copy_dir [--exclude PATTERN]
# [--include PATTERN]
# [--delete]
# [--chown USER:GROUP]
# [--dry-run]
@@ -437,15 +478,10 @@ safe_copy_dir() {
local usage_msg="
Usage: ${FUNCNAME[0]} [OPTIONS...] SRC_DIR... DST_DIR
"
# get protected dirs
local -a protected_dirs
local protected_dirs_str
protected_dirs_str="$(__get_protected_dirs)" || return 1
readarray -t protected_dirs <<<"$(echo -n "$protected_dirs_str")" || return 1
# parse command line
local opts
local -a rsync_opts
local user_group
local dry_run_arg=
opts=$(getopt -n "${FUNCNAME[0]}" -o "v" -l exclude:,include:,delete,chown:,dry-run,verbose -- "$@")
[[ $? -eq 0 ]] || return 1
@@ -469,7 +505,7 @@ Usage: ${FUNCNAME[0]} [OPTIONS...] SRC_DIR... DST_DIR
shift
;;
--chown)
rsync_opts+=("--chown" "$2")
user_group="$2"
shift 2
;;
-v | --verbose)
@@ -493,18 +529,65 @@ Usage: ${FUNCNAME[0]} [OPTIONS...] SRC_DIR... DST_DIR
error --epilog="$usage_msg" "invalid options"
return 1
fi
local src_dirs_count; let src_dirs_count="$# - 1"
local -a src_dirs=("${@:1:$src_dirs_count}")
local dst_dir="${@:$#:1}"
# make sure dirs start with a known prefix
__ensure_dirs_within_protected_set "${protected_dirs[@]}" -- "$@" || return 1
# make sure src dirs exist
local dir
for dir in "${src_dirs[@]}" ; do
if [[ ! -d "$dir" ]] ; then
error "$dir: does not exist or not a directory"
return 1
fi
done
# make sure last destination dir is writeable
__ensure_dir_not_blacklisted_for_writing "${dst_dir}"
# make sure all dirs are readable
__ensure_host_path_readable_in_priv_container "$@" || return 1
# run rsync in docker, filter out noisy greetings
# if dst_dir exists, it must be writable
if [[ -d "${dst_dir}" ]] ; then
__ensure_host_path_writable_in_priv_container "$dst_dir" || return 1
# dst_dir doesn't exist, but there are multiple sources
elif [[ "${#src_dirs[@]}" -gt 1 ]] ; then
error "$dst_dir: does not exist or not a directory"
return 1
# dst_dir doesn't exist, and there's one source: copy source to dst_dir's
# parent, but rename it to basename(dst_dir). This is how "cp" behaves.
else
src_dirs=("${src_dirs[0]%/}/")
__ensure_host_path_writable_in_priv_container "$dst_dir" || return 1
fi
# --chown: resolve USER:GROUP to UID:GID
if [[ -n "$user_group" ]] ; then
local uid_gid
uid_gid=$(
set -x
gid_suffix=
user="${user_group%%:*}"
if echo "$user_group" | grep -q ":" ; then
group="${user_group#*:}"
if [[ -n "$group" ]] ; then
gid=$(getent group "$group" | awk -F ':' '{print $3}')
[[ -n "$gid" ]] || exit 1
fi
gid=$(id -g $user) || exit 1
gid_suffix=":$gid"
fi
uid=$(id -u $user) || exit 1
echo "${uid}${gid_suffix}"
) || {
error "unable to resolve owner $user_group"
return 1
}
rsync_opts+=("--chown" "$uid_gid")
fi
# run rsync in docker
rsync_opts+=(--archive --devices --specials --hard-links --recursive --one-file-system)
__safe_docker_run $dry_run_arg "${protected_dirs[@]}" -- --rm "$SAFE_RSYNC_DOCKER_IMG" rsync "${rsync_opts[@]}" "$@"
if [[ ${PIPSTATUS[0]} -ne 0 ]] ; then
if ! safe_docker_run $dry_run_arg --rm "$SAFE_RSYNC_DOCKER_IMG" rsync "${rsync_opts[@]}" "${src_dirs[@]}" "${dst_dir%/}/" ; then
error "failed to copy files"
return 1
fi
@@ -520,12 +603,6 @@ Usage: ${FUNCNAME[0]} [OPTIONS...] PATHS...
--dry-run
-v,--verbose
"
# get protected dirs
local -a protected_dirs
local protected_dirs_str
protected_dirs_str="$(__get_protected_dirs)" || return 1
readarray -t protected_dirs <<<"$(echo -n "$protected_dirs_str")" || return 1
# parse command line
local opts
local -a rm_opts
@@ -561,13 +638,13 @@ Usage: ${FUNCNAME[0]} [OPTIONS...] PATHS...
return 1
fi
__ensure_dirs_within_protected_set "${protected_dirs[@]}" -- "$@" || return 1
__ensure_dir_not_blacklisted_for_writing --skip-missing "$@"
# make sure all paths are writeable
__ensure_host_path_writable_in_priv_container "$@"
# run rsync in docker
rm_opts+=(--one-file-system --preserve-root --recursive --force)
info "removing $*"
if ! __safe_docker_run "${protected_dirs[@]}" -- --rm "$COREUTILS_DOCKER_IMG" "${rm_cmd[@]}" "${rm_opts[@]}" -- "$@" ; then
if ! safe_docker_run --rm "$COREUTILS_DOCKER_IMG" "${rm_cmd[@]}" "${rm_opts[@]}" -- "$@" ; then
error "failed to remove files"
return 1
fi
@@ -583,12 +660,6 @@ Usage: ${FUNCNAME[0]} [OPTIONS...] USER[:GROUP] PATHS...
-v,--verbose
-R,--recursive
"
# get protected dirs
local -a protected_dirs
local protected_dirs_str
protected_dirs_str="$(__get_protected_dirs)" || return 1
readarray -t protected_dirs <<<"$(echo -n "$protected_dirs_str")" || return 1
# parse command line
local cmd_args
local dry_run_arg
@@ -629,8 +700,7 @@ Usage: ${FUNCNAME[0]} [OPTIONS...] USER[:GROUP] PATHS...
fi
local user_group="$1" ; shift
__ensure_dirs_within_protected_set "${protected_dirs[@]}" -- "$@" || return 1
__ensure_dir_not_blacklisted_for_writing --skip-missing "$@"
__ensure_host_path_writable_in_priv_container "$@"
# resolve USER:GROUP to UID:GID
local uid_gid
@@ -640,7 +710,7 @@ Usage: ${FUNCNAME[0]} [OPTIONS...] USER[:GROUP] PATHS...
if echo "$user_group" | grep -q ":" ; then
group="${user_group#*:}"
if [[ -n "$group" ]] ; then
gid=$(getent "$group" | awk -F ':' '{print $3}')
gid=$(getent group "$group" | awk -F ':' '{print $3}')
[[ -n "$gid" ]] || exit 1
fi
gid=$(id -g $user) || exit 1
@@ -653,8 +723,8 @@ Usage: ${FUNCNAME[0]} [OPTIONS...] USER[:GROUP] PATHS...
return 1
}
if ! __safe_docker_run $dry_run_arg "${protected_dirs[@]}" -- --rm "$COREUTILS_DOCKER_IMG" \
"${cmd[@]}" "${cmd_args[@]}" -- "$uid_gid" "$@" ; then
if ! safe_docker_run $dry_run_arg --rm "$COREUTILS_DOCKER_IMG" \
"${cmd[@]}" "${cmd_args[@]}" -- "$uid_gid" "$@" ; then
error "failed to change file ownership"
return 1
fi


@@ -15,12 +15,13 @@ require_job_env TIMESTAMP
load_build_env
$DRY_RUN && exit 0 || :
notice "publishing $DOCKER_BASE_OS $BUILD_STREAM docker image lists"
src_dir="$STX_BUILD_HOME/workspace/std/build-images"
dst_dir="$PUBLISH_DIR/outputs/docker-images"
if [[ ! -d "$src_dir" ]] ; then
bail "$src_dir doesn't exist, exiting"
fi
mkdir -p "$dst_dir"
declare -a find_args
or=
@@ -32,8 +33,13 @@ for os in $(echo $DOCKER_OS_LIST | sed 's/,/ /g') ; do
)
or="-or"
done
if [[ ${#find_args[@]} -gt 0 ]] ; then
if [[ ${#find_args[@]} -gt 0 ]] && [[ -d "$src_dir" ]] ; then
notice "publishing $DOCKER_BASE_OS $BUILD_STREAM docker image lists"
for src in $(find "$src_dir" -maxdepth 1 -type f \( "${find_args[@]}" \) ) ; do
cp -v "$src" "$dst_dir/"
if $DRY_RUN ; then
info "$src => $dst_dir/"
else
cp -v "$src" "$dst_dir/"
fi
done
fi