diff --git a/build-tools/branching/create_branches_and_tags.sh b/build-tools/branching/create_branches_and_tags.sh index d87e0987..27c820c4 100755 --- a/build-tools/branching/create_branches_and_tags.sh +++ b/build-tools/branching/create_branches_and_tags.sh @@ -24,7 +24,7 @@ CREATE_BRANCHES_AND_TAGS_SH_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" source "${CREATE_BRANCHES_AND_TAGS_SH_DIR}/../git-repo-utils.sh" usage () { - echo "create_branches_and_tags.sh --branch= [--tag=] [ --remotes= ] [ --projects= ] [ --manifest [ --lock-down ]]" + echo "create_branches_and_tags.sh --branch= [--tag=] [ --remotes= ] [ --projects= ] [ --gitreview-default ] [ --manifest [ --lock-down | --soft-lock-down ] [ --default-revision ]]" echo "" echo "Create a branch and a tag in all listed projects, and all" echo "projects hosted by all listed remotes. Lists are comma separated." @@ -33,13 +33,19 @@ usage () { echo "If the tag is omitted, one is automativally generate by adding the" echo "prefix 'v' to the branch name." echo "" - echo "If a manifest is requested, it will recieve the name '.xml' and" - echo "it will specify the branch as the revision for all tagged projects." + echo "If a manifest is requested, the current manifest is modified" + echo "to specify the new branch for all selected remotes and projects." echo "If lockdown is requested, all other projects get the current" echo "HEAD's sha set as the revision." + echo "If default-revision is selected, then the manifest default revision" + echo "will be set." + echo "" + echo "If a gitreview-default is selected, then all branched projects" + echo "with a .gitreview file will have a defaultbranch entry added" + echo "or updated." } -TEMP=$(getopt -o h --long remotes:,projects:,branch:,tag:,manifest,lock-down,help -n 'create_branches_and_tags.sh' -- "$@") +TEMP=$(getopt -o h --long remotes:,projects:,branch:,tag:,manifest,lock-down,hard-lock-down,soft-lock-down,default-revision,gitreview-default,help -n 'create_branches_and_tags.sh' -- "$@") if [ $? -ne 0 ]; then usage exit 1 @@ -49,6 +55,8 @@ eval set -- "$TEMP" HELP=0 MANIFEST=0 LOCK_DOWN=0 +GITREVIEW_DEFAULT=0 +SET_DEFAULT_REVISION=0 remotes="" projects="" branch="" @@ -59,15 +67,19 @@ repo_root_dir="" while true ; do case "$1" in - -h|--help) HELP=1 ; shift ;; - --remotes) remotes+=$(echo "$2 " | tr ',' ' '); shift 2;; - --projects) projects+=$(echo "$2 " | tr ',' ' '); shift 2;; - --branch) branch=$2; shift 2;; - --tag) tag=$2; shift 2;; - --manifest) MANIFEST=1 ; shift ;; - --lock-down) LOCK_DOWN=1 ; shift ;; - --) shift ; break ;; - *) usage; exit 1 ;; + -h|--help) HELP=1 ; shift ;; + --remotes) remotes+=$(echo "$2 " | tr ',' ' '); shift 2;; + --projects) projects+=$(echo "$2 " | tr ',' ' '); shift 2;; + --branch) branch=$2; shift 2;; + --tag) tag=$2; shift 2;; + --manifest) MANIFEST=1 ; shift ;; + --lock-down) LOCK_DOWN=2 ; shift ;; + --hard-lock-down) LOCK_DOWN=2 ; shift ;; + --soft-lock-down) LOCK_DOWN=1 ; shift ;; + --default-revision) SET_DEFAULT_REVISION=1 ; shift ;; + --gitreview-default) GITREVIEW_DEFAULT=1 ; shift ;; + --) shift ; break ;; + *) usage; exit 1 ;; esac done @@ -88,6 +100,37 @@ if [ $? -ne 0 ]; then exit 1 fi +update_gitreview () { + local DIR=$1 + ( + cd $DIR || exit 1 + if [ $GITREVIEW_DEFAULT -eq 1 ] && [ -f .gitreview ]; then + if ! 
grep -q "^defaultbranch=$branch$" .gitreview; then + echo "Updating defaultbranch in ${DIR}/.gitreview" + if grep -q defaultbranch= .gitreview; then + sed "s#\(defaultbranch=\).*#\1$branch#" -i .gitreview + else + echo "defaultbranch=$branch" >> .gitreview + fi + + git add .gitreview + if [ $? != 0 ] ; then + echo_stderr "ERROR: failed to add .gitreview in ${DIR}" + exit 1 + fi + + git commit -s -m "Update .gitreview for $branch" + if [ $? != 0 ] ; then + echo_stderr "ERROR: failed to commit .gitreview in ${DIR}" + exit 1 + fi + else + echo "defaultbranch in ${DIR}/.gitreview already set" + fi + fi + ) +} + if [ $MANIFEST -eq 1 ]; then manifest=$(repo_manifest $repo_root_dir) if [ $? -ne 0 ]; then @@ -205,6 +248,7 @@ for subgit in $SUBGITS; do git checkout $branch fi + # check if destination tag already exists tag_check=$(git tag -l $tag) if [ -z "$tag_check" ]; then echo "Creating tag $tag in ${subgit}" @@ -216,6 +260,8 @@ for subgit in $SUBGITS; do else echo "Tag '$tag' already exists in ${subgit}" fi + + update_gitreview ${subgit} || exit 1 ) || exit 1 done ) || exit 1 @@ -276,8 +322,10 @@ if [ $MANIFEST -eq 1 ]; then exit 1 fi + update_gitreview ${manifest_dir} || exit 1 + echo "Creating manifest ${new_manifest_name}" - manifest_set_revision "${manifest}" "${new_manifest}" "$branch" ${LOCK_DOWN} $projects || exit 1 + manifest_set_revision "${manifest}" "${new_manifest}" "$branch" ${LOCK_DOWN} ${SET_DEFAULT_REVISION} $projects || exit 1 echo "Move manifest ${new_manifest_name}, overwriting ${manifest_name}" \cp -f "${manifest}" "${manifest}.save" diff --git a/build-tools/branching/push_branches_tags.sh b/build-tools/branching/push_branches_tags.sh index 1f94cc42..baa73bb6 100755 --- a/build-tools/branching/push_branches_tags.sh +++ b/build-tools/branching/push_branches_tags.sh @@ -17,6 +17,7 @@ PUSH_BRANCHES_TAGS_SH_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )" source "${PUSH_BRANCHES_TAGS_SH_DIR}/../git-repo-utils.sh" +source "${PUSH_BRANCHES_TAGS_SH_DIR}/../url_utils.sh" usage () { echo "push_branches_tags.sh --branch= [--tag=] [ --remotes= ] [ --projects= ] [ --manifest ]" @@ -151,6 +152,8 @@ for subgit in $SUBGITS; do ( cd $subgit + git fetch --all + branch_check=$(git branch -a --list $branch) if [ -z "$branch_check" ]; then echo_stderr "ERROR: Expected branch '$branch' to exist in ${subgit}" @@ -169,21 +172,49 @@ for subgit in $SUBGITS; do exit 1 fi - if [ "${review_method}" == "gerrit" ]; then - remote=$(git_repo_review_remote) - else - remote=$(git_repo_remote) + remote=$(git_remote) + if [ "${remote}" == "" ]; then + echo_stderr "ERROR: Failed to determine remote in ${manifest_dir}" + exit 1 fi - if [ "${remote}" == "" ]; then - echo_stderr "ERROR: Failed to determine remote in ${subgit}" + if [ "${review_method}" == "gerrit" ]; then + review_remote=$(git_repo_review_remote) + else + review_remote=${remote} + fi + + if [ "${review_remote}" == "" ]; then + echo_stderr "ERROR: Failed to determine review_remote in ${subgit}" exit 1 fi + branch_check=$(git branch -a --list $remote/$branch) + if [ "${branch_check}" != "" ]; then + echo "Branch $branch already exists in ${subgit}" + exit 0 + fi + echo "Pushing branch $branch in ${subgit}" if [ "${review_method}" == "gerrit" ]; then - echo "git push --tags ${remote} ${branch}" - git push --tags ${remote} ${branch} + url=$(git_repo_review_url) + if [ "${review_remote}" == "" ]; then + echo_stderr "ERROR: Failed to determine review_url in ${subgit}" + exit 1 + fi + + host=$(url_server "${url}") + port=$(url_port "${url}") + 
path=$(url_path "${url}") + if [ "${host}" == "review.opendev.org" ]; then + git push ${review_remote} ${tag} && \ + ssh -p ${port} ${host} gerrit create-branch ${path} ${branch} ${tag} && \ + git config --local --replace-all "branch.${branch}.merge" refs/heads/${branch} && \ + git review --topic="${branch}" + else + echo "git push --tags ${remote} ${branch}" + git push --tags ${remote} ${branch} + fi else echo "git push --tags --set-upstream ${remote} ${branch}" git push --tags --set-upstream ${remote} ${branch} @@ -232,23 +263,44 @@ if [ $MANIFEST -eq 1 ]; then exit 1 fi - - remote=$(git_review_remote) + remote=$(git_remote) if [ "${remote}" == "" ]; then echo_stderr "ERROR: Failed to determine remote in ${manifest_dir}" exit 1 fi + review_remote=$(git_review_remote) + if [ "${review_remote}" == "" ]; then + echo_stderr "ERROR: Failed to determine review_remote in ${manifest_dir}" + exit 1 + fi + echo "Pushing branch $branch in ${manifest_dir}" if [ "${review_method}" == "gerrit" ]; then # Is a reviewless push possible as part of creating a new branch in gerrit? - git push --tags ${remote} ${branch} + url=$(git_review_url) + if [ "${review_remote}" == "" ]; then + echo_stderr "ERROR: Failed to determine review_url in ${subgit}" + exit 1 + fi + + host=$(url_server "${url}") + port=$(url_port "${url}") + path=$(url_path "${url}") + if [ "${host}" == "review.opendev.org" ]; then + git push ${review_remote} ${tag} && \ + ssh -p ${port} ${host} gerrit create-branch ${path} ${branch} ${tag} && \ + git config --local --replace-all "branch.${branch}.merge" refs/heads/${branch} && \ + git review --yes --topic="${branch}" + else + git push --tags ${review_remote} ${branch} + fi else - git push --tags --set-upstream ${remote} ${branch} + git push --tags --set-upstream ${review_remote} ${branch} fi if [ $? 
!= 0 ] ; then - echo_stderr "ERROR: Failed to push tag '${tag}' to remote '${remote}' in ${manifest_dir}" + echo_stderr "ERROR: Failed to push tag '${tag}' to remote '${review_remote}' in ${manifest_dir}" exit 1 fi ) || exit 1 diff --git a/build-tools/build-docker-images/build-stx-images.sh b/build-tools/build-docker-images/build-stx-images.sh index 34494b2e..e45a9a4a 100755 --- a/build-tools/build-docker-images/build-stx-images.sh +++ b/build-tools/build-docker-images/build-stx-images.sh @@ -26,7 +26,6 @@ IMAGE_VERSION=$(date --utc '+%Y.%m.%d.%H.%M') # Default version, using timestamp PREFIX=dev LATEST_PREFIX="" PUSH=no -CONFIG_FILE="" HTTP_PROXY="" HTTPS_PROXY="" NO_PROXY="" @@ -34,16 +33,13 @@ DOCKER_USER=${USER} DOCKER_REGISTRY= BASE= WHEELS= -WHEELS_ALTERNATE= -DEFAULT_CONFIG_FILE_DIR="${MY_REPO}/build-tools/build-docker-images" -DEFAULT_CONFIG_FILE_PREFIX="docker-image-build" +WHEELS_PY2= CLEAN=no TAG_LATEST=no TAG_LIST_FILE= TAG_LIST_LATEST_FILE= declare -a ONLY declare -a SKIP -declare -a SERVICES_ALTERNATE declare -i MAX_ATTEMPTS=1 function usage { @@ -52,30 +48,32 @@ Usage: $(basename $0) Options: - --os: Specify base OS (valid options: ${SUPPORTED_OS_ARGS[@]}) - --version: Specify version for output image - --stream: Build stream, stable or dev (default: stable) - --base: Specify base docker image (required option) - --wheels: Specify path to wheels tarball or image, URL or docker tag (required option) - --wheels-alternate: Specify path to alternate wheels tarball or image, URL or docker tag - --push: Push to docker repo - --http_proxy: Set proxy :, urls splitted with "," - --https_proxy: Set proxy :, urls splitted with "," - --no_proxy: Set proxy , urls splitted with "," - --user: Docker repo userid - --registry: Docker registry - --prefix: Prefix on the image tag (default: dev) - --latest: Add a 'latest' tag when pushing + --os: Specify base OS (valid options: ${SUPPORTED_OS_ARGS[@]}) + --version: Specify version for output image + --stream: Build stream, stable or dev (default: stable) + --base: Specify base docker image (required option) + --wheels: Specify path to wheels tarball or image, URL or docker tag + (required when building loci projects) + --wheels-py2: Use this wheels tarball for Python2 projects + (default: work out from --wheels) + --wheels-alternate: same as --wheels-py2 + --push: Push to docker repo + --http_proxy: Set proxy :, urls splitted with "," + --https_proxy: Set proxy :, urls splitted with "," + --no_proxy: Set proxy , urls splitted with "," + --user: Docker repo userid + --registry: Docker registry + --prefix: Prefix on the image tag (default: dev) + --latest: Add a 'latest' tag when pushing --latest-prefix: Alternative prefix on the latest image tag - --clean: Remove image(s) from local registry + --clean: Remove image(s) from local registry --only : Only build the specified image(s). Multiple images can be specified with a comma-separated list, or with multiple --only arguments. --skip : Skip building the specified image(s). Multiple images can be specified with a comma-separated list, or with multiple --skip arguments. - --attempts: Max attempts, in case of failure (default: 1) - --config-file:Specify a path to a config file which will specify additional arguments to be passed into the the command + --attempts: Max attempts, in case of failure (default: 1) EOF @@ -97,63 +95,6 @@ function is_empty { test $# -eq 0 } -function get_args_from_file { - # get additional build args from specified file. 
- local -a config_items - - echo "Get args from file: $1" - for i in $(cat $1) - do - config_items=($(echo $i | sed s/=/\ /g)) - echo "--${config_items[0]} ${config_items[1]}" - case ${config_items[0]} in - base) - if [ -z "${BASE}" ]; then - BASE=${config_items[1]} - fi - ;; - user) - if [ -z "${DOCKER_USER}" ]; then - DOCKER_USER=${config_items[1]} - fi - ;; - proxy) - if [ -z "${PROXY}" ]; then - PROXY=${config_items[1]} - fi - ;; - registry) - if [ -z "${DOCKER_REGISTRY}" ]; then - # Add a trailing / if needed - DOCKER_REGISTRY="${config_items[1]%/}/" - fi - ;; - only) - # Read comma-separated values into array - if [ -z "${ONLY}" ]; then - # Read comma-separated values into array - ONLY=(`echo ${config_items[1]} | sed s/,/\ /g`) - fi - ;; - wheels) - if [ -z "${WHEELS}" ]; then - WHEELS=${config_items[1]} - fi - ;; - wheels_alternate) - if [ -z "${WHEELS_ALTERNATE}" ]; then - WHEELS_ALTERNATE=${config_items[1]} - echo "WHEELS_ALTERNATE: ${WHEELS_ALTERNATE}" >&2 - fi - ;; - services_alternate) - SERVICES_ALTERNATE=(`echo ${config_items[1]} | sed s/,/\ /g`) - echo "SERVICES_ALTERNATE: ${SERVICES_ALTERNATE[@]}" >&2 - ;; - esac - done -} - # # get_git: Clones a git into a subdirectory of ${WORKDIR}, and # leaves you in that directory. On error the directory @@ -393,27 +334,54 @@ function build_image_loci { PROFILES=$(source ${image_build_file} && echo ${PROFILES}) local PYTHON3 PYTHON3=$(source ${image_build_file} && echo ${PYTHON3}) - - if is_in ${PROJECT} ${SKIP[@]} || is_in ${LABEL} ${SKIP[@]}; then - echo "Skipping ${LABEL}" - return 0 - fi - - if ! is_empty ${ONLY[@]} && ! is_in ${PROJECT} ${ONLY[@]} && ! is_in ${LABEL} ${ONLY[@]}; then - echo "Skipping ${LABEL}" - return 0 - fi + local MIRROR_LOCAL + MIRROR_LOCAL=$(source ${image_build_file} && echo ${MIRROR_LOCAL}) echo "Building ${LABEL}" + local ORIGWD=${PWD} + + if [ "${MIRROR_LOCAL}" = "yes" ]; then + # Setup a local mirror of PROJECT_REPO + + local BARE_CLONES=${WORKDIR}/bare_clones + mkdir -p ${BARE_CLONES} + if [ $? -ne 0 ]; then + echo "Failed to create ${BARE_CLONES}" >&2 + RESULTS_FAILED+=(${LABEL}) + return 1 + fi + + local CLONE_DIR=${BARE_CLONES}/${PROJECT}.git + + # Remove prior clone dir, if it exists + \rm -rf ${CLONE_DIR} + + echo "Creating bare clone of ${PROJECT_REPO} for ${LABEL} build..." + git clone --bare ${PROJECT_REPO} ${CLONE_DIR} \ + && mv ${CLONE_DIR}/hooks/post-update.sample ${CLONE_DIR}/hooks/post-update \ + && chmod a+x ${CLONE_DIR}/hooks/post-update \ + && cd ${CLONE_DIR} \ + && git update-server-info \ + && cd ${ORIGWD} + if [ $? -ne 0 ]; then + echo "Failed to clone ${PROJECT_REPO}... Aborting ${LABEL} build" + RESULTS_FAILED+=(${LABEL}) + cd ${ORIGWD} + return 1 + fi + + PROJECT_REPO=http://${HOSTNAME}:8088/${CLONE_DIR} + fi + local -a BUILD_ARGS= BUILD_ARGS=(--build-arg PROJECT=${PROJECT}) BUILD_ARGS+=(--build-arg PROJECT_REPO=${PROJECT_REPO}) BUILD_ARGS+=(--build-arg FROM=${BASE}) - if is_in ${LABEL} ${SERVICES_ALTERNATE[@]}; then + if [ "${PYTHON3}" != "yes" ] ; then echo "Python2 service ${LABEL}" - BUILD_ARGS+=(--build-arg WHEELS=${WHEELS_ALTERNATE}) + BUILD_ARGS+=(--build-arg WHEELS=${WHEELS_PY2}) else echo "Python3 service ${LABEL}" BUILD_ARGS+=(--build-arg WHEELS=${WHEELS}) @@ -518,16 +486,6 @@ function build_image_docker { local DOCKER_PATCHES DOCKER_PATCHES=$(source ${image_build_file} && for p in ${DOCKER_PATCHES}; do echo $(dirname ${image_build_file})/${p}; done) - if is_in ${PROJECT} ${SKIP[@]} || is_in ${LABEL} ${SKIP[@]}; then - echo "Skipping ${LABEL}" - return 0 - fi - - if ! 
is_empty ${ONLY[@]} && ! is_in ${PROJECT} ${ONLY[@]} && ! is_in ${LABEL} ${ONLY[@]}; then - echo "Skipping ${LABEL}" - return 0 - fi - echo "Building ${LABEL}" local real_docker_context @@ -625,16 +583,6 @@ function build_image_script { local SOURCE_PATCHES SOURCE_PATCHES=$(source ${image_build_file} && for p in ${SOURCE_PATCHES}; do echo $(dirname ${image_build_file})/${p}; done) - if is_in ${PROJECT} ${SKIP[@]} || is_in ${LABEL} ${SKIP[@]}; then - echo "Skipping ${LABEL}" - return 0 - fi - - if ! is_empty ${ONLY[@]} && ! is_in ${PROJECT} ${ONLY[@]} && ! is_in ${LABEL} ${ONLY[@]}; then - echo "Skipping ${LABEL}" - return 0 - fi - # Validate the COMMAND option SUPPORTED_COMMAND_ARGS=('bash') local VALID_COMMAND=1 @@ -715,7 +663,7 @@ function build_image { esac } -OPTS=$(getopt -o h -l help,os:,version:,release:,stream:,push,http_proxy:,https_proxy:,no_proxy:,user:,registry:,base:,wheels:,wheels-alternate:,only:,skip:,prefix:,latest,latest-prefix:,clean,attempts:,config-file: -- "$@") +OPTS=$(getopt -o h -l help,os:,version:,release:,stream:,push,http_proxy:,https_proxy:,no_proxy:,user:,registry:,base:,wheels:,wheels-alternate:,wheels-py2:,only:,skip:,prefix:,latest,latest-prefix:,clean,attempts: -- "$@") if [ $? -ne 0 ]; then usage exit 1 @@ -742,8 +690,8 @@ while true; do WHEELS=$2 shift 2 ;; - --wheels-alternate) - WHEELS_ALTERNATE=$2 + --wheels-alternate|--wheels-py2) + WHEELS_PY2=$2 shift 2 ;; --version) @@ -813,10 +761,6 @@ while true; do MAX_ATTEMPTS=$2 shift 2 ;; - --config-file) - CONFIG_FILE=$2 - shift 2 - ;; -h | --help ) usage exit 1 @@ -842,37 +786,80 @@ if [ ${VALID_OS} -ne 0 ]; then exit 1 fi -DEFAULT_CONFIG_FILE="${DEFAULT_CONFIG_FILE_DIR}/${DEFAULT_CONFIG_FILE_PREFIX}-${OS}-${BUILD_STREAM}.cfg" - -# Read additional arguments from config file if it exists. -if [[ -z "$CONFIG_FILE" ]] && [[ -f ${DEFAULT_CONFIG_FILE} ]]; then - CONFIG_FILE=${DEFAULT_CONFIG_FILE} -fi -if [[ ! -z ${CONFIG_FILE} ]]; then - if [[ -f ${CONFIG_FILE} ]]; then - get_args_from_file ${CONFIG_FILE} - else - echo "Config file not found: ${CONFIG_FILE}" - exit 1 - fi -fi - -if [ -z "${WHEELS}" ]; then - echo "Path to wheels tarball must be specified with --wheels option." >&2 - exit 1 -fi - -if [ ${#SERVICES_ALTERNATE[@]} -ne 0 ] && [ -z "${WHEELS_ALTERNATE}" ]; then - echo "Path to wheels-alternate tarball must be specified with --wheels-alternate option"\ - "if python2 based services need to be build!" >&2 - exit 1 -fi - if [ -z "${BASE}" ]; then echo "Base image must be specified with --base option." 
>&2 exit 1 fi +# Guess WHEELS_PY2 if missing +if [[ -z "$WHEELS_PY2" && -n "$WHEELS" ]]; then + # http://foo/bar.tar?xxx#yyy => http://foo/bar-py2.tar?xxx#yyy + WHEELS_PY2="$(echo "$WHEELS" | sed -r 's,^([^#?]*)(\.tar)(\.gz|\.bz2|\.xz)?([#?].*)?$,\1-py2\2\3\4,i')" + if [[ "$WHEELS" == "$WHEELS_PY2" ]]; then + echo "Unable to guess --wheels-py2, please specify it explicitly" >&2 + exit 1 + fi +fi + +# Find the directives files +IMAGE_BUILD_FILES=() +function find_image_build_files { + local image_build_inc_file image_build_dir image_build_file + local -A all_labels + + for image_build_inc_file in $(find ${GIT_LIST} -maxdepth 1 -name "${OS}_${BUILD_STREAM}_docker_images.inc"); do + basedir=$(dirname ${image_build_inc_file}) + for image_build_dir in $(sed -e 's/#.*//' ${image_build_inc_file} | sort -u); do + for image_build_file in ${basedir}/${image_build_dir}/${OS}/*.${BUILD_STREAM}_docker_image; do + + # reset & read image build directive vars + local BUILDER= + local PROJECT= + local LABEL= + local PYTHON3= + PROJECT="$(source ${image_build_file} && echo ${PROJECT})" + BUILDER="$(source ${image_build_file} && echo ${BUILDER})" + LABEL="$(source ${image_build_file} && echo ${LABEL})" + PYTHON3="$(source ${image_build_file} && echo ${PYTHON3})" + + # make sure labels are unique + if [[ -n "${all_labels["$LABEL"]}" ]] ; then + echo "The following files define the same LABEL $LABEL" >&2 + echo " ${all_labels["$LABEL"]}" >&2 + echo " ${image_build_file}" >&2 + exit 1 + fi + all_labels["$LABEL"]="$image_build_file" + + # skip images we don't want to build + if is_in ${PROJECT} ${SKIP[@]} || is_in ${LABEL} ${SKIP[@]}; then + continue + fi + if ! is_empty ${ONLY[@]} && ! is_in ${PROJECT} ${ONLY[@]} && ! is_in ${LABEL} ${ONLY[@]}; then + continue + fi + + # loci builders require a wheels tarball + if [[ "${BUILDER}" == "loci" ]] ; then + # python3 projects require $WHEELS + if [[ "${PYTHON3}" == "yes" && -z "${WHEELS}" ]] ; then + echo "You are building python3 services with loci, but you didn't specify --wheels!" >&2 + exit 1 + # python2 projects require WHEELS_PY2 + elif [[ "${PYTHON3}" != "yes" && -z "${WHEELS_PY2}" ]] ; then + echo "You are building python2 services with loci, but you didn't specify --wheels-py2!" >&2 + exit 1 + fi + fi + + # Save image build file in the global list + IMAGE_BUILD_FILES+=("$image_build_file") + done + done + done +} +find_image_build_files + IMAGE_TAG="${OS}-${BUILD_STREAM}" IMAGE_TAG_LATEST="${IMAGE_TAG}-latest" @@ -933,15 +920,10 @@ if ! 
(grep -q rh-python36-mod_wsgi ${WORKDIR}/loci/bindep.txt); then echo 'rh-python36-mod_wsgi [platform:rpm !platform:suse (apache python3)]' >> ${WORKDIR}/loci/bindep.txt fi -# Find the directives files -for image_build_inc_file in $(find ${GIT_LIST} -maxdepth 1 -name "${OS}_${BUILD_STREAM}_docker_images.inc"); do - basedir=$(dirname ${image_build_inc_file}) - for image_build_dir in $(sed -e 's/#.*//' ${image_build_inc_file} | sort -u); do - for image_build_file in ${basedir}/${image_build_dir}/${OS}/*.${BUILD_STREAM}_docker_image; do - # Failures are reported by the build functions - build_image ${image_build_file} - done - done +# Build everything +for image_build_file in "${IMAGE_BUILD_FILES[@]}" ; do + # Failures are reported by the build functions + build_image ${image_build_file} done if [ "${CLEAN}" = "yes" -a ${#RESULTS_BUILT[@]} -gt 0 ]; then diff --git a/build-tools/build-docker-images/docker-image-build-centos-dev.cfg b/build-tools/build-docker-images/docker-image-build-centos-dev.cfg deleted file mode 100644 index 80e5ace3..00000000 --- a/build-tools/build-docker-images/docker-image-build-centos-dev.cfg +++ /dev/null @@ -1,2 +0,0 @@ -services_alternate=stx-fm-rest-api,stx-keystone-api-proxy,stx-nova-api-proxy,stx-platformclients -wheels_alternate=http://mirror.starlingx.cengn.ca/mirror/starlingx/master/centos/stx-centos-py2_dev-wheels.tar diff --git a/build-tools/build-docker-images/docker-image-build-centos-stable.cfg b/build-tools/build-docker-images/docker-image-build-centos-stable.cfg deleted file mode 100644 index 871d2271..00000000 --- a/build-tools/build-docker-images/docker-image-build-centos-stable.cfg +++ /dev/null @@ -1,2 +0,0 @@ -services_alternate=stx-fm-rest-api,stx-keystone-api-proxy,stx-nova-api-proxy,stx-platformclients -wheels_alternate=http://mirror.starlingx.cengn.ca/mirror/starlingx/master/centos/stx-centos-py2_stable-wheels.tar diff --git a/build-tools/build-docker-images/tag-management/image-tags.yaml b/build-tools/build-docker-images/tag-management/image-tags.yaml index 9340cad2..c5d93d1a 100644 --- a/build-tools/build-docker-images/tag-management/image-tags.yaml +++ b/build-tools/build-docker-images/tag-management/image-tags.yaml @@ -2,10 +2,10 @@ --- images: - name: docker.io/starlingx/k8s-cni-sriov - src_build_tag: master-centos-stable-20191203T153530Z.0 - src_ref: https://opendev.org/starlingx/integ/commit/dac417bd31ed36d455e94db4aabe5916367654d4 - # Tag determined based on release tag associated with upstream commit - tag: stx.4.0-v2.2 + src_build_tag: master-centos-stable-20210218T003113Z.0 + src_ref: https://opendev.org/starlingx/integ/commit/eccff3b0e661592084d9114a9a41816761e1f9b5 + # Version determined by running 'git describe --tags' in clone of upstream repo + tag: stx.5.0-v2.6-7-gb18123d8 - name: docker.io/starlingx/k8s-plugins-sriov-network-device src_build_tag: master-centos-stable-20200512T184214Z.0 src_ref: https://opendev.org/starlingx/integ/commit/e2dc5c2dd0042788697ade268ac5c24fe9dc2f8c @@ -37,10 +37,10 @@ images: # Version determined by running 'git describe --tags' in clone of upstream repo tag: stx.4.0-v0.11.0-109-gc48c502 - name: docker.io/starlingx/stx-oidc-client - src_build_tag: master-centos-stable-20200901T001315Z.0 - src_ref: https://opendev.org/starlingx/oidc-auth-armada-app/commit/957fc7c2092c7574a0a931012a0ec1cf5bb66429 - # StarlingX app. 
Setting version to v1.0.3 - tag: stx.5.0-v1.0.3 + src_build_tag: master-centos-stable-20210119T015305Z.0 + src_ref: https://opendev.org/starlingx/oidc-auth-armada-app/commit/70147e64e910e9878dd5bdf464cfd9672894ba18 + # StarlingX app. Setting version to v1.0.4 + tag: stx.5.0-v1.0.4 - name: docker.io/starlingx/dex src_build_tag: master-centos-stable-20200204T162546Z.0 src_ref: https://opendev.org/starlingx/oidc-auth-armada-app/commit/5d6701bdf214e77f460f2e3dd2b6f7d3186830c8 @@ -55,9 +55,9 @@ images: src_ref: https://opendev.org/starlingx/metal/commit/d46c9c55a9a9b7ea09e8d0fe66c8cfbeeb9ac75f tag: stx.5.0-v1.0.0 - name: docker.io/starlingx/stx-platformclients - src_build_tag: master-centos-stable-20200803T230630Z.0 - src_ref: https://opendev.org/starlingx/distcloud-client/commit/7036f1fd11cd3bbae743aee89908e8195e4ded40 - tag: stx.5.0-v1.4.0 + src_build_tag: master-centos-stable-20210512T053357Z.0 + src_ref: https://opendev.org/starlingx/distcloud-client/commit/d52a9080082db5fda2e77fb9e342f812ea8c17e1 + tag: stx.5.0-v1.4.3 - name: docker.io/starlingx/stx-vault-manager src_build_tag: master-centos-stable-20200722T035334Z.0 src_ref: https://opendev.org/starlingx/vault-armada-app/commit/2cd206d6703cc2733e39ecad4539c0d5f1600550 @@ -68,3 +68,27 @@ images: src_ref: https://opendev.org/starlingx/portieris-armada-app/commit/a6123ffebb77f23d5182576be17e69d62fd8d701 # Tag based on upstream package version tag: stx.5.0-v0.7.0 + - name: docker.io/starlingx/stx-snmp + src_build_tag: master-centos-stable-20210105T023146Z.0 + src_ref: https://opendev.org/starlingx/snmp-armada-app/commit/2b370655a7f9a506ea139cfffa6c466d1a82cce4 + tag: stx.5.0-v1.0.0 + - name: docker.io/starlingx/stx-fm-subagent + src_build_tag: master-centos-stable-20210105T023146Z.0 + src_ref: https://opendev.org/starlingx/snmp-armada-app/commit/2b370655a7f9a506ea139cfffa6c466d1a82cce4 + tag: stx.5.0-v1.0.0 + - name: docker.io/starlingx/stx-fm-trap-subagent + src_build_tag: master-centos-stable-20210314T171252Z.0 + src_ref: https://opendev.org/starlingx/snmp-armada-app/commit/5aca0dd1661bc87a7927c00cf95e0c8aa6f2e2a0 + tag: stx.5.0-v1.0.1 + - name: docker.io/starlingx/notificationservice-base + src_build_tag: master-centos-stable-20210504T193232Z.0 + src_ref: https://opendev.org/starlingx/ptp-notification-armada-app/commit/eb4458e37ebe170f4c1289362f9cbc55fb1f32aa + tag: stx.5.0-v1.0.4 + - name: docker.io/starlingx/locationservice-base + src_build_tag: master-centos-stable-20210204T224209Z.0 + src_ref: https://opendev.org/starlingx/ptp-notification-armada-app/commit/545e6b6bb093235c2f8dab8d171f30c6ae8682d3 + tag: stx.5.0-v1.0.1 + - name: docker.io/starlingx/notificationclient-base + src_build_tag: master-centos-stable-20210503T050004Z.0 + src_ref: https://opendev.org/starlingx/ptp-notification-armada-app/commit/6e87c185baf927b28d0bcff6e2763a1e62c8145e + tag: stx.5.0-v1.0.4 diff --git a/build-tools/build-img b/build-tools/build-img index fb71bd1d..e0f22f86 100755 --- a/build-tools/build-img +++ b/build-tools/build-img @@ -566,7 +566,7 @@ if [ ${#RPM_ADDON_LIST[@]} -gt 0 ] ; then pushd $MY_WORKSPACE patch_file="PATCH.img-addon" patched_iso="$TEMPFILES_DIR/bootimage_${AUTO_MODE}${GRAPHICAL_SUFFIX}_patched.iso" - cmd=("$PATCH_BUILD" --id "${patch_file}" --summary "additional packages for qcow2 image" --desc "Adds customizations to qcow2 image") + cmd=("$PATCH_BUILD" --id "${patch_file}" --summary "additional packages for qcow2 image" --desc "Adds customizations to qcow2 image" --status "REL" --reboot-required "N") for rpm_addon in 
"${RPM_ADDON_LIST[@]}"; do cmd+=(--all-nodes "${rpm_addon}") done diff --git a/build-tools/build-rpms-parallel b/build-tools/build-rpms-parallel index e2830c24..d3eea32b 100755 --- a/build-tools/build-rpms-parallel +++ b/build-tools/build-rpms-parallel @@ -122,6 +122,10 @@ number_of_users () { users | tr ' ' '\n' | sort --uniq | wc -l } +total_mem_gb () { + free -g | grep 'Mem:' | awk '{ print $2 }' +} + available_mem_gb () { free -g | grep 'Mem:' | awk '{ print $7 }' } @@ -238,26 +242,41 @@ compute_resources () { local users=$(number_of_users) if [ $users -lt 1 ]; then users=1; fi local mem=$(available_mem_gb) + local total_mem=$(total_mem_gb) local disk=$(available_disk_gb) local cpus=$(number_of_cpus) local num_users=$(sqrt $users) local num_build=$(number_of_builds_in_progress) num_build=$((num_build+1)) - echo "compute_resources: total: cpus=$cpus, mem=$mem, disk=$disk, weight=$weight, num_build=$num_build" + echo "compute_resources: total: cpus=$cpus, total_mem=$total_mem, avail_mem=$mem, disk=$disk, weight=$weight, num_build=$num_build" # What fraction of the machine will we use local share_factor=$num_users if [ $share_factor -gt $((MAX_SHARE_FACTOR+num_build-1)) ]; then share_factor=$((MAX_SHARE_FACTOR+num_build-1)); fi if [ $share_factor -lt $num_build ]; then share_factor=$num_build; fi - local mem_share_factor=$((share_factor-num_build)) + + # What fraction of free memory can we use. + # e.g. + # We intend to support 4 concurrent builds (share_factor) + # Two builds (excluding ours) are already underway (num_build-1) + # So we should be able to support 2 more builds (mem_share_factor) + local mem_share_factor=$((share_factor-(num_build-1))) if [ $mem_share_factor -lt 1 ]; then mem_share_factor=1; fi + echo "compute_resources: share_factor=$share_factor mem_share_factor=$mem_share_factor" # What resources are we permitted to use + # Continuing the example from above ... 
memory share is the lesser of + # - Half the available memory (mem/mem_share_factor) + # - A quarter of the total memory (total_mem/share_factor) local mem_share=$(((mem-MEMORY_RESERVE)/mem_share_factor)) if [ $mem_share -lt 0 ]; then mem_share=0; fi + local total_mem_share=$(((total_mem-MEMORY_RESERVE)/share_factor)) + if [ $total_mem_share -lt 0 ]; then total_mem_share=0; fi + if [ $mem_share -gt $total_mem_share ]; then mem_share=$total_mem_share; fi local disk_share=$((disk/share_factor)) local cpus_share=$((cpus/share_factor)) + echo "compute_resources: our share: cpus=$cpus_share, mem=$mem_share, disk=$disk_share" # How many build jobs, how many jobs will use tmpfs, and how much mem for each tmpfs @@ -293,7 +312,7 @@ compute_resources () { fi done - # Our output is saved in environmnet variables + # Our output is saved in environment variables MOCKCHAIN_RESOURCE_ALLOCATION=$(echo $x | sed 's#^:##') MAX_WORKERS=$workers echo "compute_resources: MAX_WORKERS=$MAX_WORKERS, MOCKCHAIN_RESOURCE_ALLOCATION=$MOCKCHAIN_RESOURCE_ALLOCATION" @@ -654,7 +673,7 @@ kill_descendents () local relevant_recursive_children="$ME" local relevant_recursive_promote_children="mock" - local relevant_other_children="mockchain-parallel mockchain-parallel-1.3.4 mockchain-parallel-1.4.16" + local relevant_other_children="mockchain-parallel mockchain-parallel-1.3.4 mockchain-parallel-1.4.16 mockchain-parallel-2.6 mockchain-parallel-2.7" local recursive_promote_children=$(for relevant_child in $relevant_recursive_promote_children; do pgrep -P $kill_pid $relevant_child; done) local recursive_children=$(for relevant_child in $relevant_recursive_children; do pgrep -P $kill_pid $relevant_child; done) @@ -1181,14 +1200,24 @@ mock_clean_metadata_cfg () { return 1 fi - CMD=$((cat $CFG; \ - grep config_opts\\[\'yum.conf\'\\\] $CFG | \ - sed 's#\\n#\n#g') | \ - grep '^[[]' | \ - grep -v main | \ - sed -e 's/[][]//g' -e "s#^#${PKG_MANAGER} --enablerepo=#" -e 's#$# clean metadata#' | \ - sort -u | \ - tr '\n' ';') + # + # From mock config, extract the embedded yum/dnf config. + # Then extract the repo definitions, + # and convert to a series of yum commands to clean the + # metadata one repo at a time. e.g. + # CMD="yum --disablerepo=* --enablerepo=StxCentos7Distro clean metadata; \ + # yum --disablerepo=* --enablerepo=StxCentos7Distro-rt clean metadata; + # ... + # " + # + CMD=$((grep -e config_opts\\[\'yum.conf\'\\\] $CFG \ + -e config_opts\\[\'dnf.conf\'\\\] $CFG | \ + sed 's#\\n#\n#g') | \ + grep '^[[]' | \ + grep -v main | \ + sed -e 's/[][]//g' -e "s#^#${PKG_MANAGER} --disablerepo=* --enablerepo=#" -e 's#$# clean metadata#' | \ + sort -u | \ + tr '\n' ';') echo "$MOCK --root $CFG --configdir $(dirname $CFG) --chroot bash -c $CMD" &> $TMP trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --chroot "bash -c '($CMD)'" &>>$TMP RC=$? @@ -2338,6 +2367,7 @@ if [ $CAREFUL -eq 1 ]; then CMD_OPTIONS="$MOCK_PASSTHROUGH --no-cleanup-after" fi +CMD_OPTIONS+=" $MOCK_PASSTHROUGH --enable-plugin=package_state" CMD_OPTIONS+=" --log=$MOCKCHAIN_LOG" echo "CAREFUL=$CAREFUL" diff --git a/build-tools/build-rpms-serial b/build-tools/build-rpms-serial index c55b5eed..60a91d2b 100755 --- a/build-tools/build-rpms-serial +++ b/build-tools/build-rpms-serial @@ -25,7 +25,14 @@ export ME=$(basename "$0") CMDLINE="$ME $@" +BUILD_RPMS_PARALLEL_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )" +# Set PKG_MANAGER for our build environment. +source "${BUILD_RPMS_PARALLEL_DIR}/pkg-manager-utils.sh" + + +# Build for distribution. 
Currently 'centos' is only supported value. +export DISTRO="centos" CREATEREPO=$(which createrepo_c) if [ $? -ne 0 ]; then @@ -42,6 +49,7 @@ if [ ! -d ${LOCAL_REPO} ]; then fi fi +# Make sure we have a dependency cache DEPENDANCY_DIR="${LOCAL_REPO}/dependancy-cache" SRPM_DIRECT_REQUIRES_FILE="$DEPENDANCY_DIR/SRPM-direct-requires" SRPM_TRANSITIVE_REQUIRES_FILE="$DEPENDANCY_DIR/SRPM-transitive-requires" @@ -118,7 +126,7 @@ create-no-clean-list () { local g for g in $install_groups; do - # Find manditory packages in the group. + # Find mandatory packages in the group. # Discard anything before (and including) 'Mandatory Packages:' # and anything after (and including) 'Optional Packages:'. # Also discard leading spaces or '+' characters. @@ -135,7 +143,7 @@ create-no-clean-list () { while [ $noclean_list_len -gt $noclean_last_list_len ]; do noclean_last_list_len=$noclean_list_len - noclean_list=$( (dnf -c $MY_YUM_CONF deplist $noclean_list 2>> /dev/null | grep provider: | awk '{ print $2 }' | awk -F . '{ print $1 }'; for p in $noclean_list; do echo $p; done) | sort --uniq) + noclean_list=$( (${PKG_MANAGER} -c $MY_YUM_CONF deplist $noclean_list 2>> /dev/null | grep provider: | awk '{ print $2 }' | awk -F . '{ print $1 }'; for p in $noclean_list; do echo $p; done) | sort --uniq) noclean_list_len=$(echo $noclean_list | wc -w) done @@ -475,7 +483,7 @@ kill_descendents () local relevant_recursive_children="$ME" local relevant_recursive_promote_children="mock" - local relevant_other_children="mockchain-parallel" + local relevant_other_children="mockchain-parallel mockchain-parallel-1.3.4 mockchain-parallel-1.4.16 mockchain-parallel-2.6 mockchain-parallel-2.7" local recursive_promote_children=$(for relevant_child in $relevant_recursive_promote_children; do pgrep -P $kill_pid $relevant_child; done) local recursive_children=$(for relevant_child in $relevant_recursive_children; do pgrep -P $kill_pid $relevant_child; done) @@ -964,7 +972,24 @@ mock_clean_metadata_cfg () { return 1 fi - CMD=$((cat $CFG; grep config_opts\\[\'yum.conf\'\\\] $CFG | sed 's#\\n#\n#g') | grep '^[[]' | grep -v main | sed 's/[][]//g' | sed 's#^#yum --enablerepo=#' | sed 's#$# clean metadata#' | sort -u | tr '\n' ';') + # + # From mock config, extract the embedded yum/dnf config. + # Then extract the repo definitions, + # and convert to a series of yum commands to clean the + # metadata one repo at a time. e.g. + # CMD="yum --disablerepo=* --enablerepo=StxCentos7Distro clean metadata; \ + # yum --disablerepo=* --enablerepo=StxCentos7Distro-rt clean metadata; + # ... + # " + # + CMD=$((grep -e config_opts\\[\'yum.conf\'\\\] $CFG \ + -e config_opts\\[\'dnf.conf\'\\\] $CFG | \ + sed 's#\\n#\n#g') | \ + grep '^[[]' | \ + grep -v main | \ + sed -e 's/[][]//g' -e "s#^#${PKG_MANAGER} --disablerepo=* --enablerepo=#" -e 's#$# clean metadata#' | \ + sort -u | \ + tr '\n' ';') echo "$MOCK --root $CFG --configdir $(dirname $CFG) --chroot bash -c $CMD" &> $TMP trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --chroot "bash -c '($CMD)'" &>>$TMP RC=$? 
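A minimal standalone sketch of the extraction the hunk above performs (the same logic lands in build-rpms-serial and build-rpms-parallel): it pulls apart the yum/dnf configuration embedded in a mock .cfg file and prints one "clean metadata" command per repo. The CFG path and the PKG_MANAGER fallback below are illustrative assumptions, not part of the change itself.

    #!/bin/bash
    # Preview the per-repo "clean metadata" commands that would be generated
    # from the yum/dnf configuration embedded in a mock config file.
    CFG=${1:-/path/to/mock-build.cfg}   # hypothetical mock .cfg path
    PKG_MANAGER=${PKG_MANAGER:-yum}     # assumed default; dnf behaves the same here
    grep -e "config_opts\['yum.conf'\]" -e "config_opts\['dnf.conf'\]" "$CFG" \
        | sed 's#\\n#\n#g' \
        | grep '^[[]' \
        | grep -v main \
        | sed -e 's/[][]//g' \
              -e "s#^#${PKG_MANAGER} --disablerepo=* --enablerepo=#" \
              -e 's#$# clean metadata#' \
        | sort -u

Joining that output with tr '\n' ';' gives the single CMD string that the script then runs inside the mock chroot.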
@@ -1129,6 +1154,7 @@ clean_yum_cache_cfg () { return $RC } + clean_yum_cache () { echo "${FUNCNAME[0]}: in" clean_yum_cache_cfg $BUILD_CFG @@ -1249,7 +1275,6 @@ while true ; do esac done - # Reset variables if [ -n "$MY_WORKSPACE" ]; then export MY_WORKSPACE_TOP=${MY_WORKSPACE_TOP:-$MY_WORKSPACE} diff --git a/build-tools/build-wheels/build-base-wheels.sh b/build-tools/build-wheels/build-base-wheels.sh index 4ab913c1..b0274f22 100755 --- a/build-tools/build-wheels/build-base-wheels.sh +++ b/build-tools/build-wheels/build-base-wheels.sh @@ -114,8 +114,6 @@ while true; do esac done -BUILD_OUTPUT_PATH=${MY_WORKSPACE}/std/build-wheels-${OS}-${BUILD_STREAM}/base - BUILD_IMAGE_NAME="${USER}-$(basename ${MY_WORKSPACE})-wheelbuilder:${OS}-${BUILD_STREAM}" # BUILD_IMAGE_NAME can't have caps if it's passed to docker build -t $BUILD_IMAGE_NAME. @@ -123,7 +121,6 @@ BUILD_IMAGE_NAME="${USER}-$(basename ${MY_WORKSPACE})-wheelbuilder:${OS}-${BUILD BUILD_IMAGE_NAME="${BUILD_IMAGE_NAME,,}" DOCKER_FILE=${DOCKER_PATH}/${OS}-dockerfile -WHEELS_CFG=${DOCKER_PATH}/${BUILD_STREAM}-wheels.cfg function supported_os_list { for f in ${DOCKER_PATH}/*-dockerfile; do @@ -137,40 +134,75 @@ if [ ! -f ${DOCKER_FILE} ]; then exit 1 fi -if [ ! -f ${WHEELS_CFG} ]; then - echo "Required file does not exist: ${WHEELS_CFG}" >&2 - exit 1 -fi +# Print a loud message +function notice { + ( + set +x + echo + echo ====================================== + for s in "$@" ; do + echo "$s" + done + echo ====================================== + echo + ) 2>&1 +} -# -# Check build output directory for unexpected files, -# ie. wheels from old builds that are no longer in wheels.cfg -# -if [ -d ${BUILD_OUTPUT_PATH} ]; then +# prefix each line of a command's output +# also redirects command's STDERR to STDOUT +log_prefix() { + local prefix="$1" ; shift + "$@" 2>&1 | awk -v prefix="$prefix" '{print prefix $0}' + # return false if the command (rather than awk) failed + [ ${PIPESTATUS[0]} -eq 0 ] +} - for f in ${BUILD_OUTPUT_PATH}/*; do - grep -q "^$(basename $f)|" ${WHEELS_CFG} - if [ $? -ne 0 ]; then - echo "Deleting stale file: $f" - rm -f $f - fi - done -else - mkdir -p ${BUILD_OUTPUT_PATH} - if [ $? -ne 0 ]; then - echo "Failed to create directory: ${BUILD_OUTPUT_PATH}" >&2 + +# Make sure a file exists, exit otherwise +function require_file { + if [ ! -f "${1}" ]; then + echo "Required file does not exist: ${1}" >&2 exit 1 fi -fi +} -# Check to see if we need to build anything -BUILD_NEEDED=no -for wheel in $(cat ${WHEELS_CFG} | sed 's/#.*//' | awk -F '|' '{print $1}'); do - if [[ "${wheel}" =~ \* || ! -f ${BUILD_OUTPUT_PATH}/${wheel} ]]; then - BUILD_NEEDED=yes - break +# Check build output directory for unexpected files, +# ie. wheels from old builds that are no longer in wheels.cfg +function prepare_output_dir { + local output_dir="$1" + local wheels_cfg="$2" + if [ -d ${output_dir} ]; then + local f + for f in ${output_dir}/*; do + if [ -f $f ] ; then + grep -q "^$(basename $f)|" ${wheels_cfg} + if [ $? -ne 0 ]; then + echo "Deleting stale file: $f" + rm -f $f + fi + fi + done + else + mkdir -p ${output_dir} + if [ $? 
-ne 0 ]; then + echo "Failed to create directory: ${output_dir}" >&2 + exit 1 + fi fi -done +} + +BUILD_OUTPUT_PATH=${MY_WORKSPACE}/std/build-wheels-${OS}-${BUILD_STREAM}/base +BUILD_OUTPUT_PATH_PY2=${MY_WORKSPACE}/std/build-wheels-${OS}-${BUILD_STREAM}/base-py2 +WHEELS_CFG=${DOCKER_PATH}/${BUILD_STREAM}-wheels.cfg +WHEELS_CFG_PY2=${DOCKER_PATH}/${BUILD_STREAM}-wheels-py2.cfg + +# make sure .cfg files exist +require_file "${WHEELS_CFG}" +require_file "${WHEELS_CFG_PY2}" + +# prepare output directories +prepare_output_dir "${BUILD_OUTPUT_PATH}" "${WHEELS_CFG}" +prepare_output_dir "${BUILD_OUTPUT_PATH_PY2}" "${WHEELS_CFG_PY2}" if [ "${BUILD_STREAM}" = "dev" -o "${BUILD_STREAM}" = "master" ]; then # Download the master wheel from loci, so we're only building pieces not covered by it @@ -194,16 +226,30 @@ if [ "${BUILD_STREAM}" = "dev" -o "${BUILD_STREAM}" = "master" ]; then docker run --name ${USER}_inspect_wheels ${MASTER_WHEELS_IMAGE} noop 2>/dev/null echo "Extracting wheels from ${MASTER_WHEELS_IMAGE}" - docker export ${USER}_inspect_wheels | tar x -C ${BUILD_OUTPUT_PATH} '*.whl' + rm -rf "${BUILD_OUTPUT_PATH}-loci" + mkdir -p "$BUILD_OUTPUT_PATH-loci" + docker export ${USER}_inspect_wheels | tar x -C "${BUILD_OUTPUT_PATH}-loci" '*.whl' if [ ${PIPESTATUS[0]} -ne 0 -o ${PIPESTATUS[1]} -ne 0 ]; then echo "Failed to extract wheels from ${MASTER_WHEELS_IMAGE}" >&2 docker rm ${USER}_inspect_wheels if [ ${MASTER_WHEELS_PRESENT} -ne 0 ]; then docker image rm ${MASTER_WHEELS_IMAGE} fi + rm -rf "${BUILD_OUTPUT_PATH}-loci" exit 1 fi + # copy loci wheels in base and base-py2 directories + if ! cp "${BUILD_OUTPUT_PATH}-loci"/*.whl "${BUILD_OUTPUT_PATH}"/ ; then + echo "Failed to copy wheels to ${BUILD_OPUTPUT_PATH}" >&2 + exit 1 + fi + if ! cp "${BUILD_OUTPUT_PATH}-loci"/*.whl "${BUILD_OUTPUT_PATH_PY2}"/ ; then + echo "Failed to copy wheels to ${BUILD_OPUTPUT_PATH_PY2}" >&2 + exit 1 + fi + rm -rf "${BUILD_OUTPUT_PATH}-loci" + docker rm ${USER}_inspect_wheels if [ ${MASTER_WHEELS_PRESENT} -ne 0 ]; then @@ -211,7 +257,21 @@ if [ "${BUILD_STREAM}" = "dev" -o "${BUILD_STREAM}" = "master" ]; then fi fi -if [ "${BUILD_NEEDED}" = "no" ]; then +# check if there are any wheels missing +function all_wheels_exist { + local output_dir="$1" + local wheels_cfg="$2" + local wheel + for wheel in $(cat "${wheels_cfg}" | sed 's/#.*//' | awk -F '|' '{print $1}'); do + if [[ "${wheel}" =~ \* || ! -f ${output_dir}/${wheel} ]]; then + return 1 + fi + done + return 0 +} + +if all_wheels_exist "${BUILD_OUTPUT_PATH}" "${WHEELS_CFG}" && \ + all_wheels_exist "${BUILD_OUTPUT_PATH_PY2}" "${WHEELS_CFG_PY2}" ; then echo "All base wheels are already present. Skipping build." exit 0 fi @@ -247,12 +307,10 @@ if [ $? -ne 0 ]; then fi # Run the image, executing the build-wheel.sh script -RM_OPT= -if [ "${KEEP_CONTAINER}" = "no" ]; then - RM_OPT="--rm" -fi - declare -a RUN_ARGS +if [ "${KEEP_CONTAINER}" = "no" ]; then + RUN_ARGS+=(--rm) +fi if [ ! -z "$HTTP_PROXY" ]; then RUN_ARGS+=(--env http_proxy=$HTTP_PROXY) fi @@ -262,11 +320,23 @@ fi if [ ! 
-z "$NO_PROXY" ]; then RUN_ARGS+=(--env no_proxy=$NO_PROXY) fi - -RUN_ARGS+=(${RM_OPT} -v ${BUILD_OUTPUT_PATH}:/wheels ${BUILD_IMAGE_NAME} /docker-build-wheel.sh) +RUN_ARGS+=(--env DISPLAY_RESULT=no) # Run container to build wheels -with_retries ${MAX_ATTEMPTS} docker run ${RUN_ARGS[@]} +rm -f ${BUILD_OUTPUT_PATH}/failed.lst +rm -f ${BUILD_OUTPUT_PATH_PY2}/failed.lst + +notice "building python3 wheels" +log_prefix "[python3] " \ + with_retries ${MAX_ATTEMPTS} \ + docker run ${RUN_ARGS[@]} -v ${BUILD_OUTPUT_PATH}:/wheels ${BUILD_IMAGE_NAME} /docker-build-wheel.sh +BUILD_STATUS=$? + +notice "building python2 wheels" +log_prefix "[python2] " \ + with_retries ${MAX_ATTEMPTS} \ + docker run ${RUN_ARGS[@]} -v ${BUILD_OUTPUT_PATH_PY2}:/wheels --env PYTHON=python2 ${BUILD_IMAGE_NAME} /docker-build-wheel.sh +BUILD_STATUS_PY2=$? if [ "${KEEP_IMAGE}" = "no" ]; then # Delete the builder image @@ -287,8 +357,52 @@ if [ "${KEEP_IMAGE}" = "no" ]; then fi # Check for failures -if [ -f ${BUILD_OUTPUT_PATH}/failed.lst ]; then - # Failures would already have been reported +check_result() { + local python="$1" + local status="$2" + local dir="$3" + + # There's a failed images list + if [ -f "${dir}/failed.lst" ]; then + let failures=$(cat "${dir}/failed.lst" | wc -l) + + cat <&2 <&2 exit 1 fi -with_retries ${MAX_ATTEMPTS} wget https://raw.githubusercontent.com/openstack/requirements/${OPENSTACK_BRANCH}/upper-constraints.txt +with_retries ${MAX_ATTEMPTS} wget "${OPENSTACK_REQ_URL}/upper-constraints.txt" if [ $? -ne 0 ]; then echo "Failed to download upper-constraints.txt" >&2 exit 1 @@ -230,7 +259,7 @@ done shopt -s nullglob # Copy the base and stx wheels, updating upper-constraints.txt as necessary -for wheel in ../base/*.whl ../stx/wheels/*.whl; do +for wheel in ../base${PY_SUFFIX}/*.whl ../stx/wheels/*.whl; do # Get the wheel name and version from the METADATA METADATA=$(unzip -p ${wheel} '*/METADATA') name=$(echo "${METADATA}" | grep '^Name:' | awk '{print $2}') diff --git a/build-tools/build-wheels/docker/centos-dockerfile b/build-tools/build-wheels/docker/centos-dockerfile index c2dd825f..170a71a9 100644 --- a/build-tools/build-wheels/docker/centos-dockerfile +++ b/build-tools/build-wheels/docker/centos-dockerfile @@ -20,3 +20,10 @@ RUN set -ex ;\ COPY docker-build-wheel.sh / COPY ${BUILD_STREAM}-wheels.cfg /wheels.cfg +# Python2 packages +RUN set -ex; \ + yum -y install python python-devel ;\ + wget https://bootstrap.pypa.io/pip/2.7/get-pip.py ;\ + python get-pip.py +COPY ${BUILD_STREAM}-wheels-py2.cfg /wheels-py2.cfg + diff --git a/build-tools/build-wheels/docker/dev-wheels-py2.cfg b/build-tools/build-wheels/docker/dev-wheels-py2.cfg new file mode 100644 index 00000000..244178d8 --- /dev/null +++ b/build-tools/build-wheels/docker/dev-wheels-py2.cfg @@ -0,0 +1,18 @@ +# +# git: wheelname|git|git-source|basedir|branch +# tar: wheelname|tar|wget-source|basedir +# pypi: wheelname|pypi|wget-source +# zip: wheelname|zip|wget-source|basedir +# +# If fix_setup must be called, add |fix_setup at the end of the line +# +amqplib-1.0.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/75/b7/8c2429bf8d92354a0118614f9a4d15e53bc69ebedce534284111de5a0102/amqplib-1.0.2.tgz|amqplib-1.0.2 +lz4-0.9.0-cp27-cp27mu-linux_x86_64.whl|git|https://github.com/python-lz4/python-lz4|python-lz4|v0.9.0 +panko-5.0.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/a9/89/d666e0889d869e41c9b7f87a0a34858b2520782b82e025da84c98e0db8f6/panko-5.0.0.tar.gz|panko-5.0.0 
+google_api_python_client-1.7.7-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/d7/47/940908e52487440f61fb93ad55cbbe3a28235d3bb143b26affb17b37dd28/google_api_python_client-1.7.7-py2.py3-none-any.whl +neutron_lib-*.whl|git|https://github.com/openstack/neutron-lib|neutron-lib|master +python_openstackclient-*.whl|git|https://github.com/openstack/python-openstackclient|python-openstackclient|master +openstacksdk-*.whl|git|https://github.com/openstack/openstacksdk|openstacksdk|master +networking_sfc-8.0.0.0b2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/6a/a8/0e9bdd1f87dfb50682f23a01f590530ec8fa715e51127cf9f58d1905886c/networking_sfc-8.0.0.0b2-py2.py3-none-any.whl +croniter-0.3.29-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/a9/c9/11182a2507798c661b04a7914739ea8ca73a738e6869a23742029f51bc1a/croniter-0.3.29-py2.py3-none-any.whl +pecan-1.3.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/93/98/889d7615595e894f4f7e4c17d4008c822c8e39e650c8ab390cc6c39b99c4/pecan-1.3.3.tar.gz|pecan-1.3.3 diff --git a/build-tools/build-wheels/docker/dev-wheels.cfg b/build-tools/build-wheels/docker/dev-wheels.cfg index 0bf19816..2e0d7dad 100644 --- a/build-tools/build-wheels/docker/dev-wheels.cfg +++ b/build-tools/build-wheels/docker/dev-wheels.cfg @@ -6,13 +6,14 @@ # # If fix_setup must be called, add |fix_setup at the end of the line # -amqplib-1.0.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/75/b7/8c2429bf8d92354a0118614f9a4d15e53bc69ebedce534284111de5a0102/amqplib-1.0.2.tgz|amqplib-1.0.2 -lz4-0.9.0-cp27-none-linux_x86_64.whl|git|https://github.com/python-lz4/python-lz4|python-lz4|v0.9.0 -panko-5.0.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/a9/89/d666e0889d869e41c9b7f87a0a34858b2520782b82e025da84c98e0db8f6/panko-5.0.0.tar.gz|panko-5.0.0 +amqplib-1.0.2-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/75/b7/8c2429bf8d92354a0118614f9a4d15e53bc69ebedce534284111de5a0102/amqplib-1.0.2.tgz|amqplib-1.0.2 +croniter-0.3.29-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/a9/c9/11182a2507798c661b04a7914739ea8ca73a738e6869a23742029f51bc1a/croniter-0.3.29-py2.py3-none-any.whl google_api_python_client-1.7.7-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/d7/47/940908e52487440f61fb93ad55cbbe3a28235d3bb143b26affb17b37dd28/google_api_python_client-1.7.7-py2.py3-none-any.whl +lz4-0.9.0-cp36-cp36m-linux_x86_64.whl|git|https://github.com/python-lz4/python-lz4|python-lz4|v0.9.0 +networking_sfc-8.0.0.0b2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/6a/a8/0e9bdd1f87dfb50682f23a01f590530ec8fa715e51127cf9f58d1905886c/networking_sfc-8.0.0.0b2-py2.py3-none-any.whl neutron_lib-*.whl|git|https://github.com/openstack/neutron-lib|neutron-lib|master python_openstackclient-*.whl|git|https://github.com/openstack/python-openstackclient|python-openstackclient|master openstacksdk-*.whl|git|https://github.com/openstack/openstacksdk|openstacksdk|master -networking_sfc-8.0.0.0b2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/6a/a8/0e9bdd1f87dfb50682f23a01f590530ec8fa715e51127cf9f58d1905886c/networking_sfc-8.0.0.0b2-py2.py3-none-any.whl -croniter-0.3.29-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/a9/c9/11182a2507798c661b04a7914739ea8ca73a738e6869a23742029f51bc1a/croniter-0.3.29-py2.py3-none-any.whl 
-pecan-1.3.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/93/98/889d7615595e894f4f7e4c17d4008c822c8e39e650c8ab390cc6c39b99c4/pecan-1.3.3.tar.gz|pecan-1.3.3 +panko-5.0.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/a9/89/d666e0889d869e41c9b7f87a0a34858b2520782b82e025da84c98e0db8f6/panko-5.0.0.tar.gz|panko-5.0.0 +pecan-1.3.3-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/93/98/889d7615595e894f4f7e4c17d4008c822c8e39e650c8ab390cc6c39b99c4/pecan-1.3.3.tar.gz|pecan-1.3.3 + diff --git a/build-tools/build-wheels/docker/docker-build-wheel.sh b/build-tools/build-wheels/docker/docker-build-wheel.sh index 9da8b84f..59e752a4 100755 --- a/build-tools/build-wheels/docker/docker-build-wheel.sh +++ b/build-tools/build-wheels/docker/docker-build-wheel.sh @@ -10,8 +10,14 @@ CFGFILE=/wheels.cfg OUTPUTDIR=/wheels -FAILED_LOG=$OUTPUTDIR/failed.lst +FAILED_LOG="${OUTPUTDIR}/failed.lst" +: ${DISPLAY_RESULT=yes} declare -i MAX_ATTEMPTS=5 +: ${PYTHON=python3} +if [[ "${PYTHON}" == "python2" ]] ; then + CFGFILE=/wheels-py2.cfg + FAILED_LOG="${OUTPUTDIR}/failed-py2.lst" +fi # # Function to log the start of a build @@ -184,7 +190,7 @@ function from_git { fi # Build the wheel - python3 setup.py bdist_wheel + ${PYTHON} setup.py bdist_wheel if [ -f dist/$wheelname ]; then cp dist/$wheelname $OUTPUTDIR || echo $wheelname >> $FAILED_LOG else @@ -244,7 +250,7 @@ function from_tar { fi # Build the wheel - python3 setup.py bdist_wheel + ${PYTHON} setup.py bdist_wheel if [ -f dist/$wheelname ]; then cp dist/$wheelname $OUTPUTDIR || echo $wheelname >> $FAILED_LOG else @@ -295,7 +301,7 @@ function from_zip { fi # Build the wheel - python3 setup.py bdist_wheel + ${PYTHON} setup.py bdist_wheel if [ -f dist/$wheelname ]; then cp dist/$wheelname $OUTPUTDIR || echo $wheelname >> $FAILED_LOG else @@ -339,24 +345,28 @@ from_tar from_zip from_pypi -if [ -f $FAILED_LOG ]; then - let failures=$(cat $FAILED_LOG | wc -l) +if [ -f "${FAILED_LOG}" ]; then + if [ "${DISPLAY_RESULT}" = yes ] ; then + let failures=$(cat "${FAILED_LOG}" | wc -l) - cat < "$MY_YUM_CONF" sed -i "s%\[main\]%&\ncachedir=$YUM_CACHE%" "$MY_YUM_CONF" sed -i "s%logfile=.*%logfile=$YUM_DIR/yum.log%" "$MY_YUM_CONF" + # eg: LOCAL_BASE/MY_BUILD_DIR => file:///MY_BUILD_DIR sed -i "s%LOCAL_BASE%file://%g" "$MY_YUM_CONF" sed -i "s%MIRROR_BASE%file:///import/mirrors%g" "$MY_YUM_CONF" sed -i "s%BUILD_ENV%$MY_BUILD_ENVIRONMENT%g" "$MY_YUM_CONF" + # eg: file:///MY_BUILD_DIR => file:///localdisk/loadbuild/... sed -i "s%/MY_BUILD_DIR%$MY_BUILD_DIR%g" "$MY_YUM_CONF" sed -i "s%/MY_REPO_DIR%$MY_REPO%g" "$MY_YUM_CONF" + # eg = MY_BUILD_DIR/xyz => /localdisk/loadbuild/.../xyz + sed -i "s%MY_BUILD_DIR%$MY_BUILD_DIR%g" "$MY_YUM_CONF" + sed -i "s%MY_REPO_DIR%$MY_REPO%g" "$MY_YUM_CONF" else echo "ERROR: Could not find yum.conf or MOCK_CFG_PROTO" exit 1 diff --git a/build-tools/modify-build-cfg b/build-tools/modify-build-cfg index 78658623..6c273f79 100755 --- a/build-tools/modify-build-cfg +++ b/build-tools/modify-build-cfg @@ -83,11 +83,16 @@ if [ ! -f $FILE ]; then exit 1 fi + # eg: LOCAL_BASE/MY_BUILD_DIR => http://127.0.0.1:8088/MY_BUILD_DIR sed -i "s%LOCAL_BASE%http://127.0.0.1:8088%g" "$FILE" sed -i "s%MIRROR_BASE%http://127.0.0.1:8088%g" "$FILE" sed -i "s%BUILD_ENV%$MY_BUILD_ENVIRONMENT%g" "$FILE" + # eg http://127.0.0.1:8088/MY_BUILD_DIR => http://12.0.0.1:8088/localdisk/loadbuild/... 
sed -i "s%/MY_BUILD_DIR%$MY_BUILD_DIR_TOP%g" "$FILE" sed -i "s%/MY_REPO_DIR%$MY_REPO%g" "$FILE" + # eg = MY_BUILD_DIR/xyz => /localdisk/loadbuild/.../xyz + sed -i "s%MY_BUILD_DIR%$MY_BUILD_DIR_TOP%g" "$FILE" + sed -i "s%MY_REPO_DIR%$MY_REPO%g" "$FILE" # Disable all local-* repos for the build-types other than the current one for bt in std rt; do diff --git a/build-tools/repo-utils.sh b/build-tools/repo-utils.sh index 9514b0e9..49f3d8eb 100644 --- a/build-tools/repo-utils.sh +++ b/build-tools/repo-utils.sh @@ -140,6 +140,50 @@ repo_is_project () { } +# +# manifest_get_revision_of_project +# +# Extract the revision of a project within the manifest. +# The default revision is supplied in the absence +# of an explicit project revision. +# +# manifest = Path to manifest. +# project-name = name of project. +# +manifest_get_revision_of_project () { + local manifest="${1}" + local project="${2}" + + local default_revision="" + local revision="" + + default_revision=$(manifest_get_default_revision "${manifest}") + revision=$(grep ' +# +# Extract the default revision of the manifest, if any. +# +# manifest = Path to manifest. +# +manifest_get_default_revision () { + local manifest="${1}" + + grep ' # @@ -149,8 +193,10 @@ repo_is_project () { # revision = A branch, tag ,or sha. Branch and SHA can be used # directly, but repo requires that a tag be in the form # "refs/tags/". -# lock_down = 0 or 1. If 1, set a revision on all other non-listed +# lock_down = 0,1 or 2. If 2, set a revision on all other non-listed # projects to equal the SHA of the current git head. +# If 1, similar to 2, but only if the project doesn't have +# some other form of revision specified. # project-list = A space seperated list of projects. Listed projects # will have their revision set to the provided revision # value. @@ -160,9 +206,11 @@ manifest_set_revision () { local new_manifest="${2}" local revision="${3}" local lock_down="${4}" - shift 4 + local set_default="${5}" + shift 5 local projects="${@}" + local old_default_revision="" local repo_root_dir="" local line="" local FOUND=0 @@ -192,11 +240,32 @@ manifest_set_revision () { return 1 fi + old_default_revision=$(manifest_get_default_revision "${old_manifest}") + if [ ${set_default} -eq 1 ] && [ "${old_default_revision}" == "" ]; then + # We only know how to alter an existing default revision, not set a + # new one, so continue without setting a default. + set_default=0 + fi + while IFS= read -r line; do echo "${line}" | grep -q '&2 echo "Error: $FUNCNAME (${LINENO}): empty argument" + return 1 + fi + + if echo "$URL" | grep -q '[:][/][/]' ;then + echo "$URL" | sed 's#^\(.*\)://.*$#\1#' + else + echo "http" + fi + return 0 +} + +url_login () { + local URL="$1" + + if [ "$URL" == "" ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): empty argument" + return 1 + fi + + echo "$URL" | sed 's#^.*://\([^/]*\)/.*$#\1#' + return 0 +} + +url_user () { + local URL="$1" + local LOGIN + + if [ "$URL" == "" ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): empty argument" + return 1 + fi + + url_login "$URL" | sed -e '/@/! s#.*## ; s#\([^@]*\)@.*#\1#' + if [ ${PIPESTATUS[0]} -ne 0 ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): url_login failed" + return 1 + fi + + return 0 +} + +url_port () { + local URL="$1" + local LOGIN + + if [ "$URL" == "" ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): empty argument" + return 1 + fi + + url_login "$URL" | sed -e '/:/! 
s#.*## ; s#[^:]*:\([^:]*\)#\1#' + if [ ${PIPESTATUS[0]} -ne 0 ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): url_login failed" + return 1 + fi + + return 0 +} + +url_server () { + local URL="$1" + local LOGIN + + if [ "$URL" == "" ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): empty argument" + return 1 + fi + + url_login "$URL" | sed 's#^.*@## ; s#:.*$##' + if [ ${PIPESTATUS[0]} -ne 0 ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): url_login failed" + return 1 + fi + + return 0 +} + +url_path () { + local URL="$1" + + if [ "$URL" == "" ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): empty argument" + return 1 + fi + + echo "$URL" | sed 's#^.*://[^/]*/\(.*\)$#\1#' + return 0 +} + +# +# url_path_to_fs_path: +# +# Convert url format path to file system format. +# e.g. replace %20 with ' '. +# +# Note: Does NOT test the output path to ensure there are +# no illegal file system characters. +# +url_path_to_fs_path () { + local INPUT_PATH="$1" + local TEMP + + if [ "$INPUT_PATH" == "" ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): empty argument" + return 1 + fi + + # Deviate from URI spec by not substituding '+' with ' '. + # It would alias '%20' and we need unique mappings. + # TEMP="${INPUT_PATH//+/ }" + + TEMP="$INPUT_PATH" + printf '%b' "${TEMP//%/\\x}" + return 0 +} + +# +# fs_path_to_url_path: +# +# Convert file system format path to url format. +# e.g. replace ' ' with %20. +# +fs_path_to_url_path () { + local INPUT_PATH="$1" + local LENGTH + local POS + local CHAR + + if [ "$INPUT_PATH" == "" ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): empty argument" + return 1 + fi + + LENGTH="${#INPUT_PATH}" + for (( POS = 0; POS < LENGTH; POS++ )); do + CHAR="${1:POS:1}" + case $CHAR in + [/a-zA-Z0-9.~_-]) + # Reference https://metacpan.org/pod/URI::Escape + printf "$CHAR" + ;; + *) + printf '%%%02X' "'$CHAR" + ;; + esac + done + + return 0 +} + +# +# normalize_path: +# +# 1) replace // with / +# 2) replace /./ with / +# 3) Remove trailing / +# 4) Remove leading ./ +# + +normalize_path () { + local INPUT_PATH="$1" + + if [ "$INPUT_PATH" == "" ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): empty argument" + return 1 + fi + + echo "$INPUT_PATH" | sed 's#[/]\+#/#g ; s#[/][.][/]#/#g ; s#/$## ; s#^[.]/##' + return 0 +} + + +# +# repo_url_to_sub_path: +# +repo_url_to_sub_path () { + local URL="$1" + local FAMILY="" + local SERVER="" + local URL_PATH="" + local FS_PATH="" + + if [ "$URL" == "" ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): empty argument" + return 1 + fi + + # set FAMILY from URL + echo $URL | grep -q 'centos[.]org' && FAMILY=centos + echo $URL | grep -q 'fedoraproject[.]org[/]pub[/]epel' && FAMILY=epel + + SERVER=$(url_server "$URL") + if [ $? -ne 0 ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): url_server '$URL'" + return 1 + fi + + URL_PATH="$(url_path "$URL")" + if [ $? -ne 0 ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): url_path '$URL'" + return 1 + fi + + FS_PATH="$(url_path_to_fs_path "$URL_PATH")" + if [ $? -ne 0 ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): url_path_to_fs_path '$URL_PATH'" + return 1 + fi + + FS_PATH="$(normalize_path "$FS_PATH")" + if [ $? -ne 0 ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): normalize_path '$FS_PATH'" + return 1 + fi + + normalize_path "./$FAMILY/$SERVER/$FS_PATH" + if [ $? 
-ne 0 ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): normalize_path './$FAMILY/$SERVER/$FS_PATH'" + return 1 + fi + + return 0 +} + +CENGN_PROTOCOL="http" +CENGN_HOST="mirror.starlingx.cengn.ca" +CENGN_PORT="80" +CENGN_URL_ROOT="mirror" + +url_to_stx_mirror_url () { + local URL="$1" + local DISTRO="$2" + local URL_PATH="" + local FS_PATH="" + + if [ "$URL" == "" ] || [ "$DISTRO" == "" ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): empty argument" + return 1 + fi + + FS_PATH="$(repo_url_to_sub_path "$URL")" + if [ $? -ne 0 ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): repo_url_to_sub_path '$URL'" + return 1 + fi + + URL_PATH=$(fs_path_to_url_path "$FS_PATH") + if [ $? -ne 0 ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): fs_path_to_url_path '$FS_PATH'" + return 1 + fi + + echo "$CENGN_PROTOCOL://$CENGN_HOST:$CENGN_PORT/$CENGN_URL_ROOT/$DISTRO/$URL_PATH" + return 0 +} diff --git a/build-tools/yum-builddep-wrapper b/build-tools/yum-builddep-wrapper new file mode 100755 index 00000000..4f82d2d2 --- /dev/null +++ b/build-tools/yum-builddep-wrapper @@ -0,0 +1,66 @@ +#!/bin/bash + +# Old versions of yum-builddep leave a stale yum.pid file behind. +# Remove that file if necessary after yum-builddep exits + +# find yum-builddep +YUM_BUILDDEP=$(which yum-builddep 2>/dev/null) + +# dnf: call it directly +if [[ -z $YUM_BUILDDEP ]] || grep -q -F dnf.cli "$YUM_BUILDDEP" ; then + yum-builddep "$@" + exit $? +fi + + +# old yum: scan command line for --installroot +ROOT_PREFIX= +YUM_CONF=/etc/yum.conf +find_root_prefix() { + while [[ "$#" -gt 0 ]] ; do + case "$1" in + --installroot) + ROOT_PREFIX="$2" + shift + ;; + --installroot=*) + ROOT_PREFIX="${1#*=}" + ;; + -c|--config) + YUM_CONF="$2" + shift + ;; + --config=*) + YUM_CONF="${1#*=}" + ;; + esac + shift + done + if [[ -z "$ROOT_PREFIX" ]] && [[ -f "$YUM_CONF" ]] ; then + ROOT_PREFIX=$(sed -rn 's/^\s*installroot\s*=\s*(\S+)\s*$/\1/p' $YUM_CONF) + fi +} +find_root_prefix "$@" + +# ignore signals -- always wait for yum-builddep +trap "" INT TERM HUP PIPE + +# run it in the background to get its PID +"$YUM_BUILDDEP" "$@" & +pid="$!" + +# wait for it +wait "$pid" +res="$?" + +# if yum.pid remains and contains yum-builddep's PID, delete it +if [[ -f "${ROOT_PREFIX}/run/yum.pid" ]] ; then + lock_owner= + read lock_owner <"${ROOT_PREFIX}/run/yum.pid" || : + if [[ -n $lock_owner && $lock_owner == $pid ]] ; then + rm -f "${ROOT_PREFIX}/run/yum.pid" + fi +fi + +# done +exit $res
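A usage sketch for the wrapper above; the config path, chroot location, and spec file are hypothetical. Because the wrapper forwards its arguments to yum-builddep unchanged, it can be dropped in wherever yum-builddep is normally invoked, and it removes run/yum.pid only when that file still names the yum-builddep process it just waited for.

    # Hypothetical invocation; paths and package are examples only.
    build-tools/yum-builddep-wrapper \
        -c /var/lib/mock/example-cfg/root/etc/yum.conf \
        --installroot /var/lib/mock/example-cfg/root \
        example-package.spec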