Remove CentOS/OpenSUSE build support
StarlingX stopped supporting CentOS builds after release 7.0. This update strips CentOS from our code base, and also removes references to the failed OpenSUSE feature. There is still one CentOS-based docker image (n3000), so this update leaves limited CentOS support in the download and docker image building tools.

Verified with a full Jenkins master branch build for Debian:
- download: full and incremental
- package build: full and incremental
- iso build
- build of base container image
- build all flock container images
- helm chart build

Story: 2011110
Task: 49939
Change-Id: I57939d2026d7df76091e8658750b4bd0fa8e4f5f
Signed-off-by: Scott Little <scott.little@windriver.com>
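For reviewers who want to spot-check the removal locally, a rough search along these lines will flag leftover CentOS references; the build-tools path and the n3000 exclusion are assumptions, not part of this change:

    cd "$MY_REPO/build-tools"
    # List any remaining CentOS mentions outside the intentionally retained n3000 tooling.
    grep -rniI --exclude-dir=.git 'centos' . | grep -vi 'n3000' || echo "no unexpected CentOS references"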
parent 0a3dc5d5dc
commit a4174a1e0a
.gitignore
@ -1,9 +1,6 @@
*.swp
.tox
__pycache__/
/centos-repo
/cgcs-centos-repo
/cgcs-tis-repo
/local-build-data
/local-repo
/public-keys/
@ -1,23 +0,0 @@
|
||||
#
|
||||
# Copyright (c) 2018 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
|
||||
#
|
||||
# What files and directories need to be copied
|
||||
#
|
||||
BUILD_AVOIDANCE_SRPM_DIRECTORIES="inputs srpm_assemble rpmbuild/SRPMS rpmbuild/SOURCES"
|
||||
BUILD_AVOIDANCE_SRPM_FILES=""
|
||||
BUILD_AVOIDANCE_RPM_DIRECTORIES="results rpmbuild/RPMS rpmbuild/SPECS repo/local-repo/dependancy-cache"
|
||||
BUILD_AVOIDANCE_RPM_FILES=".platform_release"
|
||||
|
||||
|
||||
#
|
||||
# Copy the lines below to $MY_REPO/local-build-data/build_avoidance_source,
|
||||
# then uncomment and fill in the values giving the location of your local reference build.
|
||||
#
|
||||
# BUILD_AVOIDANCE_USR="jenkins"
|
||||
# BUILD_AVOIDANCE_HOST="machine.corp.com"
|
||||
# BUILD_AVOIDANCE_DIR="/localdisk/loadbuild/jenkins/StarlingX_Build"
|
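For illustration only (user, host and path are placeholders, not values from this repo), a filled-in $MY_REPO/local-build-data/build_avoidance_source might read:

    # Hypothetical local override pointing at your own reference build.
    BUILD_AVOIDANCE_USR="jenkins"
    BUILD_AVOIDANCE_HOST="build-server.example.com"
    BUILD_AVOIDANCE_DIR="/localdisk/loadbuild/jenkins/StarlingX_Build"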
@ -1,22 +0,0 @@
|
||||
[-]locale[-]
|
||||
[-]doc[-]
|
||||
[-]dbg[-]
|
||||
vswitch-staticdev
|
||||
vim-spell
|
||||
openssh-server-sysvinit
|
||||
openstack-neutron-linuxbridge
|
||||
^libcacard-
|
||||
^kernel-bootwrapper
|
||||
^kernel-doc-
|
||||
^kernel-abi-whitelists
|
||||
^kernel-debug-
|
||||
^kernel-kdump
|
||||
^kernel-rt-bootwrapper
|
||||
^kernel-rt-doc-
|
||||
^kernel-rt-abi-whitelists
|
||||
^kernel-rt-debug-
|
||||
^kernel-rt-debuginfo
|
||||
^kernel-rt-kdump
|
||||
^kernel-rt-cross-headers
|
||||
^kernel-rt-kvm-debuginfo
|
||||
^kernel-rt-tools-debuginfo
|
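The entries above read like grep-style patterns: anchored regexes such as ^kernel-debug- next to plain package-name substrings. As a sketch only (the real consumer of this list is not shown in this diff, and the file name below is assumed), such a list could be applied to a candidate package list with:

    # Drop every package matching one of the exclusion patterns.
    grep -E -v -f exclude-patterns.lst candidate-packages.lst > filtered-packages.lst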
@ -1,78 +0,0 @@
|
||||
Data on a source rpm:
|
||||
|
||||
location:
|
||||
${MY_WORKSPACE}/${BUILD_TYPE}/rpmbuild/SPECS/${SRPM_FILE_NAME}/
|
||||
|
||||
files:
|
||||
*.spec # spec file found in the source rpm
|
||||
|
||||
subdirectories:
|
||||
NAMES/ # Directory contains an empty file, where the file name
|
||||
# is the name of the source rpm.
|
||||
|
||||
SERVICES/ # Directory contains zero or more empty files, where the
|
||||
# file name is the name of the service provided by one
|
||||
# or more of the rpms.
|
||||
|
||||
BUILDS/ # Directory contains empty files, where the file name is
|
||||
# the name of a binary rpm built from the source rpm.
|
||||
|
||||
BUILDS_VR/ # Directory contains empty files, where the file name is
|
||||
# the name-version-release of a binary rpm built from the
|
||||
# source rpm.
|
||||
|
||||
location:
|
||||
${MY_WORKSPACE}/${BUILD_TYPE}/rpmbuild/SOURCES/${SRPM_FILE_NAME}/
|
||||
|
||||
files:
|
||||
BIG # if it exists, it contains one line, the numeric value
|
||||
# extracted from build_srpms.data if the line
|
||||
# BUILD_IS_BIG=### is present.
|
||||
# This is the estimated filesystem size (GB) required to
|
||||
# host a mock build of the package.
|
||||
# Note: not all parallel build environments are the same
|
||||
# size. The smallest build environment is 3 GB and this
|
||||
# is sufficient for most packages. Don't bother adding a
|
||||
# BUILD_IS_BIG=### directive unless 3 GB is proven to be
|
||||
# insufficient.
|
||||
|
||||
SLOW # if it exists, it contains one line, the numeric value
|
||||
# extracted from build_srpms.data if the line
|
||||
# BUILD_IS_SLOW=### is present.
|
||||
# This is the estimated build time (minutes) required to
|
||||
# perform a mock build of the package.
|
||||
# Note: Currently we only use this value as a boolean.
|
||||
# Non-zero and we try to start the build of this package
|
||||
# earlier rather than later. Build times >= 3 minutes are
|
||||
# worth annotating. Else don't bother adding a
|
||||
# BUILD_IS_SLOW=### directive
|
||||
e.g.
|
||||
|
||||
cd $MY_WORKSPACE/std/rpmbuild/SPECS/openstack-cinder-9.1.1-0.tis.40.src.rpm
|
||||
find .
|
||||
./BUILDS
|
||||
./BUILDS/openstack-cinder
|
||||
./BUILDS/python-cinder
|
||||
./BUILDS/python-cinder-tests
|
||||
./NAMES
|
||||
./NAMES/openstack-cinder
|
||||
./SERVICES
|
||||
./SERVICES/cinder
|
||||
./BUILDS_VR
|
||||
./BUILDS_VR/openstack-cinder-9.1.1-0.tis.40
|
||||
./BUILDS_VR/python-cinder-9.1.1-0.tis.40
|
||||
./BUILDS_VR/python-cinder-tests-9.1.1-0.tis.40
|
||||
./openstack-cinder.spec
|
||||
|
||||
|
||||
e.g.
|
||||
cd $MY_WORKSPACE/std/rpmbuild/SOURCES/kernel-3.10.0-514.16.1.el7.29.tis.src.rpm
|
||||
find .
|
||||
./BIG
|
||||
./SLOW
|
||||
|
||||
cat ./BIG
|
||||
8
|
||||
|
||||
cat ./SLOW
|
||||
12
|
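As a sketch of how a build scheduler could consume the BIG and SLOW markers described above (the helper name is invented; only the file layout comes from this document):

    # Print the size hint (GB) and slow hint (minutes, used as a boolean) for one source rpm.
    read_build_hints () {
        local dir="$MY_WORKSPACE/$BUILD_TYPE/rpmbuild/SOURCES/$1"
        local big=3 slow=0                     # 3 GB is the smallest build environment
        [ -f "$dir/BIG" ]  && big=$(head -n 1 "$dir/BIG")
        [ -f "$dir/SLOW" ] && slow=$(head -n 1 "$dir/SLOW")
        echo "size_gb=$big slow=$slow"
    }
    # e.g. read_build_hints kernel-3.10.0-514.16.1.el7.29.tis.src.rpm  ->  size_gb=8 slow=12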
@ -1,80 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
rpm_compare () {
|
||||
local r="$1"
|
||||
local r2="$2"
|
||||
local line
|
||||
local f=$(basename $r)
|
||||
local f2=$(basename $r2)
|
||||
|
||||
rpm -q --dump --nosignature -p $r | awk ' { print $1 "\n" $1 " " $5 " " $6 " " $7 " " $8 " " $9 " " $10 " " $11 } ' > /tmp/dump.new
|
||||
rpm -q --dump --nosignature -p $r2 | awk ' { print $1 "\n" $1 " " $5 " " $6 " " $7 " " $8 " " $9 " " $10 " " $11 } ' > /tmp/dump.old
|
||||
first_line=1
|
||||
diff -y -W 200 --suppress-common-lines /tmp/dump.new /tmp/dump.old | grep '|' |
|
||||
while read -r line; do
|
||||
left=$(echo "$line" | awk -F '|' '{ print $1 }')
|
||||
right=$(echo "$line" | awk -F '|' '{ print $2 }')
|
||||
left_f=$(echo "$left" | awk '{ print $1 }')
|
||||
right_f=$(echo "$right" | awk '{ print $1 }')
|
||||
if [ "$left_f" != "$right_f" ];then
|
||||
continue
|
||||
fi
|
||||
if [ $first_line -eq 1 ]; then
|
||||
echo ""
|
||||
echo "$f vs $f2"
|
||||
first_line=0
|
||||
fi
|
||||
echo "$line"
|
||||
done
|
||||
}
|
||||
|
||||
# For backward compatibility. Old repo location or new?
|
||||
CENTOS_REPO=${MY_REPO}/centos-repo
|
||||
if [ ! -d ${CENTOS_REPO} ]; then
|
||||
CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
|
||||
if [ ! -d ${CENTOS_REPO} ]; then
|
||||
echo "ERROR: directory ${MY_REPO}/centos-repo not found."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "======================================================"
|
||||
echo "Auditing built packages vs unpatched upstream packages"
|
||||
echo "======================================================"
|
||||
for r in $(find $MY_WORKSPACE/*/rpmbuild/RPMS -name '*.rpm' | grep -v '.src.rpm' | grep -v debuginfo); do
|
||||
f=$(basename $r)
|
||||
f2=$(echo $f | sed 's#[.]tis[.][0-9]*[.]#.#' | sed 's#[.]tis[.]#.#')
|
||||
r2=$(find ${CENTOS_REPO}/Binary/ -name $f2)
|
||||
if [ "$r2" == "" ]; then
|
||||
# Probably one of our own
|
||||
# echo "Couldn't find '$f2'"
|
||||
continue
|
||||
fi
|
||||
rpm_compare "$r" "$r2"
|
||||
done
|
||||
|
||||
echo ""
|
||||
echo "============================"
|
||||
echo "Auditing built for conflicts"
|
||||
echo "============================"
|
||||
grep 'conflicts with file from package' -r --binary-files=without-match $MY_WORKSPACE/*/results/ |
|
||||
|
||||
while read -r line; do
|
||||
w=$(echo "$line" | awk '{ print $8 }')".rpm"
|
||||
w2=$(echo "$line" | awk '{ print $14 }')".rpm"
|
||||
echo "$w $w2"
|
||||
done | sort --unique | sed 's#bash-completion-1:#bash-completion-#' |
|
||||
|
||||
while read -r line2; do
|
||||
f=$(echo "$line2" | awk '{ print $1 }')
|
||||
f2=$(echo "$line2" | awk '{ print $2 }')
|
||||
r=$(find ${CENTOS_REPO}/Binary/ $MY_WORKSPACE/*/rpmbuild/RPMS -name $f)
|
||||
r2=$(find ${CENTOS_REPO}/Binary/ $MY_WORKSPACE/*/rpmbuild/RPMS -name $f2)
|
||||
# echo ""
|
||||
# echo "$f vs $f2"
|
||||
# echo "$r vs $r2"
|
||||
if [ "$r" != "" ] && [ "$r2" != "" ]; then
|
||||
rpm_compare "$r" "$r2"
|
||||
fi
|
||||
done
|
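The core of rpm_compare above is the rpm --dump listing; the same comparison can be reproduced by hand for any two packages (file names below are placeholders):

    # Dump file owner/group/mode metadata from each rpm and show only differing rows.
    rpm -q --dump --nosignature -p ./bash-4.2.46-31.el7.tis.4.x86_64.rpm > /tmp/dump.new
    rpm -q --dump --nosignature -p ./bash-4.2.46-31.el7.x86_64.rpm       > /tmp/dump.old
    diff -y -W 200 --suppress-common-lines /tmp/dump.new /tmp/dump.old | grep '|'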
@ -1,923 +0,0 @@
|
||||
#
|
||||
# Copyright (c) 2018 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
#
|
||||
# Functions related to build avoidance.
|
||||
#
|
||||
# Do not call directly. Used by build-pkgs.
|
||||
#
|
||||
# Build avoidance downloads rpm, src.rpm and other artifacts of
|
||||
# build-pkgs for a local reference build. The reference would
|
||||
# typically be an automated build run at least daily.
|
||||
# The MY_WORKSPACE directory for the reference build shall have
|
||||
# a common root directory, and a leaf directory that is a time stamp
|
||||
# in a sortable parsable format. Default YYYYMMDDThhmmssZ.
|
||||
# e.g. /localdisk/loadbuild/jenkins/StarlingX/20180719T113021Z
|
||||
#
|
||||
# Other formats can be used by setting the following variables
|
||||
# in $MY_REPO/local-build-data/build_avoidance_source.
|
||||
# e.g. to allow format YYYY-MM-DD_hh-mm-ss
|
||||
# BUILD_AVOIDANCE_DATE_FORMAT="%Y-%m-%d"
|
||||
# BUILD_AVOIDANCE_TIME_FORMAT="%H-%M-%S"
|
||||
# BUILD_AVOIDANCE_DATE_TIME_DELIM="_"
|
||||
# BUILD_AVOIDANCE_DATE_TIME_POSTFIX=""
|
||||
#
|
||||
# Note: Must be able to rsync and ssh to the machine that holds the
|
||||
# reference builds.
|
||||
#
|
||||
# In future alternative transfer protocols may be supported.
|
||||
# Select the alternate protocol by setting the following variables
|
||||
# in $MY_REPO/local-build-data/build_avoidance_source.
|
||||
# e.g.
|
||||
# BUILD_AVOIDANCE_FILE_TRANSFER="my-supported-protocol"
|
||||
#
|
||||
|
||||
BUILD_AVOIDANCE_UTILS_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )"
|
||||
|
||||
source "${BUILD_AVOIDANCE_UTILS_DIR}/git-utils.sh"
|
||||
|
||||
BUILD_AVOIDANCE_USR=""
|
||||
BUILD_AVOIDANCE_HOST=""
|
||||
BUILD_AVOIDANCE_DIR=""
|
||||
BUILD_AVOIDANCE_URL=""
|
||||
|
||||
# Default date/time format, iso-8601 compact, 20180912T143913Z
|
||||
# Syntax is a subset of that used by the unix 'date' command.
|
||||
BUILD_AVOIDANCE_DATE_FORMAT="%Y%m%d"
|
||||
BUILD_AVOIDANCE_TIME_FORMAT="%H%M%S"
|
||||
BUILD_AVOIDANCE_DATE_TIME_DELIM="T"
|
||||
BUILD_AVOIDANCE_DATE_TIME_POSTFIX="Z"
|
||||
|
||||
# Default file transfer method
|
||||
BUILD_AVOIDANCE_FILE_TRANSFER="rsync"
|
||||
|
||||
# Default is to use timestamps and days in UTC
|
||||
#
|
||||
# If you prefer local time, then set 'BUILD_AVOIDANCE_DATE_UTC=0'
|
||||
# in '$MY_REPO/local-build-data/build_avoidance_source'
|
||||
BUILD_AVOIDANCE_DATE_UTC=1
|
||||
|
||||
BUILD_AVOIDANCE_DATA_DIR="$MY_WORKSPACE/build_avoidance_data"
|
||||
BUILD_AVOIDANCE_SOURCE="$MY_REPO/build-data/build_avoidance_source"
|
||||
BUILD_AVOIDANCE_LOCAL_SOURCE="$MY_REPO/local-build-data/build_avoidance_source"
|
||||
BUILD_AVOIDANCE_TEST_CONTEXT="$BUILD_AVOIDANCE_DATA_DIR/test_context"
|
||||
|
||||
if [ ! -f $BUILD_AVOIDANCE_SOURCE ]; then
|
||||
echo "Couldn't read $BUILD_AVOIDANCE_SOURCE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Reading: $BUILD_AVOIDANCE_SOURCE"
|
||||
source $BUILD_AVOIDANCE_SOURCE
|
||||
|
||||
if [ -f $BUILD_AVOIDANCE_LOCAL_SOURCE ]; then
|
||||
echo "Reading: $BUILD_AVOIDANCE_LOCAL_SOURCE"
|
||||
source $BUILD_AVOIDANCE_LOCAL_SOURCE
|
||||
fi
|
||||
|
||||
UTC=""
|
||||
|
||||
if [ $BUILD_AVOIDANCE_DATE_UTC -eq 1 ]; then
|
||||
UTC="--utc"
|
||||
fi
|
||||
|
||||
|
||||
if [ "x$BUILD_AVOIDANCE_OVERRIDE_DIR" != "x" ]; then
|
||||
BUILD_AVOIDANCE_DIR="$BUILD_AVOIDANCE_OVERRIDE_DIR"
|
||||
fi
|
||||
|
||||
if [ "x$BUILD_AVOIDANCE_OVERRIDE_HOST" != "x" ]; then
|
||||
BUILD_AVOIDANCE_HOST="$BUILD_AVOIDANCE_OVERRIDE_HOST"
|
||||
fi
|
||||
|
||||
if [ "x$BUILD_AVOIDANCE_OVERRIDE_USR" != "x" ]; then
|
||||
BUILD_AVOIDANCE_USR="$BUILD_AVOIDANCE_OVERRIDE_USR"
|
||||
fi
|
||||
|
||||
echo "BUILD_AVOIDANCE_DIR=$BUILD_AVOIDANCE_DIR"
|
||||
echo "BUILD_AVOIDANCE_HOST=$BUILD_AVOIDANCE_HOST"
|
||||
echo "BUILD_AVOIDANCE_USR=$BUILD_AVOIDANCE_USR"
|
||||
|
||||
build_avoidance_last_sync_file () {
|
||||
local BUILD_TYPE=$1
|
||||
|
||||
if [ -z "$BUILD_TYPE" ]; then
|
||||
echo "build_avoidance_last_sync_file: Build type not set"
|
||||
exit 1
|
||||
fi
|
||||
echo "$BUILD_AVOIDANCE_DATA_DIR/$BUILD_TYPE/last_sync_context"
|
||||
}
|
||||
|
||||
build_avoidance_clean () {
|
||||
local BUILD_TYPE=$1
|
||||
local lsf
|
||||
|
||||
if [ "$BUILD_TYPE" == "" ]; then
|
||||
for lsf in $(find $BUILD_AVOIDANCE_DATA_DIR -name last_sync_context); do
|
||||
\rm -f -v "$lsf"
|
||||
done
|
||||
else
|
||||
lsf="$(build_avoidance_last_sync_file $BUILD_TYPE)"
|
||||
if [ -f $lsf ]; then
|
||||
\rm -f -v "$lsf"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
date_to_iso_8601 () {
|
||||
local DATE="$1"
|
||||
local CENTURY=""
|
||||
local YEAR_IN_CENTURY="00"
|
||||
local MONTH="01"
|
||||
local DAY="01"
|
||||
local DAY_OF_YEAR=""
|
||||
|
||||
CENTURY="$(date '+%C')"
|
||||
|
||||
for x in $(echo "${BUILD_AVOIDANCE_DATE_FORMAT}" | tr ' ' '#' | sed 's/%%/#/g' | tr '%' ' ' ); do
|
||||
# Consume format case options
|
||||
case ${x:0:1} in
|
||||
^) x=${x:1};;
|
||||
\#) x=${x:1};;
|
||||
*) ;;
|
||||
esac
|
||||
|
||||
# Process format
|
||||
case $x in
|
||||
Y*) CENTURY=${DATE:0:2}; YEAR_IN_CENTURY=${DATE:2:2}; DATE=${DATE:4}; x=${x:1};;
|
||||
0Y*) CENTURY=${DATE:0:2}; YEAR_IN_CENTURY=${DATE:2:2}; DATE=${DATE:4}; x=${x:2};;
|
||||
_Y*) CENTURY=$(echo "${DATE:0:2}" | tr ' ' '0'); YEAR_IN_CENTURY=${DATE:2:2}; DATE=${DATE:4}; x=${x:2};;
|
||||
|
||||
y*) YEAR_IN_CENTURY=${DATE:0:2}; DATE=${DATE:2}; x=${x:1};;
|
||||
0y*) YEAR_IN_CENTURY=${DATE:0:2}; DATE=${DATE:2}; x=${x:2};;
|
||||
_y*) YEAR_IN_CENTURY=$(echo "${DATE:0:2}" | tr ' ' '0'); DATE=${DATE:2}; x=${x:2};;
|
||||
|
||||
C*) CENTURY=${DATE:0:2}; DATE=${DATE:2}; x=${x:1};;
|
||||
0C*) CENTURY=${DATE:0:2}; DATE=${DATE:2}; x=${x:2};;
|
||||
_C*) CENTURY=$(echo "${DATE:0:2}" | tr ' ' '0'); DATE=${DATE:2}; x=${x:2};;
|
||||
|
||||
m*) MONTH=${DATE:0:2}; DATE=${DATE:2}; x=${x:1};;
|
||||
0m*) MONTH=${DATE:0:2}; DATE=${DATE:2}; x=${x:2};;
|
||||
_m*) MONTH=$(echo "${DATE:0:2}" | tr ' ' '0'); DATE=${DATE:2}; x=${x:2};;
|
||||
e*) DAY=$(echo "${DATE:0:2}" | tr ' ' '0'); DATE=${DATE:2}; x=${x:1};;
|
||||
0e*) DAY=${DATE:0:2}; DATE=${DATE:2}; x=${x:2};;
|
||||
_e*) DAY=$(echo "${DATE:0:2}" | tr ' ' '0'); DATE=${DATE:2}; x=${x:2};;
|
||||
b*) MONTH="$(date -d "${DATE:0:3} 1 2000" '+%m')"; DATE=${DATE:3}; x=${x:1};;
|
||||
h*) MONTH="$(date -d "${DATE:0:3} 1 2000" '+%m')"; DATE=${DATE:3}; x=${x:1};;
|
||||
|
||||
d*) DAY=${DATE:0:2}; DATE=${DATE:2}; x=${x:1};;
|
||||
0d*) DAY=${DATE:0:2}; DATE=${DATE:2}; x=${x:2};;
|
||||
_d*) DAY=$(echo "${DATE:0:2}" | tr ' ' '0'); DATE=${DATE:2}; x=${x:2};;
|
||||
|
||||
j*) DAY_OF_YEAR=${DATE:0:3}; DATE=${DATE:3}; x=${x:1};;
|
||||
0j*) DAY_OF_YEAR=${DATE:0:3}; DATE=${DATE:3}; x=${x:2};;
|
||||
_j*) DAY_OF_YEAR=$(echo "${DATE:0:3}" | tr ' ' '0'); DATE=${DATE:3}; x=${x:2};;
|
||||
|
||||
D*) MONTH=${DATE:0:2}; DAY=${DATE:3:2}; YEAR_IN_CENTURY=${DATE:6:2}; DATE=${DATE:8}; x=${x:1};;
|
||||
F*) CENTURY=${DATE:0:2}; YEAR_IN_CENTURY=${DATE:2:2}; MONTH=${DATE:5:2}; DAY=${DATE:8:2}; DATE=${DATE:10}; x=${x:1};;
|
||||
*) >&2 echo "$FUNCNAME (${LINENO}): Unsupported date format: ${BUILD_AVOIDANCE_DATE_FORMAT}"; return 1;;
|
||||
esac
|
||||
|
||||
# consume remaining non-interpreted content
|
||||
if [ "$(echo "${DATE:0:${#x}}" | tr ' ' '#')" != "${x}" ]; then
|
||||
>&2 echo "$FUNCNAME (${LINENO}): Unexpected content '${DATE:0:${#x}}' does not match expected '${x}': '$1' being parsed vs '${BUILD_AVOIDANCE_DATE_FORMAT}'"
|
||||
return 1
|
||||
fi
|
||||
DATE=${DATE:${#x}}
|
||||
done
|
||||
|
||||
if [ "${DAY_OF_YEAR}" != "" ]; then
|
||||
local YEAR_SEC
|
||||
local DOY_SEC
|
||||
YEAR_SEC="$(date -d "${CENTURY}${YEAR_IN_CENTURY}-01-01" '+%s')"
|
||||
DOY_SEC=$((YEAR_SEC+(DAY_OF_YEAR-1)*24*60*60))
|
||||
MONTH="$(date "@$DOY_SEC" "+%m")"
|
||||
DAY="$(date "@$DOY_SEC" "+%d")"
|
||||
fi
|
||||
|
||||
echo "${CENTURY}${YEAR_IN_CENTURY}-${MONTH}-${DAY}"
|
||||
return 0
|
||||
}
|
||||
|
||||
time_to_iso_8601 () {
|
||||
TIME="$1"
|
||||
local HOUR="00"
|
||||
local H12=""
|
||||
local AMPM=""
|
||||
local MINUTE="00"
|
||||
local SECOND="00"
|
||||
|
||||
CENTURY="$(date '+%C')"
|
||||
|
||||
for x in $(echo "${BUILD_AVOIDANCE_TIME_FORMAT}" | tr ' ' '#' | sed 's/%%/#/g' | tr '%' ' ' ); do
|
||||
# Consume format case options
|
||||
case ${x:0:1} in
|
||||
^) x=${x:1};;
|
||||
\#) x=${x:1};;
|
||||
*) ;;
|
||||
esac
|
||||
|
||||
# Process format
|
||||
case $x in
|
||||
H*) HOUR=${TIME:0:2}; TIME=${TIME:2}; x=${x:1};;
|
||||
0H*) HOUR=${TIME:0:2}; TIME=${TIME:2}; x=${x:2};;
|
||||
_H*) HOUR="$(echo "${TIME:0:2}" | tr ' ' '0')"; TIME=${TIME:2}; x=${x:2};;
|
||||
k*) HOUR="$(echo "${TIME:0:2}" | tr ' ' '0')"; TIME=${TIME:2}; x=${x:1};;
|
||||
0k*) HOUR=${TIME:0:2}; TIME=${TIME:2}; x=${x:2};;
|
||||
_k*) HOUR="$(echo "${TIME:0:2}" | tr ' ' '0')"; TIME=${TIME:2}; x=${x:2};;
|
||||
|
||||
I*) H12=${TIME:0:2}; TIME=${TIME:2}; x=${x:1};;
|
||||
0I*) H12=${TIME:0:2}; TIME=${TIME:2}; x=${x:2};;
|
||||
_I*) H12="$(echo "${TIME:0:2}" | tr ' ' '0')"; TIME=${TIME:2}; x=${x:2};;
|
||||
l*) H12="$(echo "${TIME:0:2}" | tr ' ' '0')"; TIME=${TIME:2}; x=${x:1};;
|
||||
0l*) H12=${TIME:0:2}; TIME=${TIME:2}; x=${x:2};;
|
||||
_l*) H12="$(echo "${TIME:0:2}" | tr ' ' '0')"; TIME=${TIME:2}; x=${x:2};;
|
||||
p*) AMPM=${TIME:0:2}; TIME=${TIME:2}; x=${x:1};;
|
||||
|
||||
M*) MINUTE=${TIME:0:2}; TIME=${TIME:2}; x=${x:1};;
|
||||
0M*) MINUTE=${TIME:0:2}; TIME=${TIME:2}; x=${x:2};;
|
||||
_M*) MINUTE="$(echo "${TIME:0:2}" | tr ' ' '0')"; TIME=${TIME:2}; x=${x:2};;
|
||||
|
||||
S*) SECOND=${TIME:0:2}; TIME=${TIME:2}; x=${x:1};;
|
||||
0S*) SECOND=${TIME:0:2}; TIME=${TIME:2}; x=${x:2};;
|
||||
_S*) SECOND="$(echo "${TIME:0:2}" | tr ' ' '0')"; TIME=${TIME:2}; x=${x:2};;
|
||||
|
||||
R*) HOUR=${TIME:0:2}; MINUTE=${TIME:3:2}; TIME=${TIME:5}; x=${x:1};;
|
||||
r*) H12=${TIME:0:2}; MINUTE=${TIME:3:2}; SECOND=${TIME:6:2}; AMPM=${TIME:9:2}; TIME=${TIME:11}; x=${x:1};;
|
||||
T*) HOUR=${TIME:0:2}; MINUTE=${TIME:3:2}; SECOND=${TIME:6:2}; TIME=${TIME:8}; x=${x:1};;
|
||||
|
||||
*) >&2 echo "$FUNCNAME (${LINENO}): Unsupported time format: ${BUILD_AVOIDANCE_TIME_FORMAT}"; return 1;;
|
||||
esac
|
||||
|
||||
# consume remaining non-interpreted content
|
||||
if [ "$(echo "${TIME:0:${#x}}" | tr ' ' '#')" != "${x}" ]; then
|
||||
>&2 echo "$FUNCNAME (${LINENO}): Unexpected content '${TIME:0:${#x}}' does not match expected '${x}': '$1' being parsed vs '${BUILD_AVOIDANCE_TIME_FORMAT}'"
|
||||
return 1
|
||||
fi
|
||||
TIME=${TIME:${#x}}
|
||||
done
|
||||
|
||||
if [ "$H12" != "" ] && [ "$AMPM" != "" ]; then
|
||||
HOUR="$(date "$H12:01:01 $AMPM" '+%H')"
|
||||
else
|
||||
if [ "$H12" != "" ] && [ "$AMPM" != "" ]; then
|
||||
>&2 echo "$FUNCNAME (${LINENO}): Unsupported time format: ${BUILD_AVOIDANCE_TIME_FORMAT}"
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "${HOUR}:${MINUTE}:${SECOND}"
|
||||
return 0
|
||||
}
|
||||
|
||||
date_time_to_iso_8601 () {
|
||||
local DATE_TIME="$1"
|
||||
local DATE
|
||||
local TIME
|
||||
local DECODED_DATE
|
||||
local DECODED_TIME
|
||||
DATE=$(echo "${DATE_TIME}" | cut -d ${BUILD_AVOIDANCE_DATE_TIME_DELIM} -f 1)
|
||||
TIME=$(echo "${DATE_TIME}" | cut -d ${BUILD_AVOIDANCE_DATE_TIME_DELIM} -f 2 | sed "s#${BUILD_AVOIDANCE_DATE_TIME_POSTFIX}\$##")
|
||||
DECODED_DATE=$(date_to_iso_8601 "${DATE}")
|
||||
DECODED_TIME=$(time_to_iso_8601 "${TIME}")
|
||||
echo "${DECODED_DATE}T${DECODED_TIME}$(date $UTC '+%:z')"
|
||||
}
|
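# Illustrative usage sketch (not part of the original script); the guard
# variable BUILD_AVOIDANCE_SELFTEST is hypothetical.
if [ "${BUILD_AVOIDANCE_SELFTEST:-0}" -eq 1 ]; then
    # With the default compact format, a reference build directory name decodes as:
    date_time_to_iso_8601 "20180912T143913Z"   # -> 2018-09-12T14:39:13+00:00 (with UTC=--utc)
fi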
||||
|
||||
#
|
||||
# test_build_avoidance_context <path-to-context-file>
|
||||
#
|
||||
# Is the provided context file compatible with the current
|
||||
# state of all of our gits? A compatible context is one
|
||||
# where every commit in the context file is visible in our
|
||||
# current git history.
|
||||
#
|
||||
# Returns: Timestamp of context tested.
|
||||
# Exit code: 0 = Compatible
|
||||
# 1 = This context is older than the last applied
|
||||
# build avoidance context. If you are searching
|
||||
# newest to oldest, you might as well stop.
|
||||
# 2 = Not compatible
|
||||
#
|
||||
test_build_avoidance_context () {
|
||||
local context="$1"
|
||||
local BA_LAST_SYNC_CONTEXT="$2"
|
||||
local BA_CONTEXT=""
|
||||
|
||||
BA_CONTEXT=$(basename $context | cut -d '.' -f 1)
|
||||
>&2 echo "test: $BA_CONTEXT"
|
||||
|
||||
if [ "$BA_CONTEXT" == "$BA_LAST_SYNC_CONTEXT" ]; then
|
||||
# Stop the search. We've reached the last sync point
|
||||
BA_CONTEXT=""
|
||||
echo "$BA_CONTEXT"
|
||||
return 1
|
||||
fi
|
||||
|
||||
git_test_context "$context"
|
||||
result=$?
|
||||
if [ $result -eq 0 ]; then
|
||||
# found a new context !!!
|
||||
echo "$BA_CONTEXT"
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Continue the search
|
||||
BA_CONTEXT=""
|
||||
echo "$BA_CONTEXT"
|
||||
return 2
|
||||
}
|
||||
|
||||
|
||||
#
|
||||
# get_build_avoidance_context
|
||||
#
|
||||
# Return URL of the most recent jenkins build that is compatible with
|
||||
# the current software context under $MY_REPO.
|
||||
#
|
||||
get_build_avoidance_context () {
|
||||
(
|
||||
local BUILD_TYPE=$1
|
||||
local context
|
||||
local BA_CONTEXT=""
|
||||
local BA_LAST_SYNC_CONTEXT=""
|
||||
|
||||
export BUILD_AVOIDANCE_LAST_SYNC_FILE="$(build_avoidance_last_sync_file $BUILD_TYPE)"
|
||||
mkdir -p "$(dirname $BUILD_AVOIDANCE_LAST_SYNC_FILE)"
|
||||
|
||||
# Load last synced context
|
||||
if [ -f $BUILD_AVOIDANCE_LAST_SYNC_FILE ]; then
|
||||
BA_LAST_SYNC_CONTEXT=$(head -n 1 $BUILD_AVOIDANCE_LAST_SYNC_FILE)
|
||||
fi
|
||||
|
||||
mkdir -p $BUILD_AVOIDANCE_DATA_DIR
|
||||
if [ $? -ne 0 ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): mkdir -p $BUILD_AVOIDANCE_DATA_DIR"
|
||||
return 1
|
||||
fi
|
||||
|
||||
local REMOTE_CTX_DIR="context"
|
||||
local LOCAL_CTX_DIR="$BUILD_AVOIDANCE_DATA_DIR/context"
|
||||
|
||||
# First copy the directory containing all the context files for
|
||||
# the reference builds.
|
||||
>&2 echo "Download latest reference build contexts"
|
||||
|
||||
# Must set this prior to build_avoidance_copy_dir.
|
||||
# The setting is not exported outside of the subshell.
|
||||
if [ -z "$BUILD_AVOIDANCE_HOST" ]; then
|
||||
BUILD_AVOIDANCE_URL="$BUILD_AVOIDANCE_DIR"
|
||||
else
|
||||
BUILD_AVOIDANCE_URL="$BUILD_AVOIDANCE_HOST:$BUILD_AVOIDANCE_DIR"
|
||||
fi
|
||||
|
||||
|
||||
build_avoidance_copy_dir "$REMOTE_CTX_DIR" "$LOCAL_CTX_DIR"
|
||||
if [ $? -ne 0 ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): build_avoidance_copy_dir '$REMOTE_CTX_DIR' '$LOCAL_CTX_DIR'"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Search for a new context to sync
|
||||
cd $MY_REPO
|
||||
|
||||
if [ "$BUILD_AVOIDANCE_DAY" == "" ]; then
|
||||
# Normal case:
|
||||
# Search all contexts, newest to oldest, for a good context.
|
||||
for context in $(ls -1rd $LOCAL_CTX_DIR/*context); do
|
||||
>&2 echo "context=$context"
|
||||
BA_CONTEXT=$(test_build_avoidance_context $context $BA_LAST_SYNC_CONTEXT)
|
||||
if [ $? -le 1 ]; then
|
||||
# Stop search. Might or might not have found a good context.
|
||||
break;
|
||||
fi
|
||||
done
|
||||
else
|
||||
# Special case when a target day is specified. Why would we do this?
|
||||
# Reason is we might want the reference build to itself use build
|
||||
# avoidance referencing prior builds of itself, except for one build
|
||||
# a week when we use a full build rather than a build avoidance build.
|
||||
# e.g. Sunday - full build
|
||||
# Mon-Sat - avoidance builds that reference the Sunday build.
|
||||
#
|
||||
# Starting from last <TARG_DAY> (e.g. "Sunday"), search newest to
|
||||
# oldest for a good context. If none found, increment the target
|
||||
# day (e.g. Monday) and search again. Keep incrementing until a
|
||||
# good build is found, or target day + offset days would be a date
|
||||
# in the future.
|
||||
#
|
||||
local TARG_DAY=$BUILD_AVOIDANCE_DAY
|
||||
local TODAY_DATE
|
||||
local TODAY_DAY
|
||||
local TARG_DATE=""
|
||||
local TARG_TS
|
||||
local TODAY_TS
|
||||
|
||||
TODAY_DATE=$(date $UTC +%Y-%m-%d)
|
||||
TODAY_DAY=$(date $UTC "+%A")
|
||||
|
||||
for OFFSET_DAYS in 0 1 2 3 4 5 6; do
|
||||
if [ "$TARG_DAY" != "" ]; then
|
||||
# Convert TARG_DAY+OFFSET_DAYS to TARG_DATE
|
||||
|
||||
if [ "$TODAY_DAY" == "$TARG_DAY" ]; then
|
||||
TARG_DATE=$(date $UTC -d"$TARG_DAY+$OFFSET_DAYS days" +%Y-%m-%d)
|
||||
else
|
||||
TARG_DATE=$(date $UTC -d"last-$TARG_DAY+$OFFSET_DAYS days" +%Y-%m-%d)
|
||||
fi
|
||||
>&2 echo "TARG_DATE=$TARG_DATE"
|
||||
|
||||
TARG_TS=$(date $UTC -d "$TARG_DATE" +%s)
|
||||
TODAY_TS=$(date $UTC -d "$TODAY_DATE" +%s)
|
||||
if [ $TARG_TS -gt $TODAY_TS ]; then
|
||||
# Skip if offset has pushed us into future dates
|
||||
continue;
|
||||
fi
|
||||
|
||||
if [ "$TARG_DATE" == "$TODAY_DATE" ]; then
|
||||
TARG_DATE=""
|
||||
fi
|
||||
fi
|
||||
|
||||
# Search build, newest to oldest, satisfying TARG_DATE
|
||||
for f in $(ls -1rd $LOCAL_CTX_DIR/*context); do
|
||||
DATE=$(date_to_iso_8601 $(basename "$f"))
|
||||
if [ $? -ne 0 ]; then
|
||||
>&2 echo "Failed to extract date from filename '$(basename "$f")', ignoring file"
|
||||
continue
|
||||
fi
|
||||
|
||||
>&2 echo " DATE=$DATE, TARG_DATE=$TARG_DATE"
|
||||
|
||||
if [ "$DATE" == "$TARG_DATE" ] || [ "$TARG_DATE" == "" ] ; then
|
||||
context=$f;
|
||||
else
|
||||
continue
|
||||
fi
|
||||
|
||||
>&2 echo "context=$context"
|
||||
|
||||
BA_CONTEXT=$(test_build_avoidance_context $context $BA_LAST_SYNC_CONTEXT)
|
||||
|
||||
if [ $? -le 1 ]; then
|
||||
# Stop search. Might or might not have found a good context.
|
||||
break;
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "$BA_CONTEXT" != "" ]; then
|
||||
# Found a good context.
|
||||
break
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
if [ "$BA_CONTEXT" == "" ]; then
|
||||
# No new context found
|
||||
return 1
|
||||
fi
|
||||
|
||||
# test that the reference build context hasn't been deleted
|
||||
local BA_CONTEXT_DIR="$BUILD_AVOIDANCE_DIR/$BA_CONTEXT"
|
||||
|
||||
if [ -z "$BUILD_AVOIDANCE_HOST" ]; then
|
||||
>&2 echo "[ -d $BA_CONTEXT_DIR ]"
|
||||
if ! [ -d $BA_CONTEXT_DIR ] ; then
|
||||
return 1
|
||||
fi
|
||||
else
|
||||
>&2 echo "ssh $BUILD_AVOIDANCE_HOST '[ -d $BA_CONTEXT_DIR ]'"
|
||||
if ! ssh $BUILD_AVOIDANCE_HOST '[ -d $BA_CONTEXT_DIR ]' ; then
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Save the latest context
|
||||
>&2 echo "BA_CONTEXT=$BA_CONTEXT"
|
||||
>&2 echo "BUILD_AVOIDANCE_LAST_SYNC_FILE=$BUILD_AVOIDANCE_LAST_SYNC_FILE"
|
||||
echo $BA_CONTEXT > $BUILD_AVOIDANCE_LAST_SYNC_FILE
|
||||
|
||||
# The location of the load with the most compatible new context
|
||||
if [ -z "$BUILD_AVOIDANCE_HOST" ]; then
|
||||
URL=$BA_CONTEXT_DIR
|
||||
else
|
||||
URL=$BUILD_AVOIDANCE_HOST:$BA_CONTEXT_DIR
|
||||
fi
|
||||
|
||||
# return URL to caller.
|
||||
echo $URL
|
||||
return 0
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
#
|
||||
# build_avoidance_pre_clean <build-type>
|
||||
#
|
||||
# A place for any cleanup actions that must precede a build avoidance build.
|
||||
#
|
||||
build_avoidance_pre_clean () {
|
||||
local BUILD_TYPE="$1"
|
||||
|
||||
if [ "$BUILD_TYPE" == "" ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): BUILD_TYPE required"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# clean prior builds
|
||||
if [ -d $MY_WORKSPACE/$BUILD_TYPE ]; then
|
||||
build-pkgs --clean --$BUILD_TYPE --no-build-avoidance
|
||||
if [ $? -ne 0 ]; then
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
for f in $BUILD_AVOIDANCE_SRPM_FILES $BUILD_AVOIDANCE_RPM_FILES; do
|
||||
if [ -f $MY_WORKSPACE/$BUILD_TYPE/$f ]; then
|
||||
\rm -f $MY_WORKSPACE/$BUILD_TYPE/$f
|
||||
if [ $? -ne 0 ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): rm -f $MY_WORKSPACE/$BUILD_TYPE/$f"
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
for d in $BUILD_AVOIDANCE_SRPM_DIRECTORIES $BUILD_AVOIDANCE_RPM_DIRECTORIES; do
|
||||
|
||||
if [ -d $MY_WORKSPACE/$BUILD_TYPE/$d ]; then
|
||||
\rm -rf $MY_WORKSPACE/$BUILD_TYPE/$d
|
||||
if [ $? -ne 0 ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): rm -rf $MY_WORKSPACE/$BUILD_TYPE/$d"
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
|
||||
#
|
||||
# build_avoidance_copy_dir_rsync <remote-dir-path-rel> <local-dir-path> ['verbose']
|
||||
#
|
||||
# Copy a directory from $BUILD_AVOIDANCE_URL/<remote-dir-path-rel>
|
||||
# to <local-dir-path> using rsync.
|
||||
#
|
||||
build_avoidance_copy_dir_rsync () {
|
||||
local FROM="$1"
|
||||
local TO="$2"
|
||||
local VERBOSE="$3"
|
||||
local FLAGS="-a -u"
|
||||
|
||||
if [ "$BUILD_AVOIDANCE_URL" == "" ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): BUILD_AVOIDANCE_URL no set"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [ "$VERBOSE" != "" ]; then
|
||||
FLAGS="$FLAGS -v"
|
||||
echo "rsync $FLAGS '$BUILD_AVOIDANCE_URL/$FROM/' '$TO/'"
|
||||
fi
|
||||
|
||||
rsync $FLAGS "$BUILD_AVOIDANCE_URL/$FROM/" "$TO/"
|
||||
if [ $? -ne 0 ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): command failed: rsync $FLAGS '$BUILD_AVOIDANCE_URL/$FROM/' '$TO/'"
|
||||
return 1
|
||||
fi
|
||||
|
||||
chmod -R 'ug+w' "$TO/"
|
||||
if [ $? -ne 0 ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): command failed: chmod -R 'ug+w' '$TO/'"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
#
|
||||
# build_avoidance_copy_file_rsync <remote-file-path-rel> <local-file-path> ['verbose']
|
||||
#
|
||||
# Copy a file from $BUILD_AVOIDANCE_URL/<remote-file-path-rel>
|
||||
# to <local-file-path> using rsync.
|
||||
#
|
||||
build_avoidance_copy_file_rsync () {
|
||||
local FROM="$1"
|
||||
local TO="$2"
|
||||
local VERBOSE="$3"
|
||||
local FLAGS="-a -u"
|
||||
|
||||
if [ "$BUILD_AVOIDANCE_URL" == "" ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): BUILD_AVOIDANCE_URL no set"
|
||||
return 1
|
||||
fi
|
||||
if [ "$VERBOSE" != "" ]; then
|
||||
FLAGS="$FLAGS -v"
|
||||
echo "rsync $FLAGS '$BUILD_AVOIDANCE_URL/$FROM' '$TO'"
|
||||
fi
|
||||
|
||||
rsync $FLAGS "$BUILD_AVOIDANCE_URL/$FROM" "$TO"
|
||||
if [ $? -ne 0 ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): command failed: rsync $FLAGS '$BUILD_AVOIDANCE_URL/$FROM' '$TO'"
|
||||
return 1
|
||||
fi
|
||||
|
||||
chmod -R 'ug+w' "$TO"
|
||||
if [ $? -ne 0 ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): command failed: chmod -R 'ug+w' '$TO'"
|
||||
return 1
|
||||
fi
|
||||
return $?
|
||||
}
|
||||
|
||||
#
|
||||
# build_avoidance_copy_dir <remote-dir-path-rel> <local-dir-path> ['verbose']
|
||||
#
|
||||
# Copy a directory from $BUILD_AVOIDANCE_URL/<remote-dir-path-rel>
|
||||
# to <local-dir-path>. The copy method will be determined by
|
||||
# BUILD_AVOIDANCE_FILE_TRANSFER. Only 'rsync' is supported at present.
|
||||
#
|
||||
# <local-dir-path> should be a directory,
|
||||
# mkdir -p will be called on <local-dir-path>.
|
||||
#
|
||||
build_avoidance_copy_dir () {
|
||||
local FROM="$1"
|
||||
local TO="$2"
|
||||
local VERBOSE="$3"
|
||||
|
||||
if [ "$VERBOSE" != "" ]; then
|
||||
echo "mkdir -p '$TO'"
|
||||
fi
|
||||
mkdir -p "$TO"
|
||||
if [ $? -ne 0 ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): mkdir -p $TO"
|
||||
return 1
|
||||
fi
|
||||
|
||||
case ${BUILD_AVOIDANCE_FILE_TRANSFER} in
|
||||
rsync)
|
||||
build_avoidance_copy_dir_rsync "$FROM" "$TO" "$VERBOSE"
|
||||
return $?
|
||||
;;
|
||||
*)
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): Unknown BUILD_AVOIDANCE_FILE_TRANSFER '${BUILD_AVOIDANCE_FILE_TRANSFER}'"
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
return 1
|
||||
}
|
||||
|
||||
#
|
||||
# build_avoidance_copy_file <remote-file-path-rel> <local-file-path> ['verbose']
|
||||
#
|
||||
# Copy a file from $BUILD_AVOIDANCE_URL/<remote-file-path-rel>
|
||||
# to <local-file-path>. The copy method will be determined by
|
||||
# BUILD_AVOIDANCE_FILE_TRANSFER. Only 'rsync' is supported at present.
|
||||
#
|
||||
# <local-file-path> should be a file, not a directory,
|
||||
# mkdir -p will be called on $(dirname <local-file-path>)
|
||||
#
|
||||
build_avoidance_copy_file () {
|
||||
local FROM="$1"
|
||||
local TO="$2"
|
||||
local VERBOSE="$3"
|
||||
|
||||
if [ "$VERBOSE" != "" ]; then
|
||||
echo "mkdir -p $(dirname '$TO')"
|
||||
fi
|
||||
mkdir -p "$(dirname "$TO")"
|
||||
if [ $? -ne 0 ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): mkdir -p $(dirname "$TO")"
|
||||
return 1
|
||||
fi
|
||||
|
||||
case ${BUILD_AVOIDANCE_FILE_TRANSFER} in
|
||||
rsync)
|
||||
build_avoidance_copy_file_rsync "$FROM" "$TO" "$VERBOSE"
|
||||
return $?
|
||||
;;
|
||||
*)
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): Unknown BUILD_AVOIDANCE_FILE_TRANSFER '${BUILD_AVOIDANCE_FILE_TRANSFER}'"
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
return 1
|
||||
}
|
||||
|
||||
#
|
||||
# build_avoidance_copy <build-type> ['verbose']
|
||||
#
|
||||
# Copy the needed build artifacts for <build-type> from $BUILD_AVOIDANCE_URL.
|
||||
#
|
||||
build_avoidance_copy () {
|
||||
local BUILD_TYPE="$1"
|
||||
local VERBOSE="$2"
|
||||
|
||||
if [ "$BUILD_TYPE" == "" ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): BUILD_TYPE required"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Iterate through list of directories to copy
|
||||
for d in $BUILD_AVOIDANCE_SRPM_DIRECTORIES $BUILD_AVOIDANCE_RPM_DIRECTORIES; do
|
||||
build_avoidance_copy_dir "$BUILD_TYPE/$d" "$MY_WORKSPACE/$BUILD_TYPE/$d" "$VERBOSE"
|
||||
if [ $? -ne 0 ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): build_avoidance_copy_dir '$BUILD_TYPE/$d' '$MY_WORKSPACE/$BUILD_TYPE/$d'"
|
||||
return 1
|
||||
fi
|
||||
done
|
||||
|
||||
# Iterate through list of files to copy
|
||||
for f in $BUILD_AVOIDANCE_SRPM_FILES $BUILD_AVOIDANCE_RPM_FILES; do
|
||||
build_avoidance_copy_file "$BUILD_TYPE/$f" "$MY_WORKSPACE/$BUILD_TYPE/$f" "$VERBOSE"
|
||||
if [ $? -ne 0 ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): build_avoidance_copy_file '$BUILD_TYPE/$f' '$MY_WORKSPACE/$BUILD_TYPE/$f'"
|
||||
return 1
|
||||
fi
|
||||
done
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
#
|
||||
# build_avoidance_fixups <build-type>
|
||||
#
|
||||
# Fix paths in the build artifacts that we copied that contain
|
||||
# the user name.
|
||||
#
|
||||
# Also, our credentials may differ from the reference build,
|
||||
# so substitute unsigned packages in place of signed packages.
|
||||
#
|
||||
build_avoidance_fixups () {
|
||||
local BUILD_TYPE="$1"
|
||||
|
||||
local BA_SOURCE_BUILD_ENVIRONMENT
|
||||
BA_SOURCE_BUILD_ENVIRONMENT="${BUILD_AVOIDANCE_USR}-$(basename $(dirname $BUILD_AVOIDANCE_URL))-$(basename $BUILD_AVOIDANCE_URL)-${SRC_BUILD_ENVIRONMENT}"
|
||||
local RESULT_DIR=""
|
||||
local FROM_DIR=""
|
||||
local TO_DIR=""
|
||||
local rpm_path_post_signing
|
||||
local rpm_path_pre_signing
|
||||
local rpm_name
|
||||
local md5sum_post_signing
|
||||
local md5sum_pre_signing
|
||||
|
||||
if [ "$BUILD_TYPE" == "" ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): BUILD_TYPE required"
|
||||
return 1
|
||||
fi
|
||||
|
||||
RESULT_DIR="$MY_WORKSPACE/$BUILD_TYPE/results"
|
||||
FROM_DIR="${RESULT_DIR}/${BA_SOURCE_BUILD_ENVIRONMENT}-${BUILD_TYPE}"
|
||||
TO_DIR="${RESULT_DIR}/${MY_BUILD_ENVIRONMENT}-${BUILD_TYPE}"
|
||||
echo "$FUNCNAME: FROM_DIR=$FROM_DIR"
|
||||
echo "$FUNCNAME: TO_DIR=$TO_DIR"
|
||||
echo "$FUNCNAME: MY_BUILD_ENVIRONMENT=$MY_BUILD_ENVIRONMENT"
|
||||
|
||||
# Fix paths that use MY_BUILD_ENVIRONMENT
|
||||
if [ ! -d "$FROM_DIR" ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): Expected directory '$FROM_DIR' is missing."
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo "$FUNCNAME: mv '$FROM_DIR' '$TO_DIR'"
|
||||
\mv "$FROM_DIR" "$TO_DIR"
|
||||
if [ $? -ne 0 ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): mv '$FROM_DIR' '$TO_DIR'"
|
||||
return 1
|
||||
fi
|
||||
|
||||
local MY_WS_BT="$MY_WORKSPACE/$BUILD_TYPE"
|
||||
|
||||
# Replace signed rpms with non-signed copies .... we aren't a formal build
|
||||
for rpm_path_post_signing in $(find $MY_WS_BT/rpmbuild/RPMS -type f -name '*.rpm' | grep -v src.rpm); do
|
||||
|
||||
rpm_name=$(basename $rpm_path_post_signing)
|
||||
rpm_path_pre_signing=$(find $MY_WS_BT/results -name $rpm_name | head -n1)
|
||||
if [ "$rpm_path_pre_signing" != "" ]; then
|
||||
md5sum_post_signing=$(md5sum ${rpm_path_post_signing} | cut -d ' ' -f 1)
|
||||
md5sum_pre_signing=$(md5sum ${rpm_path_pre_signing} | cut -d ' ' -f 1)
|
||||
if [ "${md5sum_post_signing}" != "${md5sum_pre_signing}" ]; then
|
||||
echo "$FUNCNAME: fixing $rpm_name"
|
||||
\rm -f ${rpm_path_post_signing}
|
||||
if [ $? -ne 0 ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): rm -f ${rpm_path_post_signing}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
\cp ${rpm_path_pre_signing} ${rpm_path_post_signing}
|
||||
if [ $? -ne 0 ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): cp ${rpm_path_pre_signing} ${rpm_path_post_signing}"
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
fi;
|
||||
done
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
|
||||
#
|
||||
# build_avoidance <build-type>
|
||||
#
|
||||
# Look for a reference build that is applicable to our current git context.
|
||||
# and copy it to our local workspace, if we haven't already done so.
|
||||
#
|
||||
build_avoidance () {
|
||||
local BUILD_TYPE="$1"
|
||||
|
||||
echo "==== Build Avoidance Start ===="
|
||||
|
||||
export BUILD_AVOIDANCE_LAST_SYNC_FILE="$(build_avoidance_last_sync_file $BUILD_TYPE)"
|
||||
mkdir -p "$(dirname $BUILD_AVOIDANCE_LAST_SYNC_FILE)"
|
||||
|
||||
if [ "$BUILD_TYPE" == "" ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): BUILD_TYPE required"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [ ! -d $MY_WORKSPACE/$BUILD_TYPE ]; then
|
||||
mkdir -p $MY_WORKSPACE/$BUILD_TYPE
|
||||
if [ $? -ne 0 ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): Failed to create directory $MY_WORKSPACE/$BUILD_TYPE"
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ! -L $MY_WORKSPACE/$BUILD_TYPE/repo ]; then
|
||||
ln -s $MY_REPO $MY_WORKSPACE/$BUILD_TYPE/repo
|
||||
if [ $? -ne 0 ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): Failed to create symlink $MY_WORKSPACE/$BUILD_TYPE/repo -> $MY_REPO"
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
build_avoidance_pre_clean $BUILD_TYPE
|
||||
if [ $? -ne 0 ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): build_avoidance_pre_clean $BUILD_TYPE"
|
||||
return 1
|
||||
fi
|
||||
|
||||
build_avoidance_copy $BUILD_TYPE 'verbose'
|
||||
if [ $? -ne 0 ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): build_avoidance_copy $BUILD_TYPE"
|
||||
return 1
|
||||
fi
|
||||
|
||||
build_avoidance_fixups $BUILD_TYPE
|
||||
if [ $? -ne 0 ]; then
|
||||
>&2 echo "Error: $FUNCNAME (${LINENO}): build_avoidance_fixups $BUILD_TYPE"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo "==== Build Avoidance Complete ===="
|
||||
return 0
|
||||
}
|
||||
|
||||
#
|
||||
# build_avoidance_save_reference_context
|
||||
#
|
||||
# For use by a reference build. Copy the 'CONTEXT' file
|
||||
# from the build into a central directory where we save
|
||||
# the context of old builds.
|
||||
#
|
||||
# Individual reference builds use:
|
||||
# MY_WORKSPACE=<common-dir>/<timestamp>
|
||||
# and context files are collected in dir:
|
||||
# DEST_CTX_DIR=<common-dir>/context
|
||||
# using name:
|
||||
# DEST_CTX=<timestamp>.context
|
||||
|
||||
build_avoidance_save_reference_context () {
|
||||
local DIR
|
||||
DIR=$(dirname "${MY_WORKSPACE}")
|
||||
|
||||
# Note: SUB_DIR should be a timestamp
|
||||
local SUB_DIR
|
||||
SUB_DIR=$(basename "${MY_WORKSPACE}")
|
||||
|
||||
local SRC_CTX="${MY_WORKSPACE}/CONTEXT"
|
||||
local DEST_CTX_DIR="${DIR}/context"
|
||||
local DEST_CTX="${DEST_CTX_DIR}/${SUB_DIR}.context"
|
||||
|
||||
if [ ! -f "${SRC_CTX}" ]; then
|
||||
echo "Context file not found at '${SRC_CTX}'"
|
||||
return 1
|
||||
fi
|
||||
|
||||
mkdir -p "${DEST_CTX_DIR}"
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: $FUNCNAME (${LINENO}): Failed to create directory '${DEST_CTX_DIR}'"
|
||||
return 1
|
||||
fi
|
||||
|
||||
cp "${SRC_CTX}" "${DEST_CTX}"
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: $FUNCNAME (${LINENO}): Failed to copy ${SRC_CTX} -> ${DEST_CTX}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
return 0
|
||||
}
|
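Not part of the diff: a rough sketch of how the two entry points above are driven. Per the header comment the real caller is build-pkgs; the source file name and variable handling here are assumptions:

    # Developer build: locate the newest compatible reference build, then pull its artifacts.
    source "$MY_REPO/build-tools/build-avoidance-utils.sh"
    if BUILD_AVOIDANCE_URL="$(get_build_avoidance_context std)"; then
        build_avoidance std
    fi

    # Reference (e.g. Jenkins) build: publish this build's CONTEXT for later builds to reuse.
    build_avoidance_save_reference_context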
@ -3,21 +3,22 @@
|
||||
PRIVATE_REGISTRY_USERID=myuser
|
||||
PRIVATE_REGISTRY=xxx.xxx.xxx.xxx:9001
|
||||
VERSION=2018.11.13
|
||||
OS=centos
|
||||
OS=debian
|
||||
OS_VERSION=7.5.1804
|
||||
BUILD_STREAM=stable
|
||||
HOST_PORT=8088
|
||||
PUBLISH_URL=https://mirror.starlingx.windriver.com/mirror/starlingx/master/${OS}/monolithic/latest_build/
|
||||
|
||||
## Step 1: Build stx-centos
|
||||
## Step 1: Build stx-debian
|
||||
time $MY_REPO/build-tools/build-docker-images/build-stx-base.sh \
|
||||
--os ${OS} \
|
||||
--os-version ${OS_VERSION} \
|
||||
--version ${VERSION} \
|
||||
--user ${PRIVATE_REGISTRY_USERID} \
|
||||
--registry ${PRIVATE_REGISTRY} \
|
||||
--repo 'deb [trusted=yes check-valid-until=0] $PUBLISH_URL/inputs/packages ./'
|
||||
--repo 'deb [trusted=yes check-valid-until=0] $PUBLISH_URL/outputs/std/packages ./'
|
||||
--push \
|
||||
--repo stx-local-build,http://${HOSTNAME}:${HOST_PORT}/${MY_WORKSPACE}/std/rpmbuild/RPMS \
|
||||
--repo stx-mirror-distro,http://${HOSTNAME}:${HOST_PORT}/${MY_REPO}/cgcs-root/cgcs-${OS}-repo/Binary \
|
||||
--clean
|
||||
|
||||
|
||||
|
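As a hedged follow-up (not from the README): pull the freshly pushed base image back from the private registry and confirm it is Debian based before building the flock images. The tag layout is an assumption; substitute whatever tag build-stx-base.sh reported.

    TAG=${VERSION}-${BUILD_STREAM}   # assumption -- use the tag printed by the build
    docker run --rm ${PRIVATE_REGISTRY}/${PRIVATE_REGISTRY_USERID}/stx-${OS}:${TAG} cat /etc/os-release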
@ -1,2 +0,0 @@
|
||||
# one option per line, option=value
|
||||
repo=ussuri-wsgi,https://mirror.starlingx.windriver.com/mirror/centos/centos/mirror.centos.org/centos/7/sclo/x86_64/rh/
|
@ -1,2 +0,0 @@
|
||||
# one option per line, option=value
|
||||
repo=ussuri-wsgi,https://mirror.starlingx.windriver.com/mirror/centos/centos/mirror.centos.org/centos/7/sclo/x86_64/rh/
|
@ -18,7 +18,7 @@ if [ -z "${MY_WORKSPACE}" -o -z "${MY_REPO}" ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
SUPPORTED_OS_ARGS=('centos' 'debian')
|
||||
SUPPORTED_OS_ARGS=( 'debian' )
|
||||
OS= # default: autodetect
|
||||
OS_VERSION= # default: lookup "ARG RELEASE" in Dockerfile
|
||||
BUILD_STREAM=stable
|
||||
@ -52,7 +52,6 @@ Options:
|
||||
--version: Specify version for output image
|
||||
--stream: Build stream, stable or dev (default: stable)
|
||||
--repo: Software repository, can be specified multiple times
|
||||
* CentOS format: "NAME,BASEURL"
|
||||
* Debian format: "TYPE [OPTION=VALUE...] URL DISTRO COMPONENTS..."
|
||||
This will be added to /etc/apt/sources.list as is,
|
||||
see also sources.list(5) manpage.
|
||||
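For example, a Debian-format repo argument to this script could look like the following (mirror URL, suite and component are placeholders):

    build-stx-base.sh --os debian \
        --repo 'deb [trusted=yes] http://mirror.example.com/starlingx/packages bullseye main'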
@ -281,13 +280,7 @@ fi
|
||||
|
||||
if [ ${#REPO_LIST[@]} -eq 0 ]; then
|
||||
# Either --repo or --local must be specified
|
||||
if [ "${LOCAL}" = "yes" ]; then
|
||||
if [[ "$OS" == "centos" ]] ; then
|
||||
REPO_LIST+=("local-std,http://${HOST}:8088${MY_WORKSPACE}/std/rpmbuild/RPMS")
|
||||
REPO_LIST+=("stx-distro,http://${HOST}:8089${MY_REPO}/cgcs-${OS}-repo/Binary")
|
||||
fi
|
||||
# debian is handled down below
|
||||
elif [ "${BUILD_STREAM}" != "dev" -a "${BUILD_STREAM}" != "master" ]; then
|
||||
if [ "${LOCAL}" != "yes" -a "${BUILD_STREAM}" != "dev" -a "${BUILD_STREAM}" != "master" ]; then
|
||||
echo "Either --local or --repo must be specified" >&2
|
||||
exit 1
|
||||
fi
|
||||
@ -314,33 +307,7 @@ fi
|
||||
cp ${SRC_DOCKERFILE} ${BUILDDIR}/Dockerfile
|
||||
|
||||
# Generate the stx.repo file
|
||||
if [[ "$OS" == "centos" ]] ; then
|
||||
STX_REPO_FILE=${BUILDDIR}/stx.repo
|
||||
for repo in ${REPO_LIST[@]}; do
|
||||
repo_name=$(echo $repo | awk -F, '{print $1}')
|
||||
repo_baseurl=$(echo $repo | awk -F, '{print $2}')
|
||||
|
||||
if [ -z "${repo_name}" -o -z "${repo_baseurl}" ]; then
|
||||
echo "Invalid repo specified: ${repo}" >&2
|
||||
echo "Expected format: name,baseurl" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cat >>${STX_REPO_FILE} <<EOF
|
||||
[${repo_name}]
|
||||
name=${repo_name}
|
||||
baseurl=${repo_baseurl}
|
||||
enabled=1
|
||||
gpgcheck=0
|
||||
skip_if_unavailable=1
|
||||
metadata_expire=0
|
||||
|
||||
EOF
|
||||
|
||||
REPO_OPTS="${REPO_OPTS} --enablerepo=${repo_name}"
|
||||
done
|
||||
else
|
||||
|
||||
if [[ "$OS" == "debian" ]] ; then
|
||||
# These env vars must be defined in debian builder pods
|
||||
for var in DEBIAN_SNAPSHOT DEBIAN_SECURITY_SNAPSHOT DEBIAN_DISTRIBUTION REPOMGR_DEPLOY_URL REPOMGR_ORIGIN ; do
|
||||
if [[ -z "${!var}" ]] ; then
|
||||
@ -413,9 +380,7 @@ IMAGE_NAME_LATEST=${DOCKER_REGISTRY}${DOCKER_USER}/stx-${OS}:${LATEST_TAG}
|
||||
|
||||
declare -a BUILD_ARGS
|
||||
BUILD_ARGS+=(--build-arg RELEASE=${OS_VERSION})
|
||||
if [[ "$OS" == "centos" ]] ; then
|
||||
BUILD_ARGS+=(--build-arg "REPO_OPTS=${REPO_OPTS}")
|
||||
else
|
||||
if [[ "$OS" == "debian" ]] ; then
|
||||
BUILD_ARGS+=(--build-arg "DIST=${DEBIAN_DISTRIBUTION}")
|
||||
fi
|
||||
|
||||
|
@ -52,7 +52,7 @@ new file mode 100755
|
||||
index 0000000..dd43612
|
||||
--- /dev/null
|
||||
+++ b/stx-scripts/setup-package-repos.sh
|
||||
@@ -0,0 +1,126 @@
|
||||
@@ -0,0 +1,88 @@
|
||||
+#!/bin/bash
|
||||
+
|
||||
+set -ex
|
||||
@ -60,11 +60,7 @@ index 0000000..dd43612
|
||||
+#
|
||||
+# This script enables or disables package repos specified
|
||||
+# by the DIST_REPOS environment variable, which must contain
|
||||
+# a space-separated list of repos (in CentOS) or list files
|
||||
+# (Debian) to enable or disable.
|
||||
+#
|
||||
+# In CentOS repo names refer to the names in square brackets
|
||||
+# in any repo files under /etc/yum.repos.d.
|
||||
+# a list files (Debian) to enable or disable.
|
||||
+#
|
||||
+# In Debian repo names refer to individual files under
|
||||
+# /etc/apt/sources.list.d/$NAME.list.
|
||||
@ -80,8 +76,7 @@ index 0000000..dd43612
|
||||
+# repo, and any repo's passed on the command-line
|
||||
+# to "build-stx-image.sh" script.
|
||||
+#
|
||||
+# OS - same as "base updates extras" in CentOS
|
||||
+# same as "debian" in Debian
|
||||
+# OS - same as "debian" in Debian
|
||||
+#
|
||||
+#
|
||||
+# These keywords have the same meaning in all distros, while actual
|
||||
@ -93,15 +88,6 @@ index 0000000..dd43612
|
||||
+# If a repo doesn't match an existing repository, this script will
|
||||
+# fail.
|
||||
+#
|
||||
+# CentOS Example
|
||||
+# ==============
|
||||
+# DIST_REPOS="-base -updates"
|
||||
+# disable "base" and "updates" repos normally defined
|
||||
+# in /etc/yum.repos.d/CentOS-Base.repo
|
||||
+#
|
||||
+# DIST_REPOS="-STX +OS -updates"
|
||||
+# disable all local repos, enable core OS repos, except "updates"
|
||||
+#
|
||||
+# Debian Example
|
||||
+# ==============
|
||||
+# DIST_REPOS="debian"
|
||||
@ -119,11 +105,6 @@ index 0000000..dd43612
|
||||
+ [OS]="debian"
|
||||
+ [STX]="stx"
|
||||
+ )
|
||||
+ # yum repo IDs
|
||||
+ declare -A CENTOS_REPO_GROUPS=(
|
||||
+ [OS]="base updates extras"
|
||||
+ [STX]="/etc/yum.repos.d/stx.repo" # ie, all repos defined in this file
|
||||
+ )
|
||||
+
|
||||
+ distro=$(awk -F= '/^ID=/ {gsub(/\"/, "", $2); print $2}' /etc/*release)
|
||||
+ # enable or disable each repo
|
||||
@ -153,25 +134,6 @@ index 0000000..dd43612
|
||||
+ fi
|
||||
+ done
|
||||
+ ;;
|
||||
+ centos)
|
||||
+ specs="${CENTOS_REPO_GROUPS[$base]:-$base}"
|
||||
+ for spec in $specs ; do
|
||||
+ # repo id begins with a "/" - assume its a full path to a .repo file
|
||||
+ # and enable/disable all repos defined in that file
|
||||
+ if [[ "${spec#/}" != "$spec" ]] ; then
|
||||
+ repos=$(sed -r -n 's/^\s*[[]([^]]+)[]]\s*$/\1/gp' "$spec")
|
||||
+ else
|
||||
+ repos=$spec
|
||||
+ fi
|
||||
+ for repo in $repos ; do
|
||||
+ if [[ $enable -eq 1 ]] ; then
|
||||
+ yum-config-manager --enable "$repo"
|
||||
+ else
|
||||
+ yum-config-manager --disable "$repo"
|
||||
+ fi
|
||||
+ done
|
||||
+ done
|
||||
+ ;;
|
||||
+ *)
|
||||
+ echo "error: unsupported OS \"$distro\"" >&2
|
||||
+ exit 1
|
||||
|
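A Debian-side usage sketch for the patched script (the keyword-to-repo mapping comes from the comments above; the container environment itself is an assumption):

    # Inside an image that carries /etc/apt/sources.list.d/debian.list and stx.list:
    # enable the core OS repos and disable the StarlingX repo.
    DIST_REPOS="OS -STX" /stx-scripts/setup-package-repos.sh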
@ -1,16 +0,0 @@
|
||||
# Expected build arguments:
|
||||
# RELEASE: centos release
|
||||
#
|
||||
ARG RELEASE=7.5.1804
|
||||
FROM centos:${RELEASE}
|
||||
|
||||
RUN set -ex ;\
|
||||
sed -i '/\[main\]/ atimeout=120' /etc/yum.conf ;\
|
||||
yum install -y centos-release-openstack-stein ;\
|
||||
rm -rf \
|
||||
/var/log/* \
|
||||
/tmp/* \
|
||||
/var/tmp/*
|
||||
|
||||
# root CA cert expired on October 1st, 2021
|
||||
RUN yum update -y ca-certificates
|
@ -1,31 +0,0 @@
|
||||
# Expected build arguments:
|
||||
# RELEASE: centos release
|
||||
# REPO_OPTS: yum options to enable StarlingX repo
|
||||
#
|
||||
ARG RELEASE=7.5.1804
|
||||
FROM centos:${RELEASE}
|
||||
|
||||
ARG REPO_OPTS
|
||||
|
||||
# The stx.repo file must be generated by the build tool first
|
||||
COPY stx.repo /
|
||||
|
||||
RUN set -ex ;\
|
||||
sed -i '/\[main\]/ atimeout=120' /etc/yum.conf ;\
|
||||
mv /stx.repo /etc/yum.repos.d/ ;\
|
||||
yum upgrade --disablerepo=* ${REPO_OPTS} -y ;\
|
||||
yum install --disablerepo=* ${REPO_OPTS} -y \
|
||||
qemu-img \
|
||||
openssh-clients \
|
||||
python3 \
|
||||
python3-pip \
|
||||
python3-wheel \
|
||||
rh-python36-mod_wsgi \
|
||||
;\
|
||||
rm -rf \
|
||||
/var/log/* \
|
||||
/tmp/* \
|
||||
/var/tmp/*
|
||||
|
||||
# root CA cert expired on October 1st, 2021
|
||||
RUN yum update -y ca-certificates
|
@ -58,7 +58,7 @@ Options:
|
||||
--module-src: Specify path to module source to install/update (dir or git repo)
|
||||
Formats: dir[|version]
|
||||
url[|branch][|version]
|
||||
--pkg: Specify path to distro package to install/update (ie. rpm)
|
||||
--pkg: Specify path to distro package to install/update (ie. deb)
|
||||
--customize: Customization script
|
||||
--extra: Extra file (to be accessible to customization script)
|
||||
--push: Push to docker repo
|
||||
|
@ -1,412 +0,0 @@
|
||||
#!/bin/env bash
|
||||
|
||||
#
|
||||
# Copyright (c) 2018 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
#
|
||||
# Build the tis-centos-guest.img or tis-centos-guest-rt.img file
|
||||
#
|
||||
|
||||
BUILD_GUEST_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )"
|
||||
source "${BUILD_GUEST_DIR}/image-utils.sh"
|
||||
|
||||
PROGNAME=$(basename "$0")
|
||||
|
||||
# NOTE: TMP_DIR must end in '/'
|
||||
# NOTE: /tmp/ is now tmpfs like. Can't be trusted across multiple mock commands
|
||||
# TMP_DIR=/tmp/
|
||||
TMP_DIR=/
|
||||
|
||||
# Use RPMs from the std build only, for now
|
||||
export BUILD_TYPE=std
|
||||
export MY_BUILD_DIR_TOP=$MY_BUILD_DIR
|
||||
|
||||
function init_vars {
|
||||
# Output path (current dir unless MY_WORKSPACE defined)
|
||||
OUTPUT_DIR="$PWD/export"
|
||||
if [ ! -z "$MY_WORKSPACE" ] && [ -d "$MY_WORKSPACE" ] ; then
|
||||
OUTPUT_DIR="$MY_WORKSPACE/export"
|
||||
CGCS_REPO_DIR="$MY_WORKSPACE/rpmbuild/RPMS"
|
||||
fi
|
||||
|
||||
if [ -n "$MY_GUEST_DIR" ]; then
|
||||
GUEST_DIR=$MY_GUEST_DIR
|
||||
else
|
||||
GUEST_DIR=$MY_WORKSPACE/guest
|
||||
fi
|
||||
|
||||
MOCK=/usr/bin/mock
|
||||
if [ $VERBOSE -eq 0 ]; then
|
||||
MOCK="$MOCK -q"
|
||||
fi
|
||||
|
||||
# Path to guest configuration
|
||||
GUEST_BUILD_DIR="${BUILD_GUEST_DIR}/build_guest"
|
||||
GUEST_BUILD_CMD=$GUEST_BUILD_DIR/build-guest-image.py
|
||||
if [ $VERBOSE -eq 1 ]; then
|
||||
GUEST_BUILD_CMD="$GUEST_BUILD_CMD -x"
|
||||
fi
|
||||
|
||||
if [ $BUILD_MODE == 'std' ]; then
|
||||
OUTPUT_FILE=$OUTPUT_DIR/tis-centos-guest.img
|
||||
elif [ $BUILD_MODE == 'rt' ]; then
|
||||
OUTPUT_FILE=$OUTPUT_DIR/tis-centos-guest-rt.img
|
||||
else
|
||||
printf " Error -- unknown BUILD_MODE '$BUILD_MODE'\n";
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
function check_vars {
|
||||
# Where to store data
|
||||
printf "Finding cgcs-root\n"
|
||||
printf " Checking \$MY_REPO (value \"$MY_REPO\")\n"
|
||||
|
||||
if [ ! -z "$MY_REPO" ] && [ -d "$MY_REPO" ] ; then
|
||||
INTERNAL_REPO_ROOT=$MY_REPO
|
||||
printf " Found!\n"
|
||||
fi
|
||||
|
||||
if [ -z "$INTERNAL_REPO_ROOT" ] ; then
|
||||
printf " No joy -- checking \$MY_REPO_ROOT_DIR (value \"$MY_REPO_ROOT_DIR\")\n"
|
||||
if [ ! -z "$MY_REPO_ROOT_DIR" ] && [ -d "$MY_REPO_ROOT_DIR/cgcs-root" ] ; then
|
||||
INTERNAL_REPO_ROOT=$MY_REPO_ROOT_DIR/cgcs-root
|
||||
printf " Found!\n"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -z "$INTERNAL_REPO_ROOT" ] ; then
|
||||
printf " No joy -- checking for \$MY_WORKSPACE/cgcs-root\n"
|
||||
if [ -d "$MY_WORKSPACE/cgcs-root" ] ; then
|
||||
INTERNAL_REPO_ROOT=$MY_WORKSPACE/cgcs-root
|
||||
printf " Found!\n"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -z "$INTERNAL_REPO_ROOT" ] ; then
|
||||
printf " Error -- could not locate cgcs-root repo.\n"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
STX_DIR=$INTERNAL_REPO_ROOT/stx
|
||||
|
||||
if [ "x$MY_BUILD_CFG" == "x" ];then
|
||||
printf " Error -- reqiure MY_BUILD_CFG to be defined.\n"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
RELEASE_INFO="$(get_release_info)"
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "WARNING: failed to find a release info file."
|
||||
else
|
||||
export PLATFORM_RELEASE=$(source "$RELEASE_INFO" && echo $PLATFORM_RELEASE)
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
|
||||
function create_rootfs {
|
||||
printf "\nCreating guest file system\n"
|
||||
|
||||
mkdir -p $GUEST_DIR
|
||||
if [ $? -ne 0 ]; then
|
||||
printf " Error -- Could not create $GUEST_DIR\n";
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Place build-time environment variables in mock configuration
|
||||
GUEST_ENV="${MY_BUILD_ENVIRONMENT}-guest"
|
||||
GUEST_CFG=$GUEST_DIR/$MY_BUILD_ENVIRONMENT_FILE
|
||||
|
||||
MY_BUILD_ENVIRONMENT=$GUEST_ENV "${BUILD_GUEST_DIR}/modify-build-cfg" $GUEST_CFG
|
||||
if [ $? -ne 0 ]; then
|
||||
printf " Error -- Could not update $GUEST_CFG\n";
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Setup mock directories for the guest
|
||||
if [ -d /localdisk/loadbuild/mock ]; then
|
||||
LNK=/localdisk/loadbuild/mock/$GUEST_ENV
|
||||
if [ ! -L $LNK ]; then
|
||||
ln -s $GUEST_DIR $LNK
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -d /localdisk/loadbuild/mock-cache ]; then
|
||||
mkdir -p $GUEST_DIR/cache
|
||||
LNK=/localdisk/loadbuild/mock-cache/$GUEST_ENV
|
||||
if [ ! -L $LNK ]; then
|
||||
ln -s $GUEST_DIR/cache $LNK
|
||||
fi
|
||||
fi
|
||||
|
||||
# Setup mock chroot environment
|
||||
$MOCK -r $GUEST_CFG --clean && $MOCK -r $GUEST_CFG --init
|
||||
if [ $? -ne 0 ]; then
|
||||
printf " Error -- Failed to setup guest mock chroot\n";
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Install the RPMs to the root filesystem
|
||||
|
||||
# Note that the "rt" build needs access to both local-std and local-rt repos
|
||||
local EXTRA_REPOS=""
|
||||
|
||||
if [ $BUILD_MODE == 'std' ]; then
|
||||
INC_RPM_LIST=$(grep -v '^#' ${GUEST_BUILD_DIR}/rpm-install-list.txt)
|
||||
TIS_RPM_LIST=$(image_inc_list guest std centos)
|
||||
elif [ $BUILD_MODE == 'rt' ]; then
|
||||
INC_RPM_LIST=$(grep -v '^#' ${GUEST_BUILD_DIR}/rpm-install-list-rt.txt)
|
||||
TIS_RPM_LIST=$(image_inc_list guest rt centos)
|
||||
EXTRA_REPOS="--enablerepo local-rt"
|
||||
else
|
||||
printf " Error -- unknown BUILD_MODE '$BUILD_MODE'\n";
|
||||
exit 1
|
||||
fi
|
||||
|
||||
$MOCK -r $GUEST_CFG ${EXTRA_REPOS} --install ${INC_RPM_LIST} ${TIS_RPM_LIST} "$@"
|
||||
if [ $? -ne 0 ]; then
|
||||
printf "=====\n"
|
||||
cat $GUEST_DIR/mock/result/root.log | sed -n '/Error:/,$p' | sed '/Child return code was:/q'
|
||||
printf "=====\n"
|
||||
printf " Error -- Failed to install RPM packages\n";
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Make sure all requested packages are installed
|
||||
MISSING=$(
|
||||
extra_rpm_names="$(
|
||||
for p in "$@" ; do
|
||||
# skip URLs
|
||||
if [[ "$p" =~ :// ]] ; then
|
||||
continue
|
||||
fi
|
||||
# if it contains a slash or ends with .rpm, assume it's a local file
|
||||
# and read its embedded package name
|
||||
if [[ "$p" =~ / || "$p" =~ [.]rpm$ ]] ; then
|
||||
rpm -q --qf '%{name}\n' -p "$p"
|
||||
# otherwise assume it's a package name already
|
||||
else
|
||||
echo "$p"
|
||||
fi
|
||||
done
|
||||
)"
|
||||
$MOCK -r $GUEST_CFG --chroot -- rpm -q --whatprovides ${INC_RPM_LIST} ${TIS_RPM_LIST} $extra_rpm_names \
|
||||
| sed -n 's/^no package provides //p' \
|
||||
| sort -u
|
||||
)
|
||||
if [ -n "$MISSING" ]; then
|
||||
printf "=====\n"
|
||||
printf "WARNING: The following RPMs are missing or could not be installed:\n"
|
||||
local p
|
||||
for p in $MISSING ; do
|
||||
echo " [$p]"
|
||||
done
|
||||
printf "=====\n"
|
||||
fi
|
||||
|
||||
# Remove RPMs that are not required in image (pruned package list)
# NOTE: these are automatically installed by the mock init, not
# through dependencies.
EXC_RPM_LIST=$(grep -v '^#' ${GUEST_BUILD_DIR}/rpm-remove-list.txt)

$MOCK -r $GUEST_CFG --remove ${EXC_RPM_LIST}
if [ $? -ne 0 ]; then
printf " Error -- Failed to remove RPM packages\n";
exit 1
fi

printf " Done\n"
}
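The missing-package check above relies on a simple idiom: inside the guest chroot, rpm -q --whatprovides prints "no package provides NAME" for anything unresolved, and the sed/sort pipeline keeps only those names. A minimal stand-alone sketch of the same idiom, assuming a hypothetical mock configuration guest.cfg:

    # report any capability that nothing in the chroot provides
    missing=$(mock -r guest.cfg --chroot -- rpm -q --whatprovides bash no-such-package \
        | sed -n 's/^no package provides //p' | sort -u)
    [ -z "$missing" ] || echo "unresolved: $missing"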


function update_rootfs {
printf "\nCustomizing guest file system\n"

# Copy over skeleton configuration files
for GUEST_ROOTFS in $GUEST_BUILD_DIR/rootfs $GUEST_BUILD_DIR/rootfs-$BUILD_MODE;
do
for f in $(cd $GUEST_ROOTFS && find . -type f | cut -c3-);
do
echo "$MOCK -r $GUEST_CFG --copyin $GUEST_ROOTFS/$f $f"
$MOCK -r $GUEST_CFG --copyin $GUEST_ROOTFS/$f $f
if [ $? -ne 0 ]; then
printf " Error -- Failed to copyin file $f\n";
exit 1
fi
done
done

# Run the root file system setup script inside the chroot
ROOTFS_SETUP=rootfs-setup.sh
$MOCK -r $GUEST_CFG --copyin $GUEST_BUILD_DIR/$ROOTFS_SETUP $TMP_DIR && \
if [ $BUILD_MODE == 'rt' ]; then
ROOTFS_SETUP_CMD="$TMP_DIR$ROOTFS_SETUP --rt"
elif [ $BUILD_MODE == 'std' ]; then
ROOTFS_SETUP_CMD="$TMP_DIR$ROOTFS_SETUP --std"
else
ROOTFS_SETUP_CMD="$TMP_DIR$ROOTFS_SETUP"
fi
$MOCK -r $GUEST_CFG --chroot "$ROOTFS_SETUP_CMD"
if [ $? -ne 0 ]; then
printf " Error -- Failed to run guest $ROOTFS_SETUP\n";
exit 1
fi
$MOCK -r $GUEST_CFG --chroot "rm -f $TMP_DIR$ROOTFS_SETUP"
if [ $? -ne 0 ]; then
printf " Error -- Failed to delete $ROOTFS_SETUP from guest\n";
exit 1
fi

printf " Done\n"
}
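update_rootfs drives everything through mock's file-transfer and chroot options. A minimal round trip showing the three options it depends on, with hypothetical file names and a hypothetical config guest.cfg:

    mock -r guest.cfg --copyin ./issue /etc/issue        # host file into the chroot
    mock -r guest.cfg --chroot -- cat /etc/issue         # run a command inside the chroot
    mock -r guest.cfg --copyout /etc/issue ./issue.out   # copy a chroot file back to the host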


function build_image {
# Build the image
printf "\nBuilding guest image $OUTPUT_FILE\n"

mkdir -p $OUTPUT_DIR
if [ $? -ne 0 ]; then
printf " Error -- Could not create $OUTPUT_DIR\n";
exit 1
fi

# Build guest rootfs archive
ROOTFS_SPACE=$((500*1024*1024))
ROOTFS_TAR=rootfs.tar
ROOTFS_EXCLUDE=rootfs-exclude.txt

$MOCK -r $GUEST_CFG --copyin $GUEST_BUILD_DIR/$ROOTFS_EXCLUDE $TMP_DIR
$MOCK -r $GUEST_CFG --chroot -- tar -cf $TMP_DIR$ROOTFS_TAR -X $TMP_DIR$ROOTFS_EXCLUDE --exclude=$TMP_DIR$ROOTFS_TAR --numeric-owner /
$MOCK -r $GUEST_CFG --copyout $TMP_DIR$ROOTFS_TAR $GUEST_DIR
$MOCK -r $GUEST_CFG --chroot -- rm -f $TMP_DIR$ROOTFS_TAR

$GUEST_BUILD_CMD -i $GUEST_DIR/$ROOTFS_TAR -o $OUTPUT_FILE -s $ROOTFS_SPACE
if [ $? -ne 0 ]; then
printf " Error -- Failed to build guest image\n";
exit 1
fi

printf " Done\n"
}
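ROOTFS_SPACE above is plain shell arithmetic: $((500*1024*1024)) expands to 524288000 bytes, i.e. a 500 MiB size budget passed to the image build command. A quick check:

    echo $((500*1024*1024))    # 524288000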


function clean_guest {
printf "\nCleaning the guest $GUEST_DIR\n"

if [ ! -e $GUEST_DIR ]; then
printf " Done...nothing to do\n";
exit 0
fi

# Place build-time environment variables in mock configuration
GUEST_ENV="${MY_BUILD_ENVIRONMENT}-guest"
GUEST_CFG=$GUEST_DIR/$MY_BUILD_ENVIRONMENT_FILE

if [ ! -e $GUEST_CFG ]; then
MY_BUILD_ENVIRONMENT=$GUEST_ENV "${BUILD_GUEST_DIR}/modify-build-cfg" $GUEST_CFG
if [ $? -ne 0 ]; then
printf " Error -- Could not update $GUEST_CFG\n";
exit 1
fi
fi

$MOCK -r $GUEST_CFG --clean
$MOCK -r $GUEST_CFG --scrub=cache

rm -rf $GUEST_DIR
if [ $? -ne 0 ]; then
printf " Error -- Failed to remove guest $GUEST_DIR\n";
exit 1
fi

printf " Done\n"
}

#############################################
# Main code
#############################################

usage () {
echo ""
echo "Usage: "
echo " build-guest [--rt | --std] [--verbose] [EXTRA_RPMS...]"
echo " build-guest [--help]"
echo " build-guest [--clean]"
echo ""
echo "EXTRA_RPMS are either package names or full RPM file paths"
}
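For reference, typical invocations of this (since removed) helper follow directly from the usage text above; the extra RPM name below is purely illustrative:

    build-guest                                                # standard guest image
    build-guest --rt                                           # real-time variant
    build-guest --std ./my-extra-package-1.0-1.tis.noarch.rpm  # include a locally built RPM
    build-guest --clean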

# Default argument values
HELP=0
CLEAN=0
VERBOSE=0
BUILD_MODE='std'

# read the options
TEMP=`getopt -o h --long clean,rt,std,verbose,help -n "$PROGNAME" -- "$@"` || exit 1
eval set -- "$TEMP"

# extract options and their arguments into variables.
while true ; do
case "$1" in
-h|--help) HELP=1 ; shift ;;
--clean) CLEAN=1 ; shift ;;
--verbose) VERBOSE=1 ; shift ;;
--rt) BUILD_MODE='rt' ; shift ;;
--std) BUILD_MODE='std' ; shift ;;
--) shift ; break ;;
*) echo "Internal error!" ; exit 1 ;;
esac
done
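The option handling uses the common getopt(1)/eval pattern: getopt normalises and reorders the arguments, and eval set -- "$TEMP" replaces the positional parameters with that normalised list so the while/case loop can walk it safely. A small sketch with a hypothetical option set:

    TEMP=$(getopt -o v --long verbose,mode: -n demo -- --mode rt leftover) || exit 1
    eval set -- "$TEMP"
    # "$@" is now:  --mode 'rt' -- 'leftover'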

if [ $HELP -eq 1 ]; then
usage
exit 0
fi

if [[ $CLEAN -eq 1 && "$#" -gt 0 ]] ; then
echo "Too many arguments!" >&2 ; exit 1
else
# make sure extra RPM files exist
for p in "$@" ; do
# skip URLs
if [[ "$p" =~ :// ]] ; then
continue
fi
# if it contains a slash or ends with .rpm, assume it's a local file name
if [[ "$p" =~ / || "$p" =~ [.]rpm$ ]] ; then
# make sure it exists and is an RPM file
true <"$p" || exit 1
if ! file --brief --mime-type "$p" | grep -q "^application/x-rpm$" ; then
echo "$p: not an RPM file" >&2
exit 1
fi
fi
done
unset p
fi
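The RPM sanity check above leans on file(1)'s MIME detection, which the script expects to report application/x-rpm for a valid package. For example, with a hypothetical local file:

    file --brief --mime-type ./my-extra-package-1.0-1.tis.noarch.rpm
    # expected: application/x-rpm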

(
printf "\n*****************************\n"
printf "Create Titanium Cloud/CentOS Guest Image\n"
printf "*****************************\n\n"

init_vars
check_vars

if [ $CLEAN -eq 1 ]; then
clean_guest
exit 0
fi

create_rootfs "$@"
update_rootfs
build_image

) 2>&1 | stdbuf -o0 awk '{ print strftime("%H:%M:%S"), $0; fflush(); }' ; exit ${PIPESTATUS[0]}
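The closing line is a compact logging idiom: the whole body runs in a subshell, its combined stdout/stderr is piped through awk (GNU awk, for strftime) to prefix every line with an HH:MM:SS timestamp, and ${PIPESTATUS[0]} makes the script exit with the subshell's status rather than awk's. A minimal sketch:

    ( echo "start"; sleep 1; echo "done"; exit 3 ) 2>&1 \
        | stdbuf -o0 awk '{ print strftime("%H:%M:%S"), $0; fflush(); }'
    echo "exit status: ${PIPESTATUS[0]}"    # prints 3, the subshell's status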
@@ -12,7 +12,7 @@
BUILD_HELM_CHARTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $BUILD_HELM_CHARTS_DIR/utils.sh || exit 1

SUPPORTED_OS_ARGS=('centos' 'debian')
SUPPORTED_OS_ARGS=('debian')
OS=
LABEL=""
APP_NAME="stx-openstack"
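With CentOS dropped from SUPPORTED_OS_ARGS, --os effectively accepts only debian. A representative invocation of build-helm-charts.sh after this change might look like the following (application, package and record names are illustrative):

    build-helm-charts.sh --os debian --app stx-openstack \
        --package stx-openstack-helm-fluxcd \
        --image-record images-debian-stable-latest.lst --label stable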
@@ -35,7 +35,7 @@ Usage:
$(basename $0) [--os <os>] [-a, --app <app-name>]
[-A, --app-version-file /path/to/$APP_VERSION_BASE]
[-B, --app-version <version>]
[-r, --rpm <rpm-name>] [-i, --image-record <image-record>] [--label <label>]
[--package <package-name>] [-i, --image-record <image-record>] [--label <label>]
[-p, --patch-dependency <patch-dependency>] [ --verbose ]
Options:
--os:
@@ -55,13 +55,10 @@ Options:
Specify application (tarball) version, this overrides any other
version information.

-r, --package PACKAGE_NAME,... :
--package PACKAGE_NAME,... :
Top-level package(s) containing the helm chart(s), comma-separated.
Default: ${APP_NAME}-helm

--rpm PACKAGE_NAME,... :
(Deprecated) same as --package

-i, --image-record FILENAME :
Specify the path to image record file(s) or url(s).
Multiple files/urls can be specified with a comma-separated
@@ -136,18 +133,18 @@ function build_image_versions_to_armada_manifest {
# <docker-registry>/<repository>/<repository>/.../<image-name>:<tag>
#
# An example of the content of an image record file:
# e.g. images-centos-dev-latest.lst
# docker.io/starlingx/stx-aodh:master-centos-dev-latest
# docker.io/starlingx/stx-ceilometer:master-centos-dev-latest
# docker.io/starlingx/stx-cinder:master-centos-dev-latest
# e.g. images-debian-stable-latest.lst
# docker.io/starlingx/stx-aodh:master-debian-stable-latest
# docker.io/starlingx/stx-ceilometer:master-debian-stable-latest
# docker.io/starlingx/stx-cinder:master-debian-stable-latest
# ...
#
# An example of the usage of an image reference in manifest file:
# e.g. manifest.yaml
# images:
# tags:
# aodh_api: docker.io/starlingx/stx-aodh:master-centos-stable-latest
# aodh_db_sync: docker.io/starlingx/stx-aodh:master-centos-stable-latest
# aodh_api: docker.io/starlingx/stx-aodh:master-debian-stable-latest
# aodh_db_sync: docker.io/starlingx/stx-aodh:master-debian-stable-latest
# ...
#
# To replace the images in the manifest file with the images in image record file:
@@ -156,14 +153,14 @@ function build_image_versions_to_armada_manifest {
# e.g. image_name = stx-aodh
#
# 2. search the image reference in manifest yaml via image_name
# e.g. old_image_reference = docker.io/starlingx/stx-aodh:master-centos-stable-latest
# e.g. old_image_reference = docker.io/starlingx/stx-aodh:master-debian-stable-latest
#
# 3. update the manifest file to replace the old image references with the new one
# e.g. manifest.yaml
# images:
# tags:
# aodh_api: docker.io/starlingx/stx-aodh:master-centos-dev-latest
# aodh_db_sync: docker.io/starlingx/stx-aodh:master-centos-dev-latest
# aodh_api: docker.io/starlingx/stx-aodh:master-debian-stable-latest
# aodh_db_sync: docker.io/starlingx/stx-aodh:master-debian-stable-latest
#
image_record=${IMAGE_RECORD_PATH}/$(basename ${image_record})
${PYTHON_2_OR_3} $BUILD_HELM_CHARTS_DIR/helm_chart_modify.py ${manifest_file} ${manifest_file}.tmp ${image_record}
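The comment block above describes the substitution; the actual rewrite is done by helm_chart_modify.py, which takes the manifest to edit, an output path, and one image record file, in that order. A concrete invocation with illustrative file names, assuming python3 is the interpreter selected as PYTHON_2_OR_3:

    python3 helm_chart_modify.py stx-openstack-manifest.yaml stx-openstack-manifest.yaml.tmp images-debian-stable-latest.lst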
@@ -188,18 +185,18 @@ function build_image_versions_to_fluxcd_manifests {
# <docker-registry>/<repository>/<repository>/.../<image-name>:<tag>
#
# An example of the content of an image record file:
# e.g. images-centos-dev-latest.lst
# docker.io/starlingx/stx-aodh:master-centos-dev-latest
# docker.io/starlingx/stx-ceilometer:master-centos-dev-latest
# docker.io/starlingx/stx-cinder:master-centos-dev-latest
# e.g. images-debian-stable-latest.lst
# docker.io/starlingx/stx-aodh:master-debian-stable-latest
# docker.io/starlingx/stx-ceilometer:master-debian-stable-latest
# docker.io/starlingx/stx-cinder:master-debian-stable-latest
# ...
#
# An example of the usage of an image reference in manifest file:
# e.g. manifest.yaml
# images:
# tags:
# aodh_api: docker.io/starlingx/stx-aodh:master-centos-stable-latest
# aodh_db_sync: docker.io/starlingx/stx-aodh:master-centos-stable-latest
# aodh_api: docker.io/starlingx/stx-aodh:master-debian-stable-latest
# aodh_db_sync: docker.io/starlingx/stx-aodh:master-debian-stable-latest
# ...
#
# To replace the images in the manifest file with the images in image record file:
@@ -208,14 +205,14 @@ function build_image_versions_to_fluxcd_manifests {
# e.g. image_name = stx-aodh
#
# 2. search the image reference in manifest yaml via image_name
# e.g. old_image_reference = docker.io/starlingx/stx-aodh:master-centos-stable-latest
# e.g. old_image_reference = docker.io/starlingx/stx-aodh:master-debian-stable-latest
#
# 3. update the manifest file to replace the old image references with the new one
# e.g. manifest.yaml
# images:
# tags:
# aodh_api: docker.io/starlingx/stx-aodh:master-centos-dev-latest
# aodh_db_sync: docker.io/starlingx/stx-aodh:master-centos-dev-latest
# aodh_api: docker.io/starlingx/stx-aodh:master-debian-stable-latest
# aodh_db_sync: docker.io/starlingx/stx-aodh:master-debian-stable-latest
#
image_record=${IMAGE_RECORD_PATH}/$(basename ${image_record})
find ${manifest_folder} -name "*.yaml" | while read manifest_file; do
@@ -435,23 +432,7 @@ filter_existing_dirs() {
function find_package_files {
local -a dirlist
local dir
if [[ "$OS" == "centos" ]] ; then
local centos_repo="${MY_REPO}/centos-repo"
if [[ ! -d "${centos_repo}" ]] ; then
centos_repo="${MY_REPO}/cgcs-centos-repo"
if [[ ! -d "${centos_repo}" ]] ; then
echo "ERROR: directory ${MY_REPO}/centos-repo not found." >&2
exit 1
fi
fi
readarray -t dirlist < <(filter_existing_dirs \
"${MY_WORKSPACE}/std/rpmbuild/RPMS" \
"${centos_repo}/Binary/noarch")
if [[ "${#dirlist[@]}" -gt 0 ]] ; then
echo "looking for packages in ${dirlist[*]}" >&2
find "${dirlist[@]}" -xtype f -name "*.tis.noarch.rpm"
fi
else
if [[ "$OS" == "debian" ]] ; then
# FIXME: can't search 3rd-party binary debs because they are not accessible
# on the filesystem, but only as remote files in apt repos
readarray -t dirlist < <(filter_existing_dirs "${MY_WORKSPACE}/std")
@@ -491,9 +472,7 @@ function find_helm_chart_package_files {
local failed=0
for package_file in $(find_package_files) ; do
package_name="$(
if [[ "$OS" == "centos" ]] ; then
rpm_get_name "$package_file" || exit 1
else
if [[ "$OS" == "debian" ]] ; then
deb_get_control "$package_file" | deb_get_field "Package"
check_pipe_status
fi
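On the Debian path the package name is read from the .deb control data via the repo's deb_get_control/deb_get_field helpers; with stock tooling the equivalent one-liner would be roughly the following, where the package file name is illustrative:

    dpkg-deb --field ./stx-openstack-helm-fluxcd_1.0-1.stx.deb Package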
@@ -537,10 +516,7 @@ function find_helm_chart_package_files {
fi

local -a dep_package_names=($(
if [[ "$OS" == "centos" ]] ; then
rpm -qRp "$package_file" | sed 's/rpmlib([a-zA-Z0-9]*)[[:space:]]\?[><=!]\{0,2\}[[:space:]]\?[0-9.-]*//g' | grep -E -v -e '/' -e '^\s*$'
check_pipe_status || exit 1
else
if [[ "$OS" == "debian" ]] ; then
deb_get_control "$package_file" | deb_get_simple_depends
check_pipe_status || exit 1
fi
@@ -591,14 +567,6 @@ function extract_chart_from_package {
local package_file=$1
echo "extracting charts from package $package_file" >&2
case $OS in
centos)
rpm2cpio "$package_file" | cpio ${CPIO_FLAGS}
if ! check_pipe_status ; then
echo "Failed to extract content of helm package: ${package_file}" >&2
exit 1
fi
;;

debian)
deb_extract_content "$package_file" $([[ "$VERBOSE" == "true" ]] && echo --verbose || true)
if ! check_pipe_status ; then
@@ -671,10 +639,7 @@ function get_app_version {
echo "extracting version from $1" >&2
local app_version
app_version="$(
if [[ "$OS" == "centos" ]] ; then
rpm -q --qf '%{VERSION}-%{RELEASE}' -p "$1" | sed 's![.]tis!!g'
check_pipe_status || exit 1
else
if [[ "$OS" == "debian" ]] ; then
control="$(deb_get_control "$1")" || exit 1
version="$(echo "$control" | deb_get_field "Version" | sed -r -e 's/^[^:]+:+//')"
if [[ -z "$version" ]] ; then
@@ -689,7 +654,7 @@
}

# TODO(awang): remove the deprecated image-file option
OPTS=$(getopt -o h,a:,A:,B:,r:,i:,l:,p: -l help,os:,app:,app-version-file:,app-version:,rpm:,package:,image-record:,image-file:,label:,patch-dependency:,verbose -- "$@")
OPTS=$(getopt -o h,a:,A:,B:,i:,l:,p: -l help,os:,app:,app-version-file:,app-version:,package:,image-record:,image-file:,label:,patch-dependency:,verbose -- "$@")
if [ $? -ne 0 ]; then
usage
exit 1
@@ -720,10 +685,7 @@ while true; do
APP_VERSION="$2"
shift 2
;;
-r | --rpm | --package)
if [[ "$1" == "--rpm" ]] ; then
echo "WARNING: option $1 is deprecated, use --package instead" >&2
fi
--package)
APP_PACKAGES+=(${2//,/ })
shift 2
;;
@@ -770,8 +732,6 @@ if [ -z "$OS" ] ; then
if [[ -z "$OS" ]] ; then
echo "Unable to determine OS, please re-run with \`--os' option" >&2
exit 1
elif [[ "$OS" != "debian" ]] ; then
OS="centos"
fi
fi
VALID_OS=1
@@ -810,10 +770,8 @@ function find_python_2_or_3 {
}
PYTHON_2_OR_3="$(find_python_2_or_3)" || exit 1

# include SRPM utils
if [[ "$OS" == "centos" ]] ; then
source $BUILD_HELM_CHARTS_DIR/srpm-utils || exit 1
else
# include packaging utils
if [[ "$OS" == "debian" ]] ; then
source $BUILD_HELM_CHARTS_DIR/deb-utils.sh || exit 1
fi

@@ -1,638 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
PROGNAME=$(basename "$0")
|
||||
FORCE=0
|
||||
AUTO_MODE=
|
||||
IMG_SIZE=
|
||||
BOOTIMAGE_ISO=
|
||||
GRAPHICAL_SUFFIX=
|
||||
IMG_FILE=
|
||||
AUTO_ISO=
|
||||
DHCPV6C=yes
|
||||
OAM_DEV=ens3
|
||||
IPV4_GW_ADDR=
|
||||
IPV6_GW_ADDR=
|
||||
AWS_COMPATIBLE=0
|
||||
declare -A PASSWORDS
|
||||
: KVM=
|
||||
KVM_OPTS=()
|
||||
TEMPFILES_DIR=
|
||||
SUDO=0
|
||||
GRAPHICAL=0
|
||||
TTY_SETTINGS=
|
||||
RPM_ADDON_LIST=()
|
||||
|
||||
# Print out the help message
|
||||
usage() {
|
||||
echo "\
|
||||
Usage: $0 OPTIONS...
|
||||
Create a QCOW2/QEMU image with StarlingX pre-installed
|
||||
|
||||
-f,--force overwrite output file if it exists
|
||||
|
||||
-m,--mode={controller|aio|aio_lowlatency}
|
||||
create a controller or an all-in-one/low latency system
|
||||
(default: aio)
|
||||
|
||||
--sudo Use sudo to mount the ISO, rather than udisks
|
||||
|
||||
-s,--size=nnnG image file size, must end with "G" (default: 500G)
|
||||
|
||||
-g,--graphical create a graphical installation, rather than console
|
||||
|
||||
-e,--oam-dev=OAM_DEV
|
||||
OAM network device (default: ens3)
|
||||
|
||||
-4,--ipv4 don't configure IPv6 in the generated image
|
||||
|
||||
-w,--ipv4-default-gateway=GW_IPV4_ADDR
|
||||
Add a default IPv4 route via this gateway address
|
||||
|
||||
-W,--ipv6-default-gateway=GW_IPV6_ADDR
|
||||
Add a default IPv6 route via this gateway address
|
||||
|
||||
-p,--password=USER:PASSWORD
|
||||
Unlock USER account and set its password in the generated
|
||||
image.
|
||||
USER must exist -- e.g., root, sysadmin.
|
||||
This option may be repeated.
|
||||
|
||||
WARNING: this option is not recommended because the
|
||||
password will be visible to anyone listing the
|
||||
processes. Use \`--passwords-from' instead.
|
||||
|
||||
-P,--passwords-from=PASSWORD_FILE
|
||||
Unlock and set passwords of each user account from
|
||||
PASSWORD_FILE, which must contain one or more lines
|
||||
of the form
|
||||
|
||||
USER:PASSWORD
|
||||
|
||||
USERs must exist -- e.g., root, sysadmin.
|
||||
|
||||
-S,--passwords-from-stdin
|
||||
Same as \`--passwords-from=/dev/stdin'
|
||||
|
||||
-i,--iso=BOOTIMAGE_ISO
|
||||
use this iso file as input, it must have been generated
|
||||
by build-iso with default options
|
||||
(default: \$MY_WORKSPACE/export/bootimage.iso)
|
||||
|
||||
-o,--output=IMG_FILE
|
||||
output image file name
|
||||
Default:
|
||||
\$MY_WORKSPACE/export/stx_\${MODE}.qcow2)
|
||||
Default with --graphical:
|
||||
\$MY_WORKSPACE/export/stx_\${MODE}_graphical.qcow2)
|
||||
|
||||
--aws
|
||||
Prepare an image that can be loaded onto an AWS EC2
|
||||
instance
|
||||
--addon
|
||||
Specify additional rpms to add to the qcow2 image
|
||||
|
||||
ENVIRONMENT
|
||||
|
||||
MY_REPO source repo directory
|
||||
MY_WORKSPACE build workspace directory
|
||||
KVM path to kvm executable (default: auto)
|
||||
"
|
||||
}
|
||||
|
||||
# Delete temporary files
|
||||
cleanup() {
|
||||
# QEMU changes terminal settings, restore them before exiting
|
||||
[[ -z $TTY_SETTINGS ]] || stty "$TTY_SETTINGS" <&1
|
||||
# remove temporary files
|
||||
rm -rf "$TEMPFILES_DIR"
|
||||
rm -f "$IMG_FILE.tmp"
|
||||
}
|
||||
|
||||
# Clean up before exiting due to a signal
|
||||
handle_sig() {
|
||||
trap - EXIT
|
||||
cleanup
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Clean up before normal exit
|
||||
handle_exit() {
|
||||
local rv="$?"
|
||||
trap - EXIT
|
||||
cleanup
|
||||
exit $rv
|
||||
}
|
||||
|
||||
# Print out an error message
|
||||
error() {
|
||||
echo "$PROGNAME: error: $*" >&2
|
||||
}
|
||||
|
||||
# Print out an error message and exit
|
||||
die() {
|
||||
error "$*"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Print out a command-line error message and exit
|
||||
cmdline_error() {
|
||||
if [ "$#" -gt 0 ] ; then
|
||||
error "$*"
|
||||
fi
|
||||
echo "Type \`$0 --help' for more info." >&2
|
||||
exit 2
|
||||
}
|
||||
|
||||
# Encrypt a password for /etc/passwd
|
||||
encrypt_password() {
|
||||
export ARG="$1"
|
||||
python -c '
|
||||
import crypt, os, binascii, sys
|
||||
salt = binascii.b2a_hex(os.urandom (8)).decode("ascii")
|
||||
encrypted = crypt.crypt (os.environ["ARG"], "$5$" + salt + "$")
|
||||
print (encrypted)
|
||||
' "$1"
|
||||
local status="$?"
|
||||
unset ARG
|
||||
[[ $status -eq 0 ]] || exit 1
|
||||
}
|
||||
|
||||
# Save username/password to $PASSWORDS
|
||||
save_password() {
|
||||
local passwd_str="$1"
|
||||
local error_prefix="$2"
|
||||
if [[ ! $passwd_str =~ : ]] ; then
|
||||
error "${error_prefix}expecting USER:PASSWORD"
|
||||
return 1
|
||||
fi
|
||||
local user="${passwd_str%%:*}"
|
||||
local passwd="${passwd_str#*:}"
|
||||
if [[ -z $user || -z $passwd ]] ; then
|
||||
error "${error_prefix}expecting USER:PASSWORD"
|
||||
return 1
|
||||
fi
|
||||
if [[ $user =~ [^a-zA-Z0-9._-] ]] ; then
|
||||
error "${error_prefix}username must only contain characters [a-zA-Z0-9._-]"
|
||||
return 1
|
||||
fi
|
||||
PASSWORDS[$user]="$passwd"
|
||||
return 0
|
||||
}
|
||||
|
||||
# Read passwords from file or STDIN
|
||||
read_passwords() {
|
||||
local filename="$1"
|
||||
local -i lineno=0
|
||||
local numchar="#"
|
||||
# Open password file or STDIN as file descriptor 3
|
||||
if [[ -z $filename || $filename == - ]] ; then
|
||||
filename=STDIN
|
||||
exec 3<&0 || exit 1
|
||||
else
|
||||
exec 3<"$filename" || exit 1
|
||||
fi
|
||||
while read line <&3 ; do
|
||||
let lineno++
|
||||
# skip empty lines and comments
|
||||
# ${numchar} is "#" to avoid tripping up VI's syntax highlighting
|
||||
if [[ ! $line =~ ^[[:space:]]*(${numchar}.*)?*$ ]] ; then
|
||||
save_password "$line" "$filename:$lineno: " || exit 1
|
||||
fi
|
||||
done
|
||||
# close file descriptor 3
|
||||
exec 3<&-
|
||||
}
|
||||
|
||||
# Check if an IPv4 address is valid
|
||||
is_ipv4_addr() {
|
||||
# make sure we have python
|
||||
python -c 'import socket' || exit 1
|
||||
# parse the address via python
|
||||
python -c 'import socket,sys;socket.inet_aton(sys.argv[1])' "$1" >/dev/null 2>&1
|
||||
}
|
||||
|
||||
# Check if an IPv6 address is valid
|
||||
is_ipv6_addr() {
|
||||
# make sure we have python
|
||||
python -c 'import socket' || exit 1
|
||||
# parse the address via python
|
||||
python -c 'import socket,sys;socket.inet_pton(socket.AF_INET6,sys.argv[1])' "$1" >/dev/null 2>&1
|
||||
}
|
||||
|
||||
# find QEMU/KVM
|
||||
find_kvm() {
|
||||
local kvm
|
||||
if [[ -n "$KVM" ]] ; then
|
||||
kvm=$(which "$KVM")
|
||||
[[ -n $kvm ]] || exit 1
|
||||
else
|
||||
for kvm_basename in qemu-kvm kvm ; do
|
||||
kvm=$(export PATH=$PATH:/usr/bin:/usr/libexec ; which $kvm_basename 2>/dev/null || :)
|
||||
[[ -n $kvm ]] && break || :
|
||||
done
|
||||
[[ -n $kvm ]] || die "unable to find kvm executable"
|
||||
fi
|
||||
KVM="$kvm"
|
||||
if [[ -c /dev/kvm ]] ; then
|
||||
KVM_OPTS+=("-enable-kvm")
|
||||
fi
|
||||
}
|
||||
|
||||
# Perform setup work for an image to run on AWS
|
||||
# Create config files for adding ENA driver module, network scripts, and for
|
||||
# regenerating a generic initramfs image
|
||||
add_aws_setup(){
|
||||
local ks_addon=$1
|
||||
AWS_OAM_IF=ens5
|
||||
AWS_MGMT_IF=ens6
|
||||
cat >>"$ks_addon" <<_END
|
||||
|
||||
# Comment out deprecated virtio by-path rules to avoid duplicate symlinks
|
||||
sed -i 's/^\(KERNEL.*disk\/by-path\/virtio\)/#\1/' /usr/lib/udev/rules.d/60-persistent-storage.rules
|
||||
|
||||
cat >/etc/modules-load.d/ena.conf <<END
|
||||
ena
|
||||
END
|
||||
|
||||
cat >/etc/dracut.conf.d/add-ena.conf <<END
|
||||
add_drivers+=" ena "
|
||||
END
|
||||
|
||||
cat >/etc/dracut.conf.d/no-hostonly.conf <<END
|
||||
hostonly="no"
|
||||
END
|
||||
|
||||
cat >/etc/sysconfig/network-scripts/ifcfg-${AWS_OAM_IF} <<END
|
||||
DEVICE=${AWS_OAM_IF}
|
||||
BOOTPROTO=dhcp
|
||||
ONBOOT=yes
|
||||
TYPE=Ethernet
|
||||
USERCTL=yes
|
||||
PEERDNS=yes
|
||||
DHCPV6C=yes
|
||||
DHCPV6C_OPTIONS=-nw
|
||||
PERSISTENT_DHCLIENT=yes
|
||||
RES_OPTIONS="timeout:2 attempts:5"
|
||||
DHCP_ARP_CHECK=no
|
||||
END
|
||||
|
||||
cat >/etc/sysconfig/network-scripts/ifcfg-${AWS_MGMT_IF} <<END
|
||||
DEVICE=${AWS_MGMT_IF}
|
||||
BOOTPROTO=dhcp
|
||||
ONBOOT=yes
|
||||
TYPE=Ethernet
|
||||
USERCTL=yes
|
||||
PEERDNS=yes
|
||||
DHCPV6C=yes
|
||||
DHCPV6C_OPTIONS=-nw
|
||||
PERSISTENT_DHCLIENT=yes
|
||||
RES_OPTIONS="timeout:2 attempts:5"
|
||||
DHCP_ARP_CHECK=no
|
||||
END
|
||||
|
||||
if [ ! -d /var/tmp ]; then
|
||||
mkdir -m 1777 /var/tmp
|
||||
fi
|
||||
|
||||
KERNEL_VERSION=\$(rpm -q kernel --qf '%{version}-%{release}.%{arch}')
|
||||
/sbin/dracut -f /boot/initramfs-\$KERNEL_VERSION.img \$KERNEL_VERSION
|
||||
_END
|
||||
}
|
||||
|
||||
# Process command line
|
||||
init() {
|
||||
local temp
|
||||
temp=$(getopt -o hf4w:W:e:p:P:Sm:gs:i:o: --long help,force,ipv4,ipv4-default-gateway:,ipv6-default-gateway:,oam-dev:,password:,passwords-from:,passwords-from-stdin,mode:,graphical,sudo,size:,iso:,output:,aws,addon: -n "$PROGNAME" -- "$@") || cmdline_error
|
||||
eval set -- "$temp"
|
||||
while true ; do
|
||||
case "$1" in
|
||||
-h|--help)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
-f|--force)
|
||||
FORCE=1
|
||||
shift
|
||||
;;
|
||||
-4|--ipv4)
|
||||
DHCPV6C=no
|
||||
shift
|
||||
;;
|
||||
-w|--ipv4-default-gateway)
|
||||
is_ipv4_addr "$2" || cmdline_error "invalid IP address \`$2'"
|
||||
IPV4_GW_ADDR="$2"
|
||||
shift 2
|
||||
;;
|
||||
-W|--ipv6-default-gateway)
|
||||
is_ipv6_addr "$2" || cmdline_error "invalid IP address \`$2'"
|
||||
IPV6_GW_ADDR="$2"
|
||||
shift 2
|
||||
;;
|
||||
-e|--oam-dev)
|
||||
OAM_DEV="$2"
|
||||
shift 2
|
||||
;;
|
||||
-P|--passwords-from)
|
||||
read_passwords "$2"
|
||||
shift 2
|
||||
;;
|
||||
-S|--passwords-from-stdin)
|
||||
read_passwords -
|
||||
shift
|
||||
;;
|
||||
-p|--password)
|
||||
save_password "$2" "invalid $1: " || cmdline_error
|
||||
shift 2
|
||||
;;
|
||||
-m|--mode)
|
||||
[[ "$2" =~ ^(controller|aio|aio_lowlatency)$ ]] || cmdline_error "invalid --mode"
|
||||
AUTO_MODE="$2"
|
||||
shift 2
|
||||
;;
|
||||
-g|--graphical)
|
||||
GRAPHICAL=1
|
||||
GRAPHICAL_SUFFIX=_graphical
|
||||
shift
|
||||
;;
|
||||
--sudo)
|
||||
SUDO=1
|
||||
shift
|
||||
;;
|
||||
-s|--size)
|
||||
[[ $2 =~ ^[0-9]{1,5}G$ ]] || cmdline_error "invalid --size"
|
||||
IMG_SIZE="$2"
|
||||
shift 2
|
||||
;;
|
||||
-i|--iso)
|
||||
BOOTIMAGE_ISO="$2"
|
||||
shift 2
|
||||
;;
|
||||
-o|--output)
|
||||
IMG_FILE="$2"
|
||||
shift 2
|
||||
;;
|
||||
--aws)
|
||||
AWS_COMPATIBLE=1
|
||||
shift
|
||||
;;
|
||||
--addon)
|
||||
RPM_ADDON_LIST+=("$2")
|
||||
shift 2
|
||||
;;
|
||||
--)
|
||||
shift
|
||||
break
|
||||
;;
|
||||
-?*)
|
||||
cmdline_error
|
||||
;;
|
||||
*)
|
||||
break
|
||||
;;
|
||||
esac
|
||||
done
|
||||
[[ $# -le 0 ]] || cmdline_error "too many arguments"
|
||||
|
||||
# These are required
|
||||
[[ -n $MY_WORKSPACE ]] || die "MY_WORKSPACE is not set"
|
||||
[[ -n $MY_REPO ]] || die "MY_REPO is not set"
|
||||
|
||||
# Defaults
|
||||
: ${AUTO_MODE:=aio}
|
||||
: ${IMG_SIZE:=500G}
|
||||
: ${BOOTIMAGE_ISO:=$MY_WORKSPACE/export/bootimage.iso}
|
||||
: ${IMG_FILE:=$MY_WORKSPACE/export/stx_${AUTO_MODE}${GRAPHICAL_SUFFIX}.qcow2}
|
||||
}
|
||||
|
||||
# main
|
||||
init "$@"
|
||||
|
||||
# make sure we clean up before exiting
|
||||
trap handle_sig INT TERM PIPE HUP
|
||||
trap handle_exit EXIT
|
||||
|
||||
# make sure update-iso-centos.sh exists
|
||||
UPDATE_ISO=$MY_REPO/stx/utilities/utilities/platform-util/scripts/update-iso-centos.sh
|
||||
: <"$UPDATE_ISO" || exit 1
|
||||
|
||||
# make sure input ISO file exists
|
||||
: <"$BOOTIMAGE_ISO" || exit 1
|
||||
|
||||
# make sure patch_build.sh exists
|
||||
PATCH_BUILD=$MY_REPO/stx/update/extras/scripts/patch_build.sh
|
||||
: <"$PATCH_BUILD" || exit 1
|
||||
|
||||
# find patch-iso
|
||||
which patch-iso >/dev/null || exit 1
|
||||
|
||||
# find QEMU/KVM
|
||||
find_kvm
|
||||
|
||||
# find qemu-img
|
||||
which qemu-img >/dev/null || exit 1
|
||||
|
||||
# refuse to overwrite existing output file
|
||||
if [[ -e "$IMG_FILE" ]] && [[ $FORCE -ne 1 ]] ; then
|
||||
die "output file $IMG_FILE already exist, delete it first or use --force"
|
||||
fi
|
||||
|
||||
# which menu item to use?
|
||||
menu_item=
|
||||
case "$AUTO_MODE" in
|
||||
controller) menu_item=0 ;;
|
||||
aio) menu_item=2 ;;
|
||||
aio_lowlatency) menu_item=4 ;;
|
||||
*) die "internal error" ;;
|
||||
esac
|
||||
|
||||
# create a directory for temporary files
|
||||
TEMPFILES_DIR=$(mktemp -d -t build_img.XXXXXXXX) || exit 1
|
||||
|
||||
# create an updated iso with the menu item pre-selected
|
||||
auto_iso="$TEMPFILES_DIR/bootimage_${AUTO_MODE}${GRAPHICAL_SUFFIX}.iso"
|
||||
rm -f "$auto_iso"
|
||||
cmd=()
|
||||
if [[ $SUDO == 1 ]] ; then
|
||||
cmd+=(sudo)
|
||||
fi
|
||||
cmd+=("$UPDATE_ISO" -i "$BOOTIMAGE_ISO" -o "$auto_iso" -d "$menu_item" -t 3)
|
||||
|
||||
if [[ $AWS_COMPATIBLE == 1 ]] ; then
|
||||
cmd+=(-p rdloaddriver=ena)
|
||||
fi
|
||||
|
||||
# generate a kickstart add-on
|
||||
ks_addon="$TEMPFILES_DIR/ks_addon.sh"
|
||||
echo "#### start ks-addon.cfg" >"$ks_addon"
|
||||
# configure $OAM_DEV
|
||||
cat >>"$ks_addon" <<_END
|
||||
# configure $OAM_DEV
|
||||
uuid=\$(uuidgen)
|
||||
cat >/etc/sysconfig/network-scripts/ifcfg-$OAM_DEV <<END
|
||||
UUID=\$uuid
|
||||
DEVICE=$OAM_DEV
|
||||
NAME=$OAM_DEV
|
||||
TYPE=Ethernet
|
||||
PROXY_METHOD=none
|
||||
BROWSER_ONLY=no
|
||||
BOOTPROTO=dhcp
|
||||
DEFROUTE=yes
|
||||
IPV4_FAILURE_FATAL=no
|
||||
IPV6INIT=yes
|
||||
IPV6_AUTOCONF=no
|
||||
IPV6_DEFROUTE=yes
|
||||
IPV6_FAILURE_FATAL=no
|
||||
IPV6_ADDR_GEN_MODE=stable-privacy
|
||||
ONBOOT=yes
|
||||
DHCPV6C=$DHCPV6C
|
||||
END
|
||||
_END
|
||||
|
||||
# Add default routes
|
||||
if [[ -n "$IPV4_GW_ADDR" ]] ; then
|
||||
cat >>"$ks_addon" <<_END
|
||||
# Add a default IPv4 route
|
||||
echo "default via $IPV4_GW_ADDR dev $OAM_DEV metric 1" >/etc/sysconfig/network-scripts/route-$OAM_DEV
|
||||
_END
|
||||
fi
|
||||
if [[ -n "$IPV6_GW_ADDR" ]] ; then
|
||||
cat >>"$ks_addon" <<_END
|
||||
# Add a default IPv6 route
|
||||
echo "default via $IPV6_GW_ADDR dev $OAM_DEV metric 1" >/etc/sysconfig/network-scripts/route6-$OAM_DEV
|
||||
_END
|
||||
fi
|
||||
|
||||
# Disable cloud-init networking if cloud-init is installed
|
||||
cat >>"$ks_addon" <<_END
|
||||
if [ -d /etc/cloud/cloud.cfg.d/ ]; then
|
||||
echo "network: {config: disabled}" > /etc/cloud/cloud.cfg.d/99-disable-networking.cfg
|
||||
fi
|
||||
_END
|
||||
|
||||
# Set passwords
|
||||
for user in "${!PASSWORDS[@]}" ; do
|
||||
encrypted=$(encrypt_password "${PASSWORDS[$user]}")
|
||||
[[ $? -eq 0 ]] || exit 1
|
||||
cat >>"$ks_addon" <<_END
|
||||
# set ${user}'s password
|
||||
usermod -e '' -p '$encrypted' '$user' || exit 1
|
||||
chage --inactive -1 --maxdays -1 --lastday \$(date '+%Y-%m-%d') '$user' || exit 1
|
||||
_END
|
||||
done
|
||||
|
||||
# Comment-out global_filter in lvm.conf
|
||||
# The installer normally sets it to the installer hard drive's bus address,
|
||||
# and LVM doesn't come up when booted in different emulation environment.
|
||||
cat >>"$ks_addon" <<'_END'
|
||||
# Comment-out global_filter in lvm.conf
|
||||
sed -r -i 's!^(\s*)global_filter\s*=.*!\1# global_filter = [ "a|.*/|" ]!' /etc/lvm/lvm.conf
|
||||
_END
|
||||
|
||||
# Change grub parameters to boot to graphical console.
|
||||
# The installer sets these to use the serial port when we install
|
||||
# in text mode.
|
||||
if [[ $GRAPHICAL -eq 1 ]] ; then
|
||||
cat >>"$ks_addon" <<'_END'
|
||||
# Boot in graphical mode
|
||||
sed -r -i \
|
||||
-e '/^\s*GRUB_SERIAL_COMMAND=/ d' \
|
||||
-e '/^\s*GRUB_TERMINAL(_OUTPUT)?=/ s/=.*/="console"/' \
|
||||
-e '/^\s*GRUB_CMDLINE_LINUX=/ s/\bconsole=ttyS0,\S+/console=tty0/' \
|
||||
/etc/default/grub
|
||||
if [ -d /sys/firmware/efi ] ; then
|
||||
grub2-mkconfig -o /boot/efi/EFI/centos/grub.cfg
|
||||
else
|
||||
grub2-mkconfig -o /boot/grub2/grub.cfg
|
||||
fi
|
||||
_END
|
||||
fi
|
||||
|
||||
# Add necessary setup work for an aws image to the ks_addon script
|
||||
if [[ $AWS_COMPATIBLE == 1 ]] ; then
|
||||
add_aws_setup $ks_addon
|
||||
fi
|
||||
|
||||
echo "#### end ks-addon.cfg" >>"$ks_addon"
|
||||
cmd+=(-a "$ks_addon")
|
||||
|
||||
# execute update_iso.sh
|
||||
echo "${cmd[@]}"
|
||||
"${cmd[@]}" || exit 1
|
||||
|
||||
# patch the iso if additional rpms are specified
|
||||
if [ ${#RPM_ADDON_LIST[@]} -gt 0 ] ; then
|
||||
# Patch build will drop the generated patch file into the current directory.
|
||||
# We want that to be $MY_WORKSPACE.
|
||||
pushd $MY_WORKSPACE
|
||||
patch_file="PATCH.img-addon"
|
||||
patched_iso="$TEMPFILES_DIR/bootimage_${AUTO_MODE}${GRAPHICAL_SUFFIX}_patched.iso"
|
||||
cmd=("$PATCH_BUILD" --id "${patch_file}" --summary "additional packages for qcow2 image" --desc "Adds customizations to qcow2 image" --status "REL" --reboot-required "N")
|
||||
for rpm_addon in "${RPM_ADDON_LIST[@]}"; do
|
||||
cmd+=(--all-nodes "${rpm_addon}")
|
||||
done
|
||||
# create the patch file
|
||||
echo "${cmd[@]}"
|
||||
"${cmd[@]}" || exit 1
|
||||
cmd=(patch-iso -i "$auto_iso" -o "$patched_iso" "${MY_WORKSPACE}/${patch_file}.patch")
|
||||
# execute patch-iso
|
||||
echo "${cmd[@]}"
|
||||
"${cmd[@]}" || exit 1
|
||||
mv ${patched_iso} ${auto_iso}
|
||||
popd
|
||||
fi
|
||||
|
||||
# create a blank image file
|
||||
rm -f "$IMG_FILE.tmp"
|
||||
cmd=(qemu-img create "$IMG_FILE.tmp" -f qcow2 "$IMG_SIZE")
|
||||
echo "${cmd[@]}"
|
||||
"${cmd[@]}" || exit 1
|
||||
|
||||
# run the installer in QEMU
|
||||
cmd=(
|
||||
"$KVM"
|
||||
"${KVM_OPTS[@]}"
|
||||
-m 8192
|
||||
-drive file="$IMG_FILE.tmp",if=ide
|
||||
-cdrom "$auto_iso"
|
||||
-boot d
|
||||
-no-reboot
|
||||
-nographic
|
||||
-smp 4
|
||||
)
|
||||
# if STDOUT is a terminal, save current terminal settings
|
||||
# so that we can restore them later
|
||||
if [[ -t 1 ]] ; then
|
||||
TTY_SETTINGS=$(stty -g <&1)
|
||||
# otherwise, disable QEMU's terminal features
|
||||
else
|
||||
cmd+=(-serial file:/dev/stdout)
|
||||
fi
|
||||
# execute qemu
|
||||
echo "${cmd[@]}"
|
||||
"${cmd[@]}" 2>&1 | tee $TEMPFILES_DIR/kvm.log
|
||||
if [[ ${PIPESTATUS[0]} -ne 0 || ${PIPESTATUS[1]} -ne 0 ]] ; then
|
||||
die "qemu: installation failed"
|
||||
fi
|
||||
|
||||
# QEMU exits with status=0 even when killed by a signal. Check its output
|
||||
# for a known message to detect this case
|
||||
if tail "$TEMPFILES_DIR/kvm.log" | grep -q -E "(qemu|kvm).*: terminating on signal" ; then
|
||||
die "qemu terminated by a signal"
|
||||
fi
|
||||
|
||||
# rename tmp image file to the final name
|
||||
mv -f "$IMG_FILE.tmp" "$IMG_FILE" || exit 1
|
||||
|
||||
# done
|
||||
echo "
|
||||
Created $IMG_FILE
|
||||
|
||||
To use this image, type:
|
||||
"
|
||||
if [[ $GRAPHICAL -eq 1 ]] ; then
|
||||
echo " $KVM ${KVM_OPTS[@]} -m 16384 -drive file=$IMG_FILE,if=ide -boot c -smp 4"
|
||||
echo
|
||||
echo "(requires a graphical console)"
|
||||
else
|
||||
echo " $KVM ${KVM_OPTS[@]} -m 16384 -drive file=$IMG_FILE,if=ide -boot c -nographic -smp 4"
|
||||
fi
|
@@ -1,853 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
#
|
||||
# Copyright (c) 2018-2020 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
# Build the export/bootimage.iso file
|
||||
#
|
||||
# This script uses environment variables to determine the source of
|
||||
# packages, and bundles the packages into a bootable .iso
|
||||
#
|
||||
# It starts by building a basic "vanilla CentOS" ISO, and then adds our
|
||||
# packages to it.
|
||||
|
||||
BUILD_ISO_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )"
|
||||
source "${BUILD_ISO_DIR}/image-utils.sh"
|
||||
source "${BUILD_ISO_DIR}/git-utils.sh"
|
||||
|
||||
# Set REPOQUERY, REPOQUERY_SUB_COMMAND, REPOQUERY_RESOLVE and
|
||||
# REPOQUERY_WHATPROVIDES_DELIM for our build environment.
|
||||
source "${BUILD_ISO_DIR}/pkg-manager-utils.sh"
|
||||
|
||||
usage () {
|
||||
echo ""
|
||||
echo "Usage: "
|
||||
echo " build-iso [--auto <mode>] [--file <filename>] "
|
||||
echo " [--device <device>] [--skip-sign]"
|
||||
echo " [--sudo|udev]"
|
||||
echo " --file <bootimage.iso> destination ISO file"
|
||||
echo " --auto <controller|cpe> Modify kickstart to auto-install controller or cpe"
|
||||
echo " mode"
|
||||
echo " --device <sda> Use a different boot/rootds device (default is sda)"
|
||||
echo " --skip-sign do not add file signature to RPMs"
|
||||
echo " --sudo Use \"sudo\" command to access EFI image filesystem (default)"
|
||||
echo " --udev Use udev to access EFI image filesystem"
|
||||
echo ""
|
||||
echo " Note that environment variable BUILD_ISO_USE_UDEV=1 will have the same effect"
|
||||
echo " as the --udev option"
|
||||
echo ""
|
||||
}
|
||||
|
||||
MY_YUM_CONF=""
|
||||
STD_REPO_ID="local-std"
|
||||
RT_REPO_ID="local-rt"
|
||||
LOWER_LAYER_STD_REPO_ID=""
|
||||
LOWER_LAYER_RT_REPO_ID=""
|
||||
|
||||
NPROCS=$(nproc)
|
||||
|
||||
export MOCK=/usr/bin/mock
|
||||
|
||||
CREATEREPO=$(which createrepo_c)
|
||||
if [ $? -ne 0 ]; then
|
||||
CREATEREPO="createrepo"
|
||||
fi
|
||||
|
||||
# TEMPORARY: Check for isohybrid now to give a warning about installing pkg
|
||||
if [ ! -f /usr/bin/isohybrid ]; then
|
||||
echo "Missing required utility: /usr/bin/isohybrid"
|
||||
echo "Installation of syslinux is required:"
|
||||
echo " sudo yum install -y syslinux"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
function install_pkg_list {
|
||||
local PKGLIST=$1
|
||||
if [ "x$PKGLIST" == "x" ]; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
OLD_PWD=$PWD
|
||||
|
||||
echo "Installing packages listed in $PKGLIST and dependancies"
|
||||
\rm -f $OUTPUT_DIR/dist/report_deps.txt
|
||||
$CREATEREPO $CGCS_REPO_DIR
|
||||
$CREATEREPO $CGCS_RT_REPO_DIR
|
||||
|
||||
\cp -v $MY_YUM_CONF $OUTPUT_DIR
|
||||
|
||||
\cd $OUTPUT_DIST_DIR/isolinux/Packages
|
||||
$INTERNAL_REPO_ROOT/build-tools/build_iso/cgts_deps.sh --deps=$PKGLIST
|
||||
|
||||
if [ $? -ne 0 ]
|
||||
then
|
||||
echo "Could not install dependencies"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# clean up
|
||||
echo "Removing local-std yum repo $CGCS_REPO_DIR/repodata"
|
||||
echo "Removing local-rt yum repo $CGCS_RT_REPO_DIR/repodata"
|
||||
|
||||
\cd $OLD_PWD
|
||||
}
|
||||
|
||||
# Generate the report of where all packages come from
|
||||
function make_report {
|
||||
local PKGLISTFILES=$@
|
||||
if [ "x$PKGLISTFILES" == "x" ]; then
|
||||
return 1
|
||||
fi
|
||||
echo "MAKING $REPORT_FILE"
|
||||
echo "-----------------" >> $REPORT_FILE
|
||||
|
||||
echo "ISO REPORT" > $REPORT_FILE
|
||||
date >> $REPORT_FILE
|
||||
echo "-----------------" >> $REPORT_FILE
|
||||
|
||||
echo " " >> $REPORT_FILE
|
||||
echo "-----------------" >> $REPORT_FILE
|
||||
echo "EXPLICIT INCLUDES" >> $REPORT_FILE
|
||||
echo "-----------------" >> $REPORT_FILE
|
||||
for PKGLIST in $PKGLISTFILES; do
|
||||
while read PKG; do
|
||||
PKG=`echo $PKG | sed "s/#.*//"`;
|
||||
if [ "${PKG}x" != "x" ]; then
|
||||
echo $PKG >> $REPORT_FILE
|
||||
fi
|
||||
done < $PKGLIST
|
||||
done
|
||||
|
||||
echo " " >> $REPORT_FILE
|
||||
echo "-----------------" >> $REPORT_FILE
|
||||
echo " PACKAGES " >> $REPORT_FILE
|
||||
echo "-----------------" >> $REPORT_FILE
|
||||
cat $BUILT_REPORT | sort | uniq >> $REPORT_FILE
|
||||
|
||||
echo " " >> $REPORT_FILE
|
||||
echo "-----------------" >> $REPORT_FILE
|
||||
echo " WARNINGS " >> $REPORT_FILE
|
||||
echo "-----------------" >> $REPORT_FILE
|
||||
|
||||
# Note that the warnings file may have multiple lines for the same
|
||||
# missing dependency. A sort | uniq solves this so we don't duplicate
|
||||
# warnings
|
||||
cat $WARNINGS_REPORT | sort | uniq >> $REPORT_FILE
|
||||
|
||||
echo "ISO REPORT: $REPORT_FILE"
|
||||
}
|
||||
|
||||
function init_vars {
|
||||
#####################################
|
||||
# Input definitions
|
||||
|
||||
# Where all CentOS packages live
|
||||
# Where essential CentOS (minimal install) packages live
|
||||
INTERNAL_REPO_ROOT=
|
||||
STX_DIR=
|
||||
|
||||
# Where BSP files live
|
||||
export BSP_FILES_PATH=
|
||||
|
||||
# Where our own packages live
|
||||
CGCS_REPO_DIR=$MY_WORKSPACE/std/rpmbuild/RPMS
|
||||
CGCS_RT_REPO_DIR=$MY_WORKSPACE/rt/rpmbuild/RPMS
|
||||
|
||||
MY_YUM_CONF=$(create-yum-conf)
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "ERROR: create-yum-conf failed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# LOWER_LAYER_STD_REPO_ID should be something like StxCentos7Distro or StxCentos8Distro
|
||||
LOWER_LAYER_STD_REPO_ID=$(grep '\[StxCentos.*Distro\]' ${MY_YUM_CONF} | sed -e 's/^\[//' -e 's/\].*//')
|
||||
LOWER_LAYER_RT_REPO_ID=$(grep '\[StxCentos.*Distro-rt\]' ${MY_YUM_CONF} | sed -e 's/^\[//' -e 's/\].*//')
|
||||
|
||||
DISTRO_REPO_DIR=$(for d in $(grep baseurl $MY_YUM_CONF | grep file: | awk -F : '{print $2}' | sed 's:///:/:g'); do if [ -d $d/images ]; then echo $d ;fi; done)
|
||||
|
||||
#####################################
|
||||
# Output definitions
|
||||
|
||||
# where to put stuff (current dir unless MY_WORKSPACE defined)
|
||||
OUTPUT_DIR="$PWD/export"
|
||||
if [ ! -z "$MY_WORKSPACE" ] && [ -d "$MY_WORKSPACE" ] ; then
|
||||
OUTPUT_DIR="$MY_WORKSPACE/export"
|
||||
CGCS_REPO_DIR="$MY_WORKSPACE/std/rpmbuild/RPMS"
|
||||
CGCS_RT_REPO_DIR="$MY_WORKSPACE/rt/rpmbuild/RPMS"
|
||||
fi
|
||||
|
||||
# Directory in which to populate files to be distributed
|
||||
if [ $CUMULUS -eq 0 ]; then
|
||||
OUTPUT_DIST_DIR=$OUTPUT_DIR/dist
|
||||
else
|
||||
OUTPUT_DIST_DIR=$OUTPUT_DIR/dist-cumulus
|
||||
fi
|
||||
|
||||
# Package disc image
|
||||
OUTPUT_FILE=$OUTPUT_DIR/$DEST_FILE
|
||||
|
||||
# Generate an error if the output file is below this threshold
|
||||
MINIMUM_EXPECTED_SIZE=500000000
|
||||
|
||||
# For backward compatibility. Old repo location or new?
|
||||
CENTOS_REPO=${MY_REPO}/centos-repo
|
||||
if [ ! -d ${CENTOS_REPO} ]; then
|
||||
CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
|
||||
if [ ! -d ${CENTOS_REPO} ]; then
|
||||
CENTOS_REPO=${MY_REPO}/centos-repo
|
||||
fi
|
||||
fi
|
||||
|
||||
# report variables
|
||||
REPORT_FILE=$OUTPUT_DIR/report.txt
|
||||
BUILT_REPORT=$OUTPUT_DIR/local.txt
|
||||
CLOUD_REPORT=$OUTPUT_DIR/cloud.txt
|
||||
CLOUD_COMMON_REPORT=$OUTPUT_DIR/cloudcommon.txt
|
||||
CENTOS_REPORT=$OUTPUT_DIR/centos.txt
|
||||
EPEL_REPORT=$OUTPUT_DIR/epel.txt
|
||||
WARNINGS_REPORT=$OUTPUT_DIR/warnings.txt
|
||||
|
||||
\rm -f $REPORT_FILE
|
||||
\rm -f $BUILT_REPORT
|
||||
\rm -f $CLOUD_REPORT
|
||||
\rm -f $CLOUD_COMMON_REPORT
|
||||
\rm -f $CENTOS_REPORT
|
||||
\rm -f $WARNINGS_REPORT
|
||||
}
|
||||
|
||||
# check input variables
|
||||
function check_vars {
|
||||
# Where to store data
|
||||
printf "Finding cgcs-root\n"
|
||||
printf " Checking \$MY_REPO (value \"$MY_REPO\")\n"
|
||||
|
||||
if [ ! -z "$MY_REPO" ] && [ -d "$MY_REPO" ] ; then
|
||||
INTERNAL_REPO_ROOT=$MY_REPO
|
||||
printf " Found!\n"
|
||||
fi
|
||||
|
||||
if [ -z "$INTERNAL_REPO_ROOT" ] ; then
|
||||
printf " No joy -- checking \$MY_REPO_ROOT_DIR (value \"$MY_REPO_ROOT_DIR\")\n"
|
||||
if [ ! -z "$MY_REPO_ROOT_DIR" ] && [ -d "$MY_REPO_ROOT_DIR/cgcs-root" ] ; then
|
||||
INTERNAL_REPO_ROOT=$MY_REPO_ROOT_DIR/cgcs-root
|
||||
printf " Found!\n"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -z "$INTERNAL_REPO_ROOT" ] ; then
|
||||
printf " No joy -- checking for \$MY_WORKSPACE/cgcs-root\n"
|
||||
if [ -d "$MY_WORKSPACE/cgcs-root" ] ; then
|
||||
INTERNAL_REPO_ROOT=$MY_WORKSPACE/cgcs-root
|
||||
printf " Found!\n"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -z "$INTERNAL_REPO_ROOT" ] ; then
|
||||
printf " Error -- could not locate cgcs-root repo.\n"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -z "${CENTOS_REPO}" ] && [ ! -d ${CENTOS_REPO} ]; then
|
||||
echo " Error -- directory '${CENTOS_REPO}' not found."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
STX_DIR=$INTERNAL_REPO_ROOT/stx
|
||||
|
||||
printf "\nChecking that we can access $DISTRO_REPO_DIR\n"
|
||||
if [ ! -d "$DISTRO_REPO_DIR" ] ; then
|
||||
printf " Error -- could not access $DISTRO_REPO_DIR\n"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -e "$DISTRO_REPO_DIR/repodata" ] ; then
|
||||
printf " Error -- $DISTRO_REPO_DIR is there, but does not seem sane\n"
|
||||
fi
|
||||
|
||||
printf "\nOkay, input looks fine...\n\n"
|
||||
printf "Creating output directory $OUTPUT_DIST_DIR\n"
|
||||
if [ $CLEAN_FLAG -eq 1 ]; then
|
||||
echo " Cleaning..."
|
||||
if [ -e $OUTPUT_DIST_DIR ] ; then
|
||||
chmod -R a+w $OUTPUT_DIST_DIR
|
||||
\rm -rf $OUTPUT_DIST_DIR
|
||||
fi
|
||||
if [ -e $OUTPUT_DIST_DIR ] ; then
|
||||
printf "Error: could not remove old $OUTPUT_DIST_DIR\n"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
\mkdir -p $OUTPUT_DIST_DIR
|
||||
if [ ! -d $OUTPUT_DIST_DIR ] ; then
|
||||
printf "Error: could not create $OUTPUT_DIST_DIR\n"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
RELEASE_INFO="$(get_release_info)"
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "ERROR: failed to find a release info file."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
export PLATFORM_RELEASE=$(source "$RELEASE_INFO" && echo $PLATFORM_RELEASE)
|
||||
|
||||
# Where BSP files live
|
||||
export BSP_FILES_PATH="$(get_bsp_dir)"
|
||||
echo " Done"
|
||||
echo ""
|
||||
}
|
||||
|
||||
function init_output_dir {
|
||||
echo "Creating base output directory in $OUTPUT_DIST_DIR"
|
||||
\mkdir -p $OUTPUT_DIST_DIR/isolinux/images
|
||||
\mkdir -p $OUTPUT_DIST_DIR/isolinux/ks
|
||||
\mkdir -p $OUTPUT_DIST_DIR/isolinux/LiveOS
|
||||
\mkdir -p $OUTPUT_DIST_DIR/isolinux/Packages
|
||||
\mkdir -p $OUTPUT_DIST_DIR/utils
|
||||
|
||||
\mkdir -p $OUTPUT_DIST_DIR/isolinux/EFI
|
||||
# This directory will contains files required for the PXE network installer
|
||||
\mkdir -p $OUTPUT_DIST_DIR/isolinux/pxeboot
|
||||
|
||||
echo " Preparing package lists"
|
||||
image_inc_list iso std centos > "${PKGLIST_STX}"
|
||||
image_inc_list iso dev centos > "${PKGLIST_DEV}"
|
||||
image_inc_list iso layer centos ${LAYER} > "${PKGLIST_THIS_LAYER}"
|
||||
|
||||
echo " Copying base files"
|
||||
|
||||
# Generate .discinfo file
|
||||
date +%s.%N > $OUTPUT_DIST_DIR/isolinux/.discinfo
|
||||
echo $PLATFORM_RELEASE >> $OUTPUT_DIST_DIR/isolinux/.discinfo
|
||||
echo "x86_64" >> $OUTPUT_DIST_DIR/isolinux/.discinfo
|
||||
|
||||
\cp -L -ru $DISTRO_REPO_DIR/isolinux/* $OUTPUT_DIST_DIR/isolinux/
|
||||
\cp -L -ru $DISTRO_REPO_DIR/images/pxeboot $OUTPUT_DIST_DIR/isolinux/images/
|
||||
|
||||
echo " Installing startup files"
|
||||
|
||||
\cp -L "$BSP_FILES_PATH/centos.syslinux.cfg" "$OUTPUT_DIST_DIR/isolinux/syslinux.cfg"
|
||||
\cp -L "$BSP_FILES_PATH/centos.syslinux.cfg" "$OUTPUT_DIST_DIR/isolinux/isolinux.cfg"
|
||||
sed -i 's/wr_usb_boot/oe_iso_boot/' $OUTPUT_DIST_DIR/isolinux/isolinux.cfg
|
||||
|
||||
# Modify the isolinux.cfg to auto install if requested
|
||||
# Option 0 is Controller(serial). Option 2 is CPE serial.
|
||||
if [ "$AUTO_INSTALL" == "controller" ] ; then
|
||||
echo "Modifying ISO to auto-install controller load"
|
||||
perl -p -i -e 's/timeout 0/timeout 1\ndefault 0/' $OUTPUT_DIST_DIR/isolinux/isolinux.cfg
|
||||
elif [ "$AUTO_INSTALL" == "cpe" ] ; then
|
||||
echo "Modifying ISO to auto-install CPE (combined load)"
|
||||
perl -p -i -e 's/timeout 0/timeout 1\ndefault 2/' $OUTPUT_DIST_DIR/isolinux/isolinux.cfg
|
||||
fi
|
||||
|
||||
# Modify the device if requested
|
||||
if [ ! -z "$DEVICE" ] ; then
|
||||
echo "Modifying ISO to use device $DEVICE"
|
||||
perl -p -i -e "s/device=sda/device=${DEVICE}/g" $OUTPUT_DIST_DIR/isolinux/isolinux.cfg
|
||||
fi
|
||||
|
||||
# Copy UEFI files
|
||||
\cp -L -ru $DISTRO_REPO_DIR/EFI/* $OUTPUT_DIST_DIR/isolinux/EFI/
|
||||
\cp -L "$BSP_FILES_PATH/grub.cfg" "$OUTPUT_DIST_DIR/isolinux/EFI/BOOT/grub.cfg"
|
||||
\cp -L "$BSP_FILES_PATH/pxeboot_grub.cfg" "$OUTPUT_DIST_DIR/isolinux/pxeboot/pxeboot_grub.cfg"
|
||||
|
||||
# Update the efiboot.img (See https://wiki.archlinux.org/index.php/Remastering_the_Install_ISO)
|
||||
# We need to mount the image file, replace the grub.cfg file with the StarlingX one, and unmount.
|
||||
# Script update-efiboot-image will do this. If there is not loop device on the build machine
|
||||
# then this script must be executed manually prior.
|
||||
|
||||
if [ ! -e "/dev/loop-control" -a ! -f "$OUTPUT_DIR/efiboot.img" ]; then
|
||||
CMD="export PROJECT=$PROJECT; \
|
||||
export SRC_BUILD_ENVIRONMENT=$SRC_BUILD_ENVIRONMENT; \
|
||||
export MY_BUILD_ENVIRONMENT=$MY_BUILD_ENVIRONMENT; \
|
||||
export MY_BUILD_ENVIRONMENT_FILE=$MY_BUILD_ENVIRONMENT_FILE; \
|
||||
export MY_BUILD_DIR=$MY_BUILD_DIR; \
|
||||
export MY_WORKSPACE=$MY_WORKSPACE; \
|
||||
export MY_REPO=$MY_REPO; \
|
||||
export LAYER=$LAYER; \
|
||||
export MY_BUILD_CFG=$MY_BUILD_CFG; \
|
||||
export MY_MOCK_ROOT=$MY_MOCK_ROOT; \
|
||||
export PATH=$MY_REPO/build-tools:\$PATH; \
|
||||
export BUILD_ISO_USE_UDEV=$BUILD_ISO_USE_UDEV; \
|
||||
export BSP_FILES_PATH=$BSP_FILES_PATH; \
|
||||
update-efiboot-image"
|
||||
echo $CMD
|
||||
|
||||
if [ "$HOSTNAME" == "yow-cgts3-centos7" ]; then
|
||||
echo "Attempting to run update-efiboot-image on yow-cgts3-lx"
|
||||
ssh -o StrictHostKeyChecking=no yow-cgts3-lx "$CMD"
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to run update-efiboot-image on yow-cgts3-lx"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$HOSTNAME" == "yow-cgts2-centos7" ]; then
|
||||
echo "Attempting to run update-efiboot-image on yow-cgts2-lx"
|
||||
ssh -o StrictHostKeyChecking=no yow-cgts2-lx "$CMD"
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to run update-efiboot-image on yow-cgts2-lx"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ! -e "/dev/loop-control" -a ! -f "$OUTPUT_DIR/efiboot.img" ]; then
|
||||
printf "\n**************************************************************************************************** \n"
|
||||
printf "No loop device on this machine. Please ensure $OUTPUT_DIR/efiboot.img \n"
|
||||
printf "exist prior to executing build-iso by. It can be created by running \n"
|
||||
printf " $INTERNAL_REPO_ROOT/build-tools/update-efiboot-image \n"
|
||||
printf "on a machine that does support a loop device. Please ensure all standard \n"
|
||||
printf "build environment variables are defined (e.g. MY_REPO, MY_WORKSPACE, etc.). \n"
|
||||
printf " \n"
|
||||
printf "e.g. If building on yow-cgts3-centos7, you'll want to run the script on \n"
|
||||
printf " yow-cgts3-lx which shares the same file system, but supports loop devices \n"
|
||||
printf "****************************************************************************************************** \n"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -f "$OUTPUT_DIR/efiboot.img" ]; then
|
||||
|
||||
# The script update-efiboot-image was run outside the build-iso script, do nothing.
|
||||
printf " The image file $OUTPUT_DIR/efiboot.img already exists\n"
|
||||
else
|
||||
printf " The image file $OUTPUT_DIR/efiboot.img does not exist \n"
|
||||
if [ ! -f "$INTERNAL_REPO_ROOT/build-tools/update-efiboot-image" ]; then
|
||||
printf "*** Error: script update-efiboot-image does not exist *** \n"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Run the script
|
||||
BUILD_ISO_USE_UDEV=$BUILD_ISO_USE_UDEV $INTERNAL_REPO_ROOT/build-tools/update-efiboot-image
|
||||
RET=$?
|
||||
if [ $RET != 0 ]; then
|
||||
printf "*** Error: update-efiboot-image script returned failure $RET *** \n"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
fi
|
||||
|
||||
\cp -L $OUTPUT_DIR/efiboot.img $OUTPUT_DIST_DIR/isolinux/images/
|
||||
\rm -f $OUTPUT_DIR/efiboot.img
|
||||
|
||||
# Copy and set up pxeboot setup files
|
||||
\cp "$BSP_FILES_PATH/pxeboot_setup.sh" "$OUTPUT_DIST_DIR/isolinux/pxeboot_setup.sh"
|
||||
\cp "$BSP_FILES_PATH/pxeboot.cfg" "$OUTPUT_DIST_DIR/isolinux/pxeboot/pxeboot.cfg"
|
||||
chmod +x $OUTPUT_DIST_DIR/isolinux/pxeboot_setup.sh
|
||||
|
||||
\rm -f $OUTPUT_DIST_DIR/comps.xml
|
||||
\cp -L $INTERNAL_REPO_ROOT/build-tools/build_iso/comps.xml.gz $OUTPUT_DIST_DIR/
|
||||
gunzip $OUTPUT_DIST_DIR/comps.xml.gz
|
||||
|
||||
TMP_DIR=$MY_WORKSPACE/tmp
|
||||
\mkdir -p $TMP_DIR
|
||||
TMPDIR=$TMP_DIR yum clean all -c $MY_YUM_CONF
|
||||
\rm -rf $TMP_DIR/yum-$USER-*
|
||||
echo " Done"
|
||||
echo ""
|
||||
}
|
||||
|
||||
function package_content_checksum {
|
||||
local p=$1
|
||||
local md5
|
||||
local r
|
||||
r=$(basename $p)
|
||||
md5=$( ( rpm2cpio $p;
|
||||
rpm -q --info -p $p;
|
||||
rpm -q --dump -p $p;
|
||||
rpm -q --scripts -p $p ) | md5sum | cut -d ' ' -f 1)
|
||||
echo "$r $md5"
|
||||
}
|
||||
|
||||
function final_touches {
|
||||
OLD_PWD=$PWD
|
||||
|
||||
# Update the comps.xml
|
||||
if [ ! -f $OUTPUT_DIST_DIR/comps.xml.bak ]; then
|
||||
\cp $OUTPUT_DIST_DIR/comps.xml $OUTPUT_DIST_DIR/comps.xml.bak
|
||||
fi
|
||||
|
||||
local EXTRA_ARGS=""
|
||||
if [ "x${RELEASE_BUILD}" == "x" ]; then
|
||||
EXTRA_ARGS="--pkglist '${PKGLIST_DEV}'"
|
||||
fi
|
||||
|
||||
for PKGLIST_LOWER_LAYER in ${PKGLIST_LOWER_LAYER_LIST}; do
|
||||
EXTRA_ARGS+=" --pkglist ${PKGLIST_LOWER_LAYER}"
|
||||
done
|
||||
|
||||
python "$BSP_FILES_PATH/platform_comps.py" \
|
||||
--groups "$OUTPUT_DIST_DIR/comps.xml" \
|
||||
--pkglist "${PKGLIST_MINIMAL}" \
|
||||
--pkglist "${PKGLIST_STX}" \
|
||||
--pkglist "${PKGLIST_THIS_LAYER}" \
|
||||
${EXTRA_ARGS}
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to update comps.xml"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# create the repo
|
||||
\cd $OUTPUT_DIST_DIR/isolinux
|
||||
$CREATEREPO -q -g ../comps.xml .
|
||||
|
||||
# Create package_checksums
|
||||
printf "creating package_checksums file\n"
|
||||
for r in $(ls Packages/*rpm); do
|
||||
package_content_checksum $r
|
||||
done > package_checksums
|
||||
|
||||
# build the ISO
|
||||
printf "Building image $OUTPUT_FILE\n"
|
||||
\cd $OUTPUT_DIST_DIR
|
||||
chmod 664 isolinux/isolinux.bin
|
||||
mkisofs -o $OUTPUT_FILE \
|
||||
-R -D -A 'oe_iso_boot' -V 'oe_iso_boot' \
|
||||
-quiet \
|
||||
-b isolinux.bin -c boot.cat -no-emul-boot \
|
||||
-boot-load-size 4 -boot-info-table \
|
||||
-eltorito-alt-boot \
|
||||
-e images/efiboot.img \
|
||||
-no-emul-boot \
|
||||
isolinux/
|
||||
|
||||
isohybrid --uefi $OUTPUT_FILE
|
||||
implantisomd5 $OUTPUT_FILE
|
||||
|
||||
\cd $OLD_PWD
|
||||
}
|
||||
|
||||
function extract_pkg_from_local_repo {
|
||||
local pkgname=$1
|
||||
local pkg_mgr_conf=$2
|
||||
shift 2
|
||||
|
||||
local repoid=""
|
||||
local repoid_arg=""
|
||||
|
||||
for repoid in $@; do
|
||||
repoid_arg+=" --repoid=${repoid}"
|
||||
done
|
||||
|
||||
echo "TMPDIR=$TMP_DIR"\
|
||||
"${REPOQUERY} --config=${pkg_mgr_conf} ${repoid_arg}"\
|
||||
"${REPOQUERY_SUB_COMMAND} --location"\
|
||||
"--arch=noarch,x86_64 -q ${pkgname}"
|
||||
local pkgfile=$(TMPDIR=$TMP_DIR \
|
||||
${REPOQUERY} --config=${pkg_mgr_conf} ${repoid_arg} \
|
||||
${REPOQUERY_SUB_COMMAND} --location \
|
||||
--arch=noarch,x86_64 -q ${pkgname})
|
||||
if [ -z "${pkgfile}" ]; then
|
||||
echo "Could not find package $pkgname in $@"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rpm2cpio ${pkgfile/file://} | cpio -idmv
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to extract files from ${pkgfile/file://}"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
function extract_installer_files {
|
||||
# Changes to copied files here must also be reflected in patch-iso
|
||||
|
||||
PKGDIR=$OUTPUT_DIST_DIR/isolinux/Packages
|
||||
|
||||
(
|
||||
\cd $OUTPUT_DIR
|
||||
\rm -rf kickstarts extra_cfgs kickstart.work
|
||||
\mkdir kickstarts extra_cfgs kickstart.work
|
||||
|
||||
echo "Retrieving kickstarts..."
|
||||
|
||||
\cd kickstart.work
|
||||
|
||||
echo "MY_YUM_CONF=${MY_YUM_CONF}"
|
||||
cat ${MY_YUM_CONF}
|
||||
extract_pkg_from_local_repo platform-kickstarts ${MY_YUM_CONF} ${STD_REPO_ID} ${LOWER_LAYER_STD_REPO_ID}
|
||||
extract_pkg_from_local_repo platform-kickstarts-pxeboot ${MY_YUM_CONF} ${STD_REPO_ID} ${LOWER_LAYER_STD_REPO_ID}
|
||||
extract_pkg_from_local_repo platform-kickstarts-extracfgs ${MY_YUM_CONF} ${STD_REPO_ID} ${LOWER_LAYER_STD_REPO_ID}
|
||||
|
||||
\cp --preserve=all var/www/pages/feed/rel-*/*.cfg pxeboot/*.cfg ../kickstarts/ &&
|
||||
\cp --preserve=all extra_cfgs/*.cfg ../extra_cfgs/
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to copy extracted kickstarts"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
\cd ..
|
||||
|
||||
# Copy kickstarts to ISO
|
||||
\cp --preserve=all kickstarts/controller_ks.cfg $OUTPUT_DIST_DIR/isolinux/ks.cfg
|
||||
# Modify the kickstart to shutdown instead of reboot if doing an auto install
|
||||
if [ ! -z "$AUTO_INSTALL" ] ; then
|
||||
sed -i 's/^reboot --eject/shutdown/' $OUTPUT_DIST_DIR/isolinux/ks.cfg
|
||||
fi
|
||||
|
||||
\mv kickstarts/pxeboot* $OUTPUT_DIST_DIR/isolinux/pxeboot/
|
||||
\cp --preserve=all kickstarts/* $OUTPUT_DIST_DIR/isolinux
|
||||
|
||||
# Update OAM interface for cumulus auto install
|
||||
if [ $CUMULUS -eq 1 ]; then
|
||||
# Cumulus wants tty1
|
||||
perl -p -i -e 's/console=tty0/console=tty1/' $OUTPUT_DIST_DIR/isolinux/isolinux.cfg
|
||||
|
||||
# CUMULUS setup scripts specify ens3 for OAM
|
||||
OAM_IFNAME=ens3
|
||||
|
||||
cat <<EOM >> $OUTPUT_DIST_DIR/isolinux/ks.cfg
|
||||
%post
|
||||
#For cumulus tis on tis automated install
|
||||
cat << EOF > /etc/sysconfig/network-scripts/ifcfg-${OAM_IFNAME}
|
||||
IPADDR=10.10.10.3
|
||||
NETMASK=255.255.255.0
|
||||
BOOTPROTO=static
|
||||
ONBOOT=yes
|
||||
DEVICE=${OAM_IFNAME}
|
||||
MTU=1500
|
||||
GATEWAY=10.10.10.1
|
||||
EOF
|
||||
%end
|
||||
EOM
|
||||
fi
|
||||
|
||||
# For PXE boot network installer
|
||||
|
||||
echo ${OUTPUT_DIST_DIR}/isolinux/Packages
|
||||
|
||||
local WORKDIR=pxe-network-installer.content
|
||||
local ORIG_PWD=$PWD
|
||||
|
||||
\rm -rf $WORKDIR
|
||||
\mkdir $WORKDIR
|
||||
\cd $WORKDIR
|
||||
|
||||
extract_pkg_from_local_repo pxe-network-installer ${MY_YUM_CONF} ${STD_REPO_ID} ${LOWER_LAYER_STD_REPO_ID}
|
||||
extract_pkg_from_local_repo grub2-efi-x64-pxeboot ${MY_YUM_CONF} ${STD_REPO_ID} ${LOWER_LAYER_STD_REPO_ID}
|
||||
extract_pkg_from_local_repo grub2-efi-x64-modules ${MY_YUM_CONF} ${STD_REPO_ID} ${LOWER_LAYER_STD_REPO_ID}
|
||||
|
||||
\mkdir -p $OUTPUT_DIST_DIR/isolinux/pxeboot/EFI/centos/x86_64-efi
|
||||
|
||||
\cp --preserve=all var/pxeboot/pxelinux.0 var/pxeboot/menu.c32 var/pxeboot/chain.c32 $OUTPUT_DIST_DIR/isolinux/pxeboot &&
|
||||
\cp --preserve=all usr/lib/grub/x86_64-efi/* $OUTPUT_DIST_DIR/isolinux/pxeboot/EFI/centos/x86_64-efi/ &&
|
||||
\cp --preserve=all var/pxeboot/EFI/grubx64.efi $OUTPUT_DIST_DIR/isolinux/pxeboot/EFI/
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: Could not copy all files from installer"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
\cp --preserve=all var/www/pages/feed/rel-*/LiveOS/squashfs.img $OUTPUT_DIST_DIR/isolinux/LiveOS
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: Could not copy squashfs from LiveOS"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
# Replace vmlinuz and initrd.img with our own pre-built ones
|
||||
\rm -f \
|
||||
$OUTPUT_DIST_DIR/isolinux/vmlinuz \
|
||||
$OUTPUT_DIST_DIR/isolinux/images/pxeboot/vmlinuz \
|
||||
$OUTPUT_DIST_DIR/isolinux/initrd.img \
|
||||
$OUTPUT_DIST_DIR/isolinux/images/pxeboot/initrd.img
|
||||
\cp --preserve=all var/pxeboot/rel-*/installer-bzImage_1.0 \
|
||||
$OUTPUT_DIST_DIR/isolinux/vmlinuz &&
|
||||
\cp --preserve=all var/pxeboot/rel-*/installer-bzImage_1.0 \
|
||||
$OUTPUT_DIST_DIR/isolinux/images/pxeboot/vmlinuz &&
|
||||
\cp --preserve=all var/pxeboot/rel-*/installer-intel-x86-64-initrd_1.0 \
|
||||
$OUTPUT_DIST_DIR/isolinux/initrd.img &&
|
||||
\cp --preserve=all var/pxeboot/rel-*/installer-intel-x86-64-initrd_1.0 \
|
||||
$OUTPUT_DIST_DIR/isolinux/images/pxeboot/initrd.img
|
||||
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: Failed to copy installer images"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
\cd $ORIG_PWD
|
||||
\rm -rf $WORKDIR
|
||||
)
|
||||
if [ $? -ne 0 ]; then
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
function setup_upgrades_files {
|
||||
# Changes to copied files here must also be reflected in patch-iso
|
||||
|
||||
# Copy the upgrade files
|
||||
UPGRADES_DIR="$OUTPUT_DIST_DIR/isolinux/upgrades"
|
||||
\rm -rf $UPGRADES_DIR
|
||||
\mkdir -p $UPGRADES_DIR
|
||||
\cp $BSP_FILES_PATH/upgrades/* $UPGRADES_DIR
|
||||
sed -i "s/xxxSW_VERSIONxxx/${PLATFORM_RELEASE}/g" $UPGRADES_DIR/metadata.xml
|
||||
chmod +x $UPGRADES_DIR/*.sh
|
||||
# Write the version out (used in upgrade scripts - this is the same as SW_VERSION)
|
||||
echo "VERSION=$PLATFORM_RELEASE" > $UPGRADES_DIR/version
|
||||
}
|
||||
|
||||
function sign_iso {
|
||||
# Sign the .iso with the developer private key
|
||||
# Signing with the formal key is only to be done for customer release
|
||||
# builds
|
||||
local isofilename=$(basename $OUTPUT_DIR/$DEST_FILE)
|
||||
local isofilenoext="${isofilename%.*}"
|
||||
openssl dgst -sha256 -sign ${MY_REPO}/build-tools/signing/dev-private-key.pem -binary -out $OUTPUT_DIR/$isofilenoext.sig $OUTPUT_DIR/$DEST_FILE
|
||||
}
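# Verification sketch (not part of this script; the public-key path is
# hypothetical): a detached signature produced above can be checked with
#   openssl dgst -sha256 -verify dev-public-key.pem \
#       -signature bootimage.sig bootimage.iso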
|
||||
|
||||
#############################################
|
||||
# Main code
|
||||
#############################################
|
||||
|
||||
# Check args
|
||||
HELP=0
|
||||
CLEAN_FLAG=1 # TODO -- doesn't yet work without --clean
|
||||
DEST_FILE=bootimage.iso
|
||||
AUTO_FLAG=0
|
||||
AUTO_INSTALL=""
|
||||
CUMULUS=0
|
||||
SIGN_RPM_FILES=1
|
||||
DEVICE=""
|
||||
if [ -z "$BUILD_ISO_USE_UDEV" ]; then
|
||||
BUILD_ISO_USE_UDEV=0
|
||||
fi
|
||||
|
||||
# read the options
|
||||
TEMP=`getopt -o hf:a:d: --long help,file:,auto:,device:,cumulus,clean,skip-sign,sudo,udev -n 'test.sh' -- "$@"`
|
||||
eval set -- "$TEMP"
|
||||
|
||||
# extract options and their arguments into variables.
|
||||
while true ; do
|
||||
case "$1" in
|
||||
-h|--help) HELP=1 ; shift ;;
|
||||
--clean) CLEAN_FLAG=1 ; shift ;;
|
||||
--skip-sign) SIGN_RPM_FILES=0 ; shift ;;
|
||||
--cumulus) CUMULUS=1 ; shift ;;
|
||||
-f | --file) DEST_FILE="$2"; shift; shift ;;
|
||||
-d | --device) DEVICE="$2"; shift; shift ;;
|
||||
-a | --auto) AUTO_FLAG=1; AUTO_INSTALL="$2"; shift; shift ;;
|
||||
--sudo) BUILD_ISO_USE_UDEV=0 ; shift ;;
|
||||
--udev) BUILD_ISO_USE_UDEV=1 ; shift ;;
|
||||
--) shift ; break ;;
|
||||
*) echo "Internal error!" ; exit 1 ;;
|
||||
esac
|
||||
done
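# Example invocation (illustrative; assumes this script is installed as build-iso):
#   build-iso --auto controller --file bootimage.iso --skip-sign
# builds an auto-installing controller ISO named bootimage.iso without adding
# per-file RPM signatures.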
|
||||
|
||||
if [ $AUTO_FLAG -eq 1 ]; then
|
||||
if [[ "$AUTO_INSTALL" != "controller" && "$AUTO_INSTALL" != "cpe" ]] ; then
|
||||
echo "Unsupported --auto value: $AUTO_INSTALL"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ $HELP -eq 1 ]; then
|
||||
usage
|
||||
exit 0
|
||||
fi
|
||||
|
||||
|
||||
(
|
||||
printf "\n*************************\n"
|
||||
printf "Create StarlingX/CentOS Boot CD\n"
|
||||
printf "*************************\n\n"
|
||||
|
||||
# Init variables
|
||||
init_vars
|
||||
check_vars
|
||||
DISTRO="centos"
|
||||
|
||||
PKGLIST_MINIMAL="${INTERNAL_REPO_ROOT}/build-tools/build_iso/minimal_rpm_list.txt"
|
||||
PKGLIST_STX="${OUTPUT_DIR}/image.inc"
|
||||
PKGLIST_DEV="${OUTPUT_DIR}/image-dev.inc"
|
||||
PKGLIST_THIS_LAYER="${OUTPUT_DIR}/image-layer.inc"
|
||||
PKGLIST_LOWER_LAYER_DIR="${CENTOS_REPO}/layer_image_inc"
|
||||
|
||||
PKGLIST_LOWER_LAYER_LIST=""
|
||||
if [ -d ${PKGLIST_LOWER_LAYER_DIR} ]; then
|
||||
PKGLIST_LOWER_LAYER_LIST="$(find ${PKGLIST_LOWER_LAYER_DIR} -name '*image.inc')"
|
||||
fi
|
||||
|
||||
# Create skeleton build dir
|
||||
init_output_dir
|
||||
|
||||
# Create the vanilla DVD
|
||||
echo "Copying vanilla CentOS RPMs"
|
||||
install_pkg_list "${PKGLIST_MINIMAL}"
|
||||
if [ $? -eq 2 ]; then
|
||||
echo "Error: Failed to install packages from ${PKGLIST_MINIMAL}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Find all StarlingX packages built locally
|
||||
echo "Installing StarlingX packages"
|
||||
install_pkg_list "${PKGLIST_STX}"
|
||||
if [ $? -eq 2 ]; then
|
||||
echo "Error: Failed to install packages from ${PKGLIST_STX}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
for PKGLIST_LOWER_LAYER in $PKGLIST_LOWER_LAYER_LIST; do
|
||||
install_pkg_list "${PKGLIST_LOWER_LAYER}"
|
||||
if [ $? -eq 2 ]; then
|
||||
echo "Error: Failed to install packages from ${PKGLIST_LOWER_LAYER}"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "x${RELEASE_BUILD}" == "x" ]; then
|
||||
echo "Installing StarlingX developer packages"
|
||||
install_pkg_list "${PKGLIST_DEV}"
|
||||
if [ $? -eq 2 ]; then
|
||||
echo "Error: Failed to install packages from ${PKGLIST_DEV}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
for PKGLIST_LOWER_LAYER in $PKGLIST_LOWER_LAYER_LIST; do
|
||||
install_pkg_list "${PKGLIST_LOWER_LAYER}"
|
||||
if [ $? -eq 2 ]; then
|
||||
echo "Error: Failed to install packages from ${PKGLIST_LOWER_LAYER}"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
\cd $OUTPUT_DIST_DIR
|
||||
chmod -R 644 isolinux/Packages/*
|
||||
|
||||
# Extract installer files
|
||||
extract_installer_files
|
||||
|
||||
# Upgrades files
|
||||
setup_upgrades_files
|
||||
|
||||
# add file signatures to all rpms
|
||||
if [ $SIGN_RPM_FILES -ne 0 ]; then
|
||||
sign-rpms -d $OUTPUT_DIST_DIR/isolinux/Packages
|
||||
if [ $? -ne 0 ] ; then
|
||||
echo "failed to add file signatures to RPMs"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Finalize and build ISO
|
||||
final_touches
|
||||
|
||||
# Sign the ISO
|
||||
sign_iso
|
||||
|
||||
make_report "${PKGLIST_MINIMAL}" "${PKGLIST_STX}" "${PKGLIST_THIS_LAYER}" ${PKGLIST_LOWER_LAYER_LIST}
|
||||
|
||||
# Check sanity
|
||||
FILESIZE=$(wc -c <"$OUTPUT_FILE")
|
||||
if [ $FILESIZE -ge $MINIMUM_EXPECTED_SIZE ]; then
|
||||
printf "Done."
|
||||
printf "Output file: $OUTPUT_FILE\n\n"
|
||||
else
|
||||
printf "Output file $OUTPUT_FILE smaller than expected -- probable error\n\n"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
) 2>&1 | stdbuf -o0 awk '{ print strftime("%H:%M:%S"), $0; fflush(); }' ; exit ${PIPESTATUS[0]}
|
@ -1,63 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Available environment
|
||||
# SRC_BASE = absolute path to cgcs-root
|
||||
# AVS_BASE = absolute path to AVS source
|
||||
# CGCS_BASE = absolute path to CGCS source
|
||||
# RPM_BUILD_BASE = Directory where the package .distro directory can be found
|
||||
# SRPM_OUT = Directory into which SRC RPMS are copied in preparation for mock build
|
||||
# RPM_DIR = Directory into which binary RPMs are delivered by mock
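#
# Summary: this helper packs the package sources into a versioned tarball and
# then runs "rpmbuild -bs" for each spec file, regenerating the tarball and
# src.rpm only when source files are newer than the existing artifacts (see
# the "find -cnewer" checks below).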
|
||||
|
||||
SRC_DIR="/sources"
|
||||
VERSION=$(grep '^Version:' PKG-INFO | awk -F ': ' '{print $2}' | sed -e 's/^[[:space:]]*//')
|
||||
TAR_NAME=$(grep '^Name:' PKG-INFO | awk -F ': ' '{print $2}' | sed -e 's/^[[:space:]]*//')
|
||||
CUR_DIR=`pwd`
|
||||
BUILD_DIR=".distro/centos7/rpmbuild"
|
||||
|
||||
mkdir -p $BUILD_DIR/SRPMS
|
||||
|
||||
TAR="$TAR_NAME-$VERSION.tar.gz"
|
||||
TAR_PATH="$BUILD_DIR/SOURCES/$TAR"
|
||||
|
||||
TAR_NEEDED=0
|
||||
if [ -f $TAR_PATH ]; then
|
||||
n=`find . -cnewer $TAR_PATH -and ! -path './.git*' \
|
||||
-and ! -path './build/*' \
|
||||
-and ! -path './.pc/*' \
|
||||
-and ! -path './patches/*' \
|
||||
-and ! -path './.distro/*' \
|
||||
-and ! -path './pbr-*.egg/*' \
|
||||
| wc -l`
|
||||
if [ $n -gt 0 ]; then
|
||||
TAR_NEEDED=1
|
||||
fi
|
||||
else
|
||||
TAR_NEEDED=1
|
||||
fi
|
||||
|
||||
if [ $TAR_NEEDED -gt 0 ]; then
|
||||
tar czvf $TAR_PATH .$SRC_DIR --exclude '.git*' --exclude 'build' --exclude='.pc' --exclude='patches' --exclude='.distro' --exclude='pbr-*.egg' --transform "s,^\.$SRC_DIR,$TAR_NAME-$VERSION,"
|
||||
fi
|
||||
|
||||
for SPEC in `ls $BUILD_DIR/SPECS`; do
|
||||
SPEC_PATH="$BUILD_DIR/SPECS/$SPEC"
|
||||
RELEASE=$(grep '^Release:' $SPEC_PATH | awk -F ': ' '{print $2}' | sed -e 's/^[[:space:]]*//')
|
||||
NAME=`echo $SPEC | sed 's/.spec$//'`
|
||||
SRPM="$NAME-$VERSION-$RELEASE.src.rpm"
|
||||
SRPM_PATH="$BUILD_DIR/SRPMS/$SRPM"
|
||||
|
||||
BUILD_NEEDED=0
|
||||
if [ -f $SRPM_PATH ]; then
|
||||
n=`find . -cnewer $SRPM_PATH | wc -l`
|
||||
if [ $n -gt 0 ]; then
|
||||
BUILD_NEEDED=1
|
||||
fi
|
||||
else
|
||||
BUILD_NEEDED=1
|
||||
fi
|
||||
|
||||
if [ $BUILD_NEEDED -gt 0 ]; then
|
||||
rpmbuild -bs $SPEC_PATH --define="%_topdir $CUR_DIR/$BUILD_DIR" --define="_tis_dist .tis"
|
||||
fi
|
||||
done
|
||||
|
@ -1,126 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
#
|
||||
# Copyright (c) 2018-2020 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
#
|
||||
# Build first src.rpms, then rpms, from source, or from a downloaded tarball
|
||||
# or src.rpm plus our additional patches.
|
||||
#
|
||||
# This program is a wrapper around build-pkgs-parallel and build-pkgs-serial
|
||||
#
|
||||
|
||||
BUILD_PKGS_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )"
|
||||
|
||||
# Set REPOQUERY and REPOQUERY_SUB_COMMAND for our build environment.
|
||||
source "${BUILD_PKGS_DIR}/pkg-manager-utils.sh"
|
||||
|
||||
|
||||
usage () {
|
||||
echo ""
|
||||
echo "Usage: "
|
||||
echo " Create source and binary rpms:"
|
||||
echo " build-pkgs [--serial] [args]"
|
||||
}
|
||||
|
||||
SERIAL_FLAG=0
|
||||
RC=0
|
||||
|
||||
for arg in "$@"; do
|
||||
case "$1" in
|
||||
--serial) SERIAL_FLAG=1 ;;
|
||||
esac
|
||||
done
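# Parallel builds rely on the mock_tmpfs_umount helper; if it is not on the
# PATH, fall back to a serial build.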
|
||||
|
||||
which mock_tmpfs_umount >> /dev/null
|
||||
if [ $? -ne 0 ]; then
|
||||
SERIAL_FLAG=1
|
||||
fi
|
||||
|
||||
export TMPDIR=$MY_WORKSPACE/tmp
|
||||
mkdir -p $TMPDIR
|
||||
|
||||
# Old repo path or new?
|
||||
LOCAL_REPO=${MY_REPO}/local-repo
|
||||
if [ ! -d ${LOCAL_REPO} ]; then
|
||||
LOCAL_REPO=${MY_REPO}/cgcs-tis-repo
|
||||
if [ ! -d ${LOCAL_REPO} ]; then
|
||||
# This one isn't fatal, LOCAL_REPO is not required
|
||||
LOCAL_REPO=${MY_REPO}/local-repo
|
||||
fi
|
||||
fi
|
||||
|
||||
# Make sure we have a dependency cache
|
||||
DEP_CACHE="${LOCAL_REPO}/dependancy-cache"
|
||||
|
||||
BUILD_TYPES=(" std rt installer containers")
|
||||
DEP_RPM_TYPE=(" RPMS SRPMS ")
|
||||
DEP_DELTAS="$DEP_CACHE/deltas-rpms-srpms"
|
||||
|
||||
make_cache_current_rpms () {
|
||||
|
||||
FILE=${1}
|
||||
|
||||
if [ -z "${FILE}" ]; then
|
||||
echo "File not specified"
|
||||
return;
|
||||
fi
|
||||
|
||||
if [ -f ${FILE} ]; then
|
||||
rm ${FILE}
|
||||
fi
|
||||
|
||||
for build_type in $BUILD_TYPES; do
|
||||
for rpm_type in $DEP_RPM_TYPE; do
|
||||
|
||||
if [ -d $MY_WORKSPACE/$build_type/rpmbuild/$rpm_type/repodata ]; then
|
||||
current=$MY_WORKSPACE/$build_type/rpmbuild/$rpm_type/
|
||||
|
||||
${REPOQUERY} \
|
||||
--repofrompath=$build_type-$rpm_type,$current \
|
||||
--repoid=$build_type-$rpm_type --arch=noarch,src,x86_64 \
|
||||
${REPOQUERY_SUB_COMMAND} \
|
||||
--all \
|
||||
--qf "%-10{repoid} %-40{name} %-10{version} %-10{release}" \
|
||||
>> ${FILE}
|
||||
|
||||
\rm -rf $TMP_DIR/yum-$USER-*
|
||||
fi
|
||||
done;
|
||||
done;
|
||||
}
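# Strategy: the dependency cache is expensive to rebuild, so it is regenerated
# only when the set of locally built RPMs/SRPMs changes. make_cache_current_rpms
# captures a listing of the current build output; diffing it against the saved
# deltas file below decides whether create_dependancy_cache.py needs to re-run.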
|
||||
|
||||
if [ ! -d $DEP_CACHE ]; then
|
||||
echo "Dependency cache is missing. Creating it now."
|
||||
$BUILD_PKGS_DIR/create_dependancy_cache.py > $MY_WORKSPACE/create_dependancy_cache.log
|
||||
make_cache_current_rpms $DEP_DELTAS
|
||||
echo "Dependency cache created."
|
||||
else
|
||||
DEP_TMP=$(mktemp)
|
||||
make_cache_current_rpms $DEP_TMP
|
||||
if diff $DEP_DELTAS $DEP_TMP > /dev/null; then
|
||||
echo "No changes for stx projects"
|
||||
rm $DEP_TMP
|
||||
else
|
||||
echo "Changes detected for stx projects"
|
||||
echo "Recreating dependecy cache now."
|
||||
mv $DEP_TMP $DEP_DELTAS
|
||||
$BUILD_PKGS_DIR/create_dependancy_cache.py > $MY_WORKSPACE/create_dependancy_cache.log
|
||||
echo "Dependency cache recreated."
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ $SERIAL_FLAG -eq 1 ]; then
|
||||
echo "build-pkgs-serial $@"
|
||||
build-pkgs-serial "$@"
|
||||
RC=$?
|
||||
else
|
||||
echo "build-pkgs-parallel $@"
|
||||
build-pkgs-parallel "$@"
|
||||
RC=$?
|
||||
fi
|
||||
|
||||
exit $RC
|
@ -1,538 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
#
|
||||
# Copyright (c) 2018 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
#
|
||||
# Build first src.rpms, then rpms, from source, or from a downloaded tarball
|
||||
# or src.rpm plus our additional patches.
|
||||
#
|
||||
# This program is a wrapper around build-srpms-parallel and build-rpms-parallel
|
||||
#
|
||||
|
||||
BUILD_PKGS_PARALLEL_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )"
|
||||
|
||||
source "${BUILD_PKGS_PARALLEL_DIR}/git-utils.sh"
|
||||
source "${BUILD_PKGS_PARALLEL_DIR}/spec-utils"
|
||||
|
||||
usage () {
|
||||
echo ""
|
||||
echo "Usage: "
|
||||
echo " Create source and Binary rpms:"
|
||||
echo " Build optimizations (--no-descendants, --no-required, --no-build-info,"
|
||||
echo " --no-autoclean, --no-build-avoidance) are not recommended for the first build"
|
||||
echo " after a clone/pull, nor the final build prior to creating an iso or patch,"
|
||||
echo " but can be used for intermediate builds."
|
||||
echo " i.e. while debugging compilation failures."
|
||||
echo " build-pkgs-parallel [--layer] [--build-avoidance | --no-build-avoidance] [--no-descendants] [--no-required] [--no-build-info] [--no-autoclean] [--careful] [--formal] [ list of package names ]"
|
||||
echo ""
|
||||
echo " Test build dependencies of a package:"
|
||||
echo " Note: A full build of all packages should preceed the dependency test build"
|
||||
echo " build-pkgs-parallel --dep-test <package_name>"
|
||||
echo ""
|
||||
echo " Delete source rpms, and the directories associated with it's creation:"
|
||||
echo " Note: does not clean an edit environment"
|
||||
echo " build-pkgs-parallel --clean [--build-avoidance | --no-build-avoidance] [ list of package names ]"
|
||||
echo ""
|
||||
echo " Extract an src.rpm into a pair of git trees to aid in editing it's contents,"
|
||||
echo " one for source code and one for metadata such as the spec file."
|
||||
echo " If --no-meta-patch is specified, then WRS patches are omitted."
|
||||
echo " build-pkgs-parallel --edit [--no-meta-patch] [ list of package names ]"
|
||||
echo ""
|
||||
echo " Delete an edit environment"
|
||||
echo " build-pkgs-parallel --edit --clean [ list of package names ]"
|
||||
echo ""
|
||||
echo " This help page"
|
||||
echo " build-pkgs-parallel [--help]"
|
||||
echo ""
|
||||
}
|
||||
|
||||
|
||||
HELP=0
|
||||
CLEAN_FLAG=0
|
||||
EDIT_FLAG=0
|
||||
APPEND_LOG_FLAG=0
|
||||
BUILD_AVOIDANCE_FLAG=0
|
||||
STD_BUILD=1
|
||||
RT_BUILD=1
|
||||
INSTALLER_BUILD=0
|
||||
CONTAINERS_BUILD=0
|
||||
DEP_TEST_FLAG=0
|
||||
|
||||
export BUILD_AVOIDANCE_URL=""
|
||||
|
||||
# For backward compatibility. Old repo location or new?
|
||||
CENTOS_REPO=${MY_REPO}/centos-repo
|
||||
if [ ! -d ${CENTOS_REPO} ]; then
|
||||
CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
|
||||
if [ ! -d ${CENTOS_REPO} ]; then
|
||||
echo "ERROR: directory ${MY_REPO}/centos-repo not found."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# read the options
|
||||
TEMP=$(getopt -o h --long parallel,rt,std,installer,containers,layer:,edit,build-avoidance,no-build-avoidance,build-avoidance-dir:,build-avoidance-host:,build-avoidance-user:,build-avoidance-day:,no-meta-patch,no-descendants,no-required,no-build-info,no-autoclean,formal,careful,help,clean,dep-test,append-log -n 'build-pkgs-parallel' -- "$@")
|
||||
if [ $? -ne 0 ]; then
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
eval set -- "$TEMP"
|
||||
|
||||
# extract options and their arguments into variables.
|
||||
EXTRA_ARGS_COMMON=""
|
||||
EXTRA_ARGS_SRPM=""
|
||||
EXTRA_ARGS_RPM=""
|
||||
|
||||
export BUILD_AVOIDANCE_OVERRIDE_DIR=""
|
||||
export BUILD_AVOIDANCE_OVERRIDE_HOST=""
|
||||
export BUILD_AVOIDANCE_OVERRIDE_USR=""
|
||||
export BUILD_AVOIDANCE_DAY=""
|
||||
|
||||
while true ; do
|
||||
case "$1" in
|
||||
--append-log) APPEND_LOG_FLAG=1 ; shift ;;
|
||||
--build-avoidance) BUILD_AVOIDANCE_FLAG=1 ; shift ;;
|
||||
--no-build-avoidance) BUILD_AVOIDANCE_FLAG=0 ; shift ;;
|
||||
--build-avoidance-dir) BUILD_AVOIDANCE_OVERRIDE_DIR=$2; shift 2 ;;
|
||||
--build-avoidance-host) BUILD_AVOIDANCE_OVERRIDE_HOST=$2; shift 2 ;;
|
||||
--build-avoidance-user) BUILD_AVOIDANCE_OVERRIDE_USR=$2; shift 2 ;;
|
||||
--build-avoidance-day) BUILD_AVOIDANCE_DAY=$2; shift 2 ;;
|
||||
--no-descendants) EXTRA_ARGS_COMMON+=" --no-descendants" ; shift ;;
|
||||
--formal) EXTRA_ARGS_COMMON+=" --formal" ; shift ;;
|
||||
--careful) EXTRA_ARGS_RPM+=" --careful" ; shift ;;
|
||||
--layer) EXTRA_ARGS_COMMON+=" --layer=$2"; shift 2 ;;
|
||||
--no-required) EXTRA_ARGS_RPM+=" --no-required" ; shift ;;
|
||||
--no-build-info) EXTRA_ARGS_COMMON+=" --no-build-info" ; shift ;;
|
||||
--no-autoclean) EXTRA_ARGS_RPM+=" --no-autoclean" ; shift ;;
|
||||
--no-meta-patch) EXTRA_ARGS_SRPM+=" --no-meta-patch" ; shift ;;
|
||||
-h|--help) HELP=1 ; shift ;;
|
||||
--clean) CLEAN_FLAG=1 ; shift ;;
|
||||
--dep-test) DEP_TEST_FLAG=1; EXTRA_ARGS_RPM+=" --dep-test"; shift ;;
|
||||
--edit) EDIT_FLAG=1 ; EXTRA_ARGS_SRPM+=" --edit"; shift ;;
|
||||
--rt) STD_BUILD=0 ; shift ;;
|
||||
--std) RT_BUILD=0 ; shift ;;
|
||||
--installer) INSTALLER_BUILD=1 ; STD_BUILD=0 ; RT_BUILD=0 ; shift ;;
|
||||
--containers) INSTALLER_BUILD=0 ; STD_BUILD=0 ; RT_BUILD=0 ; CONTAINERS_BUILD=1 ; shift ;;
|
||||
--parallel) shift ;;
|
||||
--) shift ; break ;;
|
||||
*) usage; exit 1 ;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [ $HELP -eq 1 ]; then
|
||||
usage
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Don't source until after BUILD_AVOIDANCE_OVERRIDE_* variables are set.
|
||||
source "${BUILD_PKGS_PARALLEL_DIR}/build-avoidance-utils.sh"
|
||||
|
||||
function my_exit() {
|
||||
build-rpms-parallel --std --tmpfs-clean
|
||||
build-rpms-parallel --rt --tmpfs-clean
|
||||
}
|
||||
|
||||
function my_sigint() {
|
||||
echo "build-pkgs-parallel sigint"
|
||||
pkill -SIGABRT -P $BASHPID &> /dev/null
|
||||
echo "build-pkgs-parallel waiting"
|
||||
wait
|
||||
echo "build-pkgs-parallel wait complete"
|
||||
|
||||
}
|
||||
|
||||
function my_sighup() {
|
||||
echo "build-pkgs-parallel sighup"
|
||||
pkill -SIGABRT -P $BASHPID &> /dev/null
|
||||
echo "build-pkgs-parallel waiting"
|
||||
wait
|
||||
echo "build-pkgs-parallel wait complete"
|
||||
}
|
||||
|
||||
function my_sigabrt() {
|
||||
echo "build-pkgs-parallel sigabrt"
|
||||
pkill -SIGABRT -P $BASHPID &> /dev/null
|
||||
echo "build-pkgs-parallel waiting"
|
||||
wait
|
||||
echo "build-pkgs-parallel wait complete"
|
||||
}
|
||||
|
||||
function my_sigterm() {
|
||||
echo "build-pkgs-parallel sigterm"
|
||||
pkill -SIGABRT -P $BASHPID &> /dev/null
|
||||
echo "build-pkgs-parallel waiting"
|
||||
wait
|
||||
echo "build-pkgs-parallel wait complete"
|
||||
}
|
||||
|
||||
trap my_exit EXIT
|
||||
trap my_sigint INT
|
||||
trap my_sighup HUP
|
||||
trap my_sigabrt ABRT
|
||||
trap my_sigterm TERM
|
||||
|
||||
# Note: For ease of parsing, a TARGETS list always begins and ends
|
||||
# with a space. An empty target list consists of two spaces.
|
||||
TARGETS=" $@ "
|
||||
EMPTY_TARGETS=" "
|
||||
|
||||
TARGETS_STD="$EMPTY_TARGETS"
|
||||
TARGETS_RT="$EMPTY_TARGETS"
|
||||
TARGETS_INSTALLER="$EMPTY_TARGETS"
|
||||
TARGETS_CONTAINERS="$EMPTY_TARGETS"
|
||||
TARGETS_MISC="$EMPTY_TARGETS"
|
||||
|
||||
find_targets () {
|
||||
local centos_pkg_dirs=$1
|
||||
local d=""
|
||||
local d2=""
|
||||
local g=""
|
||||
local x=""
|
||||
local name=""
|
||||
local path=""
|
||||
local RESULT="$EMPTY_TARGETS"
|
||||
local FOUND=0
|
||||
|
||||
for d in $GIT_LIST; do
|
||||
if [ -f $d/$centos_pkg_dirs ]; then
|
||||
for d2 in $(grep -v '^#' $d/$centos_pkg_dirs); do
|
||||
name=""
|
||||
if [ -f $d/$d2/centos/srpm_path ]; then
|
||||
path=$(cat $d/$d2/centos/srpm_path | head -n 1 | \
|
||||
sed -e "s#^mirror:CentOS/tis-r3-CentOS/mitaka#${CENTOS_REPO}#" \
|
||||
-e "s#^mirror:#${CENTOS_REPO}/#" \
|
||||
-e "s#^repo:#$MY_REPO/#" \
|
||||
-e "s#^Source/#${CENTOS_REPO}/Source/#")
|
||||
name=$(rpm -q --qf='%{NAME}' --nosignature -p $path)
|
||||
else
|
||||
path=$(find $d/$d2/centos/ -name '*.spec' | head -n 1)
|
||||
if [[ ( -z "$path" ) && ( -f $d/$d2/centos/spec_path ) ]]; then
|
||||
path=$(find $MY_REPO/$(cat $d/$d2/centos/spec_path) -maxdepth 1 -name '*.spec' | head -n 1)
|
||||
fi
|
||||
if [ "$path" != "" ]; then
|
||||
name=$(spec_find_tag Name "$path" 2>> /dev/null)
|
||||
fi
|
||||
fi
|
||||
if [ "$name" != "" ]; then
|
||||
if [ "$BUILD_TYPE" == "rt" ]; then
|
||||
FOUND=0
|
||||
for x in $TARGETS; do
|
||||
if [ "${x: -3}" == "-rt" ]; then
|
||||
if [ "${name}" == "$x" ] || [ "${name}-rt" == "${x}" ]; then
|
||||
RESULT+="$x "
|
||||
FOUND=1
|
||||
break
|
||||
fi
|
||||
fi
|
||||
done
|
||||
if [ $FOUND -eq 0 ]; then
|
||||
for x in $TARGETS; do
|
||||
if [ "${name}" == "${x}-rt" ]; then
|
||||
RESULT+="$x-rt "
|
||||
FOUND=1
|
||||
break
|
||||
else
|
||||
if [ "${name}" == "$x" ] || [ "${name}-rt" == "${x}" ]; then
|
||||
RESULT+="$x "
|
||||
FOUND=1
|
||||
break
|
||||
fi
|
||||
fi
|
||||
done
|
||||
fi
|
||||
else
|
||||
for x in $TARGETS; do
|
||||
if [ "${name}" == "$x" ]; then
|
||||
RESULT+="$x "
|
||||
FOUND=1
|
||||
break
|
||||
fi
|
||||
done
|
||||
fi
|
||||
fi
|
||||
done
|
||||
fi
|
||||
done
|
||||
|
||||
echo "$RESULT"
|
||||
return 0
|
||||
}
|
||||
|
||||
if [ $EDIT_FLAG -eq 1 ] || [ "$TARGETS" != "$EMPTY_TARGETS" ]; then
|
||||
BUILD_AVOIDANCE_FLAG=0
|
||||
fi
|
||||
|
||||
echo "BUILD_AVOIDANCE_FLAG=$BUILD_AVOIDANCE_FLAG"
|
||||
echo "CLEAN_FLAG=$CLEAN_FLAG"
|
||||
echo "EDIT_FLAG=$EDIT_FLAG"
|
||||
|
||||
if [ "$TARGETS" != "$EMPTY_TARGETS" ]; then
|
||||
TARGETS_STD="$(find_targets centos_pkg_dirs)"
|
||||
|
||||
BUILD_TYPE_SAVE="$BUILD_TYPE"
|
||||
BUILD_TYPE="rt"
|
||||
TARGETS_RT="$(find_targets centos_pkg_dirs_rt)"
|
||||
BUILD_TYPE="installer"
|
||||
TARGETS_INSTALLER="$(find_targets centos_pkg_dirs_installer)"
|
||||
BUILD_TYPE="containers"
|
||||
TARGETS_CONTAINERS="$(find_targets centos_pkg_dirs_containers)"
|
||||
BUILD_TYPE="$BUILD_TYPE_SAVE"
|
||||
|
||||
echo "TARGETS_STD=$TARGETS_STD"
|
||||
echo "TARGETS_RT=$TARGETS_RT"
|
||||
echo "TARGETS_INSTALLER=$TARGETS_INSTALLER"
|
||||
echo "TARGETS_CONTAINERS=$TARGETS_CONTAINERS"
|
||||
|
||||
for x in $TARGETS; do
|
||||
if [[ $TARGETS_STD == *" $x "* ]]
|
||||
then
|
||||
echo "found $x" >> /dev/null;
|
||||
else
|
||||
if [[ $TARGETS_RT == *" $x "* ]]
|
||||
then
|
||||
echo "found $x" >> /dev/null;
|
||||
else
|
||||
if [[ $TARGETS_INSTALLER == *" $x "* ]]
|
||||
then
|
||||
echo "found $x" >> /dev/null;
|
||||
INSTALLER_BUILD=1
|
||||
else
|
||||
if [[ $TARGETS_CONTAINERS == *" $x "* ]]
|
||||
then
|
||||
echo "found $x" >> /dev/null;
|
||||
CONTAINERS_BUILD=1
|
||||
else
|
||||
TARGETS_MISC+="$x "
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
echo "EXTRA_ARGS_COMMON='$EXTRA_ARGS_COMMON'"
|
||||
echo "EXTRA_ARGS_SRPM='$EXTRA_ARGS_SRPM'"
|
||||
echo "EXTRA_ARGS_RPM='$EXTRA_ARGS_RPM'"
|
||||
echo "TARGETS='$TARGETS'"
|
||||
echo "TARGETS_STD='$TARGETS_STD'"
|
||||
echo "TARGETS_RT='$TARGETS_RT'"
|
||||
echo "TARGETS_INSTALLER='$TARGETS_INSTALLER'"
|
||||
echo "TARGETS_CONTAINERS='$TARGETS_CONTAINERS'"
|
||||
echo "TARGETS_MISC='$TARGETS_MISC'"
|
||||
|
||||
if [ $CLEAN_FLAG -eq 1 ]; then
|
||||
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] && [ $BUILD_AVOIDANCE_FLAG -eq 1 ] ; then
|
||||
build_avoidance_clean
|
||||
fi
|
||||
|
||||
if [ $STD_BUILD -eq 1 ]; then
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_STD" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
|
||||
if [ $EDIT_FLAG -ne 1 ]; then
|
||||
echo "${BUILD_PKGS_PARALLEL_DIR}/build-rpms-parallel --std --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_STD $TARGETS_MISC"
|
||||
${BUILD_PKGS_PARALLEL_DIR}/build-rpms-parallel --std --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_STD $TARGETS_MISC || exit 1
|
||||
|
||||
fi
|
||||
fi
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_STD" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
|
||||
echo "${BUILD_PKGS_PARALLEL_DIR}/build-srpms-parallel --std --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_STD $TARGETS_MISC"
|
||||
${BUILD_PKGS_PARALLEL_DIR}/build-srpms-parallel --std --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_STD $TARGETS_MISC || exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ $RT_BUILD -eq 1 ]; then
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_RT" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
|
||||
if [ $EDIT_FLAG -ne 1 ]; then
|
||||
echo "${BUILD_PKGS_PARALLEL_DIR}/build-rpms-parallel --rt --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_RT $TARGETS_MISC"
|
||||
${BUILD_PKGS_PARALLEL_DIR}/build-rpms-parallel --rt --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_RT $TARGETS_MISC || exit 1
|
||||
fi
|
||||
fi
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_RT" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
|
||||
echo "${BUILD_PKGS_PARALLEL_DIR}/build-srpms-parallel --rt --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_RT $TARGETS_MISC"
|
||||
${BUILD_PKGS_PARALLEL_DIR}/build-srpms-parallel --rt --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_RT $TARGETS_MISC || exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ $INSTALLER_BUILD -eq 1 ]; then
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_INSTALLER" != "$EMPTY_TARGETS" ]; then
|
||||
if [ $EDIT_FLAG -ne 1 ]; then
|
||||
echo "${BUILD_PKGS_PARALLEL_DIR}/build-rpms-parallel --installer --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_INSTALLER"
|
||||
${BUILD_PKGS_PARALLEL_DIR}/build-rpms-parallel --installer --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_INSTALLER || exit 1
|
||||
fi
|
||||
fi
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_INSTALLER" != "$EMPTY_TARGETS" ]; then
|
||||
echo "${BUILD_PKGS_PARALLEL_DIR}/build-srpms-parallel --installer --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_INSTALLER"
|
||||
${BUILD_PKGS_PARALLEL_DIR}/build-srpms-parallel --installer --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_INSTALLER || exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ $CONTAINERS_BUILD -eq 1 ]; then
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_CONTAINERS" != "$EMPTY_TARGETS" ]; then
|
||||
if [ $EDIT_FLAG -ne 1 ]; then
|
||||
echo "${BUILD_PKGS_PARALLEL_DIR}/build-rpms-parallel --containers --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_CONTAINERS"
|
||||
${BUILD_PKGS_PARALLEL_DIR}/build-rpms-parallel --containers --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_CONTAINERS || exit 1
|
||||
fi
|
||||
fi
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_CONTAINERS" != "$EMPTY_TARGETS" ]; then
|
||||
echo "${BUILD_PKGS_PARALLEL_DIR}/build-srpms-parallel --containers --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_CONTAINERS"
|
||||
${BUILD_PKGS_PARALLEL_DIR}/build-srpms-parallel --containers --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_CONTAINERS || exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
exit $?
|
||||
fi
|
||||
|
||||
function launch_build()
|
||||
{
|
||||
local build_type=$1
|
||||
shift
|
||||
|
||||
local logfile=$MY_WORKSPACE/build-$build_type.log
|
||||
local rc
|
||||
local targets
|
||||
|
||||
if [ "$build_type" == "std" ]; then
|
||||
targets="$TARGETS_STD $TARGETS_MISC"
|
||||
else
|
||||
if [ "$build_type" == "rt" ]; then
|
||||
targets="$TARGETS_RT $TARGETS_MISC"
|
||||
else
|
||||
if [ "$build_type" == "installer" ]; then
|
||||
targets="$TARGETS_INSTALLER $TARGETS_MISC"
|
||||
else
|
||||
if [ "$build_type" == "containers" ]; then
|
||||
targets="$TARGETS_CONTAINERS $TARGETS_MISC"
|
||||
else
|
||||
targets="$TARGETS"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "Launching $build_type build, logging to $logfile"
|
||||
if [ $APPEND_LOG_FLAG -eq 0 ] && [ -f $logfile ]; then
|
||||
\rm $logfile
|
||||
fi
|
||||
|
||||
|
||||
echo -e "\n######## $(date): Launching build-srpms-parallel --$build_type $EXTRA_ARGS $@\n" | tee --append $logfile
|
||||
|
||||
if [ $BUILD_AVOIDANCE_FLAG -eq 1 ]; then
|
||||
# Build Avoidance requested. Get URL of a usable context, if any.
|
||||
export BUILD_AVOIDANCE_URL=$(get_build_avoidance_context $build_type)
|
||||
fi
|
||||
|
||||
echo "BUILD_AVOIDANCE_URL=$BUILD_AVOIDANCE_URL" | tee --append $logfile
|
||||
if [ "x$BUILD_AVOIDANCE_URL" != "x" ]; then
|
||||
echo "build_avoidance $build_type" | tee --append $logfile
|
||||
build_avoidance $build_type 2>&1 | tee --append $logfile
|
||||
fi
|
||||
|
||||
# No clean flag, call build-srpms-parallel followed by build-rpms-parallel
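# Because the build output is piped through tee for logging, $? would report
# tee's status; PIPESTATUS[0] is used below to capture the builder's own exit code.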
|
||||
echo "${BUILD_PKGS_PARALLEL_DIR}/build-srpms-parallel --$build_type $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $targets" | tee --append $logfile
|
||||
${BUILD_PKGS_PARALLEL_DIR}/build-srpms-parallel --$build_type $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $targets 2>&1 | tee --append $logfile
|
||||
rc=${PIPESTATUS[0]}
|
||||
if [ $rc -eq 0 ]; then
|
||||
echo -e "\n######## $(date): build-srpm-parallel --$build_type was successful" | tee --append $logfile
|
||||
else
|
||||
echo -e "\n######## $(date): build-srpm-parallel --$build_type failed with rc=$rc" | tee --append $logfile
|
||||
echo -e "\n$(date): build-srpm-parallel --$build_type failed with rc=$rc"
|
||||
exit $rc
|
||||
fi
|
||||
|
||||
if [ $EDIT_FLAG -ne 1 ]; then
|
||||
echo -e "\n######## $(date): Launching build-rpms-parallel --$build_type $EXTRA_ARGS $@\n" | tee --append $logfile
|
||||
echo "${BUILD_PKGS_PARALLEL_DIR}/build-rpms-parallel --$build_type $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $targets" | tee --append $logfile
|
||||
${BUILD_PKGS_PARALLEL_DIR}/build-rpms-parallel --$build_type $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $targets 2>&1 | tee --append $logfile
|
||||
rc=${PIPESTATUS[0]}
|
||||
if [ $rc -eq 0 ]; then
|
||||
echo -e "\n######## $(date): build-rpm-parallel --$build_type was successful" | tee --append $logfile
|
||||
else
|
||||
echo -e "\n######## $(date): build-rpm-parallel --$build_type failed with rc=$rc" | tee --append $logfile
|
||||
echo -e "\n$(date): build-rpm-parallel --$build_type failed with rc=$rc"
|
||||
exit $rc
|
||||
fi
|
||||
fi
|
||||
|
||||
echo -e "\n$(date): $build_type complete\n"
|
||||
#exit $rc
|
||||
}
|
||||
|
||||
function progbar()
|
||||
{
|
||||
while :; do
|
||||
for s in / - \\ \|; do
|
||||
printf "\r$s"
|
||||
sleep .5
|
||||
done
|
||||
done
|
||||
}
|
||||
|
||||
# Create $MY_WORKSPACE if it doesn't exist already
|
||||
mkdir -p "${MY_WORKSPACE}"
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to create directory '${MY_WORKSPACE}'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Capture build context"
|
||||
git_context > "${MY_WORKSPACE}/CONTEXT"
|
||||
|
||||
if [ $STD_BUILD -eq 1 ]; then
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_STD" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
|
||||
launch_build std
|
||||
else
|
||||
echo "Skipping 'std' build, no valid targets in list: '$TARGETS'"
|
||||
fi
|
||||
else
|
||||
echo "Skipping 'std' build"
|
||||
fi
|
||||
|
||||
if [ $RT_BUILD -eq 1 ]; then
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_RT" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
|
||||
launch_build rt
|
||||
else
|
||||
echo "Skipping 'rt' build, no valid targets in list: $TARGETS"
|
||||
fi
|
||||
else
|
||||
echo "Skipping 'rt' build"
|
||||
fi
|
||||
|
||||
if [ $INSTALLER_BUILD -eq 1 ]; then
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_INSTALLER" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
|
||||
launch_build installer
|
||||
else
|
||||
echo "Skipping 'installer' build, no valid targets in list: $TARGETS"
|
||||
fi
|
||||
else
|
||||
echo "Skipping 'installer' build"
|
||||
fi
|
||||
|
||||
if [ $CONTAINERS_BUILD -eq 1 ]; then
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_CONTAINERS" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
|
||||
launch_build containers
|
||||
else
|
||||
echo "Skipping 'containers' build, no valid targets in list: $TARGETS"
|
||||
fi
|
||||
else
|
||||
echo "Skipping 'containers' build"
|
||||
fi
|
||||
|
||||
# Make sure REFERENCE_BUILD is set to something
|
||||
if [ -z $REFERENCE_BUILD ]; then
|
||||
REFERENCE_BUILD=0
|
||||
fi
|
||||
|
||||
if [ $REFERENCE_BUILD -eq 1 ]; then
|
||||
echo "Saving reference context"
|
||||
build_avoidance_save_reference_context
|
||||
fi
|
||||
|
||||
echo "All builds were successful"
|
||||
|
||||
exit 0
|
||||
|
@ -1,538 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
#
|
||||
# Copyright (c) 2018 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
#
|
||||
# Build first src.rpms, then rpms, from source, or from a downloaded tarball
|
||||
# or src.rpm plus our additional patches.
|
||||
#
|
||||
# This program is a wrapper around build-srpms-serial and build-rpms-serial
|
||||
#
|
||||
|
||||
BUILD_PKGS_SERIAL_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )"
|
||||
|
||||
source "${BUILD_PKGS_SERIAL_DIR}/git-utils.sh"
|
||||
source "${BUILD_PKGS_SERIAL_DIR}/spec-utils"
|
||||
|
||||
|
||||
usage () {
|
||||
echo ""
|
||||
echo "Usage: "
|
||||
echo " Create source and Binary rpms:"
|
||||
echo " Build optimizations (--no-descendants, --no-required, --no-build-info,"
|
||||
echo " --no-autoclean, --no-build-avoidance) are not recommended for the first build"
|
||||
echo " after a clone/pull, nor the final build prior to creating an iso or patch,"
|
||||
echo " but can be used for intermediate builds."
|
||||
echo " i.e. while debugging compilation failures."
|
||||
echo " build-pkgs-serial [--build-avoidance | --no-build-avoidance] [--no-descendants] [--no-required] [--no-build-info] [--no-autoclean] [--careful] [--formal] [ list of package names ]"
|
||||
echo ""
|
||||
echo " Test build dependencies of a package:"
|
||||
echo " Note: A full build of all packages should preceed the dependency test build"
|
||||
echo " build-pkgs-serial --dep-test <package_name>"
|
||||
echo ""
|
||||
echo " Delete source rpms, and the directories associated with it's creation:"
|
||||
echo " Note: does not clean an edit environment"
|
||||
echo " build-pkgs-serial --clean [--build-avoidance | --no-build-avoidance] [ list of package names ]"
|
||||
echo ""
|
||||
echo " Extract an src.rpm into a pair of git trees to aid in editing it's contents,"
|
||||
echo " one for source code and one for metadata such as the spec file."
|
||||
echo " If --no-meta-patch is specified, then WRS patches are omitted."
|
||||
echo " build-pkgs-serial --edit [--no-meta-patch] [ list of package names ]"
|
||||
echo ""
|
||||
echo " Delete an edit environment"
|
||||
echo " build-pkgs-serial --edit --clean [ list of package names ]"
|
||||
echo ""
|
||||
echo " This help page"
|
||||
echo " build-pkgs-serial [--help]"
|
||||
echo ""
|
||||
}
|
||||
|
||||
|
||||
HELP=0
|
||||
CLEAN_FLAG=0
|
||||
EDIT_FLAG=0
|
||||
APPEND_LOG_FLAG=0
|
||||
BUILD_AVOIDANCE_FLAG=0
|
||||
STD_BUILD=1
|
||||
RT_BUILD=1
|
||||
INSTALLER_BUILD=0
|
||||
CONTAINERS_BUILD=0
|
||||
DEP_TEST_FLAG=0
|
||||
|
||||
export BUILD_AVOIDANCE_URL=""
|
||||
|
||||
# For backward compatibility. Old repo location or new?
|
||||
CENTOS_REPO=${MY_REPO}/centos-repo
|
||||
if [ ! -d ${CENTOS_REPO} ]; then
|
||||
CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
|
||||
if [ ! -d ${CENTOS_REPO} ]; then
|
||||
echo "ERROR: directory ${MY_REPO}/centos-repo not found."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# read the options
|
||||
TEMP=$(getopt -o h --long serial,rt,std,installer,containers,layer:,edit,build-avoidance,no-build-avoidance,build-avoidance-dir:,build-avoidance-host:,build-avoidance-user:,build-avoidance-day:,no-meta-patch,no-descendants,no-required,no-build-info,no-autoclean,formal,careful,help,clean,dep-test,append-log -n 'build-pkgs-serial' -- "$@")
|
||||
if [ $? -ne 0 ]; then
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
eval set -- "$TEMP"
|
||||
|
||||
# extract options and their arguments into variables.
|
||||
EXTRA_ARGS_COMMON=""
|
||||
EXTRA_ARGS_SRPM=""
|
||||
EXTRA_ARGS_RPM=""
|
||||
|
||||
export BUILD_AVOIDANCE_OVERRIDE_DIR=""
|
||||
export BUILD_AVOIDANCE_OVERRIDE_HOST=""
|
||||
export BUILD_AVOIDANCE_OVERRIDE_USR=""
|
||||
export BUILD_AVOIDANCE_DAY=""
|
||||
|
||||
while true ; do
|
||||
case "$1" in
|
||||
--append-log) APPEND_LOG_FLAG=1 ; shift ;;
|
||||
--build-avoidance) BUILD_AVOIDANCE_FLAG=1 ; shift ;;
|
||||
--no-build-avoidance) BUILD_AVOIDANCE_FLAG=0 ; shift ;;
|
||||
--build-avoidance-dir) BUILD_AVOIDANCE_OVERRIDE_DIR=$2; shift 2 ;;
|
||||
--build-avoidance-host) BUILD_AVOIDANCE_OVERRIDE_HOST=$2; shift 2 ;;
|
||||
--build-avoidance-user) BUILD_AVOIDANCE_OVERRIDE_USR=$2; shift 2 ;;
|
||||
--build-avoidance-day) BUILD_AVOIDANCE_DAY=$2; shift 2 ;;
|
||||
--no-descendants) EXTRA_ARGS_COMMON+=" --no-descendants" ; shift ;;
|
||||
--formal) EXTRA_ARGS_COMMON+=" --formal" ; shift ;;
|
||||
--careful) EXTRA_ARGS_RPM+=" --careful" ; shift ;;
|
||||
--layer) EXTRA_ARGS_COMMON+=" --layer=$2"; shift 2 ;;
|
||||
--no-required) EXTRA_ARGS_RPM+=" --no-required" ; shift ;;
|
||||
--no-build-info) EXTRA_ARGS_COMMON+=" --no-build-info" ; shift ;;
|
||||
--no-autoclean) EXTRA_ARGS_RPM+=" --no-autoclean" ; shift ;;
|
||||
--no-meta-patch) EXTRA_ARGS_SRPM+=" --no-meta-patch" ; shift ;;
|
||||
-h|--help) HELP=1 ; shift ;;
|
||||
--clean) CLEAN_FLAG=1 ; shift ;;
|
||||
--dep-test) DEP_TEST_FLAG=1; EXTRA_ARGS_RPM+=" --dep-test"; shift ;;
|
||||
--edit) EDIT_FLAG=1 ; EXTRA_ARGS_SRPM+=" --edit"; shift ;;
|
||||
--rt) STD_BUILD=0 ; shift ;;
|
||||
--std) RT_BUILD=0 ; shift ;;
|
||||
--installer) INSTALLER_BUILD=1 ; STD_BUILD=0 ; RT_BUILD=0 ; shift ;;
|
||||
--containers) INSTALLER_BUILD=0 ; STD_BUILD=0 ; RT_BUILD=0 ; CONTAINERS_BUILD=1;shift ;;
|
||||
--serial) shift ;;
|
||||
--) shift ; break ;;
|
||||
*) usage; exit 1 ;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [ $HELP -eq 1 ]; then
|
||||
usage
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Don't source until after BUILD_AVOIDANCE_OVERRIDE_* variables are set.
|
||||
source "${BUILD_PKGS_SERIAL_DIR}/build-avoidance-utils.sh"
|
||||
|
||||
function my_exit() {
|
||||
build-rpms-parallel --std --tmpfs-clean
|
||||
build-rpms-parallel --rt --tmpfs-clean
|
||||
}
|
||||
|
||||
function my_sigint() {
|
||||
echo "build-pkgs-parallel sigint"
|
||||
pkill -SIGABRT -P $BASHPID &> /dev/null
|
||||
echo "build-pkgs-parallel waiting"
|
||||
wait
|
||||
echo "build-pkgs-parallel wait complete"
|
||||
|
||||
}
|
||||
|
||||
function my_sighup() {
|
||||
echo "build-pkgs-parallel sighup"
|
||||
pkill -SIGABRT -P $BASHPID &> /dev/null
|
||||
echo "build-pkgs-parallel waiting"
|
||||
wait
|
||||
echo "build-pkgs-parallel wait complete"
|
||||
}
|
||||
|
||||
function my_sigabrt() {
|
||||
echo "build-pkgs-parallel sigabrt"
|
||||
pkill -SIGABRT -P $BASHPID &> /dev/null
|
||||
echo "build-pkgs-parallel waiting"
|
||||
wait
|
||||
echo "build-pkgs-parallel wait complete"
|
||||
}
|
||||
|
||||
function my_sigterm() {
|
||||
echo "build-pkgs-parallel sigterm"
|
||||
pkill -SIGABRT -P $BASHPID &> /dev/null
|
||||
echo "build-pkgs-parallel waiting"
|
||||
wait
|
||||
echo "build-pkgs-parallel wait complete"
|
||||
}
|
||||
|
||||
trap my_exit EXIT
|
||||
trap my_sigint INT
|
||||
trap my_sighup HUP
|
||||
trap my_sigabrt ABRT
|
||||
trap my_sigterm TERM
|
||||
|
||||
# Note: For ease of parsing, a TARGETS list always begins and ends
|
||||
# with a space. An empty target list consists of two spaces.
|
||||
TARGETS=" $@ "
|
||||
EMPTY_TARGETS=" "
|
||||
|
||||
TARGETS_STD="$EMPTY_TARGETS"
|
||||
TARGETS_RT="$EMPTY_TARGETS"
|
||||
TARGETS_INSTALLER="$EMPTY_TARGETS"
|
||||
TARGETS_CONTAINERS="$EMPTY_TARGETS"
|
||||
TARGETS_MISC="$EMPTY_TARGETS"
|
||||
|
||||
find_targets () {
|
||||
local centos_pkg_dirs=$1
|
||||
local d=""
|
||||
local d2=""
|
||||
local g=""
|
||||
local x=""
|
||||
local name=""
|
||||
local path=""
|
||||
local RESULT="$EMPTY_TARGETS"
|
||||
local FOUND=0
|
||||
|
||||
for d in $GIT_LIST; do
|
||||
if [ -f $d/$centos_pkg_dirs ]; then
|
||||
for d2 in $(grep -v '^#' $d/$centos_pkg_dirs); do
|
||||
name=""
|
||||
if [ -f $d/$d2/centos/srpm_path ]; then
|
||||
path=$(cat $d/$d2/centos/srpm_path | head -n 1 | \
|
||||
sed -e "s#^mirror:CentOS/tis-r3-CentOS/mitaka#${CENTOS_REPO}#" \
|
||||
-e "s#^mirror:#${CENTOS_REPO}/#" \
|
||||
-e "s#^repo:#$MY_REPO/#" \
|
||||
-e "s#^Source/#${CENTOS_REPO}/Source/#")
|
||||
name=$(rpm -q --qf='%{NAME}' --nosignature -p $path)
|
||||
else
|
||||
path=$(find $d/$d2/centos/ -name '*.spec' | head -n 1)
|
||||
if [[ ( -z "$path" ) && ( -f $d/$d2/centos/spec_path ) ]]; then
|
||||
path=$(find $MY_REPO/$(cat $d/$d2/centos/spec_path) -maxdepth 1 -name '*.spec' | head -n 1)
|
||||
fi
|
||||
if [ "$path" != "" ]; then
|
||||
name=$(spec_find_tag Name "$path" 2>> /dev/null)
|
||||
fi
|
||||
fi
|
||||
if [ "$name" != "" ]; then
|
||||
if [ "$BUILD_TYPE" == "rt" ]; then
|
||||
FOUND=0
|
||||
for x in $TARGETS; do
|
||||
if [ "${x: -3}" == "-rt" ]; then
|
||||
if [ "${name}" == "$x" ] || [ "${name}-rt" == "${x}" ]; then
|
||||
RESULT+="$x "
|
||||
FOUND=1
|
||||
break
|
||||
fi
|
||||
fi
|
||||
done
|
||||
if [ $FOUND -eq 0 ]; then
|
||||
for x in $TARGETS; do
|
||||
if [ "${name}" == "${x}-rt" ]; then
|
||||
RESULT+="$x-rt "
|
||||
FOUND=1
|
||||
break
|
||||
else
|
||||
if [ "${name}" == "$x" ] || [ "${name}-rt" == "${x}" ]; then
|
||||
RESULT+="$x "
|
||||
FOUND=1
|
||||
break
|
||||
fi
|
||||
fi
|
||||
done
|
||||
fi
|
||||
else
|
||||
for x in $TARGETS; do
|
||||
if [ "${name}" == "$x" ]; then
|
||||
RESULT+="$x "
|
||||
FOUND=1
|
||||
break
|
||||
fi
|
||||
done
|
||||
fi
|
||||
fi
|
||||
done
|
||||
fi
|
||||
done
|
||||
|
||||
echo "$RESULT"
|
||||
return 0
|
||||
}
|
||||
|
||||
if [ $EDIT_FLAG -eq 1 ] || [ "$TARGETS" != "$EMPTY_TARGETS" ]; then
|
||||
BUILD_AVOIDANCE_FLAG=0
|
||||
fi
|
||||
|
||||
echo "BUILD_AVOIDANCE_FLAG=$BUILD_AVOIDANCE_FLAG"
|
||||
echo "CLEAN_FLAG=$CLEAN_FLAG"
|
||||
echo "EDIT_FLAG=$EDIT_FLAG"
|
||||
|
||||
if [ "$TARGETS" != "$EMPTY_TARGETS" ]; then
|
||||
TARGETS_STD="$(find_targets centos_pkg_dirs)"
|
||||
|
||||
BUILD_TYPE_SAVE="$BUILD_TYPE"
|
||||
BUILD_TYPE="rt"
|
||||
TARGETS_RT="$(find_targets centos_pkg_dirs_rt)"
|
||||
BUILD_TYPE="installer"
|
||||
TARGETS_INSTALLER="$(find_targets centos_pkg_dirs_installer)"
|
||||
BUILD_TYPE="containers"
|
||||
TARGETS_CONTAINERS="$(find_targets centos_pkg_dirs_containers)"
|
||||
BUILD_TYPE="$BUILD_TYPE_SAVE"
|
||||
|
||||
echo "TARGETS_STD=$TARGETS_STD"
|
||||
echo "TARGETS_RT=$TARGETS_RT"
|
||||
echo "TARGETS_INSTALLER=$TARGETS_INSTALLER"
|
||||
echo "TARGETS_CONTAINERS=$TARGETS_CONTAINERS"
|
||||
|
||||
for x in $TARGETS; do
|
||||
if [[ $TARGETS_STD == *" $x "* ]]
|
||||
then
|
||||
echo "found $x" >> /dev/null;
|
||||
else
|
||||
if [[ $TARGETS_RT == *" $x "* ]]
|
||||
then
|
||||
echo "found $x" >> /dev/null;
|
||||
else
|
||||
if [[ $TARGETS_INSTALLER == *" $x "* ]]
|
||||
then
|
||||
echo "found $x" >> /dev/null;
|
||||
INSTALLER_BUILD=1
|
||||
else
|
||||
if [[ $TARGETS_CONTAINERS == *" $x "* ]]
|
||||
then
|
||||
echo "found $x" >> /dev/null;
|
||||
CONTAINERS_BUILD=1
|
||||
else
|
||||
TARGETS_MISC+="$x "
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
echo "EXTRA_ARGS_COMMON='$EXTRA_ARGS_COMMON'"
|
||||
echo "EXTRA_ARGS_SRPM='$EXTRA_ARGS_SRPM'"
|
||||
echo "EXTRA_ARGS_RPM='$EXTRA_ARGS_RPM'"
|
||||
echo "TARGETS='$TARGETS'"
|
||||
echo "TARGETS_STD='$TARGETS_STD'"
|
||||
echo "TARGETS_RT='$TARGETS_RT'"
|
||||
echo "TARGETS_INSTALLER='$TARGETS_INSTALLER'"
|
||||
echo "TARGETS_CONTAINERS='$TARGETS_CONTAINERS'"
|
||||
echo "TARGETS_MISC='$TARGETS_MISC'"
|
||||
|
||||
if [ $CLEAN_FLAG -eq 1 ]; then
|
||||
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] && [ $BUILD_AVOIDANCE_FLAG -eq 1 ] ; then
|
||||
build_avoidance_clean
|
||||
fi
|
||||
|
||||
if [ $STD_BUILD -eq 1 ]; then
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_STD" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
|
||||
if [ $EDIT_FLAG -ne 1 ]; then
|
||||
echo "${BUILD_PKGS_SERIAL_DIR}/build-rpms-serial --std --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_STD $TARGETS_MISC"
|
||||
${BUILD_PKGS_SERIAL_DIR}/build-rpms-serial --std --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_STD $TARGETS_MISC || exit 1
|
||||
|
||||
fi
|
||||
fi
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_STD" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
|
||||
echo "${BUILD_PKGS_SERIAL_DIR}/build-srpms-serial --std --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_STD $TARGETS_MISC"
|
||||
${BUILD_PKGS_SERIAL_DIR}/build-srpms-serial --std --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_STD $TARGETS_MISC || exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ $RT_BUILD -eq 1 ]; then
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_RT" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
|
||||
if [ $EDIT_FLAG -ne 1 ]; then
|
||||
echo "${BUILD_PKGS_SERIAL_DIR}/build-rpms-serial --rt --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_RT $TARGETS_MISC"
|
||||
${BUILD_PKGS_SERIAL_DIR}/build-rpms-serial --rt --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_RT $TARGETS_MISC || exit 1
|
||||
fi
|
||||
fi
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_RT" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
|
||||
echo "${BUILD_PKGS_SERIAL_DIR}/build-srpms-serial --rt --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_RT $TARGETS_MISC"
|
||||
${BUILD_PKGS_SERIAL_DIR}/build-srpms-serial --rt --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_RT $TARGETS_MISC || exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ $INSTALLER_BUILD -eq 1 ]; then
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_INSTALLER" != "$EMPTY_TARGETS" ]; then
|
||||
if [ $EDIT_FLAG -ne 1 ]; then
|
||||
echo "${BUILD_PKGS_SERIAL_DIR}/build-rpms-serial --installer --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_INSTALLER"
|
||||
${BUILD_PKGS_SERIAL_DIR}/build-rpms-serial --installer --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_INSTALLER || exit 1
|
||||
fi
|
||||
fi
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_INSTALLER" != "$EMPTY_TARGETS" ]; then
|
||||
echo "${BUILD_PKGS_SERIAL_DIR}/build-srpms-serial --installer --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_INSTALLER"
|
||||
${BUILD_PKGS_SERIAL_DIR}/build-srpms-serial --installer --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_INSTALLER || exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ $CONTAINERS_BUILD -eq 1 ]; then
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_CONTAINERS" != "$EMPTY_TARGETS" ]; then
|
||||
if [ $EDIT_FLAG -ne 1 ]; then
|
||||
echo "${BUILD_PKGS_SERIAL_DIR}/build-rpms-serial --containers --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_CONTAINERS"
|
||||
${BUILD_PKGS_SERIAL_DIR}/build-rpms-serial --containers --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_CONTAINERS || exit 1
|
||||
fi
|
||||
fi
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_CONTAINERS" != "$EMPTY_TARGETS" ]; then
|
||||
echo "${BUILD_PKGS_SERIAL_DIR}/build-srpms-serial --containers --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_CONTAINERS"
|
||||
${BUILD_PKGS_SERIAL_DIR}/build-srpms-serial --containers --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_CONTAINERS || exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
exit $?
|
||||
fi
|
||||
|
||||
function launch_build()
|
||||
{
|
||||
local build_type=$1
|
||||
shift
|
||||
|
||||
local logfile=$MY_WORKSPACE/build-$build_type.log
|
||||
local rc
|
||||
local targets
|
||||
|
||||
if [ "$build_type" == "std" ]; then
|
||||
targets="$TARGETS_STD $TARGETS_MISC"
|
||||
else
|
||||
if [ "$build_type" == "rt" ]; then
|
||||
targets="$TARGETS_RT $TARGETS_MISC"
|
||||
else
|
||||
if [ "$build_type" == "installer" ]; then
|
||||
targets="$TARGETS_INSTALLER $TARGETS_MISC"
|
||||
else
|
||||
if [ "$build_type" == "containers" ]; then
|
||||
targets="$TARGETS_CONTAINERS $TARGETS_MISC"
|
||||
else
|
||||
targets="$TARGETS"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "Launching $build_type build, logging to $logfile"
|
||||
if [ $APPEND_LOG_FLAG -eq 0 ] && [ -f $logfile ]; then
|
||||
\rm $logfile
|
||||
fi
|
||||
|
||||
echo -e "\n######## $(date): Launching build-srpms-serial --$build_type $EXTRA_ARGS $@\n" | tee --append $logfile
|
||||
|
||||
if [ $BUILD_AVOIDANCE_FLAG -eq 1 ]; then
|
||||
# Build Avoidance requested. Get URL of a usable context, if any.
|
||||
export BUILD_AVOIDANCE_URL=$(get_build_avoidance_context $build_type)
|
||||
fi
|
||||
|
||||
echo "BUILD_AVOIDANCE_URL=$BUILD_AVOIDANCE_URL" | tee --append $logfile
|
||||
if [ "x$BUILD_AVOIDANCE_URL" != "x" ]; then
|
||||
echo "build_avoidance $build_type" | tee --append $logfile
|
||||
build_avoidance $build_type 2>&1 | tee --append $logfile
|
||||
fi
|
||||
|
||||
# No clean flag, call build-srpms-serial followed by build-rpms-serial
|
||||
echo "${BUILD_PKGS_SERIAL_DIR}/build-srpms-serial --$build_type $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $targets" | tee --append $logfile
|
||||
${BUILD_PKGS_SERIAL_DIR}/build-srpms-serial --$build_type $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $targets 2>&1 | tee --append $logfile
|
||||
rc=${PIPESTATUS[0]}
|
||||
if [ $rc -eq 0 ]; then
|
||||
echo -e "\n######## $(date): build-srpm-serial --$build_type was successful" | tee --append $logfile
|
||||
else
|
||||
echo -e "\n######## $(date): build-srpm-serial --$build_type failed with rc=$rc" | tee --append $logfile
|
||||
echo -e "\n$(date): build-srpm-serial --$build_type failed with rc=$rc"
|
||||
exit $rc
|
||||
fi
|
||||
|
||||
if [ $EDIT_FLAG -ne 1 ]; then
|
||||
echo -e "\n######## $(date): Launching build-rpms-serial --$build_type $EXTRA_ARGS $@\n" | tee --append $logfile
|
||||
echo "${BUILD_PKGS_SERIAL_DIR}/build-rpms-serial --$build_type $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $targets" | tee --append $logfile
|
||||
${BUILD_PKGS_SERIAL_DIR}/build-rpms-serial --$build_type $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $targets 2>&1 | tee --append $logfile
|
||||
rc=${PIPESTATUS[0]}
|
||||
if [ $rc -eq 0 ]; then
|
||||
echo -e "\n######## $(date): build-rpm-serial --$build_type was successful" | tee --append $logfile
|
||||
else
|
||||
echo -e "\n######## $(date): build-rpm-serial --$build_type failed with rc=$rc" | tee --append $logfile
|
||||
echo -e "\n$(date): build-rpm-serial --$build_type failed with rc=$rc"
|
||||
exit $rc
|
||||
fi
|
||||
fi
|
||||
|
||||
echo -e "\n$(date): $build_type complete\n"
|
||||
#exit $rc
|
||||
}
|
||||
|
||||
function progbar()
|
||||
{
|
||||
while :; do
|
||||
for s in / - \\ \|; do
|
||||
printf "\r$s"
|
||||
sleep .5
|
||||
done
|
||||
done
|
||||
}
|
||||
|
||||
# Create $MY_WORKSPACE if it doesn't exist already
|
||||
mkdir -p "${MY_WORKSPACE}"
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to create directory '${MY_WORKSPACE}'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Capture build context"
|
||||
git_context > "${MY_WORKSPACE}/CONTEXT"
|
||||
|
||||
if [ $STD_BUILD -eq 1 ]; then
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_STD" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
|
||||
launch_build std
|
||||
else
|
||||
echo "Skipping 'std' build, no valid targets in list: $TARGETS"
|
||||
fi
|
||||
else
|
||||
echo "Skipping 'std' build"
|
||||
fi
|
||||
|
||||
if [ $RT_BUILD -eq 1 ]; then
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_RT" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
|
||||
launch_build rt
|
||||
else
|
||||
echo "Skipping 'rt' build, no valid targets in list: $TARGETS"
|
||||
fi
|
||||
else
|
||||
echo "Skipping 'rt' build"
|
||||
fi
|
||||
|
||||
if [ $INSTALLER_BUILD -eq 1 ]; then
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_INSTALLER" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
|
||||
launch_build installer
|
||||
else
|
||||
echo "Skipping 'installer' build, no valid targets in list: $TARGETS"
|
||||
fi
|
||||
else
|
||||
echo "Skipping 'installer' build"
|
||||
fi
|
||||
|
||||
if [ $CONTAINERS_BUILD -eq 1 ]; then
|
||||
if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_CONTAINERS " != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
|
||||
launch_build containers
|
||||
else
|
||||
echo "Skipping 'containers' build, no valid targets in list: $TARGETS"
|
||||
fi
|
||||
else
|
||||
echo "Skipping 'containers' build"
|
||||
fi
|
||||
|
||||
# Make sure REFERENCE_BUILD is set to something
|
||||
if [ -z $REFERENCE_BUILD ]; then
|
||||
REFERENCE_BUILD=0
|
||||
fi
|
||||
|
||||
if [ $REFERENCE_BUILD -eq 1 ]; then
|
||||
echo "Saving reference context"
|
||||
build_avoidance_save_reference_context
|
||||
fi
|
||||
|
||||
echo "All builds were successful"
|
||||
|
||||
exit 0
|
||||
|
@ -1,44 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
#
|
||||
# Copyright (c) 2018 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
#
|
||||
# Builds rpm files from src.rpm files.
|
||||
#
|
||||
# This program is a wrapper around build-rpms-parallel and build-rpms-serial
|
||||
#
|
||||
|
||||
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
||||
|
||||
usage () {
|
||||
echo ""
|
||||
echo "Usage: "
|
||||
echo " Create binary rpms:"
|
||||
echo " build-rpms [--serial] [args]"
|
||||
}
|
||||
|
||||
SERIAL_FLAG=0
|
||||
|
||||
for arg in "$@"; do
|
||||
case "$1" in
|
||||
--serial) SERIAL_FLAG=1 ;;
|
||||
esac
|
||||
done
|
||||
|
||||
which mock_tmpfs_umount >> /dev/null
|
||||
if [ $? -ne 0 ]; then
|
||||
SERIAL_FLAG=1
|
||||
fi
|
||||
|
||||
if [ $SERIAL_FLAG -eq 1 ]; then
|
||||
echo "build-rpms-serial $@"
|
||||
build-rpms-serial "$@"
|
||||
else
|
||||
echo "build-rpms-parallel $@"
|
||||
build-rpms-parallel "$@"
|
||||
fi
|
||||
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
@ -1,52 +0,0 @@
#!/bin/bash

#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

#
# Create src.rpm files from source, or from a downloaded tarball
# or src.rpm plus our additional patches.
#
# This program is a wrapper around build-srpms-parallel and build-srpms-serial
#
# The locations of packages to be built are identified by
# <distro>_pkg_dirs[_<opt-build-type>] files located at the root of
# any git tree (e.g. stx/stx-integ/centos_pkg_dirs).
#
# The build of an individual package is driven by its build_srpm.data
# file plus a <pkg-name>.spec file or an srpm_path file.
#

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

usage () {
    echo ""
    echo "Usage: "
    echo " Create source rpms:"
    echo " build-srpms [--serial] [args]"
}

SERIAL_FLAG=0

for arg in "$@"; do
    case "$arg" in
        --serial) SERIAL_FLAG=1 ;;
    esac
done

which mock_tmpfs_umount >> /dev/null
if [ $? -ne 0 ]; then
    SERIAL_FLAG=1
fi

if [ $SERIAL_FLAG -eq 1 ]; then
    echo "build-srpms-serial $@"
    build-srpms-serial "$@"
else
    echo "build-srpms-parallel $@"
    build-srpms-parallel "$@"
fi

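To make the header comment concrete, a hypothetical layout (paths, the package name, and the build_srpm.data variables are illustrative only; real packages may use different or additional variables):

stx/stx-integ/centos_pkg_dirs                      # one package directory per line, e.g. "utilities/example-pkg"
utilities/example-pkg/centos/example-pkg.spec      # spec file driving the src.rpm build
utilities/example-pkg/centos/build_srpm.data       # containing, for example:
    SRC_DIR="$PKG_BASE/src"                        # where the package sources live (assumed variable)
    TIS_PATCH_VER=1                                # package revision counter (assumed variable)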
@ -1,106 +0,0 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

#
# Functions common to build-srpm-serial and build-srpm-parallel.
#

SRC_BUILD_TYPE_SRPM="srpm"
SRC_BUILD_TYPE_SPEC="spec"
SRC_BUILD_TYPES="$SRC_BUILD_TYPE_SRPM $SRC_BUILD_TYPE_SPEC"

set_build_info () {
    local info_file="$MY_WORKSPACE/BUILD_INFO"
    local layer_prefix="${LAYER^^}_"
    if [ "${LAYER}" == "" ]; then
        layer_prefix=""
    fi
    mkdir -p "$(dirname ${info_file})"
    echo "${layer_prefix}OS=\"centos\"" > "${info_file}"
    echo "${layer_prefix}JOB=\"n/a\"" >> "${info_file}"
    echo "${layer_prefix}BUILD_BY=\"${USER}\"" >> "${info_file}"
    echo "${layer_prefix}BUILD_NUMBER=\"n/a\"" >> "${info_file}"
    echo "${layer_prefix}BUILD_HOST=\"$(hostname)\"" >> "${info_file}"
    echo "${layer_prefix}BUILD_DATE=\"$(date '+%Y-%m-%d %H:%M:%S %z')\"" >> "${info_file}"
}

str_lst_contains() {
    TARGET="$1"
    LST="$2"

    if [[ $LST =~ (^|[[:space:]])$TARGET($|[[:space:]]) ]] ; then
        return 0
    else
        return 1
    fi
}

#
# md5sums_from_input_vars <src-build-type> <srpm-or-spec-path> <work-dir>
#
# Returns md5 data for all input files of a src.rpm.
# Assumes PKG_BASE, ORIG_SRPM_PATH have been defined and the
# build_srpm.data file has already been sourced.
#
# Arguments:
#   src-build-type: Any single value from $SRC_BUILD_TYPES.
#                   e.g. 'srpm' or 'spec'
#   srpm-or-spec-path: Absolute path to an src.rpm, or to a
#                      spec file.
#   work-dir: Optional working directory.  If a path is
#             specified but does not exist, it will be created.
#
# Returns: output of md5sum command with canonical path names
#
md5sums_from_input_vars () {
    local SRC_BUILD_TYPE="$1"
    local SRPM_OR_SPEC_PATH="$2"
    local WORK_DIR="$3"

    local TMP_FLAG=0
    local LINK_FILTER='[/]stx[/]downloads[/]'

    if ! str_lst_contains "$SRC_BUILD_TYPE" "$SRC_BUILD_TYPES" ; then
        >&2 echo "ERROR: $FUNCNAME (${LINENO}): invalid arg: SRC_BUILD_TYPE='$SRC_BUILD_TYPE'"
        return 1
    fi

    if [ -z $WORK_DIR ]; then
        WORK_DIR=$(mktemp -d /tmp/${FUNCNAME}_XXXXXX)
        if [ $? -ne 0 ]; then
            >&2 echo "ERROR: $FUNCNAME (${LINENO}): mktemp -d /tmp/${FUNCNAME}_XXXXXX"
            return 1
        fi
        TMP_FLAG=1
    else
        mkdir -p "$WORK_DIR"
        if [ $? -ne 0 ]; then
            >&2 echo "ERROR: $FUNCNAME (${LINENO}): mkdir -p '$WORK_DIR'"
            return 1
        fi
    fi

    local INPUT_FILES_SORTED="$WORK_DIR/srpm_sorted_input.files"

    # Create lists of input files (INPUT_FILES) and symlinks (INPUT_LINKS).
    srpm_source_file_list "$SRC_BUILD_TYPE" "$SRPM_OR_SPEC_PATH" "$INPUT_FILES_SORTED"
    if [ $? -eq 1 ]; then
        return 1
    fi

    # Remove $MY_REPO prefix from paths
    cat $INPUT_FILES_SORTED | xargs -d '\n' md5sum | sed "s# $(readlink -f $MY_REPO)/# #"

    if [ $TMP_FLAG -eq 0 ]; then
        \rm -f $INPUT_FILES_SORTED
    else
        \rm -rf $WORK_DIR
    fi

    return 0
}
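As a usage illustration only (the spec path and output file below are hypothetical), the function above would typically be called after a package's build_srpm.data has been sourced:

# Hash every input of a spec-driven package into a manifest file
md5sums_from_input_vars "$SRC_BUILD_TYPE_SPEC" \
    "$MY_REPO/stx/utilities/example-pkg/centos/example-pkg.spec" \
    "$MY_WORKSPACE/tmp/example-pkg" > "$MY_WORKSPACE/example-pkg.srpm_input.md5"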
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -7,9 +7,8 @@ debian/Dockerfile:
- convert thrifty & nss to wheels and don't install them in Dockerfile

build-wheel-tarball.sh:
- current DEB wheel packages install wheels at random locations, rather
  than under /wheels as in CentOS. Fix them and remove the workaround
  in this script.
- current DEB wheel packages install wheels at random locations.
  Fix them and remove the workaround in this script.

build-wheel-tarball.sh:
- look for wheels in non-Starlingx DEBs. Requires accessing repomgr via
@ -20,7 +20,7 @@ fi

KEEP_IMAGE=no
KEEP_CONTAINER=no
SUPPORTED_OS_LIST=('centos' 'debian')
SUPPORTED_OS_LIST=( 'debian' )
OS=
OS_VERSION=
BUILD_STREAM=stable
@ -38,7 +38,7 @@ Usage:
$(basename $0) [ --os <os> ] [ --keep-image ] [ --keep-container ] [ --stream <stable|dev> ]

Options:
--os: Override base OS (eg. centos; default: auto)
--os: Override base OS (eg. debian; default: auto)
--os-version: Override OS version (default: auto)
--keep-image: Skip deletion of the wheel build image in docker
--keep-container: Skip deletion of container used for the build
@ -17,7 +17,7 @@ if [ -z "${MY_WORKSPACE}" -o -z "${MY_REPO}" ]; then
exit 1
fi

SUPPORTED_OS_ARGS=('centos' 'debian')
SUPPORTED_OS_ARGS=( 'debian' )
OS=
OS_VERSION=
BUILD_STREAM=stable
@ -1,46 +0,0 @@
ARG RELEASE=7.5.1804
FROM centos:${RELEASE}

ARG BUILD_STREAM=stable

# Install the necessary packages for building the python modules.
# Some of these are dependencies of the specific modules, and could
# instead be added to the wheels.cfg file in the future.
RUN set -ex ;\
sed -i '/\[main\]/ atimeout=120' /etc/yum.conf ;\
yum makecache ;\
# nss>3.53.1 causes compile errors with some wheels
nss_rpms=$(echo nss nss-util nss-tools nss-sysinit nss-softokn \
nss-softokn-devel nss-softokn-freebl nss-devel \
nss-util-devel nss-softokn-freebl-devel) ;\
# install/upgrade all NSS packages @ v3.53.1
yum install -y $(echo $nss_rpms | awk -v RS=' ' '{print $1 "-3.53.1"}') ;\
# add "exclude=$nss_rpms" to the CentOS repo file
sed -i -r -e "/^\\s*[[]updates[]]/a exclude=$nss_rpms" /etc/yum.repos.d/CentOS-Base.repo ;\
# install required packages
yum install -y epel-release centos-release-openstack-queens ;\
yum install -y git gcc zip bzip2 unzip \
python3 python3-pip python3-wheel python3-devel \
wget openldap-devel mariadb mariadb-devel \
libvirt libvirt-devel liberasurecode-devel nss-devel \
systemd-devel postgresql-devel ;\
# pip<19.2.3 doesn't ignore yanked packages from pypi.org
python3 -m pip install pip==19.2.3 ;\
# setuptools-scm's maintainers keep publishing and yanking new versions.
# Pin it to the latest version known to work.
python3 -m pip install setuptools-scm==6.0.1 ;\
# setuptools newer than 45.3 no longer supports "Features" in setup.py
python3 -m pip install --user setuptools==45.3 ;\
python3 -m pip install --user --upgrade wheel
COPY docker-common/docker-build-wheel.sh /
COPY centos/${BUILD_STREAM}-wheels.cfg /wheels.cfg

# Python2 packages
RUN set -ex; \
yum -y install python python-devel ;\
wget https://bootstrap.pypa.io/pip/2.7/get-pip.py ;\
python get-pip.py
COPY centos/${BUILD_STREAM}-wheels-py2.cfg /wheels-py2.cfg

# root CA cert expired on October 1st, 2021
RUN yum update -y ca-certificates
@ -1,20 +0,0 @@
|
||||
#
|
||||
# git: wheelname|git|git-source|basedir|branch
|
||||
# tar: wheelname|tar|wget-source|basedir
|
||||
# pypi: wheelname|pypi|wget-source
|
||||
# zip: wheelname|zip|wget-source|basedir
|
||||
#
|
||||
# If fix_setup must be called, add |fix_setup at the end of the line
|
||||
#
|
||||
# See doc/wheels-cfg.md for more info.
|
||||
#
|
||||
amqplib-1.0.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/75/b7/8c2429bf8d92354a0118614f9a4d15e53bc69ebedce534284111de5a0102/amqplib-1.0.2.tgz|amqplib-1.0.2
|
||||
lz4-0.9.0-cp27-cp27mu-linux_x86_64.whl|git|https://github.com/python-lz4/python-lz4|python-lz4|v0.9.0
|
||||
panko-5.0.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/a9/89/d666e0889d869e41c9b7f87a0a34858b2520782b82e025da84c98e0db8f6/panko-5.0.0.tar.gz|panko-5.0.0
|
||||
google_api_python_client-1.7.7-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/d7/47/940908e52487440f61fb93ad55cbbe3a28235d3bb143b26affb17b37dd28/google_api_python_client-1.7.7-py2.py3-none-any.whl
|
||||
neutron_lib-*.whl|git|https://github.com/openstack/neutron-lib|neutron-lib|master
|
||||
python_openstackclient-*.whl|git|https://github.com/openstack/python-openstackclient|python-openstackclient|master
|
||||
openstacksdk-*.whl|git|https://github.com/openstack/openstacksdk|openstacksdk|master
|
||||
networking_sfc-8.0.0.0b2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/6a/a8/0e9bdd1f87dfb50682f23a01f590530ec8fa715e51127cf9f58d1905886c/networking_sfc-8.0.0.0b2-py2.py3-none-any.whl
|
||||
croniter-0.3.29-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/a9/c9/11182a2507798c661b04a7914739ea8ca73a738e6869a23742029f51bc1a/croniter-0.3.29-py2.py3-none-any.whl
|
||||
pecan-1.3.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/93/98/889d7615595e894f4f7e4c17d4008c822c8e39e650c8ab390cc6c39b99c4/pecan-1.3.3.tar.gz|pecan-1.3.3
|
@ -1,21 +0,0 @@
|
||||
#
|
||||
# git: wheelname|git|git-source|basedir|branch
|
||||
# tar: wheelname|tar|wget-source|basedir
|
||||
# pypi: wheelname|pypi|wget-source
|
||||
# zip: wheelname|zip|wget-source|basedir
|
||||
#
|
||||
# If fix_setup must be called, add |fix_setup at the end of the line
|
||||
#
|
||||
# See doc/wheels-cfg.md for more info.
|
||||
#
|
||||
amqplib-1.0.2-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/75/b7/8c2429bf8d92354a0118614f9a4d15e53bc69ebedce534284111de5a0102/amqplib-1.0.2.tgz|amqplib-1.0.2
|
||||
croniter-0.3.29-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/a9/c9/11182a2507798c661b04a7914739ea8ca73a738e6869a23742029f51bc1a/croniter-0.3.29-py2.py3-none-any.whl
|
||||
google_api_python_client-1.7.7-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/d7/47/940908e52487440f61fb93ad55cbbe3a28235d3bb143b26affb17b37dd28/google_api_python_client-1.7.7-py2.py3-none-any.whl
|
||||
lz4-0.9.0-cp36-cp36m-linux_x86_64.whl|git|https://github.com/python-lz4/python-lz4|python-lz4|v0.9.0
|
||||
networking_sfc-8.0.0.0b2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/6a/a8/0e9bdd1f87dfb50682f23a01f590530ec8fa715e51127cf9f58d1905886c/networking_sfc-8.0.0.0b2-py2.py3-none-any.whl
|
||||
neutron_lib-*.whl|git|https://github.com/openstack/neutron-lib|neutron-lib|master
|
||||
python_openstackclient-*.whl|git|https://github.com/openstack/python-openstackclient|python-openstackclient|master
|
||||
openstacksdk-*.whl|git|https://github.com/openstack/openstacksdk|openstacksdk|master
|
||||
panko-5.0.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/a9/89/d666e0889d869e41c9b7f87a0a34858b2520782b82e025da84c98e0db8f6/panko-5.0.0.tar.gz|panko-5.0.0
|
||||
pecan-1.3.3-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/93/98/889d7615595e894f4f7e4c17d4008c822c8e39e650c8ab390cc6c39b99c4/pecan-1.3.3.tar.gz|pecan-1.3.3
|
||||
|
@ -1,13 +0,0 @@
# This file specifies constraint/requirement URLs for current and python2
# openstack branches

# Current/stable
STABLE_OPENSTACK_REQ_URL="https://raw.githubusercontent.com/openstack/requirements/stable/ussuri"
# Current/experimental (for dev images)
MASTER_OPENSTACK_REQ_URL="https://raw.githubusercontent.com/openstack/requirements/master"

# Python2/stable
STABLE_OPENSTACK_REQ_URL_PY2="https://opendev.org/openstack/requirements/raw/commit/2da5c5045118b0e36fb14427872e4b9b37335071"
# Python2/experimental (for dev images)
MASTER_OPENSTACK_REQ_URL_PY2="https://raw.githubusercontent.com/openstack/requirements/stable/train"
@ -1,178 +0,0 @@
|
||||
#
|
||||
# git: wheelname|git|git-source|basedir|branch
|
||||
# tar: wheelname|tar|wget-source|basedir
|
||||
# pypi: wheelname|pypi|wget-source
|
||||
# zip: wheelname|zip|wget-source|basedir
|
||||
#
|
||||
# If fix_setup must be called, add |fix_setup at the end of the line
|
||||
#
|
||||
# See doc/wheels-cfg.md for more info.
|
||||
#
|
||||
abclient-0.2.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/49/eb/091b02c1e36d68927adfb746706e2c80f7e7bfb3f16e3cbcfec2632118ab/abclient-0.2.3.tar.gz|abclient-0.2.3
|
||||
alembic-1.1.0-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/9a/0f/a5e8997d58882da8ecd288360dddf133a83145de6480216774923b393422/alembic-1.1.0.tar.gz|alembic-1.1.0
|
||||
amqplib-1.0.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/75/b7/8c2429bf8d92354a0118614f9a4d15e53bc69ebedce534284111de5a0102/amqplib-1.0.2.tgz|amqplib-1.0.2
|
||||
anyjson-0.3.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/c3/4d/d4089e1a3dd25b46bebdb55a992b0797cff657b4477bc32ce28038fdecbc/anyjson-0.3.3.tar.gz|anyjson-0.3.3
|
||||
backports.ssl_match_hostname-3.7.0.1-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/ff/2b/8265224812912bc5b7a607c44bf7b027554e1b9775e9ee0de8032e3de4b2/backports.ssl_match_hostname-3.7.0.1.tar.gz|backports.ssl_match_hostname-3.7.0.1|fix_setup
|
||||
bottle-0.12.17-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/c4/a5/6bf41779860e9b526772e1b3b31a65a22bd97535572988d16028c5ab617d/bottle-0.12.17.tar.gz|bottle-0.12.17
|
||||
cassandra_driver-3.19.0-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/1c/fe/e4df42a3e864b6b7b2c7f6050b66cafc7fba8b46da0dfb9d51867e171a77/cassandra-driver-3.19.0.tar.gz|cassandra-driver-3.19.0
|
||||
cmd2-0.8.9-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/e9/40/a71caa2aaff10c73612a7106e2d35f693e85b8cf6e37ab0774274bca3cf9/cmd2-0.8.9-py2.py3-none-any.whl
|
||||
construct-2.8.22-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/e5/c6/3e3aeef38bb0c27364af3d21493d9690c7c3925f298559bca3c48b7c9419/construct-2.8.22.tar.gz|construct-2.8.22
|
||||
crc16-0.1.1-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/a6/e0/70a44c4385f2b33df82e518005aae16b5c1feaf082c73c0acebe3426fc0a/crc16-0.1.1.tar.gz|crc16-0.1.1|fix_setup
|
||||
demjson-2.2.4-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/96/67/6db789e2533158963d4af689f961b644ddd9200615b8ce92d6cad695c65a/demjson-2.2.4.tar.gz|demjson-2.2.4|fix_setup
|
||||
django_floppyforms-1.7.0-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/8c/18/30a9137c7ae279a27ccdeb10f6fe8be18ee98551d01ec030b6cfe8b2d2e2/django-floppyforms-1.7.0.tar.gz|django-floppyforms-1.7.0
|
||||
django_pyscss-2.0.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/4b/7f/d771802305184aac6010826f60a0b2ecaa3f57d19ab0e405f0c8db07e809/django-pyscss-2.0.2.tar.gz|django-pyscss-2.0.2
|
||||
docopt-0.6.2-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/a2/55/8f8cab2afd404cf578136ef2cc5dfb50baa1761b68c9da1fb1e4eed343c9/docopt-0.6.2.tar.gz|docopt-0.6.2
|
||||
dogpile.cache-0.7.1-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/84/3e/dbf1cfc5228f1d3dca80ef714db2c5aaec5cd9efaf54d7e3daef6bc48b19/dogpile.cache-0.7.1.tar.gz|dogpile.cache-0.7.1
|
||||
enum_compat-0.0.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/95/6e/26bdcba28b66126f66cf3e4cd03bcd63f7ae330d29ee68b1f6b623550bfa/enum-compat-0.0.2.tar.gz|enum-compat-0.0.2
|
||||
etcd3-0.10.0-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/09/f1/93603a26daf7a993a0acbbcfd32afce8b2fdf30a765d5651571ab635969b/etcd3-0.10.0.tar.gz|etcd3-0.10.0
|
||||
exabgp-4.1.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/b9/f1/f2417bc82c9caa220fcd369a3b55ac895088bcc8afc262e4bb07d48aa40c/exabgp-4.1.2.tar.gz|exabgp-4.1.2
|
||||
flask_keystone-0.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/1f/ca/3938de8c5f4a3d1c5dd4278bedb9d31d79816feba4d088293c620a366fb1/flask_keystone-0.2.tar.gz|flask_keystone-0.2
|
||||
flask_oslolog-0.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/a7/62/fec02ce761b548b1289680bb1be1aa0bce2b2c4017d5b31bd6c67c78aef9/flask_oslolog-0.1.tar.gz|flask_oslolog-0.1
|
||||
fortiosclient-0.0.3-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/e9/aa/b2c0705d5e52c8d9af35422d940800b49c562758fbdad3179a6fbf6e92f5/fortiosclient-0.0.3.tar.gz|fortiosclient-0.0.3
|
||||
frozendict-1.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/4e/55/a12ded2c426a4d2bee73f88304c9c08ebbdbadb82569ebdd6a0c007cfd08/frozendict-1.2.tar.gz|frozendict-1.2
|
||||
funcparserlib-0.3.6-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/cb/f7/b4a59c3ccf67c0082546eaeb454da1a6610e924d2e7a2a21f337ecae7b40/funcparserlib-0.3.6.tar.gz|funcparserlib-0.3.6
|
||||
functools32-3.2.3.post2-py2-none-any.whl|git|https://github.com/MiCHiLU/python-functools32|python-functools32|3.2.3-2|fix_setup
|
||||
future-0.17.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/90/52/e20466b85000a181e1e144fd8305caf2cf475e2f9674e797b222f8105f5f/future-0.17.1.tar.gz|future-0.17.1
|
||||
happybase-1.2.0-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/d1/9c/f5f7bdb5439cda2b7da4e20ac24ec0e2455fd68aade8397f211d2994c39d/happybase-1.2.0.tar.gz|happybase-1.2.0
|
||||
hiredis-1.0.0-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/9e/e0/c160dbdff032ffe68e4b3c576cba3db22d8ceffc9513ae63368296d1bcc8/hiredis-1.0.0.tar.gz|hiredis-1.0.0
|
||||
httplib2-0.13.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/78/23/bb9606e87a66fd8c72a2b1a75b049d3859a122bc2648915be845bc44e04f/httplib2-0.13.1.tar.gz|httplib2-0.13.1
|
||||
itsdangerous-1.1.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/76/ae/44b03b253d6fade317f32c24d100b3b35c2239807046a4c953c7b89fa49e/itsdangerous-1.1.0-py2.py3-none-any.whl
|
||||
jaeger_client-4.1.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/f1/da/569a4f1bc3d0c412c7f903053f09ef62fa10949374ca90bc852b22dd3860/jaeger-client-4.1.0.tar.gz|jaeger-client-4.1.0
|
||||
jsonpath_rw-1.4.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/71/7c/45001b1f19af8c4478489fbae4fc657b21c4c669d7a5a036a86882581d85/jsonpath-rw-1.4.0.tar.gz|jsonpath-rw-1.4.0
|
||||
krest-1.3.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/fb/d2/9dbbd3a76f2385041720a0eb51ddab676e688fa8bee8a1489470839616cf/krest-1.3.1.tar.gz|krest-1.3.1
|
||||
#libvirt_python-4.4.0-cp27-none-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/2b/8d/1160cf34dc3d296896eb5c8f4944439ea368b87d2d2431f58d08d6bdf374/libvirt-python-4.4.0.tar.gz|libvirt-python-4.4.0|fix_setup
|
||||
logutils-0.3.5-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/49/b2/b57450889bf73da26027f8b995fd5fbfab258ec24ef967e4c1892f7cb121/logutils-0.3.5.tar.gz|logutils-0.3.5|fix_setup
|
||||
lz4-0.9.0-cp27-cp27mu-linux_x86_64.whl|git|https://github.com/python-lz4/python-lz4|python-lz4|v0.9.0
|
||||
Mako-1.1.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/b0/3c/8dcd6883d009f7cae0f3157fb53e9afb05a0d3d33b3db1268ec2e6f4a56b/Mako-1.1.0.tar.gz|Mako-1.1.0
|
||||
marathon-0.11.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/97/e3/f036af0d94f98d199233faa71b5bcbef8b8e8e634551940d98c95d276e4f/marathon-0.11.0-py2.py3-none-any.whl
|
||||
MarkupSafe-1.1.1-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/b9/2e/64db92e53b86efccfaea71321f597fa2e1b2bd3853d8ce658568f7a13094/MarkupSafe-1.1.1.tar.gz|MarkupSafe-1.1.1
|
||||
mox-0.5.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/0c/a1/64740c638cc5fae807022368f4141700518ee343b53eb3e90bf3cc15a4d4/mox-0.5.3.tar.gz|mox-0.5.3|fix_setup
|
||||
migrate-0.3.8-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/ce/31/1a4cbf8dc0536c55f41072e8ea37b3df1e412262dc731c57e5bb099eb9b2/migrate-0.3.8.tar.gz|migrate-0.3.8
|
||||
mpmath-1.1.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/ca/63/3384ebb3b51af9610086b23ea976e6d27d6d97bf140a76a365bd77a3eb32/mpmath-1.1.0.tar.gz|mpmath-1.1.0|fix_setup
|
||||
msgpack_python-0.4.8-cp27-cp27mu-linux_x86_64.whl|git|https://github.com/msgpack/msgpack-python.git|msgpack-python|0.4.8
|
||||
munch-2.3.2-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/68/f4/260ec98ea840757a0da09e0ed8135333d59b8dfebe9752a365b04857660a/munch-2.3.2.tar.gz|munch-2.3.2
|
||||
ndg_httpsclient-0.5.1-py2-none-any.whl|pypi|https://files.pythonhosted.org/packages/bf/b2/26470fde7ff55169df8e071fb42cb1f83e22bd952520ab2b5c5a5edc2acd/ndg_httpsclient-0.5.1-py2-none-any.whl
|
||||
netifaces-0.10.9-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/0d/18/fd6e9c71a35b67a73160ec80a49da63d1eed2d2055054cc2995714949132/netifaces-0.10.9.tar.gz|netifaces-0.10.9
|
||||
networking_sfc-8.0.0.0b2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/6a/a8/0e9bdd1f87dfb50682f23a01f590530ec8fa715e51127cf9f58d1905886c/networking_sfc-8.0.0.0b2-py2.py3-none-any.whl
|
||||
networkx-2.2-py2.py3-none-any.whl|zip|https://files.pythonhosted.org/packages/f3/f4/7e20ef40b118478191cec0b58c3192f822cace858c19505c7670961b76b2/networkx-2.2.zip|networkx-2.2
|
||||
neutron_lib-1.29.1-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/6b/dd/548cbb7a936de18aa642372927e409540d8f5d96a2f7650c4d1197845f3c/neutron_lib-1.29.1-py2.py3-none-any.whl
|
||||
nodeenv-1.3.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/00/6e/ed417bd1ed417ab3feada52d0c89ab0ed87d150f91590badf84273e047c9/nodeenv-1.3.3.tar.gz|nodeenv-1.3.3
|
||||
nose_exclude-0.5.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/63/cf/90c4be56bf11b7bc8801086d9445baf731aa36b8e8fc5791731e8e604dcd/nose-exclude-0.5.0.tar.gz|nose-exclude-0.5.0
|
||||
nosehtmloutput-0.0.5-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/0c/f7/6cb16c0b233d3f2d62be38ddb7d7c1bc967188c41575ecf0312e6575730d/nosehtmloutput-0.0.5.tar.gz|nosehtmloutput-0.0.5
|
||||
openshift-0.8.6-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/73/ed/c92c0ba23b6c4c8e5542151a1b89cb8ff01f68a72fe68f6c95a28d885ebe/openshift-0.8.6.tar.gz|openshift-0.8.6
|
||||
openstack.nose_plugin-0.11-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/bc/83/e7c9b9297e1a501d2c2617f98d6176199570e8ee32f0e72669c8852c6c81/openstack.nose_plugin-0.11.tar.gz|openstack.nose_plugin-0.11
|
||||
opentracing-2.2.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/94/9f/289424136addf621fb4c75624ef9a3a80e8575da3993a87950c57e93217e/opentracing-2.2.0.tar.gz|opentracing-2.2.0
|
||||
ovs-2.11.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/81/06/387b2159ac073de95e484aa6e2f108a232cd906e350307168843061f899f/ovs-2.11.0.tar.gz|ovs-2.11.0
|
||||
panko-5.0.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/a9/89/d666e0889d869e41c9b7f87a0a34858b2520782b82e025da84c98e0db8f6/panko-5.0.0.tar.gz|panko-5.0.0
|
||||
pathlib-1.0.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/ac/aa/9b065a76b9af472437a0059f77e8f962fe350438b927cb80184c32f075eb/pathlib-1.0.1.tar.gz|pathlib-1.0.1|fix_setup
|
||||
pecan-1.3.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/93/98/889d7615595e894f4f7e4c17d4008c822c8e39e650c8ab390cc6c39b99c4/pecan-1.3.3.tar.gz|pecan-1.3.3
|
||||
pifpaf-2.2.2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/33/dc/4f276c55d94cd73fc1f94e2d23f34b476fea38d240e3e17b837a5749bc9f/pifpaf-2.2.2-py2.py3-none-any.whl
|
||||
pika_pool-0.1.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/ec/48/50c8f02a3eef4cb824bec50661ec1713040402cc1b2a38954dc977a59c23/pika-pool-0.1.3.tar.gz|pika-pool-0.1.3
|
||||
Pint-0.9-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/15/9d/bf177ebbc57d25e9e296addc14a1303d1e34d7964af5df428a8332349c42/Pint-0.9-py2.py3-none-any.whl
|
||||
ply-3.11-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/a3/58/35da89ee790598a0700ea49b2a66594140f44dec458c07e8e3d4979137fc/ply-3.11-py2.py3-none-any.whl
|
||||
positional-1.1.2-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/8c/16/64a4fa0967c486380468dca18867d22ac1c17bba06349e31ace77c7757f7/positional-1.1.2.tar.gz|positional-1.1.2
|
||||
prettytable-0.7.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/e0/a1/36203205f77ccf98f3c6cf17cf068c972e6458d7e58509ca66da949ca347/prettytable-0.7.2.tar.gz|prettytable-0.7.2
|
||||
proboscis-1.2.6.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/3c/c8/c187818ab8d0faecdc3c16c1e0b2e522f3b38570f0fb91dcae21662019d0/proboscis-1.2.6.0.tar.gz|proboscis-1.2.6.0
|
||||
psutil-5.6.3-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/1c/ca/5b8c1fe032a458c2c4bcbe509d1401dca9dda35c7fc46b36bb81c2834740/psutil-5.6.3.tar.gz|psutil-5.6.3
|
||||
psycopg2-2.8.3-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/5c/1c/6997288da181277a0c29bc39a5f9143ff20b8c99f2a7d059cfb55163e165/psycopg2-2.8.3.tar.gz|psycopg2-2.8.3
|
||||
PuLP-1.6.10-py2-none-any.whl|zip|https://files.pythonhosted.org/packages/2d/33/3ae6d9d2ac8c7068937af6372fd8828ac605e62a8b17106fe57110930d38/PuLP-1.6.10.zip|PuLP-1.6.10
|
||||
pycparser-2.19-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/68/9e/49196946aee219aead1290e00d1e7fdeab8567783e83e1b9ab5585e6206a/pycparser-2.19.tar.gz|pycparser-2.19
|
||||
pycrypto-2.6.1-cp27-cp27mu-linux_x86_64.whl|git|https://github.com/dlitz/pycrypto|pycrypto|v2.6.1|fix_setup
|
||||
pycryptodomex-3.9.0-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/e4/90/a01cafbbad7466491e3a630bf1d734294a32ff1b10e7429e9a4e8478669e/pycryptodomex-3.9.0.tar.gz|pycryptodomex-3.9.0
|
||||
pydot-1.4.1-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/33/d1/b1479a770f66d962f545c2101630ce1d5592d90cb4f083d38862e93d16d2/pydot-1.4.1-py2.py3-none-any.whl
|
||||
pydotplus-2.0.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/60/bf/62567830b700d9f6930e9ab6831d6ba256f7b0b730acb37278b0ccdffacf/pydotplus-2.0.2.tar.gz|pydotplus-2.0.2
|
||||
pyeclib-1.6.0-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/aa/d6/ca6bba5e66fc7a9810a995b17a3675492da2bec405806d8ac3db18cfd93b/pyeclib-1.6.0.tar.gz|pyeclib-1.6.0|fix_setup
|
||||
pyinotify-0.9.6-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/e3/c0/fd5b18dde17c1249658521f69598f3252f11d9d7a980c5be8619970646e1/pyinotify-0.9.6.tar.gz|pyinotify-0.9.6
|
||||
pykerberos-1.2.1-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/9a/b8/1ec56b6fa8a2e2a81420bd3d90e70b59fc83f6b857fb2c2c37accddc8be3/pykerberos-1.2.1.tar.gz|pykerberos-1.2.1
|
||||
PyKMIP-0.9.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/24/b2/258332aea85163f49a187337e8c85ee4529eb499b84fe0a6fe2d1a9c8d25/PyKMIP-0.9.1.tar.gz|PyKMIP-0.9.1
|
||||
pylxd-2.2.10-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/49/9a/eba58646721ffbff40dc41571b13c9528fdc4e26a82252318c997cdbe26a/pylxd-2.2.10.tar.gz|pylxd-2.2.10
|
||||
pyngus-2.3.0-py2-none-any.whl|zip|https://files.pythonhosted.org/packages/58/b1/336b8f64e7e4efa9b95027af71e02cd4cfacca8f919345badb852381878a/pyngus-2.3.0.zip|pyngus-2.3.0
|
||||
pyperclip-1.7.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/2d/0f/4eda562dffd085945d57c2d9a5da745cfb5228c02bc90f2c74bbac746243/pyperclip-1.7.0.tar.gz|pyperclip-1.7.0
|
||||
pyroute2-0.5.6-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/f6/80/16a604075345f0c253537d55e5c5282a37c61a1fc8ee0fcc42d1fd2a0739/pyroute2-0.5.6.tar.gz|pyroute2-0.5.6|fix_setup
|
||||
pyrsistent-0.15.4-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/b9/66/b2638d96a2d128b168d0dba60fdc77b7800a9b4a5340cefcc5fc4eae6295/pyrsistent-0.15.4.tar.gz|pyrsistent-0.15.4
|
||||
pyScss-1.3.4-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/1d/4a/221ae7561c8f51c4f28b2b172366ccd0820b14bb947350df82428dfce381/pyScss-1.3.4.tar.gz|pyScss-1.3.4
|
||||
pysendfile-2.0.1-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/cd/3f/4aa268afd0252f06b3b487c296a066a01ddd4222a46b7a3748599c8fc8c3/pysendfile-2.0.1.tar.gz|pysendfile-2.0.1
|
||||
pystache-0.5.4-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/d6/fd/eb8c212053addd941cc90baac307c00ac246ac3fce7166b86434c6eae963/pystache-0.5.4.tar.gz|pystache-0.5.4
|
||||
python_cinderclient-4.3.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/f1/09/760c454c5bf67509d7f8479d583a3e84411f51ec2a1942aea3741a49b090/python_cinderclient-4.3.0-py2.py3-none-any.whl
|
||||
python_consul-1.1.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/3f/d0/59bc5f1c6c4d4b498c41d8ce7052ee9e9d68be19e16038a55252018a6c4d/python_consul-1.1.0-py2.py3-none-any.whl
|
||||
python_editor-1.0.4-py2-none-any.whl|pypi|https://files.pythonhosted.org/packages/55/a0/3c0ba1c10f2ca381645dd46cb7afbb73fddc8de9f957e1f9e726a846eabc/python_editor-1.0.4-py2-none-any.whl
|
||||
python_etcd-0.4.5-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/a1/da/616a4d073642da5dd432e5289b7c1cb0963cc5dde23d1ecb8d726821ab41/python-etcd-0.4.5.tar.gz|python-etcd-0.4.5
|
||||
python_ldap-3.2.0-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/ea/93/596f875e003c770447f4b99267820a0c769dd2dc3ae3ed19afe460fcbad0/python-ldap-3.2.0.tar.gz|python-ldap-3.2.0
|
||||
python_memcached-1.59-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/f5/90/19d3908048f70c120ec66a39e61b92c253e834e6e895cd104ce5e46cbe53/python_memcached-1.59-py2.py3-none-any.whl
|
||||
python_nss-1.0.1-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/6b/29/629098e34951c358b1f04f13a70b3590eb0cf2df817d945bd05c4169d71b/python-nss-1.0.1.tar.bz2|python-nss-1.0.1|fix_setup
|
||||
python_pcre-0.7-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/9d/af/61435bd163f01fe3709fca9b1f79e4978d8089ee671d2e004fc85e10de29/python-pcre-0.7.tar.gz|python-pcre-0.7|fix_setup
|
||||
python_pytun-2.3.0-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/52/a4/a062106c739eac79c8160fcf5779ebc84afc1c38b016ab216ed1e6da69b6/python-pytun-2.3.0.tar.gz|python-pytun-2.3.0|fix_setup
|
||||
python_qpid_proton-0.28.0-cp27-cp27mu-linux_x86_64.whl|zip|https://files.pythonhosted.org/packages/96/35/2c86d844aec1acdfe7778966994aa270fcf03f076df393003bd4fc07dfa9/python-qpid-proton-0.28.0.zip|python-qpid-proton-0.28.0|fix_setup
|
||||
python_string_utils-0.6.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/5d/13/216f2d4a71307f5a4e5782f1f59e6e8e5d6d6c00eaadf9f92aeccfbb900c/python-string-utils-0.6.0.tar.gz|python-string-utils-0.6.0|fix_setup
|
||||
pyudev-0.21.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/bc/a2/31a07829acea8e70a28c247f43fa5d981229ae0f9edfeddedf52de00709b/pyudev-0.21.0.tar.gz|pyudev-0.21.0
|
||||
PyYAML-5.1.2-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/e3/e8/b3212641ee2718d556df0f23f78de8303f068fe29cdaa7a91018849582fe/PyYAML-5.1.2.tar.gz|PyYAML-5.1.2
|
||||
pyzabbix-0.7.5-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/11/ad/24e19d0cf16d05b7ee19f337f02058ee9b760649171865469ccceef83027/pyzabbix-0.7.5.tar.gz|pyzabbix-0.7.5
|
||||
qpid_python-1.36.0.post1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/2a/33/026ac50a29a85d5d54dd7784a98d624f6142cb07ce185ed268ef9bd3b6dc/qpid-python-1.36.0-1.tar.gz|qpid-python-1.36.0-1|fix_setup
|
||||
rcssmin-1.0.6-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/e2/5f/852be8aa80d1c24de9b030cdb6532bc7e7a1c8461554f6edbe14335ba890/rcssmin-1.0.6.tar.gz|rcssmin-1.0.6|fix_setup
|
||||
repoze.lru-0.7-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/12/bc/595a77c4b5e204847fdf19268314ef59c85193a9dc9f83630fc459c0fee5/repoze.lru-0.7.tar.gz|repoze.lru-0.7
|
||||
requests_aws-0.1.8-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/5e/2f/4da17752036c04cf4c9af7a2da0d41ef2205043f1c61008006475aa24b8b/requests-aws-0.1.8.tar.gz|requests-aws-0.1.8
|
||||
restructuredtext_lint-1.3.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/62/76/bd8760de759fb74d7863e6935200af101cb128a7de008741a4e22341d03c/restructuredtext_lint-1.3.0.tar.gz|restructuredtext_lint-1.3.0
|
||||
retrying-1.3.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/44/ef/beae4b4ef80902f22e3af073397f079c96969c69b2c7d52a57ea9ae61c9d/retrying-1.3.3.tar.gz|retrying-1.3.3
|
||||
rfc3986-1.4.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/78/be/7b8b99fd74ff5684225f50dd0e865393d2265656ef3b4ba9eaaaffe622b8/rfc3986-1.4.0-py2.py3-none-any.whl
|
||||
rjsmin-1.1.0-cp27-cp27mu-manylinux1_x86_64.whl|pypi|https://files.pythonhosted.org/packages/c3/8e/079b7cc3a0fc9934ab05d868a00183c7aafd90b5d6138313d98ac2b9f666/rjsmin-1.1.0-cp27-cp27mu-manylinux1_x86_64.whl
|
||||
rtslib_fb-2.1.69-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/fc/1a/77a26207bdad13cc39b93d874b3a1b04e5a0b0332fb716e4d654537bacdb/rtslib-fb-2.1.69.tar.gz|rtslib-fb-2.1.69
|
||||
scandir-1.10.0-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/df/f5/9c052db7bd54d0cbf1bc0bb6554362bba1012d03e5888950a4f5c5dadc4e/scandir-1.10.0.tar.gz|scandir-1.10.0
|
||||
scrypt-0.8.13-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/80/3d/141eb80e754b86f6c25a2ffaf6c3af3acdb65a3e3700829a05ab0c5d965d/scrypt-0.8.13.tar.gz|scrypt-0.8.13
|
||||
SecretStorage-2.3.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/a5/a5/0830cfe34a4cfd0d1c3c8b614ede1edb2aaf999091ac8548dd19cb352e79/SecretStorage-2.3.1.tar.gz|SecretStorage-2.3.1
|
||||
setproctitle-1.1.10-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/5a/0d/dc0d2234aacba6cf1a729964383e3452c52096dc695581248b548786f2b3/setproctitle-1.1.10.tar.gz|setproctitle-1.1.10
|
||||
simplegeneric-0.8.1-py2-none-any.whl|zip|https://files.pythonhosted.org/packages/3d/57/4d9c9e3ae9a255cd4e1106bb57e24056d3d0709fc01b2e3e345898e49d5b/simplegeneric-0.8.1.zip|simplegeneric-0.8.1
|
||||
simplejson-3.16.0-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/e3/24/c35fb1c1c315fc0fffe61ea00d3f88e85469004713dab488dee4f35b0aff/simplejson-3.16.0.tar.gz|simplejson-3.16.0
|
||||
skydive_client-0.5.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/e4/68/78a246619d9b16bb226562c155f18f798283f86db8f01a89c30b97ac7a27/skydive-client-0.5.0.tar.gz|skydive-client-0.5.0
|
||||
smmap2-2.0.5-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/55/d2/866d45e3a121ee15a1dc013824d58072fd5c7799c9c34d01378eb262ca8f/smmap2-2.0.5-py2.py3-none-any.whl
|
||||
sphinxcontrib_fulltoc-1.2.0-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/8e/a6/d1297db9b75650681e5429e92e13df139ee6b64303ff1b2eea4ebd32c0a9/sphinxcontrib-fulltoc-1.2.0.tar.gz|sphinxcontrib-fulltoc-1.2.0
|
||||
sphinxcontrib_pecanwsme-0.10.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/75/2b/105d07f47485ecf774cd80b881c29e148182b72a3a60596abdd016c87fce/sphinxcontrib-pecanwsme-0.10.0.tar.gz|sphinxcontrib-pecanwsme-0.10.0
|
||||
SQLAlchemy-1.3.8-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/fc/49/82d64d705ced344ba458197dadab30cfa745f9650ee22260ac2b275d288c/SQLAlchemy-1.3.8.tar.gz|SQLAlchemy-1.3.8
|
||||
SQLAlchemy_Utils-0.34.2-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/45/61/3bdd2931e86253fa7df6445a26929fbcc9bc43ad6b27a10f991eb6ecde75/SQLAlchemy-Utils-0.34.2.tar.gz|SQLAlchemy-Utils-0.34.2
|
||||
stomp.py-4.1.22-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/52/7e/22ca617f61e0d5904e06c1ebd5d453adf30099526c0b64dca8d74fff0cad/stomp.py-4.1.22.tar.gz|stomp.py-4.1.22
|
||||
subprocess32-3.5.4-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/32/c8/564be4d12629b912ea431f1a50eb8b3b9d00f1a0b1ceff17f266be190007/subprocess32-3.5.4.tar.gz|subprocess32-3.5.4
|
||||
suds_jurko-0.6-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/bd/6f/54fbf0999a606680d27c69b1ad12dfff62768ecb9fe48524cebda6eb4423/suds-jurko-0.6.tar.bz2|suds-jurko-0.6
|
||||
systemd_python-234-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/e8/a8/00ba0f605837a8f69523e6c3a4fb14675a6430c163f836540129c50b3aef/systemd-python-234.tar.gz|systemd-python-234|fix_setup
|
||||
sysv_ipc-1.0.0-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/08/7d/a862f3045fa191eeece23650725273f2ccaf9ac6b95443dfe4cac6508638/sysv_ipc-1.0.0.tar.gz|sysv_ipc-1.0.0|fix_setup
|
||||
Tempita-0.5.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/56/c8/8ed6eee83dbddf7b0fc64dd5d4454bc05e6ccaafff47991f73f2894d9ff4/Tempita-0.5.2.tar.gz|Tempita-0.5.2
|
||||
termcolor-1.1.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz|termcolor-1.1.0|fix_setup
|
||||
testrepository-0.0.20-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/0c/85/f495b58b2b0ac907def07385219e9747b75840fa01280f228546a4a5ad7f/testrepository-0.0.20.tar.gz|testrepository-0.0.20
|
||||
thrift-0.11.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/c6/b4/510617906f8e0c5660e7d96fbc5585113f83ad547a3989b80297ac72a74c/thrift-0.11.0.tar.gz|thrift-0.11.0
|
||||
thriftpy-0.3.9-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/f4/19/cca118cf7d2087310dbc8bd70dc7df0c1320f2652873a93d06d7ba356d4a/thriftpy-0.3.9.tar.gz|thriftpy-0.3.9
|
||||
thriftpy2-0.4.8-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/2c/23/57b00b3d5d3d0ae66d79844a39d3c3b92dde3063c901036808602137d3ab/thriftpy2-0.4.8.tar.gz|thriftpy2-0.4.8
|
||||
tinyrpc-1.0.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/21/7a/ff1a74256e1bcc04fbaa414c13a2bb79a29ac9918b25f2238592b991e3bc/tinyrpc-1.0.3.tar.gz|tinyrpc-1.0.3
|
||||
tornado-4.5.3-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/e3/7b/e29ab3d51c8df66922fea216e2bddfcb6430fb29620e5165b16a216e0d3c/tornado-4.5.3.tar.gz|tornado-4.5.3
|
||||
trollius-2.2.post1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/0b/31/356ae13ad4df58f963e9954d55118f6cffdb3a903c1547973ad7bc347fb9/trollius-2.2.post1.tar.gz|trollius-2.2.post1
|
||||
ujson-1.35-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/16/c4/79f3409bc710559015464e5f49b9879430d8f87498ecdc335899732e5377/ujson-1.35.tar.gz|ujson-1.35
|
||||
unicodecsv-0.14.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/6f/a4/691ab63b17505a26096608cc309960b5a6bdf39e4ba1a793d5f9b1a53270/unicodecsv-0.14.1.tar.gz|unicodecsv-0.14.1
|
||||
uWSGI-2.0.17.1-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/a2/c9/a2d5737f63cd9df4317a4acc15d1ddf4952e28398601d8d7d706c16381e0/uwsgi-2.0.17.1.tar.gz|uwsgi-2.0.17.1
|
||||
voluptuous-0.11.7-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/24/3b/fe531688c0d9e057fccc0bc9430c0a3d4b90e0d2f015326e659c2944e328/voluptuous-0.11.7.tar.gz|voluptuous-0.11.7
|
||||
warlock-1.3.3-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/c2/36/178b26a338cd6d30523246da4721b1114306f588deb813f3f503052825ee/warlock-1.3.3.tar.gz|warlock-1.3.3
|
||||
weakrefmethod-1.0.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/99/82/73a21e3eab9a1ff76d12375f7301fba5c6325b9598eed0ae5b0cf5243656/weakrefmethod-1.0.3.tar.gz|weakrefmethod-1.0.3
|
||||
websockify-0.9.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/c4/5b/16ec1e9f4fc536846d95a01a77d97da12f8042ca5cf83cdf3dd0442e881c/websockify-0.9.0.tar.gz|websockify-0.9.0
|
||||
whereto-0.4.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/80/83/371a699ce90257608592dadca400a7ecd9a2db6137d78f6f433c7c5e3197/whereto-0.4.0.tar.gz|whereto-0.4.0
|
||||
wrapt-1.11.2-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/23/84/323c2415280bc4fc880ac5050dddfb3c8062c2552b34c2e512eb4aa68f79/wrapt-1.11.2.tar.gz|wrapt-1.11.2|fix_setup
|
||||
ws4py-0.5.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/53/20/4019a739b2eefe9282d3822ef6a225250af964b117356971bd55e274193c/ws4py-0.5.1.tar.gz|ws4py-0.5.1
|
||||
WSME-0.9.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/d1/b6/8027248bfca3ce192bc54d46fcda4324c86c8beabe344cbb80fb57a6c868/WSME-0.9.3.tar.gz|WSME-0.9.3
|
||||
xattr-0.9.6-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/60/80/a1f35bfd3c7ffb78791b2a6a15c233584a102a20547fd96d48933ec453e7/xattr-0.9.6.tar.gz|xattr-0.9.6
|
||||
XStatic-1.0.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/36/78/c0ffaf14216517a14d3daa67ff24fbb60b4703e95ce1059a48fd508e6b8c/XStatic-1.0.2.tar.gz|XStatic-1.0.2
|
||||
XStatic_Angular_FileUpload-12.0.4.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/4d/fd/c3051915d2f12e8fa11f59c01162ce85e38eca15d9ec73a3d7b271b49744/XStatic-Angular-FileUpload-12.0.4.0.tar.gz|XStatic-Angular-FileUpload-12.0.4.0
|
||||
XStatic_Angular_lrdragndrop-1.0.2.4-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/80/ea/ffdde05892eabe468f22403f75299cf5d991f0af4f1400bebbf3af04bc9a/XStatic_Angular_lrdragndrop-1.0.2.4-py2.py3-none-any.whl
|
||||
XStatic_Angular_Schema_Form-0.8.13.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/57/71/ceea2c0a72e2ee2d316d6ab1c06b21faa9f5cbc4b36a4127d7847b7079c5/XStatic-Angular-Schema-Form-0.8.13.0.tar.gz|XStatic-Angular-Schema-Form-0.8.13.0
|
||||
XStatic_Bootstrap_Datepicker-1.3.1.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/91/4f/832f14478e714815bb3d44d01dfe8dbe19ccf9f823e0bc7ac1a8cf7fa6b3/XStatic-Bootstrap-Datepicker-1.3.1.0.tar.gz|XStatic-Bootstrap-Datepicker-1.3.1.0
|
||||
XStatic_Hogan-2.0.0.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/21/fe/37d5c8247f24738e7e368d27ebf945de1ea29fbc3112ac5e75b1b7f1d0c9/XStatic-Hogan-2.0.0.2.tar.gz|XStatic-Hogan-2.0.0.2
|
||||
XStatic_Jasmine-2.4.1.2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/05/43/ceac7def3b6eaf82b6f593e3db2b03a9693a7b002b569e664e382aecddbc/XStatic_Jasmine-2.4.1.2-py2.py3-none-any.whl
|
||||
XStatic_jQuery-1.12.4.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/67/f1/c18c14fc4aab386e4aba587c5d10c268de222c75bf5e271b6f68a2ea6e77/XStatic-jQuery-1.12.4.1.tar.gz|XStatic-jQuery-1.12.4.1
|
||||
XStatic_JQuery_Migrate-1.2.1.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/7c/fc/edbfcb4574ec3cf0b68a0613dd1904c9139e3bf6dede792d2e7edcf13023/XStatic-JQuery-Migrate-1.2.1.1.tar.gz|XStatic-JQuery-Migrate-1.2.1.1
|
||||
XStatic_JQuery.quicksearch-2.0.3.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/ea/ab/f934d06a78ce2c6bb594e9a426f6966b3192c4c279467c9898be6fd284d3/XStatic-JQuery.quicksearch-2.0.3.1.tar.gz|XStatic-JQuery.quicksearch-2.0.3.1
|
||||
XStatic_JQuery.TableSorter-2.14.5.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/c1/6c/d6b0807906af90536e793a3b23cca557869fa5a27156639f0029de8b1f1f/XStatic-JQuery.TableSorter-2.14.5.1.tar.gz|XStatic-JQuery.TableSorter-2.14.5.1
|
||||
XStatic_jquery_ui-1.12.1.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/e6/5a/883b22dad1d3e01708312d71c5bc63d543d66cef9b448c1cf85379d64fb3/XStatic-jquery-ui-1.12.1.1.tar.gz|XStatic-jquery-ui-1.12.1.1
|
||||
XStatic_mdi-1.6.50.2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/73/49/13b9f7ce9fbcc7fabe086b7ac1b056118cbd4c9abf185e01cc4a54631136/XStatic_mdi-1.6.50.2-py2.py3-none-any.whl
|
||||
XStatic_objectpath-1.2.1.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/23/6c/56de25d9d3be430e7de2fcf4baac10279dad78d7b16cbda339cf014c2fe5/XStatic-objectpath-1.2.1.0.tar.gz|XStatic-objectpath-1.2.1.0
|
||||
XStatic_Rickshaw-1.5.0.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/45/c6/39aa4d02ea96b04ff372d1e3558587155790b1c5444855a97b89c255be38/XStatic-Rickshaw-1.5.0.0.tar.gz|XStatic-Rickshaw-1.5.0.0
|
||||
XStatic_Spin-1.2.5.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/af/21/cca7f0b7abfe008cdd03dd4c4255aad3087f4a892a010c0f6f1452d7344b/XStatic-Spin-1.2.5.2.tar.gz|XStatic-Spin-1.2.5.2
|
||||
XStatic_term.js-0.0.7.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/63/7a/7bfec29f5f28fdda7170ebbbb2204aeb1d33d6050f3476a807590de06434/XStatic-term.js-0.0.7.0.tar.gz|XStatic-term.js-0.0.7.0
|
||||
XStatic_tv4-1.2.7.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/2b/26/b07115af27b339c861b8c9a775a621524b421c898e26e015880dfb888c49/XStatic-tv4-1.2.7.0.tar.gz|XStatic-tv4-1.2.7.0
|
||||
xvfbwrapper-0.2.9-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/57/b6/4920eabda9b49630dea58745e79f9919aba6408d460afe758bf6e9b21a04/xvfbwrapper-0.2.9.tar.gz|xvfbwrapper-0.2.9
|
||||
yappi-1.0-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/d2/92/7cd637a19fa2a10c0e55a44f8b36bcb83f0e1943ba8f1fb5edb15c819f2e/yappi-1.0.tar.gz|yappi-1.0
|
||||
zerorpc-0.6.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/73/ff/d61ef9f5d10e671421d1368e87d3525325483ebd7da262b1d3087443662b/zerorpc-0.6.3.tar.gz|zerorpc-0.6.3
|
||||
zVMCloudConnector-1.4.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/11/92/9f704de9759816e7b9897b9fb41285b421498b4642551b6fbcccd2850008/zVMCloudConnector-1.4.1.tar.gz|zVMCloudConnector-1.4.1
|
@ -1,183 +0,0 @@
|
||||
#
|
||||
# git: wheelname|git|git-source|basedir|branch
|
||||
# tar: wheelname|tar|wget-source|basedir
|
||||
# pypi: wheelname|pypi|wget-source
|
||||
# zip: wheelname|zip|wget-source|basedir
|
||||
#
|
||||
# If fix_setup must be called, add |fix_setup at the end of the line
|
||||
#
|
||||
# See doc/wheels-cfg.md for more info.
|
||||
#
|
||||
abclient-0.2.3-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/49/eb/091b02c1e36d68927adfb746706e2c80f7e7bfb3f16e3cbcfec2632118ab/abclient-0.2.3.tar.gz|abclient-0.2.3
|
||||
alembic-1.4.2-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/60/1e/cabc75a189de0fbb2841d0975243e59bde8b7822bacbb95008ac6fe9ad47/alembic-1.4.2.tar.gz|alembic-1.4.2
|
||||
amqplib-1.0.2-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/75/b7/8c2429bf8d92354a0118614f9a4d15e53bc69ebedce534284111de5a0102/amqplib-1.0.2.tgz|amqplib-1.0.2
|
||||
anyjson-0.3.3-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/c3/4d/d4089e1a3dd25b46bebdb55a992b0797cff657b4477bc32ce28038fdecbc/anyjson-0.3.3.tar.gz|anyjson-0.3.3
|
||||
backports.ssl_match_hostname-3.7.0.1-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/ff/2b/8265224812912bc5b7a607c44bf7b027554e1b9775e9ee0de8032e3de4b2/backports.ssl_match_hostname-3.7.0.1.tar.gz|backports.ssl_match_hostname-3.7.0.1|fix_setup
|
||||
bottle-0.12.18-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/e9/39/2bf3a1fd963e749cdbe5036a184eda8c37d8af25d1297d94b8b7aeec17c4/bottle-0.12.18-py3-none-any.whl
|
||||
cassandra_driver-3.23.0-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/90/d7/d68083117bf50941870a795150f3261c5270e74c2d57ca3af0bd8423ed74/cassandra-driver-3.23.0.tar.gz|cassandra-driver-3.23.0
|
||||
cmd2-0.8.9-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/e9/40/a71caa2aaff10c73612a7106e2d35f693e85b8cf6e37ab0774274bca3cf9/cmd2-0.8.9-py2.py3-none-any.whl
|
||||
construct-2.8.22-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/e5/c6/3e3aeef38bb0c27364af3d21493d9690c7c3925f298559bca3c48b7c9419/construct-2.8.22.tar.gz|construct-2.8.22
|
||||
crc16-0.1.1-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/a6/e0/70a44c4385f2b33df82e518005aae16b5c1feaf082c73c0acebe3426fc0a/crc16-0.1.1.tar.gz|crc16-0.1.1|fix_setup
|
||||
demjson-2.2.4-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/96/67/6db789e2533158963d4af689f961b644ddd9200615b8ce92d6cad695c65a/demjson-2.2.4.tar.gz|demjson-2.2.4|fix_setup
|
||||
django_debreach-2.0.1-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/2a/92/8c363cf5d1ee33d4c3b999b41c127c5cd3c64d4c20aa47bdfb6c386c9309/django_debreach-2.0.1-py3-none-any.whl
|
||||
django_floppyforms-1.8.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/a9/d2/498b883ac309b56b70c26877974bd50927615dd3f6433f5463e2668b1128/django_floppyforms-1.8.0-py2.py3-none-any.whl
|
||||
django_pyscss-2.0.2-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/4b/7f/d771802305184aac6010826f60a0b2ecaa3f57d19ab0e405f0c8db07e809/django-pyscss-2.0.2.tar.gz|django-pyscss-2.0.2
|
||||
docopt-0.6.2-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/a2/55/8f8cab2afd404cf578136ef2cc5dfb50baa1761b68c9da1fb1e4eed343c9/docopt-0.6.2.tar.gz|docopt-0.6.2
|
||||
dogpile.cache-0.9.0-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/ac/6a/9ac405686a94b7f009a20a50070a5786b0e1aedc707b88d40d0c4b51a82e/dogpile.cache-0.9.0.tar.gz|dogpile.cache-0.9.0
|
||||
enum_compat-0.0.3-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/55/ae/467bc4509246283bb59746e21a1a2f5a8aecbef56b1fa6eaca78cd438c8b/enum_compat-0.0.3-py3-none-any.whl
|
||||
etcd3-0.10.0-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/09/f1/93603a26daf7a993a0acbbcfd32afce8b2fdf30a765d5651571ab635969b/etcd3-0.10.0.tar.gz|etcd3-0.10.0
|
||||
exabgp-4.2.6-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/b6/36/7270c8e4b5b0ddba79301f5bbf206ce4b76247957169162b428e2695efa9/exabgp-4.2.6.tar.gz|exabgp-4.2.6
|
||||
flask_keystone-0.2-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/1f/ca/3938de8c5f4a3d1c5dd4278bedb9d31d79816feba4d088293c620a366fb1/flask_keystone-0.2.tar.gz|flask_keystone-0.2
|
||||
flask_oslolog-0.1-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/a7/62/fec02ce761b548b1289680bb1be1aa0bce2b2c4017d5b31bd6c67c78aef9/flask_oslolog-0.1.tar.gz|flask_oslolog-0.1
|
||||
fortiosclient-0.0.3-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/e9/aa/b2c0705d5e52c8d9af35422d940800b49c562758fbdad3179a6fbf6e92f5/fortiosclient-0.0.3.tar.gz|fortiosclient-0.0.3
|
||||
frozendict-1.2-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/4e/55/a12ded2c426a4d2bee73f88304c9c08ebbdbadb82569ebdd6a0c007cfd08/frozendict-1.2.tar.gz|frozendict-1.2
|
||||
future-0.18.2-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/45/0b/38b06fd9b92dc2b68d58b75f900e97884c45bedd2ff83203d933cf5851c9/future-0.18.2.tar.gz|future-0.18.2
|
||||
googleapis_common_protos-1.51.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/05/46/168fd780f594a4d61122f7f3dc0561686084319ad73b4febbf02ae8b32cf/googleapis-common-protos-1.51.0.tar.gz|googleapis-common-protos-1.51.0
|
||||
happybase-1.2.0-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/d1/9c/f5f7bdb5439cda2b7da4e20ac24ec0e2455fd68aade8397f211d2994c39d/happybase-1.2.0.tar.gz|happybase-1.2.0
|
||||
hiredis-1.0.1-cp36-cp36m-manylinux1_x86_64.whl|pypi|https://files.pythonhosted.org/packages/5b/a3/23bc840f0e2baa4aedb41d90b3196fed3ae88ee43ec60059a0c8f31be4b8/hiredis-1.0.1-cp36-cp36m-manylinux1_x86_64.whl
|
||||
httplib2-0.17.2-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/dd/a6/e3d8ae2c5b3a89de9a6b5e1e9396ce41432e08feafe25c37c4dc6b49d79d/httplib2-0.17.2-py3-none-any.whl
|
||||
ifaddr-0.1.6-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/9f/54/d92bda685093ebc70e2057abfa83ef1b3fb0ae2b6357262a3e19dfe96bb8/ifaddr-0.1.6.tar.gz|ifaddr-0.1.6
|
||||
itsdangerous-1.1.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/76/ae/44b03b253d6fade317f32c24d100b3b35c2239807046a4c953c7b89fa49e/itsdangerous-1.1.0-py2.py3-none-any.whl
|
||||
jaeger_client-4.3.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/2b/75/17a937a61135671cebc175ab5c299dc0f7477042469482fd9a6f91262c68/jaeger-client-4.3.0.tar.gz|jaeger-client-4.3.0
|
||||
jsonpath_rw-1.4.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/71/7c/45001b1f19af8c4478489fbae4fc657b21c4c669d7a5a036a86882581d85/jsonpath-rw-1.4.0.tar.gz|jsonpath-rw-1.4.0
|
||||
krest-1.3.1-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/fb/d2/9dbbd3a76f2385041720a0eb51ddab676e688fa8bee8a1489470839616cf/krest-1.3.1.tar.gz|krest-1.3.1
|
||||
libvirt_python-4.7.0-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/ad/d7/251c52f937f1e6c6304c4a2ca088a0cfb9ae139c9be5c476e8351d976b4a/libvirt-python-4.7.0.tar.gz|libvirt-python-4.7.0|fix_setup
|
||||
logutils-0.3.5-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/49/b2/b57450889bf73da26027f8b995fd5fbfab258ec24ef967e4c1892f7cb121/logutils-0.3.5.tar.gz|logutils-0.3.5|fix_setup
|
||||
lz4-0.9.0-cp36-cp36m-linux_x86_64.whl|git|https://github.com/python-lz4/python-lz4|python-lz4|v0.9.0
|
||||
Mako-1.1.2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/50/78/f6ade1e18aebda570eed33b7c534378d9659351cadce2fcbc7b31be5f615/Mako-1.1.2-py2.py3-none-any.whl
|
||||
marathon-0.12.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/41/66/814432693297dfb076958ae5ac781e3a88fd70d335473a57f4f2c6329515/marathon-0.12.0-py2.py3-none-any.whl
|
||||
MarkupSafe-1.1.1-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/b9/2e/64db92e53b86efccfaea71321f597fa2e1b2bd3853d8ce658568f7a13094/MarkupSafe-1.1.1.tar.gz|MarkupSafe-1.1.1
|
||||
migrate-0.3.8-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/ce/31/1a4cbf8dc0536c55f41072e8ea37b3df1e412262dc731c57e5bb099eb9b2/migrate-0.3.8.tar.gz|migrate-0.3.8
|
||||
mox-0.5.3-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/0c/a1/64740c638cc5fae807022368f4141700518ee343b53eb3e90bf3cc15a4d4/mox-0.5.3.tar.gz|mox-0.5.3|fix_setup
|
||||
mpmath-1.1.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/ca/63/3384ebb3b51af9610086b23ea976e6d27d6d97bf140a76a365bd77a3eb32/mpmath-1.1.0.tar.gz|mpmath-1.1.0|fix_setup
|
||||
msgpack_python-0.4.8-cp36-cp36m-linux_x86_64.whl|git|https://github.com/msgpack/msgpack-python.git|msgpack-python|0.4.8
|
||||
munch-2.5.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/cc/ab/85d8da5c9a45e072301beb37ad7f833cd344e04c817d97e0cc75681d248f/munch-2.5.0-py2.py3-none-any.whl
|
||||
ndg_httpsclient-0.5.1-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/fb/67/c2f508c00ed2a6911541494504b7cac16fe0b0473912568df65fd1801132/ndg_httpsclient-0.5.1-py3-none-any.whl
|
||||
netifaces-0.10.9-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/0d/18/fd6e9c71a35b67a73160ec80a49da63d1eed2d2055054cc2995714949132/netifaces-0.10.9.tar.gz|netifaces-0.10.9
|
||||
networking_sfc-8.0.0.0b2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/6a/a8/0e9bdd1f87dfb50682f23a01f590530ec8fa715e51127cf9f58d1905886c/networking_sfc-8.0.0.0b2-py2.py3-none-any.whl
|
||||
networkx-2.2-py2.py3-none-any.whl|zip|https://files.pythonhosted.org/packages/f3/f4/7e20ef40b118478191cec0b58c3192f822cace858c19505c7670961b76b2/networkx-2.2.zip|networkx-2.2
|
||||
neutron_lib-2.3.0-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/83/52/805c061a96efca3c70c91d93fa8f7f555a7f86ba955ab9e4d1b41399459f/neutron_lib-2.3.0-py3-none-any.whl
|
||||
nodeenv-1.3.5-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/08/43/86ff33286c83f7b5e8903c32db01fe122c5e8a9d8dc1067dcaa9be54a033/nodeenv-1.3.5-py2.py3-none-any.whl
|
||||
nose_exclude-0.5.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/63/cf/90c4be56bf11b7bc8801086d9445baf731aa36b8e8fc5791731e8e604dcd/nose-exclude-0.5.0.tar.gz|nose-exclude-0.5.0
|
||||
nosehtmloutput-0.0.7-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/e0/5d/2bb521a8ccb0222bd94ed557645955d95ba6798df6b3b4bdc2c31dec4f7c/nosehtmloutput-0.0.7-py2.py3-none-any.whl
|
||||
openshift-0.8.6-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/73/ed/c92c0ba23b6c4c8e5542151a1b89cb8ff01f68a72fe68f6c95a28d885ebe/openshift-0.8.6.tar.gz|openshift-0.8.6
|
||||
openstack.nose_plugin-0.11-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/bc/83/e7c9b9297e1a501d2c2617f98d6176199570e8ee32f0e72669c8852c6c81/openstack.nose_plugin-0.11.tar.gz|openstack.nose_plugin-0.11
|
||||
opentracing-2.3.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/e4/a8/df5285f42cd07782409d0ae835785fae6e2a0f7e8b0036ea302f1422fd25/opentracing-2.3.0.tar.gz|opentracing-2.3.0
|
||||
ovs-2.11.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/81/06/387b2159ac073de95e484aa6e2f108a232cd906e350307168843061f899f/ovs-2.11.0.tar.gz|ovs-2.11.0
|
||||
panko-5.0.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/a9/89/d666e0889d869e41c9b7f87a0a34858b2520782b82e025da84c98e0db8f6/panko-5.0.0.tar.gz|panko-5.0.0
|
||||
pathlib-1.0.1-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/ac/aa/9b065a76b9af472437a0059f77e8f962fe350438b927cb80184c32f075eb/pathlib-1.0.1.tar.gz|pathlib-1.0.1|fix_setup
|
||||
pecan-1.3.3-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/93/98/889d7615595e894f4f7e4c17d4008c822c8e39e650c8ab390cc6c39b99c4/pecan-1.3.3.tar.gz|pecan-1.3.3
|
||||
pifpaf-2.4.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/66/12/ed1533c0b31647ea9fb879b5ad239336ad98628227d0b90d3c7157ffb3fb/pifpaf-2.4.0-py2.py3-none-any.whl
|
||||
pika_pool-0.1.3-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/ec/48/50c8f02a3eef4cb824bec50661ec1713040402cc1b2a38954dc977a59c23/pika-pool-0.1.3.tar.gz|pika-pool-0.1.3
|
||||
Pint-0.9-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/15/9d/bf177ebbc57d25e9e296addc14a1303d1e34d7964af5df428a8332349c42/Pint-0.9-py2.py3-none-any.whl
|
||||
ply-3.11-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/a3/58/35da89ee790598a0700ea49b2a66594140f44dec458c07e8e3d4979137fc/ply-3.11-py2.py3-none-any.whl
|
||||
positional-1.1.2-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/8c/16/64a4fa0967c486380468dca18867d22ac1c17bba06349e31ace77c7757f7/positional-1.1.2.tar.gz|positional-1.1.2
|
||||
prettytable-0.7.2-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/e0/a1/36203205f77ccf98f3c6cf17cf068c972e6458d7e58509ca66da949ca347/prettytable-0.7.2.tar.gz|prettytable-0.7.2
|
||||
proboscis-1.2.6.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/3c/c8/c187818ab8d0faecdc3c16c1e0b2e522f3b38570f0fb91dcae21662019d0/proboscis-1.2.6.0.tar.gz|proboscis-1.2.6.0
|
||||
psutil-5.7.0-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/c4/b8/3512f0e93e0db23a71d82485ba256071ebef99b227351f0f5540f744af41/psutil-5.7.0.tar.gz|psutil-5.7.0
|
||||
psycopg2-2.8.5-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/a8/8f/1c5690eebf148d1d1554fc00ccf9101e134636553dbb75bdfef4f85d7647/psycopg2-2.8.5.tar.gz|psycopg2-2.8.5
|
||||
PuLP-2.1-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/41/34/757c88c320f80ce602199603afe63aed1e0bc11180b9a9fb6018fb2ce7ef/PuLP-2.1-py3-none-any.whl
|
||||
pycparser-2.20-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/ae/e7/d9c3a176ca4b02024debf82342dab36efadfc5776f9c8db077e8f6e71821/pycparser-2.20-py2.py3-none-any.whl
|
||||
pycrypto-2.6.1-cp36-cp36m-linux_x86_64.whl|git|https://github.com/dlitz/pycrypto|pycrypto|v2.6.1|fix_setup
|
||||
pycryptodomex-3.9.7-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/7f/3c/80cfaec41c3a9d0f524fe29bca9ab22d02ac84b5bfd6e22ade97d405bdba/pycryptodomex-3.9.7.tar.gz|pycryptodomex-3.9.7
|
||||
pydot-1.4.1-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/33/d1/b1479a770f66d962f545c2101630ce1d5592d90cb4f083d38862e93d16d2/pydot-1.4.1-py2.py3-none-any.whl
|
||||
pydotplus-2.0.2-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/60/bf/62567830b700d9f6930e9ab6831d6ba256f7b0b730acb37278b0ccdffacf/pydotplus-2.0.2.tar.gz|pydotplus-2.0.2
|
||||
pyeclib-1.6.0-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/aa/d6/ca6bba5e66fc7a9810a995b17a3675492da2bec405806d8ac3db18cfd93b/pyeclib-1.6.0.tar.gz|pyeclib-1.6.0|fix_setup
|
||||
pyinotify-0.9.6-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/e3/c0/fd5b18dde17c1249658521f69598f3252f11d9d7a980c5be8619970646e1/pyinotify-0.9.6.tar.gz|pyinotify-0.9.6
|
||||
pykerberos-1.2.1-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/9a/b8/1ec56b6fa8a2e2a81420bd3d90e70b59fc83f6b857fb2c2c37accddc8be3/pykerberos-1.2.1.tar.gz|pykerberos-1.2.1
|
||||
PyKMIP-0.10.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/f8/3e/e343bb9c2feb2a793affd052cb0da62326a021457a07d59251f771b523e7/PyKMIP-0.10.0.tar.gz|PyKMIP-0.10.0
|
||||
pylxd-2.2.10-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/49/9a/eba58646721ffbff40dc41571b13c9528fdc4e26a82252318c997cdbe26a/pylxd-2.2.10.tar.gz|pylxd-2.2.10
|
||||
pyngus-2.3.0-py3-none-any.whl|zip|https://files.pythonhosted.org/packages/58/b1/336b8f64e7e4efa9b95027af71e02cd4cfacca8f919345badb852381878a/pyngus-2.3.0.zip|pyngus-2.3.0
|
||||
pyperclip-1.8.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/f6/5b/55866e1cde0f86f5eec59dab5de8a66628cb0d53da74b8dbc15ad8dabda3/pyperclip-1.8.0.tar.gz|pyperclip-1.8.0
|
||||
pyroute2-0.5.11-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/00/5c/600b3fa746da0c857e1775b9cf0861eb8aaaec67c42352bb82f90c77e6fc/pyroute2-0.5.11.tar.gz|pyroute2-0.5.11
|
||||
pyrsistent-0.16.0-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/9f/0d/cbca4d0bbc5671822a59f270e4ce3f2195f8a899c97d0d5abb81b191efb5/pyrsistent-0.16.0.tar.gz|pyrsistent-0.16.0
|
||||
pyScss-1.3.7-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/e6/0d/6b52a5211121b870cc0c4c908b689fd460630b01a9e501a534db78e67bad/pyScss-1.3.7.tar.gz|pyScss-1.3.7
|
||||
pysendfile-2.0.1-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/cd/3f/4aa268afd0252f06b3b487c296a066a01ddd4222a46b7a3748599c8fc8c3/pysendfile-2.0.1.tar.gz|pysendfile-2.0.1
|
||||
pystache-0.5.4-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/d6/fd/eb8c212053addd941cc90baac307c00ac246ac3fce7166b86434c6eae963/pystache-0.5.4.tar.gz|pystache-0.5.4
|
||||
python_barbicanclient-4.10.0-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/93/bf/b254f88d3c1a50212609d44ff8798e64f11df28011ead93161a2390cd4a2/python_barbicanclient-4.10.0-py3-none-any.whl
|
||||
python_cinderclient-7.0.0-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/64/8f/c675ad3f12d52739948b299607285a56d0a1e7d1bcc72ceed1f625a38fff/python_cinderclient-7.0.0-py3-none-any.whl
|
||||
python_consul-1.1.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/3f/d0/59bc5f1c6c4d4b498c41d8ce7052ee9e9d68be19e16038a55252018a6c4d/python_consul-1.1.0-py2.py3-none-any.whl
|
||||
python_editor-1.0.4-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/c6/d3/201fc3abe391bbae6606e6f1d598c15d367033332bd54352b12f35513717/python_editor-1.0.4-py3-none-any.whl
|
||||
python_etcd-0.4.5-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/a1/da/616a4d073642da5dd432e5289b7c1cb0963cc5dde23d1ecb8d726821ab41/python-etcd-0.4.5.tar.gz|python-etcd-0.4.5
|
||||
python_json_logger-0.1.11-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/80/9d/1c3393a6067716e04e6fcef95104c8426d262b4adaf18d7aa2470eab028d/python-json-logger-0.1.11.tar.gz|python-json-logger-0.1.11
|
||||
python_ldap-3.2.0-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/ea/93/596f875e003c770447f4b99267820a0c769dd2dc3ae3ed19afe460fcbad0/python-ldap-3.2.0.tar.gz|python-ldap-3.2.0
|
||||
python_memcached-1.59-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/f5/90/19d3908048f70c120ec66a39e61b92c253e834e6e895cd104ce5e46cbe53/python_memcached-1.59-py2.py3-none-any.whl
|
||||
python_neutronclient-7.1.0-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/e2/b9/2680f60f679e3d5099274e966a68d0c45e2387aa53c8754c7f120838aeb4/python_neutronclient-7.1.0-py3-none-any.whl
|
||||
python_nss-1.0.1-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/6b/29/629098e34951c358b1f04f13a70b3590eb0cf2df817d945bd05c4169d71b/python-nss-1.0.1.tar.bz2|python-nss-1.0.1|fix_setup
|
||||
python_pcre-0.7-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/9d/af/61435bd163f01fe3709fca9b1f79e4978d8089ee671d2e004fc85e10de29/python-pcre-0.7.tar.gz|python-pcre-0.7|fix_setup
|
||||
python_pytun-2.3.0-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/52/a4/a062106c739eac79c8160fcf5779ebc84afc1c38b016ab216ed1e6da69b6/python-pytun-2.3.0.tar.gz|python-pytun-2.3.0|fix_setup
|
||||
python_string_utils-0.6.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/5d/13/216f2d4a71307f5a4e5782f1f59e6e8e5d6d6c00eaadf9f92aeccfbb900c/python-string-utils-0.6.0.tar.gz|python-string-utils-0.6.0|fix_setup
|
||||
pyudev-0.22.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/72/c8/4660d815a79b1d42c409012aaa10ebd6b07a47529b4cb6880f27a24bd646/pyudev-0.22.0.tar.gz|pyudev-0.22.0
|
||||
PyYAML-5.3.1-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/64/c2/b80047c7ac2478f9501676c988a5411ed5572f35d1beff9cae07d321512c/PyYAML-5.3.1.tar.gz|PyYAML-5.3.1
|
||||
pyzabbix-0.7.5-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/11/ad/24e19d0cf16d05b7ee19f337f02058ee9b760649171865469ccceef83027/pyzabbix-0.7.5.tar.gz|pyzabbix-0.7.5
|
||||
rcssmin-1.0.6-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/e2/5f/852be8aa80d1c24de9b030cdb6532bc7e7a1c8461554f6edbe14335ba890/rcssmin-1.0.6.tar.gz|rcssmin-1.0.6|fix_setup
|
||||
repoze.lru-0.7-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/12/bc/595a77c4b5e204847fdf19268314ef59c85193a9dc9f83630fc459c0fee5/repoze.lru-0.7.tar.gz|repoze.lru-0.7
|
||||
requests_aws-0.1.8-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/5e/2f/4da17752036c04cf4c9af7a2da0d41ef2205043f1c61008006475aa24b8b/requests-aws-0.1.8.tar.gz|requests-aws-0.1.8
|
||||
restructuredtext_lint-1.3.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/62/76/bd8760de759fb74d7863e6935200af101cb128a7de008741a4e22341d03c/restructuredtext_lint-1.3.0.tar.gz|restructuredtext_lint-1.3.0
|
||||
retrying-1.3.3-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/44/ef/beae4b4ef80902f22e3af073397f079c96969c69b2c7d52a57ea9ae61c9d/retrying-1.3.3.tar.gz|retrying-1.3.3
|
||||
rfc3986-1.4.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/78/be/7b8b99fd74ff5684225f50dd0e865393d2265656ef3b4ba9eaaaffe622b8/rfc3986-1.4.0-py2.py3-none-any.whl
|
||||
rjsmin-1.1.0-cp36-cp36m-manylinux1_x86_64.whl|pypi|https://files.pythonhosted.org/packages/62/ee/574b170bbe7a059314e7239305cb829379232a408901585019e012e71170/rjsmin-1.1.0-cp36-cp36m-manylinux1_x86_64.whl
|
||||
rtslib_fb-2.1.71-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/9e/1b/c26bc038888b1e6042d35ec97599cef05181fb6a7a7ecdbb0c041c3f50ea/rtslib-fb-2.1.71.tar.gz|rtslib-fb-2.1.71
|
||||
scandir-1.10.0-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/df/f5/9c052db7bd54d0cbf1bc0bb6554362bba1012d03e5888950a4f5c5dadc4e/scandir-1.10.0.tar.gz|scandir-1.10.0
|
||||
scrypt-0.8.13-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/80/3d/141eb80e754b86f6c25a2ffaf6c3af3acdb65a3e3700829a05ab0c5d965d/scrypt-0.8.13.tar.gz|scrypt-0.8.13
|
||||
SecretStorage-2.3.1-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/a5/a5/0830cfe34a4cfd0d1c3c8b614ede1edb2aaf999091ac8548dd19cb352e79/SecretStorage-2.3.1.tar.gz|SecretStorage-2.3.1
|
||||
setproctitle-1.1.10-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/5a/0d/dc0d2234aacba6cf1a729964383e3452c52096dc695581248b548786f2b3/setproctitle-1.1.10.tar.gz|setproctitle-1.1.10
|
||||
simplegeneric-0.8.1-py3-none-any.whl|zip|https://files.pythonhosted.org/packages/3d/57/4d9c9e3ae9a255cd4e1106bb57e24056d3d0709fc01b2e3e345898e49d5b/simplegeneric-0.8.1.zip|simplegeneric-0.8.1
|
||||
simplejson-3.17.0-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/98/87/a7b98aa9256c8843f92878966dc3d8d914c14aad97e2c5ce4798d5743e07/simplejson-3.17.0.tar.gz|simplejson-3.17.0
|
||||
skydive_client-0.7.0-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/98/86/62925511c6282add4e339639fc5a9e22fd0dc95783b7627fd56bf45a32bf/skydive_client-0.7.0-py3-none-any.whl
|
||||
smmap-3.0.4-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/b0/9a/4d409a6234eb940e6a78dfdfc66156e7522262f5f2fecca07dc55915952d/smmap-3.0.4-py2.py3-none-any.whl
|
||||
sphinxcontrib_fulltoc-1.2.0-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/8e/a6/d1297db9b75650681e5429e92e13df139ee6b64303ff1b2eea4ebd32c0a9/sphinxcontrib-fulltoc-1.2.0.tar.gz|sphinxcontrib-fulltoc-1.2.0
|
||||
sphinxcontrib_pecanwsme-0.10.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/75/2b/105d07f47485ecf774cd80b881c29e148182b72a3a60596abdd016c87fce/sphinxcontrib-pecanwsme-0.10.0.tar.gz|sphinxcontrib-pecanwsme-0.10.0
|
||||
SQLAlchemy-1.3.16-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/7f/4b/adfb1f03da7f50db054a5b728d32dbfae8937754cfa159efa0216a3758d1/SQLAlchemy-1.3.16.tar.gz|SQLAlchemy-1.3.16
|
||||
SQLAlchemy_Utils-0.36.3-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/aa/24/68937e9b5c757f62795467e2f02a8f463a3a1fd3d08bd32a6b0583ba3dbf/SQLAlchemy-Utils-0.36.3.tar.gz|SQLAlchemy-Utils-0.36.3
|
||||
stomp.py-6.0.0-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/d7/a0/a67e46ec1e63f2e78497e7331092eeb2ce4b69738d80a8210122e7a000a9/stomp.py-6.0.0-py3-none-any.whl
|
||||
subprocess32-3.5.4-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/32/c8/564be4d12629b912ea431f1a50eb8b3b9d00f1a0b1ceff17f266be190007/subprocess32-3.5.4.tar.gz|subprocess32-3.5.4
|
||||
suds_jurko-0.6-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/bd/6f/54fbf0999a606680d27c69b1ad12dfff62768ecb9fe48524cebda6eb4423/suds-jurko-0.6.tar.bz2|suds-jurko-0.6
|
||||
systemd_python-234-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/e8/a8/00ba0f605837a8f69523e6c3a4fb14675a6430c163f836540129c50b3aef/systemd-python-234.tar.gz|systemd-python-234|fix_setup
|
||||
sysv_ipc-1.0.1-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/57/8a/9bbb064566320cd66c6e32c35db76d43932d7b94348f0c4c1e74d03ec261/sysv_ipc-1.0.1.tar.gz|sysv_ipc-1.0.1|fix_setup
|
||||
tabulate-0.8.7-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/c4/f4/770ae9385990f5a19a91431163d262182d3203662ea2b5739d0fcfc080f1/tabulate-0.8.7-py3-none-any.whl
|
||||
tempest-24.0.0-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/f0/eb/d3fb2cdb72c20caa7a4e0af2c60176ce82e120e99ce7e5a62a386faae89c/tempest-24.0.0-py3-none-any.whl
|
||||
Tempita-0.5.2-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/56/c8/8ed6eee83dbddf7b0fc64dd5d4454bc05e6ccaafff47991f73f2894d9ff4/Tempita-0.5.2.tar.gz|Tempita-0.5.2
|
||||
termcolor-1.1.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz|termcolor-1.1.0|fix_setup
|
||||
testrepository-0.0.20-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/0c/85/f495b58b2b0ac907def07385219e9747b75840fa01280f228546a4a5ad7f/testrepository-0.0.20.tar.gz|testrepository-0.0.20
|
||||
thrift-0.13.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/97/1e/3284d19d7be99305eda145b8aa46b0c33244e4a496ec66440dac19f8274d/thrift-0.13.0.tar.gz|thrift-0.13.0
|
||||
thriftpy-0.3.9-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/f4/19/cca118cf7d2087310dbc8bd70dc7df0c1320f2652873a93d06d7ba356d4a/thriftpy-0.3.9.tar.gz|thriftpy-0.3.9
|
||||
thriftpy2-0.4.11-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/a9/f0/9bf08e6b5983aa6a6103818da21eadfaea1ad99ec9882be3e75a30e8e9ff/thriftpy2-0.4.11.tar.gz|thriftpy2-0.4.11
|
||||
tinyrpc-1.0.4-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/9d/91/c639ba014aada92446516c5fc4b04f2cee3539ab2d0758a6a87a6da973cb/tinyrpc-1.0.4.tar.gz|tinyrpc-1.0.4
|
||||
tornado-6.0.4-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/95/84/119a46d494f008969bf0c775cb2c6b3579d3c4cc1bb1b41a022aa93ee242/tornado-6.0.4.tar.gz|tornado-6.0.4
|
||||
trollius-2.2.post1-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/0b/31/356ae13ad4df58f963e9954d55118f6cffdb3a903c1547973ad7bc347fb9/trollius-2.2.post1.tar.gz|trollius-2.2.post1
|
||||
ujson-2.0.3-cp36-cp36m-manylinux1_x86_64.whl|pypi|https://files.pythonhosted.org/packages/a8/e4/a79c57e22d6d09bbeb5e8febb8cfa0fe10ede69eed9c3458d3ec99014e20/ujson-2.0.3-cp36-cp36m-manylinux1_x86_64.whl
|
||||
unicodecsv-0.14.1-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/6f/a4/691ab63b17505a26096608cc309960b5a6bdf39e4ba1a793d5f9b1a53270/unicodecsv-0.14.1.tar.gz|unicodecsv-0.14.1
|
||||
uWSGI-2.0.17.1-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/a2/c9/a2d5737f63cd9df4317a4acc15d1ddf4952e28398601d8d7d706c16381e0/uwsgi-2.0.17.1.tar.gz|uwsgi-2.0.17.1
|
||||
voluptuous-0.11.7-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/24/3b/fe531688c0d9e057fccc0bc9430c0a3d4b90e0d2f015326e659c2944e328/voluptuous-0.11.7.tar.gz|voluptuous-0.11.7
|
||||
warlock-1.3.3-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/c2/36/178b26a338cd6d30523246da4721b1114306f588deb813f3f503052825ee/warlock-1.3.3.tar.gz|warlock-1.3.3
|
||||
weakrefmethod-1.0.3-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/99/82/73a21e3eab9a1ff76d12375f7301fba5c6325b9598eed0ae5b0cf5243656/weakrefmethod-1.0.3.tar.gz|weakrefmethod-1.0.3
|
||||
websockify-0.9.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/c4/5b/16ec1e9f4fc536846d95a01a77d97da12f8042ca5cf83cdf3dd0442e881c/websockify-0.9.0.tar.gz|websockify-0.9.0
|
||||
whereto-0.4.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/80/83/371a699ce90257608592dadca400a7ecd9a2db6137d78f6f433c7c5e3197/whereto-0.4.0.tar.gz|whereto-0.4.0
|
||||
wrapt-1.12.1-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/82/f7/e43cefbe88c5fd371f4cf0cf5eb3feccd07515af9fd6cf7dbf1d1793a797/wrapt-1.12.1.tar.gz|wrapt-1.12.1|fix_setup
|
||||
ws4py-0.5.1-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/53/20/4019a739b2eefe9282d3822ef6a225250af964b117356971bd55e274193c/ws4py-0.5.1.tar.gz|ws4py-0.5.1
|
||||
WSME-0.10.0-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/e6/79/8aca55e7f3f21549dba59c276fc990b8d9bbde071fb17e1a968254d1df36/WSME-0.10.0-py3-none-any.whl
|
||||
xattr-0.9.7-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/c1/74/1ff659d6deb1d2d6babb9483171edfa330264ae2cbf005035bb7a77b07d2/xattr-0.9.7.tar.gz|xattr-0.9.7
|
||||
XStatic-1.0.2-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/36/78/c0ffaf14216517a14d3daa67ff24fbb60b4703e95ce1059a48fd508e6b8c/XStatic-1.0.2.tar.gz|XStatic-1.0.2
|
||||
XStatic_Angular_FileUpload-12.0.4.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/4d/fd/c3051915d2f12e8fa11f59c01162ce85e38eca15d9ec73a3d7b271b49744/XStatic-Angular-FileUpload-12.0.4.0.tar.gz|XStatic-Angular-FileUpload-12.0.4.0
|
||||
XStatic_Angular_lrdragndrop-1.0.2.4-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/80/ea/ffdde05892eabe468f22403f75299cf5d991f0af4f1400bebbf3af04bc9a/XStatic_Angular_lrdragndrop-1.0.2.4-py2.py3-none-any.whl
|
||||
XStatic_Angular_Schema_Form-0.8.13.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/57/71/ceea2c0a72e2ee2d316d6ab1c06b21faa9f5cbc4b36a4127d7847b7079c5/XStatic-Angular-Schema-Form-0.8.13.0.tar.gz|XStatic-Angular-Schema-Form-0.8.13.0
|
||||
XStatic_Bootstrap_Datepicker-1.4.0.0-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/3e/ab/806279e234318feb71c392b51d3a5c537c96e123b8e53c7bdeadf987b174/XStatic_Bootstrap_Datepicker-1.4.0.0-py3-none-any.whl
|
||||
XStatic_Hogan-2.0.0.3-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/6d/a3/822ce8570757a5b258c39f71f357b2276365f0e6d91094e37d706da5bee4/XStatic_Hogan-2.0.0.3-py3-none-any.whl
|
||||
XStatic_Jasmine-2.4.1.2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/05/43/ceac7def3b6eaf82b6f593e3db2b03a9693a7b002b569e664e382aecddbc/XStatic_Jasmine-2.4.1.2-py2.py3-none-any.whl
|
||||
XStatic_jQuery-1.12.4.1-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/67/f1/c18c14fc4aab386e4aba587c5d10c268de222c75bf5e271b6f68a2ea6e77/XStatic-jQuery-1.12.4.1.tar.gz|XStatic-jQuery-1.12.4.1
|
||||
XStatic_JQuery_Migrate-1.2.1.2-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/07/25/a1b3d6ecec8a889132951935cd1daec7b3a3f91bf08bdfb670b7ee5c3785/XStatic_JQuery_Migrate-1.2.1.2-py3-none-any.whl
|
||||
XStatic_JQuery.quicksearch-2.0.3.2-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/41/cf/24665d03c2c5963f0ad476b2af16a59af377735ab89d48d97e178409faf5/XStatic_JQuery.quicksearch-2.0.3.2-py3-none-any.whl
|
||||
XStatic_JQuery.TableSorter-2.14.5.2-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/38/af/f36c9ef0c5c1e12caca2d9f126573cdd7b97bc8d922fabe903964d078181/XStatic_JQuery.TableSorter-2.14.5.2-py3-none-any.whl
|
||||
XStatic_jquery_ui-1.12.1.1-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/e6/5a/883b22dad1d3e01708312d71c5bc63d543d66cef9b448c1cf85379d64fb3/XStatic-jquery-ui-1.12.1.1.tar.gz|XStatic-jquery-ui-1.12.1.1
|
||||
XStatic_mdi-1.6.50.2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/73/49/13b9f7ce9fbcc7fabe086b7ac1b056118cbd4c9abf185e01cc4a54631136/XStatic_mdi-1.6.50.2-py2.py3-none-any.whl
|
||||
XStatic_objectpath-1.2.1.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/23/6c/56de25d9d3be430e7de2fcf4baac10279dad78d7b16cbda339cf014c2fe5/XStatic-objectpath-1.2.1.0.tar.gz|XStatic-objectpath-1.2.1.0
|
||||
XStatic_Rickshaw-1.5.1.0-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/23/cc/20380c36f60a424e655c005ce8be9329cbf41c58c5aa3db773485d1d0dcd/XStatic_Rickshaw-1.5.1.0-py3-none-any.whl
|
||||
XStatic_Spin-1.2.5.3-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/ba/27/c678a4ca0e0a14f5a9edf4c97a89a6c493446b1a00aee78ea03e79333097/XStatic_Spin-1.2.5.3-py3-none-any.whl
|
||||
XStatic_term.js-0.0.7.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/63/7a/7bfec29f5f28fdda7170ebbbb2204aeb1d33d6050f3476a807590de06434/XStatic-term.js-0.0.7.0.tar.gz|XStatic-term.js-0.0.7.0
|
||||
XStatic_tv4-1.2.7.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/2b/26/b07115af27b339c861b8c9a775a621524b421c898e26e015880dfb888c49/XStatic-tv4-1.2.7.0.tar.gz|XStatic-tv4-1.2.7.0
|
||||
XStatic_Font_Awesome-4.7.0.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/b4/ca/24685f91f744cde936294c033685cb4bb3302430f005cc834d86d75b9640/XStatic_Font_Awesome-4.7.0.0-py2.py3-none-any.whl
|
||||
xvfbwrapper-0.2.9-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/57/b6/4920eabda9b49630dea58745e79f9919aba6408d460afe758bf6e9b21a04/xvfbwrapper-0.2.9.tar.gz|xvfbwrapper-0.2.9
|
||||
yappi-1.2.3-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/37/dc/86bbe1822cdc6dbf46c644061bd24217f6a0f056f00162a3697c9bea7575/yappi-1.2.3.tar.gz|yappi-1.2.3
|
||||
yaql-1.1.3-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/77/89/cfee017cf4f2d6f5e7159bbf13fe4131c7dbf20d675b78c9928ae9aa9df8/yaql-1.1.3.tar.gz|yaql-1.1.3
|
||||
zVMCloudConnector-1.4.1-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/11/92/9f704de9759816e7b9897b9fb41285b421498b4642551b6fbcccd2850008/zVMCloudConnector-1.4.1.tar.gz|zVMCloudConnector-1.4.1
|
@ -1,6 +1,6 @@
|
||||
# Syntax of wheels config files
|
||||
|
||||
The files {debian,centos}/{stable,dev}-wheels.cfg list the 3rd-party wheels
|
||||
The files {debian}/{stable,dev}-wheels.cfg list the 3rd-party wheels
|
||||
(ie compiled python modules) to be included in the wheels tarball. Wheels are
|
||||
listed one per line, each with the following "|"-separated fields.
|
||||
|
||||
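For illustration, reading the field layout off the entries above (the example is copied verbatim from the list; the trailing field is an optional flag such as fix_setup, and git entries carry a tag in that position as well):

    logutils-0.3.5-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/49/b2/b57450889bf73da26027f8b995fd5fbfab258ec24ef967e4c1892f7cb121/logutils-0.3.5.tar.gz|logutils-0.3.5|fix_setup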
|
@ -14,7 +14,7 @@ if [ -z "${MY_WORKSPACE}" -o -z "${MY_REPO}" ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
SUPPORTED_OS_ARGS=('centos' 'debian')
|
||||
SUPPORTED_OS_ARGS=('debian')
|
||||
OS=
|
||||
BUILD_STREAM=stable
|
||||
|
||||
@ -24,7 +24,7 @@ Usage:
|
||||
$(basename $0) [ --os <os> ] [ --stream <stable|dev> ]
|
||||
|
||||
Options:
|
||||
--os: Specify base OS (eg. centos)
|
||||
--os: Specify base OS (eg. debian)
|
||||
--stream: Openstack release (default: stable)
|
||||
|
||||
EOF
|
||||
@ -92,47 +92,23 @@ fi
|
||||
|
||||
source ${MY_REPO}/build-tools/git-utils.sh
|
||||
|
||||
# For backward compatibility. Old repo location or new?
|
||||
if [ "${OS}" = "centos" ]; then
|
||||
CENTOS_REPO=${MY_REPO}/centos-repo
|
||||
if [ ! -d ${CENTOS_REPO} ]; then
|
||||
CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
|
||||
if [ ! -d ${CENTOS_REPO} ]; then
|
||||
echo "ERROR: directory ${MY_REPO}/centos-repo not found."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
function get_wheels_files {
|
||||
find ${GIT_LIST} -maxdepth 1 -name "${OS}_${BUILD_STREAM}_wheels.inc"
|
||||
}
|
||||
|
||||
function get_lower_layer_wheels_files {
|
||||
# FIXME: debian: these are in repomgr pod, can't get to them easily
|
||||
if [[ "${OS}" != "centos" ]] ; then
|
||||
if [[ "${OS}" == "debian" ]] ; then
|
||||
echo "$OS: lower layer wheels not supported!" >&2
|
||||
return 1
|
||||
fi
|
||||
find ${CENTOS_REPO}/layer_wheels_inc -maxdepth 1 -name "*_${OS}_${BUILD_STREAM}_wheels.inc"
|
||||
}
|
||||
|
||||
function find_wheel_rpm {
|
||||
local wheel="$1"
|
||||
local repo=
|
||||
|
||||
for repo in ${MY_WORKSPACE}/std/rpmbuild/RPMS \
|
||||
${CENTOS_REPO}/Binary; do
|
||||
if [ -d $repo ]; then
|
||||
find $repo -name "${wheel}-[^-]*-[^-]*[.][^.]*[.]rpm"
|
||||
fi
|
||||
done | head -n 1
|
||||
# find ${DEBIAN_REPO}/layer_wheels_inc -maxdepth 1 -name "*_${OS}_${BUILD_STREAM}_wheels.inc"
|
||||
}
|
||||
|
||||
function find_wheel_deb {
|
||||
local wheel="$1"
|
||||
local repo=
|
||||
# FIXME: debian: we should also scan non-stx RPMs, but they are in repomgr
|
||||
# FIXME: debian: we should also scan non-stx packages, but they are in repomgr
|
||||
# pod and we can't easily get to them.
|
||||
for repo in ${MY_WORKSPACE}/std ; do
|
||||
if [ -d $repo ]; then
|
||||
@ -160,26 +136,6 @@ cd ${BUILD_OUTPUT_PATH}
|
||||
declare -a FAILED
|
||||
for wheel in $(sed -e 's/#.*//' ${WHEELS_FILES[@]} | sort -u); do
|
||||
case $OS in
|
||||
centos)
|
||||
# Bash globbing does not handle [^\-] well,
|
||||
# so use grep instead
|
||||
wheelfile="$(find_wheel_rpm ${wheel})"
|
||||
|
||||
if [ ! -e "${wheelfile}" ]; then
|
||||
echo "Could not find ${wheel}" >&2
|
||||
FAILED+=($wheel)
|
||||
continue
|
||||
fi
|
||||
|
||||
echo Extracting ${wheelfile}
|
||||
|
||||
rpm2cpio ${wheelfile} | cpio -vidu
|
||||
if [ ${PIPESTATUS[0]} -ne 0 -o ${PIPESTATUS[1]} -ne 0 ]; then
|
||||
echo "Failed to extract content of ${wheelfile}" >&2
|
||||
FAILED+=($wheel)
|
||||
fi
|
||||
|
||||
;;
|
||||
debian)
|
||||
wheelfile="$(find_wheel_deb ${wheel})"
|
||||
if [ ! -e "${wheelfile}" ]; then
|
||||
|
@ -1,123 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
#
|
||||
# Build a bootable guest image from the supplied rootfs archive
|
||||
#
|
||||
|
||||
import getopt
|
||||
import guestfs
|
||||
import os
|
||||
import sys
|
||||
|
||||
|
||||
MBR_FILE='/usr/share/syslinux/mbr.bin'
|
||||
MBR_SIZE=440
|
||||
|
||||
def build_image(inputfile, outputfile, extrasize, trace):
|
||||
g = guestfs.GuestFS(python_return_dict=True)
|
||||
|
||||
# Set the trace flag so that we can see each libguestfs call.
|
||||
if trace:
|
||||
g.set_trace(1)
|
||||
|
||||
# Create a raw-format sparse disk image sized to the input archive plus the requested padding
|
||||
inputsize = os.path.getsize(inputfile)
|
||||
g.disk_create(outputfile, "raw", inputsize + extrasize)
|
||||
|
||||
# Attach the new disk image to libguestfs.
|
||||
g.add_drive_opts(outputfile, format="raw", readonly=0)
|
||||
|
||||
# Run the libguestfs back-end.
|
||||
g.launch()
|
||||
|
||||
# Get the list of devices. Because we only added one drive
|
||||
# above, we expect that this list should contain a single
|
||||
# element.
|
||||
devices = g.list_devices()
|
||||
assert(len(devices) == 1)
|
||||
|
||||
# Partition the disk as one single MBR partition.
|
||||
g.part_disk(devices[0], "mbr")
|
||||
|
||||
# Get the list of partitions. We expect a single element, which
|
||||
# is the partition we have just created.
|
||||
partitions = g.list_partitions()
|
||||
assert(len(partitions) == 1)
|
||||
|
||||
# Create a filesystem on the partition.
|
||||
# NOTE: extlinux does not support 64-bit file systems
|
||||
g.mkfs("ext4", partitions[0], features="^64bit")
|
||||
|
||||
# Now mount the filesystem so that we can add files.
|
||||
g.mount(partitions[0], "/")
|
||||
|
||||
# Upload file system files and directories.
|
||||
g.tar_in(inputfile, "/")
|
||||
|
||||
# Install the boot loader
|
||||
g.extlinux("/boot")
|
||||
|
||||
# Unmount the file systems.
|
||||
g.umount_all()
|
||||
|
||||
# Write the master boot record.
|
||||
with open(MBR_FILE, mode='rb') as mbr:
|
||||
mbr_data = mbr.read()
|
||||
assert(len(mbr_data) == MBR_SIZE)
|
||||
g.pwrite_device(devices[0], mbr_data, 0)
|
||||
|
||||
# Mark the device as bootable.
|
||||
g.part_set_bootable(devices[0], 1, 1)
|
||||
|
||||
# Label the boot disk for root identification
|
||||
g.set_label(partitions[0], "wrs_guest")
|
||||
|
||||
# Shutdown and close guest image
|
||||
g.shutdown()
|
||||
g.close()
|
||||
|
||||
|
||||
def exit_usage(result=0):
|
||||
print('USAGE: -i <input-file> -o <output-file> [-s <extra-bytes>]')
|
||||
sys.exit(result)
|
||||
|
||||
|
||||
def main(argv):
|
||||
inputfile = None
|
||||
outputfile = None
|
||||
extrasize = None
|
||||
trace = False
|
||||
|
||||
try:
|
||||
opts, args = getopt.getopt(argv,"hxi:o:s:",
|
||||
["input=", "output=", "size="])
|
||||
except getopt.GetoptError:
|
||||
exit_usage(2)
|
||||
for opt, arg in opts:
|
||||
if opt == '-h':
|
||||
exit_usage()
|
||||
if opt == '-x':
|
||||
trace = True
|
||||
elif opt in ("-i", "--input"):
|
||||
inputfile = arg
|
||||
elif opt in ("-o", "--output"):
|
||||
outputfile = arg
|
||||
elif opt in ("-s", "--size"):
|
||||
extrasize = int(arg)
|
||||
|
||||
if not inputfile:
|
||||
print("ERROR: missing input file", file=sys.stderr)
|
||||
exit_usage(-1)
|
||||
|
||||
if not outputfile:
|
||||
print("ERROR: missing output file", file=sys.stderr)
|
||||
exit_usage(-1)
|
||||
|
||||
if not extrasize:
|
||||
extrasize = 0
|
||||
|
||||
build_image(inputfile, outputfile, extrasize, trace)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main(sys.argv[1:])
|
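A minimal usage sketch of the script above (the script's file name is not shown in this hunk, so the name here is only illustrative; the -i/-o/-s/-x options are the ones parsed by main()):

    ./build-guest-image.py -i rootfs.tar -o guest.img -s 536870912 -x

This would wrap rootfs.tar in a bootable raw image with 512 MiB of extra space and libguestfs call tracing enabled.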
@ -1,14 +0,0 @@
|
||||
# List of packages to be included/installed in RT guest image
|
||||
# If these have dependencies, they will be pulled in automatically
|
||||
#
|
||||
|
||||
# This will help our automation debug TC failures when pings to VMs fail.
|
||||
qemu-guest-agent
|
||||
|
||||
# Add debugging tools
|
||||
zip
|
||||
unzip
|
||||
traceroute
|
||||
|
||||
# Add cfn-push-stats for heat demos
|
||||
heat-cfntools
|
@ -1,14 +0,0 @@
|
||||
# List of packages to be included/installed in guest image
|
||||
# If these have dependencies, they will be pulled in automatically
|
||||
#
|
||||
|
||||
# This will help our automation debug TC failures when pings to VMs fail.
|
||||
qemu-guest-agent
|
||||
|
||||
# Add debugging tools
|
||||
zip
|
||||
unzip
|
||||
traceroute
|
||||
|
||||
# Add cfn-push-stats for heat demos
|
||||
heat-cfntools
|
@ -1,13 +0,0 @@
|
||||
# exclude special filesystems
|
||||
/builddir
|
||||
/dev/*
|
||||
/proc/*
|
||||
/tmp/*
|
||||
/sys/*
|
||||
/root/rootfs.tar
|
||||
|
||||
# exclude local repo yum configuration
|
||||
/etc/yum/yum.conf
|
||||
|
||||
# omit platform hooks to check install uuid
|
||||
/etc/dhcp/dhclient-enter-hooks
|
@ -1,7 +0,0 @@
|
||||
SERIAL 0 115200
|
||||
|
||||
DEFAULT linux
|
||||
LABEL linux
|
||||
KERNEL vmlinuz
|
||||
INITRD initramfs.img
|
||||
APPEND rw root=LABEL=wrs_guest clocksource=pit console=tty0 console=ttyS0 biosdevname=0 net.ifnames=0 no_timer_check audit=0 cgroup_disable=memory isolcpus=1-3 irqaffinity=0 nmi_watchdog=0 softlockup_panic=0 intel_idle.max_cstate=0 processor.max_cstate=1 idle=poll
|
@ -1,92 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
BUILD_MODE=''
|
||||
if [ "$1" == "--rt" ]; then
|
||||
BUILD_MODE="rt"
|
||||
fi
|
||||
if [ "$1" == "--std" ]; then
|
||||
BUILD_MODE="std"
|
||||
fi
|
||||
|
||||
# Setup boot directory for syslinux configuration (/boot/extlinux.conf)
|
||||
ln -s $(ls /boot/vmlinuz-*.x86_64 | head -1) /boot/vmlinuz
|
||||
ln -s $(ls /boot/initramfs-*.x86_64.img | head -1) /boot/initramfs.img
|
||||
|
||||
# Setup root and sysadmin users
|
||||
usermod -p $(openssl passwd -1 root) root
|
||||
useradd -p $(openssl passwd -1 sysadmin) sysadmin
|
||||
|
||||
# Enable SUDO access for sysadmin
|
||||
echo "sysadmin ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
|
||||
|
||||
# Enable remote root login to permit automated tools to run privileged commands
|
||||
sed -i 's%^#\(PermitRootLogin \)%\1%' /etc/ssh/sshd_config
|
||||
sed -i 's#^\(PermitRootLogin \).*#\1yes#' /etc/ssh/sshd_config
|
||||
|
||||
# Enable password login to permit automated tools to run commands
|
||||
sed -i 's%^#\(PasswordAuthentication \)%\1%' /etc/ssh/sshd_config
|
||||
sed -i 's#^\(PasswordAuthentication \).*#\1yes#' /etc/ssh/sshd_config
|
||||
|
||||
# Disable PAM authentication
|
||||
sed -i 's#^\(UsePAM \).*#\1no#' /etc/ssh/sshd_config
|
||||
|
||||
# Prevent cloud-init from reverting our changes
|
||||
sed -i 's#^\(ssh_pwauth:\).*#\1 1#' /etc/cloud/cloud.cfg
|
||||
sed -i 's#^\(disable_root:\).*#\1 0#' /etc/cloud/cloud.cfg
|
||||
|
||||
# Setup SSHD to mark packets for QoS processing in the host (this seems to
|
||||
# be broken in our version of SSHd so equivalent iptables rules are being
|
||||
# added to compensate).
|
||||
echo "IPQoS cs7" >> /etc/ssh/sshd_config
|
||||
|
||||
# Disable reverse path filtering to permit traffic testing from
|
||||
# foreign routes.
|
||||
sed -i 's#^\(net.ipv4.conf.*.rp_filter=\).*#\10#' /etc/sysctl.conf
|
||||
|
||||
# Change /etc/rc.local to touch a file to indicate that the init has
|
||||
# completed. This is required by the AVS vbenchmark tool so that it knows
|
||||
# that the VM is ready to run. This was added because VM instances take a
|
||||
# long time (2-3 minutes) to resize their filesystem when run on a system with
|
||||
# HDD instead of SSD.
|
||||
chmod +x /etc/rc.d/rc.local
|
||||
echo "touch /var/run/.init-complete" >> /etc/rc.local
|
||||
|
||||
if [ "$BUILD_MODE" == "rt" ]; then
|
||||
# Adjust system tuning knobs during init when using rt kernel (CGTS-7047)
|
||||
echo "echo 1 > /sys/devices/virtual/workqueue/cpumask" >> /etc/rc.local
|
||||
echo "echo 1 > /sys/bus/workqueue/devices/writeback/cpumask" >> /etc/rc.local
|
||||
echo "echo -1 > /proc/sys/kernel/sched_rt_runtime_us" >> /etc/rc.local
|
||||
echo "echo 0 > /proc/sys/kernel/timer_migration" >> /etc/rc.local
|
||||
echo "echo 10 > /proc/sys/vm/stat_interval" >> /etc/rc.local
|
||||
fi
|
||||
|
||||
# Disable audit service by default
|
||||
# With this enabled, it causes system delays when running at maximum
|
||||
# capacity that impacts the traffic processing enough to cause unclean
|
||||
# traffic runs when doing benchmark tests.
|
||||
systemctl disable auditd
|
||||
|
||||
if [ "$BUILD_MODE" == "rt" ]; then
|
||||
# Additional services to disable on rt guest (CGTS-7047)
|
||||
systemctl disable polkit.service
|
||||
systemctl disable tuned.service
|
||||
fi
|
||||
|
||||
# Clean the yum cache. We don't want to maintain it on the guest file system.
|
||||
yum clean all
|
||||
|
||||
# update /etc/rsyslog.conf to have OmitLocalLogging off
|
||||
if [ -f /etc/rsyslog.conf ]; then
|
||||
sed -i 's#OmitLocalLogging on#OmitLocalLogging off#g' /etc/rsyslog.conf
|
||||
fi
|
||||
|
||||
# select correct kernel and initrd
|
||||
if [ "$BUILD_MODE" == "rt" ]; then
|
||||
PATTERN=$(rpm -q --qf '%{VERSION}-%{RELEASE}' kernel-rt)
|
||||
else
|
||||
PATTERN=$(rpm -q --qf '%{VERSION}-%{RELEASE}' kernel)
|
||||
fi
|
||||
cd /boot
|
||||
rm -f vmlinuz initramfs.img
|
||||
ln -s $(ls -1 vmlinuz-$PATTERN*) vmlinuz
|
||||
ln -s $(ls -1 initramfs-$PATTERN*img) initramfs.img
|
@ -1,7 +0,0 @@
|
||||
SERIAL 0 115200
|
||||
|
||||
DEFAULT linux
|
||||
LABEL linux
|
||||
KERNEL vmlinuz
|
||||
INITRD initramfs.img
|
||||
APPEND rw root=LABEL=wrs_guest clocksource=pit console=tty0 console=ttyS0 biosdevname=0 net.ifnames=0 no_timer_check
|
@ -1,18 +0,0 @@
|
||||
# Override the datasource list to use only those that are expected (and needed)
|
||||
# to work in our lab environment.
|
||||
#
|
||||
datasource_list:
|
||||
- NoCloud
|
||||
- ConfigDrive
|
||||
- Ec2
|
||||
- None
|
||||
|
||||
# Adjust the Ec2 max_wait to be 30 seconds instead of the default 120 seconds,
|
||||
# and set the list of URLs to be the only one that we expect to work in our lab
|
||||
# environment so that we avoid DNS lookup failures for alternate choices.
|
||||
#
|
||||
datasource:
|
||||
Ec2:
|
||||
timeout: 10
|
||||
max_wait: 30
|
||||
metadata_urls: ['http://169.254.169.254']
|
@ -1,21 +0,0 @@
|
||||
## Use a CID based on the hardware address for both IPv4 and IPv6. This is mostly
|
||||
## useful for IPv6 to ensure that the client is not using a random DUID for the
|
||||
## CID on each reboot.
|
||||
send dhcp6.client-id = concat(00:03:00, hardware);
|
||||
send dhcp-client-identifier = concat(00:03:00, hardware);
|
||||
|
||||
## Defaults for all interfaces
|
||||
request interface-mtu, subnet-mask, broadcast-address, time-offset,
|
||||
classless-static-routes;
|
||||
|
||||
interface "eth0" {
|
||||
## Override for eth0 to add requests for attributes that we only care to
|
||||
## configure for our primary network interface
|
||||
request interface-mtu, subnet-mask, broadcast-address, time-offset,
|
||||
domain-name, domain-name-servers, host-name,
|
||||
classless-static-routes, routers;
|
||||
}
|
||||
|
||||
timeout 15;
|
||||
|
||||
retry 5;
|
@ -1,12 +0,0 @@
|
||||
*mangle
|
||||
:PREROUTING ACCEPT [0:0]
|
||||
:INPUT ACCEPT [0:0]
|
||||
:FORWARD ACCEPT [0:0]
|
||||
:OUTPUT ACCEPT [0:0]
|
||||
:POSTROUTING ACCEPT [0:0]
|
||||
-A OUTPUT -o eth0 -p tcp --sport 22 -j DSCP --set-dscp-class CS7
|
||||
-A OUTPUT -o eth0 -p tcp --dport 22 -j DSCP --set-dscp-class CS7
|
||||
-A OUTPUT -o eth0 -p udp --sport 67:68 -j DSCP --set-dscp-class CS7
|
||||
-A OUTPUT -o eth0 -p udp --dport 67:68 -j DSCP --set-dscp-class CS7
|
||||
-A OUTPUT -o eth0 -d 169.254.169.254 -j DSCP --set-dscp-class CS7
|
||||
COMMIT
|
@ -1 +0,0 @@
|
||||
blacklist floppy
|
@ -1 +0,0 @@
|
||||
options wrs_avp kthread_cpulist=0-7 kthread_policy=0
|
@ -1 +0,0 @@
|
||||
wrs_avp
|
@ -1,8 +0,0 @@
|
||||
DEVICE=eth0
|
||||
BOOTPROTO=dhcp
|
||||
ONBOOT=yes
|
||||
TYPE=Ethernet
|
||||
USERCTL=yes
|
||||
PEERDNS=yes
|
||||
IPV6INIT=no
|
||||
PERSISTENT_DHCLIENT=1
|
@ -1,4 +0,0 @@
|
||||
# Renames interfaces to be sequential ethX interface names regardless of interface type
|
||||
# This is required to avoid a kernel host patch that starts numbering at 1000 and to
|
||||
# override slot specific naming for non-kernel interfaces.
|
||||
ACTION=="add", SUBSYSTEM=="net", DRIVERS=="?*", ATTR{type}=="1", KERNEL=="eth?*" PROGRAM=="/usr/lib/udev/renumber_device", NAME="$result"
|
@ -1,12 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Renames interfaces to be sequential ethX interface names regardless of interface type
|
||||
# This is required to avoid a kernel host patch that starts numbering at 1000 and to
|
||||
# override slot specific naming for non-kernel interfaces.
|
||||
|
||||
# The ifindex for the first interface that is not 'lo' will be 2.
|
||||
# Therefore adjust the numbering to start at 0 for eth0..ethN naming
|
||||
|
||||
INDEX=$(($IFINDEX-2))
|
||||
echo "eth$INDEX"
|
||||
|
||||
exit 0
|
@ -1,294 +0,0 @@
|
||||
# list of standard packages to include in the guest image
|
||||
acl
|
||||
acpid
|
||||
audit
|
||||
audit-libs
|
||||
audit-libs-python
|
||||
authconfig
|
||||
basesystem
|
||||
bash
|
||||
bind-libs-lite
|
||||
bind-license
|
||||
binutils
|
||||
bridge-utils
|
||||
btrfs-progs
|
||||
bzip2-libs
|
||||
ca-certificates
|
||||
centos-logos
|
||||
centos-release
|
||||
checkpolicy
|
||||
chkconfig
|
||||
cloud-init
|
||||
coreutils
|
||||
cpio
|
||||
cracklib
|
||||
cracklib-dicts
|
||||
cronie
|
||||
cronie-anacron
|
||||
crontabs
|
||||
cryptsetup-libs
|
||||
curl
|
||||
cyrus-sasl-lib
|
||||
dbus
|
||||
dbus-glib
|
||||
dbus-libs
|
||||
dbus-python
|
||||
device-mapper
|
||||
device-mapper-libs
|
||||
dhclient
|
||||
dhcp-common
|
||||
dhcp-libs
|
||||
diffutils
|
||||
dmidecode
|
||||
dnsmasq
|
||||
dracut
|
||||
dracut-config-rescue
|
||||
dracut-network
|
||||
e2fsprogs
|
||||
e2fsprogs-libs
|
||||
elfutils-libelf
|
||||
elfutils-libs
|
||||
ethtool
|
||||
expat
|
||||
file
|
||||
file-libs
|
||||
filesystem
|
||||
findutils
|
||||
fipscheck
|
||||
fipscheck-lib
|
||||
freetype
|
||||
gawk
|
||||
gdbm
|
||||
gettext
|
||||
gettext-libs
|
||||
glib2
|
||||
glibc
|
||||
glibc-common
|
||||
glib-networking
|
||||
gmp
|
||||
gnupg2
|
||||
gnutls
|
||||
gobject-introspection
|
||||
gpgme
|
||||
grep
|
||||
groff-base
|
||||
grub2
|
||||
grub2-tools
|
||||
grubby
|
||||
gsettings-desktop-schemas
|
||||
gssproxy
|
||||
gzip
|
||||
hardlink
|
||||
hostname
|
||||
info
|
||||
initscripts
|
||||
iperf3
|
||||
iproute
|
||||
iptables
|
||||
iputils
|
||||
jansson
|
||||
jbigkit-libs
|
||||
json-c
|
||||
kbd
|
||||
kbd-legacy
|
||||
kbd-misc
|
||||
kernel-rt
|
||||
kernel-rt-tools
|
||||
kernel-rt-tools-libs
|
||||
kexec-tools
|
||||
keyutils
|
||||
keyutils-libs
|
||||
kmod
|
||||
kmod-libs
|
||||
kpartx
|
||||
krb5-libs
|
||||
less
|
||||
libacl
|
||||
libassuan
|
||||
libattr
|
||||
libbasicobjects
|
||||
libblkid
|
||||
libcap
|
||||
libcap-ng
|
||||
libcgroup
|
||||
libcollection
|
||||
libcom_err
|
||||
libcroco
|
||||
libcurl
|
||||
libdaemon
|
||||
libdb
|
||||
libdb-utils
|
||||
libedit
|
||||
libestr
|
||||
libevent
|
||||
libffi
|
||||
libgcc
|
||||
libgcrypt
|
||||
libgomp
|
||||
libgpg-error
|
||||
libgudev1
|
||||
libidn
|
||||
libini_config
|
||||
libjpeg-turbo
|
||||
libmnl
|
||||
libmodman
|
||||
libmount
|
||||
libndp
|
||||
libnetfilter_conntrack
|
||||
libnfnetlink
|
||||
libnfsidmap
|
||||
libnl3
|
||||
libnl3-cli
|
||||
libpath_utils
|
||||
libpcap
|
||||
libpipeline
|
||||
libproxy
|
||||
libpwquality
|
||||
libref_array
|
||||
libselinux
|
||||
libselinux-python
|
||||
libselinux-utils
|
||||
libsemanage
|
||||
libsemanage-python
|
||||
libsepol
|
||||
libsoup
|
||||
libss
|
||||
libssh2
|
||||
libstdc++
|
||||
libsysfs
|
||||
libtalloc
|
||||
libtasn1
|
||||
libteam
|
||||
libtevent
|
||||
libtiff
|
||||
libtirpc
|
||||
libunistring
|
||||
libuser
|
||||
libutempter
|
||||
libuuid
|
||||
libverto
|
||||
libverto-tevent
|
||||
libwebp
|
||||
libxml2
|
||||
libyaml
|
||||
logrotate
|
||||
lua
|
||||
lzo
|
||||
make
|
||||
man-db
|
||||
mariadb-libs
|
||||
microcode_ctl
|
||||
mozjs17
|
||||
ncurses
|
||||
ncurses-base
|
||||
ncurses-libs
|
||||
nettle
|
||||
net-tools
|
||||
newt
|
||||
newt-python
|
||||
nfs-utils
|
||||
nspr
|
||||
nss
|
||||
nss-softokn
|
||||
nss-softokn-freebl
|
||||
nss-sysinit
|
||||
nss-tools
|
||||
nss-util
|
||||
numactl-libs
|
||||
openssh
|
||||
openssh-clients
|
||||
openssh-server
|
||||
openssl
|
||||
openssl-libs
|
||||
os-prober
|
||||
p11-kit
|
||||
p11-kit-trust
|
||||
pam
|
||||
parted
|
||||
passwd
|
||||
pciutils
|
||||
pciutils-libs
|
||||
pcre
|
||||
pinentry
|
||||
pkgconfig
|
||||
policycoreutils
|
||||
policycoreutils-python
|
||||
polkit
|
||||
polkit-pkla-compat
|
||||
popt
|
||||
procps-ng
|
||||
pth
|
||||
pygobject3-base
|
||||
pygpgme
|
||||
pyliblzma
|
||||
python
|
||||
python-backports
|
||||
python-backports-ssl_match_hostname
|
||||
python-chardet
|
||||
python-configobj
|
||||
python-decorator
|
||||
python-iniparse
|
||||
python-IPy
|
||||
python-jsonpatch
|
||||
python-jsonpointer
|
||||
python-kitchen
|
||||
python-libs
|
||||
python-perf
|
||||
python-pillow
|
||||
python-prettytable
|
||||
python-pycurl
|
||||
python-pygments
|
||||
python-pyudev
|
||||
python-requests
|
||||
python2-six
|
||||
python-urlgrabber
|
||||
python-urllib3
|
||||
pyxattr
|
||||
PyYAML
|
||||
qrencode-libs
|
||||
quota
|
||||
quota-nls
|
||||
rdma
|
||||
readline
|
||||
rootfiles
|
||||
rpcbind
|
||||
rpm
|
||||
rpm-build-libs
|
||||
rpm-libs
|
||||
rpm-python
|
||||
rsync
|
||||
rsyslog
|
||||
sed
|
||||
rt-setup
|
||||
rtctl
|
||||
shadow-utils
|
||||
shared-mime-info
|
||||
slang
|
||||
snappy
|
||||
sqlite
|
||||
sudo
|
||||
systemd
|
||||
systemd-libs
|
||||
systemd-sysv
|
||||
sysvinit-tools
|
||||
tar
|
||||
tcpdump
|
||||
tcp_wrappers
|
||||
tcp_wrappers-libs
|
||||
teamd
|
||||
trousers
|
||||
tuned
|
||||
tzdata
|
||||
ustr
|
||||
util-linux
|
||||
vim-minimal
|
||||
virt-what
|
||||
wget
|
||||
which
|
||||
xz
|
||||
xz-libs
|
||||
yum
|
||||
yum-metadata-parser
|
||||
yum-plugin-fastestmirror
|
||||
yum-utils
|
||||
zlib
|
@ -1,291 +0,0 @@
|
||||
# list of standard packages to include in the guest image
|
||||
acl
|
||||
acpid
|
||||
audit
|
||||
audit-libs
|
||||
audit-libs-python
|
||||
authconfig
|
||||
basesystem
|
||||
bash
|
||||
bind-libs-lite
|
||||
bind-license
|
||||
binutils
|
||||
bridge-utils
|
||||
btrfs-progs
|
||||
bzip2-libs
|
||||
ca-certificates
|
||||
centos-logos
|
||||
centos-release
|
||||
checkpolicy
|
||||
chkconfig
|
||||
cloud-init
|
||||
coreutils
|
||||
cpio
|
||||
cracklib
|
||||
cracklib-dicts
|
||||
cronie
|
||||
cronie-anacron
|
||||
crontabs
|
||||
cryptsetup-libs
|
||||
curl
|
||||
cyrus-sasl-lib
|
||||
dbus
|
||||
dbus-glib
|
||||
dbus-libs
|
||||
dbus-python
|
||||
device-mapper
|
||||
device-mapper-libs
|
||||
dhclient
|
||||
dhcp-common
|
||||
dhcp-libs
|
||||
diffutils
|
||||
dmidecode
|
||||
dnsmasq
|
||||
dracut
|
||||
dracut-config-rescue
|
||||
dracut-network
|
||||
e2fsprogs
|
||||
e2fsprogs-libs
|
||||
elfutils-libelf
|
||||
elfutils-libs
|
||||
ethtool
|
||||
expat
|
||||
file
|
||||
file-libs
|
||||
filesystem
|
||||
findutils
|
||||
fipscheck
|
||||
fipscheck-lib
|
||||
freetype
|
||||
gawk
|
||||
gdbm
|
||||
gettext
|
||||
gettext-libs
|
||||
glib2
|
||||
glibc
|
||||
glibc-common
|
||||
glib-networking
|
||||
gmp
|
||||
gnupg2
|
||||
gnutls
|
||||
gobject-introspection
|
||||
gpgme
|
||||
grep
|
||||
groff-base
|
||||
grub2
|
||||
grub2-tools
|
||||
grubby
|
||||
gsettings-desktop-schemas
|
||||
gssproxy
|
||||
gzip
|
||||
hardlink
|
||||
hostname
|
||||
info
|
||||
initscripts
|
||||
iperf3
|
||||
iproute
|
||||
iptables
|
||||
iputils
|
||||
jansson
|
||||
jbigkit-libs
|
||||
json-c
|
||||
kbd
|
||||
kbd-legacy
|
||||
kbd-misc
|
||||
kernel
|
||||
kernel-tools
|
||||
kernel-tools-libs
|
||||
kexec-tools
|
||||
keyutils
|
||||
keyutils-libs
|
||||
kmod
|
||||
kmod-libs
|
||||
kpartx
|
||||
krb5-libs
|
||||
less
|
||||
libacl
|
||||
libassuan
|
||||
libattr
|
||||
libbasicobjects
|
||||
libblkid
|
||||
libcap
|
||||
libcap-ng
|
||||
libcgroup
|
||||
libcollection
|
||||
libcom_err
|
||||
libcroco
|
||||
libcurl
|
||||
libdaemon
|
||||
libdb
|
||||
libdb-utils
|
||||
libedit
|
||||
libestr
|
||||
libevent
|
||||
libffi
|
||||
libgcc
|
||||
libgcrypt
|
||||
libgomp
|
||||
libgpg-error
|
||||
libgudev1
|
||||
libidn
|
||||
libini_config
|
||||
libjpeg-turbo
|
||||
libmnl
|
||||
libmodman
|
||||
libmount
|
||||
libndp
|
||||
libnetfilter_conntrack
|
||||
libnfnetlink
|
||||
libnfsidmap
|
||||
libnl3
|
||||
libnl3-cli
|
||||
libpath_utils
|
||||
libpcap
|
||||
libpipeline
|
||||
libproxy
|
||||
libpwquality
|
||||
libref_array
|
||||
libselinux
|
||||
libselinux-python
|
||||
libselinux-utils
|
||||
libsemanage
|
||||
libsemanage-python
|
||||
libsepol
|
||||
libsoup
|
||||
libss
|
||||
libssh2
|
||||
libstdc++
|
||||
libsysfs
|
||||
libtalloc
|
||||
libtasn1
|
||||
libteam
|
||||
libtevent
|
||||
libtiff
|
||||
libtirpc
|
||||
libunistring
|
||||
libuser
|
||||
libutempter
|
||||
libuuid
|
||||
libverto
|
||||
libverto-tevent
|
||||
libwebp
|
||||
libxml2
|
||||
libyaml
|
||||
logrotate
|
||||
lua
|
||||
lzo
|
||||
make
|
||||
man-db
|
||||
mariadb-libs
|
||||
microcode_ctl
|
||||
mozjs17
|
||||
ncurses
|
||||
ncurses-base
|
||||
ncurses-libs
|
||||
nettle
|
||||
net-tools
|
||||
newt
|
||||
newt-python
|
||||
nfs-utils
|
||||
nspr
|
||||
nss
|
||||
nss-softokn
|
||||
nss-softokn-freebl
|
||||
nss-sysinit
|
||||
nss-tools
|
||||
nss-util
|
||||
numactl-libs
|
||||
openssh
|
||||
openssh-clients
|
||||
openssh-server
|
||||
openssl
|
||||
openssl-libs
|
||||
os-prober
|
||||
p11-kit
|
||||
p11-kit-trust
|
||||
pam
|
||||
parted
|
||||
passwd
|
||||
pciutils
|
||||
pciutils-libs
|
||||
pcre
|
||||
pinentry
|
||||
pkgconfig
|
||||
policycoreutils
|
||||
policycoreutils-python
|
||||
polkit
|
||||
polkit-pkla-compat
|
||||
popt
|
||||
procps-ng
|
||||
pth
|
||||
pygobject3-base
|
||||
pygpgme
|
||||
pyliblzma
|
||||
python
|
||||
python-backports
|
||||
python-backports-ssl_match_hostname
|
||||
python-chardet
|
||||
python-configobj
|
||||
python-decorator
|
||||
python-iniparse
|
||||
python-IPy
|
||||
python-jsonpatch
|
||||
python-jsonpointer
|
||||
python-kitchen
|
||||
python-libs
|
||||
python-perf
|
||||
python-pillow
|
||||
python-prettytable
|
||||
python-pycurl
|
||||
python-pygments
|
||||
python-pyudev
|
||||
python-requests
|
||||
python2-six
|
||||
python-urlgrabber
|
||||
python-urllib3
|
||||
pyxattr
|
||||
PyYAML
|
||||
qrencode-libs
|
||||
quota
|
||||
quota-nls
|
||||
rdma
|
||||
readline
|
||||
rootfiles
|
||||
rpcbind
|
||||
rpm
|
||||
rpm-build-libs
|
||||
rpm-libs
|
||||
rpm-python
|
||||
rsync
|
||||
sed
|
||||
setup
|
||||
shadow-utils
|
||||
shared-mime-info
|
||||
slang
|
||||
snappy
|
||||
sqlite
|
||||
sudo
|
||||
systemd
|
||||
systemd-libs
|
||||
systemd-sysv
|
||||
sysvinit-tools
|
||||
tar
|
||||
tcpdump
|
||||
tcp_wrappers
|
||||
tcp_wrappers-libs
|
||||
teamd
|
||||
trousers
|
||||
tzdata
|
||||
ustr
|
||||
util-linux
|
||||
vim-enhanced
|
||||
virt-what
|
||||
wget
|
||||
which
|
||||
xz
|
||||
xz-libs
|
||||
yum
|
||||
yum-metadata-parser
|
||||
yum-plugin-fastestmirror
|
||||
yum-utils
|
||||
zlib
|
@ -1,7 +0,0 @@
|
||||
# list of packages to be excluded from guest image
|
||||
cpp
|
||||
gcc
|
||||
gcc-c++
|
||||
gdb
|
||||
linux-firmware
|
||||
rpm-build
|
@ -1,40 +0,0 @@
|
||||
#version=DEVEL
|
||||
# System authorization information
|
||||
auth --enableshadow --passalgo=sha512
|
||||
# Use CDROM installation media
|
||||
cdrom
|
||||
# Use graphical install
|
||||
graphical
|
||||
# Run the Setup Agent on first boot
|
||||
firstboot --enable
|
||||
ignoredisk --only-use=sda
|
||||
# Keyboard layouts
|
||||
keyboard --vckeymap=us --xlayouts='us'
|
||||
# System language
|
||||
lang en_US.UTF-8
|
||||
|
||||
# Network information
|
||||
network --bootproto=dhcp --device=enp0s3 --onboot=off --ipv6=auto
|
||||
network --bootproto=static --device=enp0s8 --ip=10.10.10.10 --netmask=255.255.255.0 --ipv6=auto --activate
|
||||
network --device=lo --hostname=localhost.localdomain
|
||||
|
||||
#Root password
|
||||
rootpw --lock
|
||||
# System timezone
|
||||
timezone America/New_York --isUtc
|
||||
user --groups=wheel --name=sysadmin --password=$6$Mazui8NX.w6C5I$UWNzOnui.vb3qOT3Qyw0I6hMLW0G02KfQGcCZTXdVv9GDZLUXHJVeGEN1/RAe.EOgz2cLkFkVaS8pvwBTFG1j/ --iscrypted --gecos="sysadmin"
|
||||
# System bootloader configuration
|
||||
bootloader --location=mbr --boot-drive=sda
|
||||
autopart --type=lvm
|
||||
# Partition clearing information
|
||||
clearpart --all --initlabel --drives=sda
|
||||
|
||||
%packages
|
||||
@^minimal
|
||||
@core
|
||||
|
||||
%end
|
||||
|
||||
%addon com_redhat_kdump --disable --reserve-mb='auto'
|
||||
|
||||
%end
|
@ -1,352 +0,0 @@
|
||||
#!/bin/env bash
|
||||
|
||||
#
|
||||
# Copyright (c) 2018-2020 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
# Here's the score, kids. There are a few different places from which we can
|
||||
# get packages. In priority order, they are:
|
||||
#
|
||||
# The CGTS packages we've built ourselves
|
||||
# The CGTS packages that Jenkins has built (coming soon to a script near you)
|
||||
# The CentOS packages in various repos
|
||||
# - Base OS
|
||||
# - OpenStack Repos
|
||||
# EPEL (Extra Packages for Enterprise Linux)
|
||||
#
|
||||
# This script can function in two ways:
|
||||
# If you specify a filename, it assumes the file is a list of packages you
|
||||
# want to install, or dependencies you want to meet. It installs whatever
|
||||
# is in the list into the current directory. Failure to find a dependency
|
||||
# results in a return code of 1
|
||||
#
|
||||
# If no file is specified, we generate a file ($DEPLISTFILE) of dependencies
|
||||
# based on the current directory
|
||||
#
|
||||
# We then continuously loop through generating new dependencies and installing
|
||||
# them until either all dependencies are met, or we cannot install anymore
|
||||
#
|
||||
# We also log where dependencies were installed from into
|
||||
# export/dist/report_deps.txt
|
||||
#
|
||||
|
||||
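# Illustrative invocation (hypothetical; per the notes above, the optional
# argument is a file listing the packages or capabilities to resolve, and
# downloads land in the current directory):
#   cd <output-dir> && ./cgts_deps.sh <package-list-file>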
CGTS_DEPS_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )"
|
||||
|
||||
# Set REPOQUERY, REPOQUERY_SUB_COMMAND, REPOQUERY_RESOLVE and
|
||||
# REPOQUERY_WHATPROVIDES_DELIM for our build environment.
|
||||
source ${CGTS_DEPS_DIR}/../pkg-manager-utils.sh
|
||||
|
||||
# This function generates a simple file of dependencies we're trying to resolve
|
||||
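# (The trick, as an added note: a test install of every *.rpm in the current
# directory against an empty temporary rpm database makes rpm print every
# unmet requirement, which is then reduced to bare capability names.)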
function generate_dep_list {
|
||||
TMP_RPM_DB=$(mktemp -d $(pwd)/tmp_rpm_db_XXXXXX)
|
||||
mkdir -p $TMP_RPM_DB
|
||||
rpm --initdb --dbpath $TMP_RPM_DB
|
||||
rpm --dbpath $TMP_RPM_DB --test -Uvh --replacefiles '*.rpm' > $DEPLISTFILE_NEW 2>&1
|
||||
cat $DEPLISTFILE_NEW >> $DEPDETAILLISTFILE
|
||||
cat $DEPLISTFILE_NEW \
|
||||
| grep -v -e "error:" -e "warning:" -e "Preparing..." \
|
||||
-e "Verifying..." -e "installing package" \
|
||||
| sed -e "s/ is needed by.*$//" -e "s/ [<=>].*$//" \
|
||||
| sort -u > $DEPLISTFILE
|
||||
\rm -rf $TMP_RPM_DB
|
||||
}
|
||||
|
||||
join_array() {
|
||||
local IFS="$1"
|
||||
shift
|
||||
echo "$*"
|
||||
}
|
||||
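# For example: join_array , pkg-a pkg-b pkg-c   ->   "pkg-a,pkg-b,pkg-c"
# (used below to build the delimiter-separated --whatprovides argument list)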
|
||||
# Takes a list of requirements (either an explicit package name, or capabilities
|
||||
# to provide) and installs packages to meet those dependencies
|
||||
#
|
||||
# We take the list of requirements and first try to look them up based on
|
||||
# package name. If we can't find a package with the name of the requirement,
|
||||
# we use --whatprovides to complete the lookup.
|
||||
#
|
||||
# The reason for this initial name-based attempt is that a couple of funky
|
||||
# packages (notably -devel packages) have "Provides:" capabilities which
|
||||
# conflict with named packages. So if we explicitly say we want "xyz" then we'll
|
||||
# install the "xyz" package, rather than "something-devel" which has "xyz"
|
||||
# capabilities.
|
||||
function install_deps {
|
||||
local DEP_LIST=""
|
||||
local DEP_LIST_ARRAY=()
|
||||
local DEP_LIST_FILE="$1"
|
||||
|
||||
# Temporary files are used in a few different ways
|
||||
# Here we essentially create variable aliases to make it easier to read
|
||||
# the script
|
||||
local UNSORTED_PACKAGES=$TMPFILE
|
||||
local SORTED_PACKAGES=$TMPFILE1
|
||||
local UNRESOLVED_PACKAGES=$TMPFILE2
|
||||
|
||||
rm -f $UNSORTED_PACKAGES
|
||||
|
||||
while read DEP
|
||||
do
|
||||
DEP_LIST+=" '${DEP}'"
|
||||
done < $DEP_LIST_FILE
|
||||
|
||||
echo "Debug: List of deps to resolve: ${DEP_LIST}"
|
||||
|
||||
if [ -z "${DEP_LIST}" ]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
# go through each repo and convert deps to packages based on package name
|
||||
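# The loop below pulls repo ids out of the yum config, e.g. a section header
# like "[local-std]" in $YUM yields REPOID=local-std.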
for REPOID in `grep '^[[].*[]]$' $YUM | grep -v '[[]main[]]' | awk -F '[][]' '{print $2 }'`; do
|
||||
echo "TMPDIR=${TMP_DIR}"\
|
||||
"${REPOQUERY} --config=${YUM} --repoid=$REPOID"\
|
||||
"${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch"\
|
||||
"${DEP_LIST} --qf='%{name}'"
|
||||
|
||||
TMPDIR=${TMP_DIR} \
|
||||
${REPOQUERY} --config=${YUM} --repoid=$REPOID \
|
||||
${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch \
|
||||
--qf='%{name}' ${DEP_LIST} \
|
||||
| sed "s/kernel-debug/kernel/g" >> $UNSORTED_PACKAGES
|
||||
|
||||
\rm -rf $TMP_DIR/yum-$USER-*
|
||||
done
|
||||
sort $UNSORTED_PACKAGES -u > $SORTED_PACKAGES
|
||||
|
||||
# figure out any dependencies which could not be resolved based on
|
||||
# package name. We use --whatprovides to deal with this
|
||||
#
|
||||
# First, we build a new DEP_LIST based on what was NOT found in
|
||||
# the search-by-name attempt
|
||||
sort $DEP_LIST_FILE -u > $TMPFILE
|
||||
comm -2 -3 $TMPFILE $SORTED_PACKAGES > $UNRESOLVED_PACKAGES
|
||||
|
||||
# If there are any requirements not resolved, look up the packages with
|
||||
# --whatprovides
|
||||
if [ -s $UNRESOLVED_PACKAGES ]; then
|
||||
DEP_LIST_ARRAY=()
|
||||
\cp $SORTED_PACKAGES $UNSORTED_PACKAGES
|
||||
while read DEP
|
||||
do
|
||||
DEP_LIST_ARRAY+=( "${DEP}" )
|
||||
done < $UNRESOLVED_PACKAGES
|
||||
|
||||
if [ "${REPOQUERY_WHATPROVIDES_DELIM}" != " " ]; then
|
||||
DEP_LIST_ARRAY=( "$(join_array "${REPOQUERY_WHATPROVIDES_DELIM}" "${DEP_LIST_ARRAY[@]}" )" )
|
||||
fi
|
||||
|
||||
if [ ${#DEP_LIST_ARRAY[@]} -gt 0 ]; then
|
||||
|
||||
for REPOID in `grep '^[[].*[]]$' $YUM | grep -v '[[]main[]]' | awk -F '[][]' '{print $2 }'`; do
|
||||
echo "TMPDIR=${TMP_DIR}"\
|
||||
"${REPOQUERY} --config=${YUM} --repoid=${REPOID}"\
|
||||
"${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch"\
|
||||
"--qf='%{name}' --whatprovides ${DEP_LIST_ARRAY[@]}"
|
||||
|
||||
TMPDIR=${TMP_DIR} \
|
||||
${REPOQUERY} --config=${YUM} --repoid=${REPOID} \
|
||||
${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch \
|
||||
--qf='%{name}' --whatprovides ${DEP_LIST_ARRAY[@]} \
|
||||
| sed "s/kernel-debug/kernel/g" >> $UNSORTED_PACKAGES
|
||||
|
||||
\rm -rf $TMP_DIR/yum-$USER-*
|
||||
done
|
||||
fi
|
||||
|
||||
sort -u $UNSORTED_PACKAGES > $SORTED_PACKAGES
|
||||
fi
|
||||
|
||||
# clean up
|
||||
\rm -f $UNSORTED_PACKAGES $UNRESOLVED_PACKAGES
|
||||
|
||||
# We now have, in SORTED_PACKAGES, a list of all packages that we need to install
|
||||
# to meet our dependencies
|
||||
DEP_LIST=" "
|
||||
while read DEP
|
||||
do
|
||||
DEP_LIST+="${DEP} "
|
||||
done < $SORTED_PACKAGES
|
||||
rm $SORTED_PACKAGES
|
||||
|
||||
# go through each repo and install packages
|
||||
local TARGETS="${DEP_LIST}"
|
||||
echo "Debug: Resolved list of deps to install: ${TARGETS}"
|
||||
local UNRESOLVED
|
||||
for REPOID in `grep '^[[].*[]]$' $YUM | grep -v '[[]main[]]' | awk -F '[][]' '{print $2 }'`; do
|
||||
UNRESOLVED="$TARGETS"
|
||||
|
||||
if [[ ! -z "${TARGETS// }" ]]; then
|
||||
REPO_PATH=$(cat $YUM | sed -n "/^\[$REPOID\]\$/,\$p" | grep '^baseurl=' | head -n 1 | awk -F 'file://' '{print $2}' | sed 's:/$::')
|
||||
|
||||
>&2 echo "TMPDIR=${TMP_DIR}"\
|
||||
"${REPOQUERY} --config=${YUM} --repoid=${REPOID}"\
|
||||
"${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch"\
|
||||
"--qf='%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}'"\
|
||||
"${REPOQUERY_RESOLVE} ${TARGETS}"
|
||||
|
||||
TMPDIR=${TMP_DIR} \
|
||||
${REPOQUERY} --config=${YUM} --repoid=${REPOID} \
|
||||
${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch \
|
||||
--qf="%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}" \
|
||||
${REPOQUERY_RESOLVE} ${TARGETS} \
|
||||
| sort -r -V > $TMPFILE
|
||||
|
||||
\rm -rf $TMP_DIR/yum-$USER-*
|
||||
|
||||
while read STR
|
||||
do
|
||||
>&2 echo "STR=$STR"
|
||||
if [ "x$STR" == "x" ]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
PKG=`echo $STR | cut -d " " -f 1`
|
||||
PKG_FILE=`echo $STR | cut -d " " -f 2`
|
||||
PKG_REL_PATH=`echo $STR | cut -d " " -f 3`
|
||||
PKG_PATH="${REPO_PATH}/${PKG_REL_PATH}"
|
||||
|
||||
>&2 echo "Installing PKG=$PKG PKG_FILE=$PKG_FILE PKG_REL_PATH=$PKG_REL_PATH PKG_PATH=$PKG_PATH from repo $REPOID"
|
||||
cp $PKG_PATH .
|
||||
if [ $? -ne 0 ]; then
|
||||
>&2 echo " Here's what I have to work with..."
|
||||
>&2 echo " TMPDIR=${TMP_DIR}"\
|
||||
"${REPOQUERY} --config=${YUM} --repoid=${REPOID}"\
|
||||
"${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch"\
|
||||
"--qf=\"%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}\""\
|
||||
"${REPOQUERY_RESOLVE} ${PKG}"
|
||||
>&2 echo " PKG=$PKG PKG_FILE=$PKG_FILE REPO_PATH=$REPO_PATH PKG_REL_PATH=$PKG_REL_PATH PKG_PATH=$PKG_PATH"
|
||||
fi
|
||||
|
||||
echo $UNRESOLVED | grep $PKG >> /dev/null
|
||||
if [ $? -eq 0 ]; then
|
||||
echo "$PKG found in $REPOID as $PKG" >> $BUILT_REPORT
|
||||
echo "$PKG_PATH" >> $BUILT_REPORT
|
||||
UNRESOLVED=$(echo "$UNRESOLVED" | sed "s# $PKG # #g")
|
||||
else
|
||||
echo "$PKG satisfies unknown target in $REPOID" >> $BUILT_REPORT
|
||||
echo " but it doesn't match targets, $UNRESOLVED" >> $BUILT_REPORT
|
||||
echo " path $PKG_PATH" >> $BUILT_REPORT
|
||||
FOUND_UNKNOWN=1
|
||||
fi
|
||||
done < $TMPFILE
|
||||
\rm -rf $TMP_DIR/yum-$USER-*
|
||||
TARGETS="$UNRESOLVED"
|
||||
fi
|
||||
done
|
||||
>&2 echo "Debug: Packages still unresolved: $UNRESOLVED"
|
||||
echo "Debug: Packages still unresolved: $UNRESOLVED" >> $WARNINGS_REPORT
|
||||
echo "Debug: Packages still unresolved: $UNRESOLVED" >> $BUILT_REPORT
|
||||
>&2 echo ""
|
||||
}
|
||||
|
||||
function check_all_explicit_deps_installed {
|
||||
|
||||
PKGS_TO_CHECK=" "
|
||||
while read PKG_TO_ADD
|
||||
do
|
||||
PKGS_TO_CHECK="$PKGS_TO_CHECK ${PKG_TO_ADD}"
|
||||
done < $DEPLISTFILE
|
||||
rpm -qp $MY_WORKSPACE/export/dist/isolinux/Packages/*.rpm --qf="%{name}\n" --nosignature > $TMPFILE
|
||||
|
||||
while read INSTALLED_PACKAGE
|
||||
do
|
||||
echo $PKGS_TO_CHECK | grep -q "${INSTALLED_PACKAGE}"
|
||||
if [ $? -eq 0 ]; then
|
||||
PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/^${INSTALLED_PACKAGE} //"`
|
||||
PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/ ${INSTALLED_PACKAGE} / /"`
|
||||
PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/ ${INSTALLED_PACKAGE}\$//"`
|
||||
PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/^${INSTALLED_PACKAGE}\$//"`
|
||||
fi
|
||||
done < $TMPFILE
|
||||
|
||||
# Strip leading spaces. We don't want something like ' ' to trigger a failure
|
||||
PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/^[ ]*//"`
|
||||
if [ -z "$PKGS_TO_CHECK" ]; then
|
||||
>&2 echo "All explicitly specified packages resolved!"
|
||||
else
|
||||
>&2 echo "Could not resolve packages: $PKGS_TO_CHECK"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
ATTEMPTED=0
|
||||
DISCOVERED=0
|
||||
OUTPUT_DIR=$MY_WORKSPACE/export
|
||||
TMP_DIR=$MY_WORKSPACE/tmp
|
||||
YUM=$OUTPUT_DIR/yum.conf
|
||||
DEPLISTFILE=$OUTPUT_DIR/deps.txt
|
||||
DEPLISTFILE_NEW=$OUTPUT_DIR/deps_new.txt
|
||||
DEPDETAILLISTFILE=$OUTPUT_DIR/deps_detail.txt
|
||||
|
||||
BUILT_REPORT=$OUTPUT_DIR/local.txt
|
||||
WARNINGS_REPORT=$OUTPUT_DIR/warnings.txt
|
||||
LAST_TEST=$OUTPUT_DIR/last_test.txt
|
||||
TMPFILE=$OUTPUT_DIR/cgts_deps_tmp.txt
|
||||
TMPFILE1=$OUTPUT_DIR/cgts_deps_tmp1.txt
|
||||
TMPFILE2=$OUTPUT_DIR/cgts_deps_tmp2.txt
|
||||
|
||||
touch "$BUILT_REPORT"
|
||||
touch "$WARNINGS_REPORT"
|
||||
|
||||
for i in "$@"
|
||||
do
|
||||
case $i in
|
||||
-d=*|--deps=*)
|
||||
DEPS="${i#*=}"
|
||||
shift # past argument=value
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
mkdir -p $TMP_DIR
|
||||
|
||||
rm -f "$DEPDETAILLISTFILE"
|
||||
# FIRST PASS we are being given a list of REQUIRED dependencies
|
||||
if [ "${DEPS}x" != "x" ]; then
|
||||
cat $DEPS | grep -v "^#" | sed '/^\s*$/d' > $DEPLISTFILE
|
||||
install_deps $DEPLISTFILE
|
||||
if [ $? -ne 0 ]; then
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# check that we resolved them all
|
||||
check_all_explicit_deps_installed
|
||||
if [ $? -ne 0 ]; then
|
||||
>&2 echo "Error -- could not install all explicitly listed packages"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
ALL_RESOLVED=0
|
||||
|
||||
while [ $ALL_RESOLVED -eq 0 ]; do
|
||||
cp $DEPLISTFILE $DEPLISTFILE.old
|
||||
generate_dep_list
|
||||
if [ ! -s $DEPLISTFILE ]; then
|
||||
# no more dependencies!
|
||||
ALL_RESOLVED=1
|
||||
else
|
||||
DIFFLINES=`diff $DEPLISTFILE.old $DEPLISTFILE | wc -l`
|
||||
if [ $DIFFLINES -eq 0 ]; then
|
||||
>&2 echo "Warning: Infinite loop detected in dependency resolution. See $DEPLISTFILE for details -- exiting"
|
||||
>&2 echo "These RPMS had problems (likely version conflicts)"
|
||||
>&2 cat $DEPLISTFILE
|
||||
|
||||
echo "Warning: Infinite loop detected in dependency resolution See $DEPLISTFILE for details -- exiting" >> $WARNINGS_REPORT
|
||||
echo "These RPMS had problems (likely version conflicts)" >> $WARNINGS_REPORT
|
||||
cat $DEPLISTFILE >> $WARNINGS_REPORT
|
||||
|
||||
date > $LAST_TEST
|
||||
|
||||
rm -f $DEPLISTFILE.old
|
||||
exit 1 # nothing fixed
|
||||
fi
|
||||
install_deps $DEPLISTFILE
|
||||
if [ $? -ne 0 ]; then
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
exit 0
|
Binary file not shown.
@ -1,122 +0,0 @@
|
||||
#!/usr/bin/perl
|
||||
|
||||
# Copy/pasted from http://www.smorgasbork.com/content/gather_packages.txt
|
||||
# As referenced by http://www.smorgasbork.com/2012/01/04/building-a-custom-centos-7-kickstart-disc-part-2/
|
||||
|
||||
use XML::Simple;
|
||||
|
||||
my ($comps_file, $rpm_src_path, $rpm_dst_path, $arch, @extra_groups_and_packages) = @ARGV;
|
||||
|
||||
if (!-e $comps_file)
|
||||
{
|
||||
print_usage ("Can't find '$comps_file'");
|
||||
}
|
||||
if (!-e $rpm_src_path)
|
||||
{
|
||||
print_usage ("RPM source path '$rpm_src_path' does not exist");
|
||||
}
|
||||
if (!-e $rpm_dst_path)
|
||||
{
|
||||
print_usage ("RPM destination path '$rpm_dst_path' does not exist");
|
||||
}
|
||||
if (!$arch)
|
||||
{
|
||||
print_usage ("Architecture not specified");
|
||||
}
|
||||
|
||||
#### we always gather core and base; note that for CentOS 7, we also need
|
||||
#### to include the grub2 package, or installation will fail
|
||||
@desired_groups = ('core', 'base', 'grub2');
|
||||
foreach (@extra_groups_and_packages)
|
||||
{
|
||||
push (@desired_groups, $_);
|
||||
}
|
||||
|
||||
$regex = '^(' . join ('|', @desired_groups) . ')$';
|
||||
|
||||
print "reading $comps_file...\n";
|
||||
print "getting RPMs from $rpm_src_path...\n";
|
||||
|
||||
$xml = new XML::Simple;
|
||||
$comps = $xml->XMLin($comps_file);
|
||||
|
||||
$cmd = "rm $rpm_dst_path/*";
|
||||
print "$cmd\n";
|
||||
`$cmd`;
|
||||
|
||||
%copied_groups = ();
|
||||
%copied_packages = ();
|
||||
|
||||
foreach $group (@{$comps->{group}})
|
||||
{
|
||||
$id = $group->{id};
|
||||
if ($id !~ m#$regex#)
|
||||
{
|
||||
next;
|
||||
}
|
||||
|
||||
print "#### group \@$id\n";
|
||||
$packagelist = $group->{packagelist};
|
||||
foreach $pr (@{$packagelist->{packagereq}})
|
||||
{
|
||||
if ($pr->{type} eq 'optional' || $pr->{type} eq 'conditional')
|
||||
{
|
||||
next;
|
||||
}
|
||||
|
||||
$cmd = "cp $rpm_src_path/" . $pr->{content} . "-[0-9]*.$arch.rpm"
|
||||
. " $rpm_src_path/" . $pr->{content} . "-[0-9]*.noarch.rpm $rpm_dst_path";
|
||||
print "$cmd\n";
|
||||
`$cmd 2>&1`;
|
||||
|
||||
$copied_packages{$pr->{content}} = 1;
|
||||
}
|
||||
|
||||
$copied_groups{$id} = 1;
|
||||
}
|
||||
|
||||
#### assume that any strings that weren't matched in the comps file's group list
|
||||
#### are actually packages
|
||||
|
||||
foreach $group (@desired_groups)
|
||||
{
|
||||
if ($copied_groups{$group})
|
||||
{
|
||||
next;
|
||||
}
|
||||
|
||||
$cmd = "cp $rpm_src_path/" . $group . "-[0-9]*.$arch.rpm"
|
||||
. " $rpm_src_path/" . $group . "-[0-9]*.noarch.rpm $rpm_dst_path";
|
||||
print "$cmd\n";
|
||||
`$cmd 2>&1`;
|
||||
}
|
||||
|
||||
sub print_usage
|
||||
{
|
||||
my ($msg) = @_;
|
||||
|
||||
($msg) && print "$msg\n\n";
|
||||
|
||||
print <<__TEXT__;
|
||||
|
||||
parse_comps.pl comps_file rpm_src_path rpm_dst_path arch [xtra_grps_and_pkgs]
|
||||
|
||||
comps_file the full path to the comps.xml file (as provided
|
||||
in the original distro)
|
||||
|
||||
rpm_src_path the full path to the directory of all RPMs from
|
||||
the distro
|
||||
|
||||
rpm_dst_path the full path to the directory where you want
|
||||
to save the RPMs for your kickstart
|
||||
|
||||
arch the target system architecture (e.g. x86_64)
|
||||
|
||||
xtra_grps_and_pkgs a list of extra groups and packages, separated by spaces
|
||||
|
||||
|
||||
__TEXT__
|
||||
|
||||
exit;
|
||||
}
|
||||
|
@ -1,6 +0,0 @@
|
||||
# The following packages will not be included in the customer ISO
|
||||
#
|
||||
# They are exceptional packages only to be included in developer builds
|
||||
enable-dev-patch
|
||||
fio
|
||||
dstat
|
@ -1,84 +0,0 @@
|
||||
# List of packages to be included/installed in ISO
|
||||
# If these have dependencies, they will be pulled in automatically
|
||||
#
|
||||
acpid
|
||||
gdb
|
||||
python2-gunicorn
|
||||
iperf3
|
||||
isomd5sum
|
||||
python2-aodhclient
|
||||
python2-oslo-log
|
||||
python2-six
|
||||
python-d2to1
|
||||
hiera
|
||||
python2-pecan
|
||||
python-configobj
|
||||
python-pep8
|
||||
python2-rsa
|
||||
ruby-shadow
|
||||
swig
|
||||
syslinux
|
||||
iotop
|
||||
linuxptp
|
||||
procps-ng
|
||||
python-daemon
|
||||
python-pyudev
|
||||
curl
|
||||
lvm2
|
||||
time
|
||||
postgresql
|
||||
postgresql-server
|
||||
postgresql-contrib
|
||||
targetcli
|
||||
strace
|
||||
wget
|
||||
bind-utils
|
||||
selinux-policy
|
||||
pm-utils
|
||||
tcpdump
|
||||
sysstat
|
||||
smartmontools
|
||||
collectd
|
||||
puppet-collectd
|
||||
socat
|
||||
attr
|
||||
|
||||
# for realtime kernel
|
||||
rtctl
|
||||
rt-setup
|
||||
|
||||
# For low-latency compute
|
||||
OVMF
|
||||
|
||||
# neutron bgp
|
||||
python2-pankoclient
|
||||
|
||||
# ima plugin for RPM
|
||||
ntfs-3g
|
||||
ntfsprogs
|
||||
python-memcached
|
||||
python2-coverage
|
||||
|
||||
# kubernetes packages
|
||||
docker-ce
|
||||
etcd
|
||||
docker-forward-journald
|
||||
|
||||
# Add debugging tools
|
||||
zip
|
||||
unzip
|
||||
traceroute
|
||||
|
||||
# support for persistent sessions
|
||||
screen
|
||||
|
||||
# For kata container
|
||||
kata-runtime
|
||||
|
||||
# For nvme disk firmware update
|
||||
nvme-cli
|
||||
|
||||
# Add openscap tools
|
||||
openscap
|
||||
openscap-scanner
|
||||
scap-security-guide
|
@ -1,125 +0,0 @@
|
||||
default vesamenu.c32
|
||||
timeout 600
|
||||
|
||||
display boot.msg
|
||||
|
||||
# Clear the screen when exiting the menu, instead of leaving the menu displayed.
|
||||
# For vesamenu, this means the graphical background is still displayed without
|
||||
# the menu itself for as long as the screen remains in graphics mode.
|
||||
menu clear
|
||||
menu background splash.png
|
||||
menu title CentOS 7
|
||||
menu vshift 8
|
||||
menu rows 18
|
||||
menu margin 8
|
||||
#menu hidden
|
||||
menu helpmsgrow 15
|
||||
menu tabmsgrow 13
|
||||
|
||||
# Border Area
|
||||
menu color border * #00000000 #00000000 none
|
||||
|
||||
# Selected item
|
||||
menu color sel 0 #ffffffff #00000000 none
|
||||
|
||||
# Title bar
|
||||
menu color title 0 #ff7ba3d0 #00000000 none
|
||||
|
||||
# Press [Tab] message
|
||||
menu color tabmsg 0 #ff3a6496 #00000000 none
|
||||
|
||||
# Unselected menu item
|
||||
menu color unsel 0 #84b8ffff #00000000 none
|
||||
|
||||
# Selected hotkey
|
||||
menu color hotsel 0 #84b8ffff #00000000 none
|
||||
|
||||
# Unselected hotkey
|
||||
menu color hotkey 0 #ffffffff #00000000 none
|
||||
|
||||
# Help text
|
||||
menu color help 0 #ffffffff #00000000 none
|
||||
|
||||
# A scrollbar of some type? Not sure.
|
||||
menu color scrollbar 0 #ffffffff #ff355594 none
|
||||
|
||||
# Timeout msg
|
||||
menu color timeout 0 #ffffffff #00000000 none
|
||||
menu color timeout_msg 0 #ffffffff #00000000 none
|
||||
|
||||
# Command prompt text
|
||||
menu color cmdmark 0 #84b8ffff #00000000 none
|
||||
menu color cmdline 0 #ffffffff #00000000 none
|
||||
|
||||
# Do not display the actual menu unless the user presses a key. All that is displayed is a timeout message.
|
||||
|
||||
menu tabmsg Press Tab for full configuration options on menu items.
|
||||
|
||||
menu separator # insert an empty line
|
||||
menu separator # insert an empty line
|
||||
|
||||
label tis
|
||||
menu label ^Install Titanium Cloud
|
||||
menu default
|
||||
kernel vmlinuz
|
||||
append initrd=initrd.img inst.ks=cdrom:/dev/cdrom:/ks/ks.cfg
|
||||
|
||||
label linux
|
||||
menu label ^Install CentOS 7
|
||||
kernel vmlinuz
|
||||
append initrd=initrd.img inst.stage2=hd:LABEL=CentOS\x207\x20x86_64 quiet
|
||||
|
||||
label check
|
||||
menu label Test this ^media & install CentOS 7
|
||||
kernel vmlinuz
|
||||
append initrd=initrd.img inst.stage2=hd:LABEL=CentOS\x207\x20x86_64 rd.live.check quiet
|
||||
|
||||
menu separator # insert an empty line
|
||||
|
||||
# utilities submenu
|
||||
menu begin ^Troubleshooting
|
||||
menu title Troubleshooting
|
||||
|
||||
label vesa
|
||||
menu indent count 5
|
||||
menu label Install CentOS 7 in ^basic graphics mode
|
||||
text help
|
||||
Try this option out if you're having trouble installing
|
||||
CentOS 7.
|
||||
endtext
|
||||
kernel vmlinuz
|
||||
append initrd=initrd.img inst.stage2=hd:LABEL=CentOS\x207\x20x86_64 xdriver=vesa nomodeset quiet
|
||||
|
||||
label rescue
|
||||
menu indent count 5
|
||||
menu label ^Rescue a CentOS system
|
||||
text help
|
||||
If the system will not boot, this lets you access files
|
||||
and edit config files to try to get it booting again.
|
||||
endtext
|
||||
kernel vmlinuz
|
||||
append initrd=initrd.img inst.stage2=hd:LABEL=CentOS\x207\x20x86_64 rescue quiet
|
||||
|
||||
label memtest
|
||||
menu label Run a ^memory test
|
||||
text help
|
||||
If your system is having issues, a problem with your
|
||||
system's memory may be the cause. Use this utility to
|
||||
see if the memory is working correctly.
|
||||
endtext
|
||||
kernel memtest
|
||||
|
||||
menu separator # insert an empty line
|
||||
|
||||
label local
|
||||
menu label Boot from ^local drive
|
||||
localboot 0xffff
|
||||
|
||||
menu separator # insert an empty line
|
||||
menu separator # insert an empty line
|
||||
|
||||
label returntomain
|
||||
menu label Return to ^main menu
|
||||
menu exit
|
||||
|
||||
menu end
|
@ -1,36 +0,0 @@
|
||||
install
|
||||
text
|
||||
lang en_US.UTF-8
|
||||
keyboard us
|
||||
reboot --eject
|
||||
firstboot --enable
|
||||
auth --enableshadow --passalgo=sha512
|
||||
|
||||
# Network information
|
||||
network --bootproto=dhcp --device=enp0s3 --onboot=on --ipv6=auto --activate
|
||||
network --bootproto=static --device=enp0s8 --ip=10.10.10.12 --netmask=255.255.255.0 --ipv6=auto --activate
|
||||
network --device=lo --hostname=localhost.localdomain
|
||||
|
||||
rootpw --lock
|
||||
timezone America/New_York --isUtc
|
||||
user --groups=wheel --name=sysadmin --password=$6$c3gaCcJlh.rp//Yx$/mIjNNoUDS1qZldBL29YSJdsA9ttPA/nXN1CPsIcCmionXC22APT3IoRSd9j5dPiZoviDdQf7YxLsOYdieOQr/ --iscrypted --gecos="sysadmin"
|
||||
|
||||
# System bootloader configuration
|
||||
#bootloader --location=mbr --boot-drive=sda
|
||||
|
||||
autopart --type=lvm
|
||||
# Partition clearing information
|
||||
clearpart --all --initlabel --drives=sda
|
||||
|
||||
cdrom
|
||||
#repo --name=base --baseurl=http://mirror.cogentco.com/pub/linux/centos/7/os/x86_64/
|
||||
#url --url="http://mirror.cogentco.com/pub/linux/centos/7/os/x86_64/"
|
||||
|
||||
%packages --nobase --ignoremissing
|
||||
@^minimal
|
||||
@core
|
||||
kexec-tools
|
||||
net-tools
|
||||
# CGTS packages
|
||||
# end CGTS packages
|
||||
%end
|
@ -1,256 +0,0 @@
|
||||
acl
|
||||
alsa-lib
|
||||
audit
|
||||
audit-libs
|
||||
authconfig
|
||||
basesystem
|
||||
bind-libs-lite
|
||||
bind-license
|
||||
binutils
|
||||
biosdevname
|
||||
btrfs-progs
|
||||
bzip2-libs
|
||||
ca-certificates
|
||||
centos-logos
|
||||
chkconfig
|
||||
coreutils
|
||||
cpio
|
||||
cracklib
|
||||
cracklib-dicts
|
||||
cronie
|
||||
cronie-anacron
|
||||
crontabs
|
||||
cryptsetup
|
||||
cryptsetup-libs
|
||||
curl
|
||||
cyrus-sasl-lib
|
||||
dbus
|
||||
dbus-glib
|
||||
dbus-libs
|
||||
dbus-python
|
||||
device-mapper
|
||||
device-mapper-event
|
||||
device-mapper-event-libs
|
||||
device-mapper-libs
|
||||
device-mapper-multipath
|
||||
device-mapper-multipath-libs
|
||||
device-mapper-persistent-data
|
||||
diffutils
|
||||
dmidecode
|
||||
dosfstools
|
||||
dracut
|
||||
dracut-config-rescue
|
||||
dracut-network
|
||||
e2fsprogs
|
||||
e2fsprogs-libs
|
||||
efibootmgr
|
||||
efivar-libs
|
||||
elfutils-libelf
|
||||
elfutils-libs
|
||||
ethtool
|
||||
expat
|
||||
file
|
||||
file-libs
|
||||
filesystem
|
||||
findutils
|
||||
fipscheck
|
||||
fipscheck-lib
|
||||
firewalld
|
||||
freetype
|
||||
gawk
|
||||
gdbm
|
||||
gettext
|
||||
gettext-libs
|
||||
glib2
|
||||
glibc
|
||||
glibc-common
|
||||
glib-networking
|
||||
gmp
|
||||
gnupg2
|
||||
gnutls
|
||||
gobject-introspection
|
||||
gpgme
|
||||
grep
|
||||
groff-base
|
||||
grub2
|
||||
grub2-efi-x64
|
||||
grub2-tools
|
||||
grubby
|
||||
gsettings-desktop-schemas
|
||||
gzip
|
||||
hardlink
|
||||
hostname
|
||||
hwdata
|
||||
info
|
||||
iproute
|
||||
iprutils
|
||||
iptables-ebtables
|
||||
iputils
|
||||
jansson
|
||||
json-c
|
||||
kbd
|
||||
kbd-legacy
|
||||
kbd-misc
|
||||
kernel-tools
|
||||
kernel-tools-libs
|
||||
kexec-tools
|
||||
keyutils-libs
|
||||
kmod
|
||||
kmod-libs
|
||||
kpartx
|
||||
krb5-libs
|
||||
less
|
||||
libacl
|
||||
libaio
|
||||
libassuan
|
||||
libattr
|
||||
libblkid
|
||||
libcap
|
||||
libcap-ng
|
||||
libcom_err
|
||||
libconfig
|
||||
libcroco
|
||||
libcurl
|
||||
libdaemon
|
||||
libdb
|
||||
libdb-utils
|
||||
libdrm
|
||||
libedit
|
||||
libestr
|
||||
libffi
|
||||
libgcc
|
||||
libgcrypt
|
||||
libgomp
|
||||
libgpg-error
|
||||
libgudev1
|
||||
libidn
|
||||
libmnl
|
||||
libmodman
|
||||
libmount
|
||||
libndp
|
||||
libnetfilter_conntrack
|
||||
libnfnetlink
|
||||
libnl
|
||||
libnl3
|
||||
libnl3-cli
|
||||
libpcap
|
||||
libpciaccess
|
||||
libpipeline
|
||||
libproxy
|
||||
libpwquality
|
||||
libreport-filesystem
|
||||
libselinux
|
||||
libselinux-python
|
||||
libselinux-utils
|
||||
libsemanage
|
||||
libsepol
|
||||
libss
|
||||
libssh2
|
||||
libstdc++
|
||||
libsysfs
|
||||
libtasn1
|
||||
libteam
|
||||
libunistring
|
||||
libuser
|
||||
libutempter
|
||||
libuuid
|
||||
libverto
|
||||
libxml2
|
||||
libxslt
|
||||
linux-firmware
|
||||
lldpad
|
||||
lsscsi
|
||||
lua
|
||||
lvm2
|
||||
lvm2-libs
|
||||
lzo
|
||||
make
|
||||
man-db
|
||||
mariadb-libs
|
||||
mdadm
|
||||
microcode_ctl
|
||||
mokutil
|
||||
mozjs17
|
||||
ncurses
|
||||
ncurses-base
|
||||
ncurses-libs
|
||||
nettle
|
||||
newt
|
||||
newt-python
|
||||
nspr
|
||||
nss
|
||||
nss-softokn
|
||||
nss-softokn-freebl
|
||||
nss-sysinit
|
||||
nss-tools
|
||||
nss-util
|
||||
numactl-libs
|
||||
openscap
|
||||
openscap-scanner
|
||||
openssl
|
||||
openssl-libs
|
||||
os-prober
|
||||
p11-kit
|
||||
p11-kit-trust
|
||||
passwd
|
||||
pciutils-libs
|
||||
pcre
|
||||
pinentry
|
||||
pkgconfig
|
||||
policycoreutils
|
||||
popt
|
||||
procps-ng
|
||||
pth
|
||||
python-gobject-base
|
||||
pygpgme
|
||||
pyliblzma
|
||||
python
|
||||
python-backports
|
||||
python-backports-ssl_match_hostname
|
||||
python-configobj
|
||||
python-decorator
|
||||
python-iniparse
|
||||
python-libs
|
||||
python-perf
|
||||
python-pycurl
|
||||
python-pyudev
|
||||
python2-setuptools
|
||||
python-slip
|
||||
python-slip-dbus
|
||||
python-urlgrabber
|
||||
pyxattr
|
||||
qrencode-libs
|
||||
readline
|
||||
rootfiles
|
||||
rpm
|
||||
rpm-build-libs
|
||||
rpm-libs
|
||||
rpm-python
|
||||
sed
|
||||
shared-mime-info
|
||||
shim-x64
|
||||
slang
|
||||
snappy
|
||||
sqlite
|
||||
systemd
|
||||
systemd-libs
|
||||
systemd-sysv
|
||||
sysvinit-tools
|
||||
tar
|
||||
tcp_wrappers-libs
|
||||
teamd
|
||||
time
|
||||
trousers
|
||||
tzdata
|
||||
ustr
|
||||
util-linux
|
||||
virt-what
|
||||
which
|
||||
xfsprogs
|
||||
xml-common
|
||||
xz
|
||||
xz-libs
|
||||
zlib
|
||||
lksctp-tools
|
||||
boost-thread
|
||||
boost-system
|
@ -1,2 +0,0 @@
|
||||
# Files copied in from /import/mirrors/CentOS/7.2.1511/cloud/x86_64/openstack-kilo
|
||||
|
@ -1,112 +0,0 @@
|
||||
This document describes how to generate a DVD image (.iso) which installs
|
||||
a minimal CentOS installation where the entirety of the installed system is
|
||||
built from the provided source.
|
||||
|
||||
There are three parts to this document:
|
||||
How to build binary RPMs from source RPMS
|
||||
How to build the install disk from the binary RPMS
|
||||
How to install the minimal system
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
How to build the binary RPMs from the source RPMS
|
||||
-------------------------------------------------------------------------------
|
||||
|
||||
(note - building the binary RPMs is expected to take a long time, ~ 15 hours
|
||||
on a typical system)
|
||||
|
||||
The source RPMs in the "srcs" subdirectory are compiled in an environment
|
||||
called "mock" which builds each package in a chroot jail to ensure the output
|
||||
is not influenced by the build system. Mock is controlled by a config file.
|
||||
The example srcs/build.cfg is provided as a starting point, however it does
|
||||
need to be adjusted for your build environment. In particular, the paths and repo
|
||||
locations need to be configured for your system. It is highly recommended that
|
||||
a local mirror of the CentOS repos be used for speed. The example config file
|
||||
is configured to use a localhost HTTP mirror of the CentOS repos.
|
||||
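
One simple way to provide such a mirror (an assumption on our part, not part
of the provided scripts) is to serve the mirror tree over HTTP on the port
that build.cfg expects, for example:

# cd /path/to/mirrors
# python -m SimpleHTTPServer 8088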
|
||||
To build the binary RPMs from the source RPMs change to the "srcs" subdirectory
|
||||
and execute the "build.sh" script.
|
||||
|
||||
# cd srcs
|
||||
# ./build.sh
|
||||
|
||||
This will use build.cfg and mock to compile every source RPM listed in list.txt.
|
||||
The output binary RPMs will be in srcs/results. There will also be success.txt
|
||||
and fail.txt files which list the RPMs that built successfully and those that failed. Debugging why RPMs
|
||||
fail to build is beyond the scope of this document; however, be aware that RPMs
|
||||
often fail in the "check" phase of the build (i.e. the package compiled fine
|
||||
but tests failed). Notably, the python package may fail due to a "test_nis"
|
||||
failure, and the "attr" and "e2fsprogs" packages may or may not fail depending
|
||||
on the host file system used for compilation. These failures may or may not be
|
||||
false positives (for example, the mock environment does not have NIS configured
|
||||
which is why python's test_nis reports a failure -- the code is actually fine,
|
||||
we just can't run the test in the mock environment).
|
||||
|
||||
To disable the check phase, add the line
|
||||
|
||||
config_opts['rpmbuild_opts'] = '--nocheck'
|
||||
|
||||
to build.cfg. You can then run build.sh again with list.txt containing
|
||||
packages which failed:
|
||||
|
||||
# cp list.txt list.txt.orig
|
||||
# cp fail.txt list.txt
|
||||
# ./build.sh
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
How to build the install disk from the binary RPMS
|
||||
-------------------------------------------------------------------------------
|
||||
|
||||
Once srcs/results is populated with binary RPMs, an installation disk can be
|
||||
built. Edit the yum.conf file and place an (arbitrary) path for yum log and
|
||||
cache locations, and make sure that the repository path points to srcs/results.
|
||||
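For example, the lines of interest in yum.conf (the paths shown here are
placeholders for your own build tree) look something like:

cachedir=/path/to/yum/cache
logfile=/path/to/yum/yum.log
baseurl=file:///path/to/srcs/results
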
Run the build_centos.sh script to build the installation DVD:
|
||||
|
||||
# ./build_centos.sh
|
||||
|
||||
Scroll up the output to the top of the "Spawning worker" messages. You should
|
||||
observe a line indicating that there are no remaining unresolved dependencies:
|
||||
|
||||
...
|
||||
Installing PKG=dhcp-common PKG_FILE=dhcp-common-4.2.5-42.el7.centos.tis.1.x86_64.rpm PKG_REL_PATH=dhcp-common-4.2.5-42.el7.centos.tis.1.x86_64.rpm PKG_PATH=/localdisk/loadbuild/jmckenna/centos/srcs/results/dhcp-common-4.2.5-42.el7.centos.tis.1.x86_64.rpm from repo local-std
|
||||
dhcp-common
|
||||
Debug: Packages still unresolved:
|
||||
|
||||
Spawning worker 0 with 4 pkgs
|
||||
Spawning worker 1 with 4 pkgs
|
||||
Spawning worker 2 with 4 pkgs
|
||||
...
|
||||
|
||||
This is your confirmation that all required packages were found and installed
|
||||
on the ISO. You should also now see a new file called "centosIso.iso":
|
||||
|
||||
# ls -l centosIso.iso
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
How to install the minimal system
|
||||
-------------------------------------------------------------------------------
|
||||
|
||||
The centosIso.iso file can be burned to a DVD or booted in a virtual
|
||||
environment. It is configured to self-install on boot. After installation,
|
||||
a user with sudo access must be created manually. The system can then be
|
||||
booted.
|
||||
|
||||
Power the system on with the DVD inserted. A system install will take place
|
||||
(takes approximately 2 minutes). The system will then report an error and
|
||||
ask you if you wish to report a bug, debug, or quit. Hit control-alt-F2 to
|
||||
switch to a terminal window. Enter the following commands to change to the
|
||||
installed system root, and create a user (sysadmin) with sudo access:
|
||||
|
||||
cd /mnt/sysimage
|
||||
chroot .
|
||||
groupadd -r wrs
|
||||
groupadd -f -g 345 sys_protected
|
||||
useradd -m -g wrs -G root,sys_protected,wheel -d /home/sysadmin -p cBglipPpsKwBQ -s /bin/sh sysadmin
|
||||
exit
|
||||
|
||||
Change back to the main window with control-alt-F1.
|
||||
Hit 3 <enter> (the "Quit" option). The system will reboot (make sure you eject
|
||||
the DVD or use your BIOS to boot from hard disk rather than DVD; the installer
|
||||
will re-run if the DVD boots again).
|
||||
|
||||
You can log into the system as user "sysadmin" with password "sysadmin".
|
||||
|
@ -1,5 +0,0 @@
|
||||
The files in this directory are to be used as described at
|
||||
http://twiki.wrs.com/PBUeng/DeliveryExtras#Minimal_CentOS_install
|
||||
|
||||
They include the scripts (and customer README) for building a minimal
|
||||
CentOS ISO from our modified sources.
|
@ -1,108 +0,0 @@
|
||||
config_opts['root'] = 'jmckenna-centos/mock'
|
||||
config_opts['target_arch'] = 'x86_64'
|
||||
config_opts['legal_host_arches'] = ('x86_64',)
|
||||
config_opts['chroot_setup_cmd'] = 'install @buildsys-build'
|
||||
config_opts['dist'] = 'el7' # only useful for --resultdir variable subst
|
||||
config_opts['releasever'] = '7'
|
||||
|
||||
config_opts['yum.conf'] = """
|
||||
[main]
|
||||
keepcache=1
|
||||
debuglevel=2
|
||||
reposdir=/dev/null
|
||||
logfile=/var/log/yum.log
|
||||
retries=20
|
||||
obsoletes=1
|
||||
gpgcheck=0
|
||||
assumeyes=1
|
||||
syslog_ident=mock
|
||||
syslog_device=
|
||||
|
||||
# repos
|
||||
[my-build]
|
||||
name=my-build
|
||||
baseurl=http://127.0.0.1:8088/localdisk/loadbuild/centos/src/results
|
||||
enabled=1
|
||||
skip_if_unavailable=1
|
||||
metadata_expire=0
|
||||
|
||||
[base]
|
||||
name=CentOS-$releasever - Base
|
||||
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os&infra=$infra
|
||||
#baseurl=http://mirror.centos.org/centos/$releasever/os/$basearch/
|
||||
baseurl=http://127.0.0.1:8088/CentOS/7.2.1511/os/$basearch/
|
||||
gpgcheck=1
|
||||
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
|
||||
|
||||
#released updates
|
||||
[updates]
|
||||
name=CentOS-$releasever - Updates
|
||||
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates&infra=$infra
|
||||
#baseurl=http://mirror.centos.org/centos/$releasever/updates/$basearch/
|
||||
baseurl=http://127.0.0.1:8088/CentOS/7.2.1511/updates/$basearch/
|
||||
gpgcheck=1
|
||||
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
|
||||
|
||||
#additional packages that may be useful
|
||||
[extras]
|
||||
name=CentOS-$releasever - Extras
|
||||
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=extras&infra=$infra
|
||||
#baseurl=http://mirror.centos.org/centos/$releasever/extras/$basearch/
|
||||
baseurl=http://127.0.0.1:8088/CentOS/7.2.1511/extras/$basearch/
|
||||
gpgcheck=1
|
||||
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
|
||||
|
||||
#additional packages that extend functionality of existing packages
|
||||
[centosplus]
|
||||
name=CentOS-$releasever - Plus
|
||||
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=centosplus&infra=$infra
|
||||
#baseurl=http://mirror.centos.org/centos/$releasever/centosplus/$basearch/
|
||||
baseurl=http://127.0.0.1:8088/CentOS/7.2.1511/centosplus/$basearch/
|
||||
gpgcheck=1
|
||||
enabled=1
|
||||
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
|
||||
|
||||
[epel]
|
||||
name=Extra Packages for Enterprise Linux 7 - $basearch
|
||||
baseurl=http://download.fedoraproject.org/pub/epel/7/$basearch
|
||||
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch
|
||||
failovermethod=priority
|
||||
enabled=1
|
||||
gpgcheck=1
|
||||
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
|
||||
|
||||
[epel-debuginfo]
|
||||
name=Extra Packages for Enterprise Linux 7 - $basearch - Debug
|
||||
baseurl=http://download.fedoraproject.org/pub/epel/7/$basearch/debug
|
||||
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-7&arch=$basearch
|
||||
failovermethod=priority
|
||||
enabled=0
|
||||
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
|
||||
gpgcheck=1
|
||||
|
||||
[epel-source]
|
||||
name=Extra Packages for Enterprise Linux 7 - $basearch - Source
|
||||
baseurl=http://download.fedoraproject.org/pub/epel/7/SRPMS
|
||||
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-7&arch=$basearch
|
||||
failovermethod=priority
|
||||
enabled=1
|
||||
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
|
||||
gpgcheck=1
|
||||
|
||||
|
||||
"""
|
||||
config_opts['environment']['BUILD_BY'] = 'jmckenna'
|
||||
config_opts['environment']['BUILD_DATE'] = '2016-10-31 14:27:28 -0400'
|
||||
config_opts['environment']['REPO'] = '/localdisk/designer/jmckenna/dev0019/cgcs-root'
|
||||
config_opts['environment']['WRS_GIT_BRANCH'] = 'CGCS_DEV_0019'
|
||||
config_opts['environment']['CGCS_GIT_BRANCH'] = 'CGCS_DEV_0019'
|
||||
config_opts['macros']['%_no_cgcs_license_check'] = '1'
|
||||
config_opts['macros']['%_tis_build_type'] = 'std'
|
||||
config_opts['chroot_setup_cmd'] = 'install @buildsys-build pigz lbzip2 yum shadow-utils rpm-build lbzip2 gcc glibc-headers make gcc-c++ java-devel'
|
||||
config_opts['macros']['%__gzip'] = '/usr/bin/pigz'
|
||||
config_opts['macros']['%__bzip2'] = '/usr/bin/lbzip2'
|
||||
config_opts['macros']['%_patch_confdir'] = '%{_sysconfdir}/patching'
|
||||
config_opts['macros']['%_patch_scripts'] = '%{_patch_confdir}/patch-scripts'
|
||||
config_opts['macros']['%_runtime_patch_scripts'] = '/run/patching/patch-scripts'
|
||||
config_opts['macros']['%_tis_dist'] = '.tis'
|
||||
#config_opts['rpmbuild_opts'] = '--nocheck'
|
@ -1,64 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
CREATEREPO=$(which createrepo_c)
|
||||
if [ $? -ne 0 ]; then
|
||||
CREATEREPO="createrepo"
|
||||
fi
|
||||
|
||||
# For backward compatibility. Old repo location or new?
|
||||
CENTOS_REPO=${MY_REPO}/centos-repo
|
||||
if [ ! -d ${CENTOS_REPO} ]; then
|
||||
CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
|
||||
if [ ! -d ${CENTOS_REPO} ]; then
|
||||
echo "ERROR: directory ${MY_REPO}/centos-repo not found."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
LOCAL_REPO=${MY_REPO}/local-repo
|
||||
if [ ! -d ${LOCAL_REPO} ]; then
|
||||
LOCAL_REPO=${MY_REPO}/cgcs-tis-repo
|
||||
if [ ! -d ${LOCAL_REPO} ]; then
|
||||
# This one isn't fatal, LOCAL_REPO is not required
|
||||
LOCAL_REPO=${MY_REPO}/local-repo
|
||||
fi
|
||||
fi
|
||||
|
||||
# If a file listed in list.txt is missing, this function attempts to find the
|
||||
# RPM and copy it to the local directory. This should not be required normally
|
||||
# and is only used when collecting the source RPMs initially.
|
||||
function findSrc {
|
||||
local lookingFor=$1
|
||||
find ${CENTOS_REPO}/Source -name $lookingFor | xargs -I '{}' cp '{}' .
|
||||
find ${LOCAL_REPO}/Source -name $lookingFor | xargs -I '{}' cp '{}' .
|
||||
find $MY_WORKSPACE/std/rpmbuild/SRPMS -name $lookingFor | xargs -I '{}' cp '{}' .
|
||||
}
|
||||
|
||||
rm -f success.txt
|
||||
rm -f fail.txt
|
||||
rm -f missing.txt
|
||||
mkdir -p results
|
||||
infile=list.txt
|
||||
|
||||
while read p; do
|
||||
|
||||
if [ ! -f "$p" ]; then
|
||||
findSrc $p
|
||||
if [ ! -f "$p" ]; then
|
||||
echo "couldn't find" >> missing.txt
|
||||
echo "couldn't find $p" >> missing.txt
|
||||
continue
|
||||
fi
|
||||
echo "found $p"
|
||||
fi
|
||||
|
||||
mock -r build.cfg $p --resultdir=results --no-clean
|
||||
if [ $? -eq 0 ]; then
|
||||
echo "$p" >> success.txt
|
||||
cd results
|
||||
$CREATEREPO .
|
||||
cd ..
|
||||
else
|
||||
echo "$p" >> fail.txt
|
||||
fi
|
||||
done < $infile
|
@ -1,62 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Build a basic CentOS system
|
||||
|
||||
CREATEREPO=$(which createrepo_c)
|
||||
if [ $? -ne 0 ]; then
|
||||
CREATEREPO="createrepo"
|
||||
fi
|
||||
|
||||
function final_touches {
|
||||
# create the repo
|
||||
cd ${ROOTDIR}/${DEST}/isolinux
|
||||
$CREATEREPO -g ../comps.xml .
|
||||
|
||||
# build the ISO
|
||||
printf "Building image $OUTPUT_FILE\n"
|
||||
cd ${ROOTDIR}/${DEST}
|
||||
chmod 664 isolinux/isolinux.bin
|
||||
mkisofs -o $OUTPUT_FILE \
|
||||
-R -D -A 'oe_iso_boot' -V 'oe_iso_boot' \
|
||||
-b isolinux.bin -c boot.cat -no-emul-boot \
|
||||
-boot-load-size 4 -boot-info-table \
|
||||
-eltorito-alt-boot \
|
||||
-e images/efiboot.img \
|
||||
-no-emul-boot \
|
||||
isolinux/
|
||||
|
||||
isohybrid --uefi $OUTPUT_FILE
|
||||
implantisomd5 $OUTPUT_FILE
|
||||
|
||||
cd $ROOTDIR
|
||||
}
|
||||
|
||||
function setup_disk {
|
||||
tar xJf emptyInstaller.tar.xz
|
||||
mkdir ${DEST}/isolinux/Packages
|
||||
}
|
||||
|
||||
function install_packages {
|
||||
cd ${DEST}/isolinux/Packages
|
||||
ROOT=${ROOTDIR} ../../../cgts_deps.sh --deps=../../../${MINIMAL}
|
||||
cd ${ROOTDIR}
|
||||
}
|
||||
|
||||
|
||||
ROOTDIR=$PWD
|
||||
INSTALLER_SRC=basicDisk
|
||||
DEST=newDisk
|
||||
PKGS_DIR=all_rpms
|
||||
MINIMAL=minimal_rpm_list.txt
|
||||
OUTPUT_FILE=${ROOTDIR}/centosIso.iso
|
||||
|
||||
# Make a basic install disk (no packages, at this point)
|
||||
rm -rf ${DEST}
|
||||
setup_disk
|
||||
|
||||
# install the packages (initially from minimal list, then resolve deps)
|
||||
install_packages
|
||||
|
||||
# build the .iso
|
||||
final_touches
|
||||
|
@ -1,265 +0,0 @@
|
||||
#!/bin/env bash
|
||||
|
||||
#
|
||||
# Copyright (c) 2018-2020 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
CGTS_DEPS_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )"
|
||||
|
||||
# Set REPOQUERY, REPOQUERY_SUB_COMMAND, REPOQUERY_RESOLVE and
|
||||
# REPOQUERY_WHATPROVIDES_DELIM for our build environment.
|
||||
source ${CGTS_DEPS_DIR}/../pkg-manager-utils.sh
|
||||
|
||||
function generate_dep_list {
|
||||
TMP_RPM_DB=$(mktemp -d $(pwd)/tmp_rpm_db_XXXXXX)
|
||||
mkdir -p $TMP_RPM_DB
|
||||
rpm --initdb --dbpath $TMP_RPM_DB
|
||||
rpm --dbpath $TMP_RPM_DB --test -Uvh --replacefiles '*.rpm' > $DEPLISTFILE_NEW 2>&1
|
||||
cat $DEPLISTFILE_NEW >> $DEPDETAILLISTFILE
|
||||
cat $DEPLISTFILE_NEW \
|
||||
| grep -v -e "error:" -e "warning:" -e "Preparing..." \
|
||||
-e "Verifying..." -e "installing package" \
|
||||
| sed -e "s/ is needed by.*$//" -e "s/ [<=>].*$//" \
|
||||
| sort -u > $DEPLISTFILE
|
||||
\rm -rf $TMP_RPM_DB
|
||||
}
|
||||
|
||||
join_array() {
|
||||
local IFS="$1"
|
||||
shift
|
||||
echo "$*"
|
||||
}
|
||||
|
||||
function install_deps {
|
||||
local DEP_LIST=""
|
||||
local DEP_LIST_ARRAY=()
|
||||
local DEP_LIST_FILE="$1"
|
||||
|
||||
rm -f $TMPFILE
|
||||
|
||||
while read DEP
|
||||
do
|
||||
DEP_LIST_ARRAY+=( "${DEP}" )
|
||||
done < $DEP_LIST_FILE
|
||||
|
||||
if [ "${REPOQUERY_WHATPROVIDES_DELIM}" != " " ]; then
|
||||
DEP_LIST_ARRAY=( "$(join_array "${REPOQUERY_WHATPROVIDES_DELIM}" "${DEP_LIST_ARRAY[@]}" )" )
|
||||
fi
|
||||
|
||||
echo "Debug: List of deps to resolve: ${DEP_LIST_ARRAY[@]}"
|
||||
|
||||
if [ ${#DEP_LIST_ARRAY[@]} -eq 0 ]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
# go through each repo and convert deps to packages
|
||||
for REPOID in `grep '^[[].*[]]$' $YUM | grep -v '[[]main[]]' | awk -F '[][]' '{print $2 }'`; do
|
||||
echo "TMPDIR=${TMP_DIR}"\
|
||||
"${REPOQUERY} --config=${YUM} --repoid=${REPOID}"\
|
||||
"${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch"\
|
||||
"--qf='%{name}' --whatprovides ${DEP_LIST_ARRAY[@]}"
|
||||
TMPDIR=${TMP_DIR} \
|
||||
${REPOQUERY} --config=${YUM} --repoid=${REPOID} \
|
||||
${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch \
|
||||
--qf='%{name}' --whatprovides ${DEP_LIST_ARRAY[@]} \
|
||||
| sed "s/kernel-debug/kernel/g" >> $TMPFILE
|
||||
\rm -rf $TMP_DIR/yum-$USER-*
|
||||
done
|
||||
sort $TMPFILE -u > $TMPFILE1
|
||||
rm $TMPFILE
|
||||
|
||||
DEP_LIST=""
|
||||
while read DEP
|
||||
do
|
||||
DEP_LIST+="${DEP} "
|
||||
done < $TMPFILE1
|
||||
rm $TMPFILE1
|
||||
|
||||
# next go through each repo and install packages
|
||||
local TARGETS="${DEP_LIST}"
|
||||
echo "Debug: Resolved list of deps to install: ${TARGETS}"
|
||||
local UNRESOLVED
|
||||
for REPOID in `grep '^[[].*[]]$' $YUM | grep -v '[[]main[]]' | awk -F '[][]' '{print $2 }'`; do
|
||||
UNRESOLVED=" $TARGETS "
|
||||
|
||||
if [[ ! -z "${TARGETS// }" ]]; then
|
||||
REPO_PATH=$(cat $YUM | sed -n "/^\[$REPOID\]\$/,\$p" | grep '^baseurl=' | head -n 1 | awk -F 'file://' '{print $2}' | sed 's:/$::')
|
||||
>&2 echo "TMPDIR=${TMP_DIR}"\
|
||||
"${REPOQUERY} --config=${YUM} --repoid=${REPOID}"\
|
||||
"${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch"\
|
||||
"--qf='%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}'"\
|
||||
"${REPOQUERY_RESOLVE} ${TARGETS}"
|
||||
TMPDIR=${TMP_DIR} \
|
||||
${REPOQUERY} --config=${YUM} --repoid=${REPOID} \
|
||||
${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch \
|
||||
--qf="%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}" \
|
||||
${REPOQUERY_RESOLVE} ${TARGETS} \
|
||||
| sort -r -V >> $TMPFILE
|
||||
|
||||
\rm -rf $TMP_DIR/yum-$USER-*
|
||||
|
||||
while read STR
|
||||
do
|
||||
>&2 echo "STR=$STR"
|
||||
if [ "x$STR" == "x" ]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
PKG=`echo $STR | cut -d " " -f 1`
|
||||
PKG_FILE=`echo $STR | cut -d " " -f 2`
|
||||
PKG_REL_PATH=`echo $STR | cut -d " " -f 3`
|
||||
PKG_PATH="${REPO_PATH}/${PKG_REL_PATH}"
|
||||
|
||||
>&2 echo "Installing PKG=$PKG PKG_FILE=$PKG_FILE PKG_REL_PATH=$PKG_REL_PATH PKG_PATH=$PKG_PATH from repo $REPOID"
|
||||
cp $PKG_PATH .
|
||||
if [ $? -ne 0 ]; then
|
||||
>&2 echo " Here's what I have to work with..."
|
||||
>&2 echo " TMPDIR=${TMP_DIR}"\
|
||||
"${REPOQUERY} -c ${YUM} --repoid=${REPOID}"\
|
||||
"${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch"\
|
||||
"--qf=\"%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}\""\
|
||||
"${REPOQUERY_RESOLVE} ${PKG}"
|
||||
>&2 echo " PKG=$PKG PKG_FILE=$PKG_FILE REPO_PATH=$REPO_PATH PKG_REL_PATH=$PKG_REL_PATH PKG_PATH=$PKG_PATH"
|
||||
fi
|
||||
|
||||
echo $UNRESOLVED | grep $PKG
|
||||
echo $UNRESOLVED | grep $PKG >> /dev/null
|
||||
if [ $? -eq 0 ]; then
|
||||
echo "$PKG found in $REPOID as $PKG" >> $BUILT_REPORT
|
||||
echo "$PKG_PATH" >> $BUILT_REPORT
|
||||
UNRESOLVED=$(echo "$UNRESOLVED" | sed "s# $PKG # #g")
|
||||
else
|
||||
echo "$PKG satisfies unknown target in $REPOID" >> $BUILT_REPORT
|
||||
echo " but it doesn't match targets, $UNRESOLVED" >> $BUILT_REPORT
|
||||
echo " path $PKG_PATH" >> $BUILT_REPORT
|
||||
FOUND_UNKNOWN=1
|
||||
fi
|
||||
done < $TMPFILE
|
||||
|
||||
\rm -rf $TMP_DIR/yum-$USER-*
|
||||
TARGETS="$UNRESOLVED"
|
||||
fi
|
||||
done
|
||||
>&2 echo "Debug: Packages still unresolved: $UNRESOLVED"
|
||||
echo "Debug: Packages still unresolved: $UNRESOLVED" >> $WARNINGS_REPORT
|
||||
echo "Debug: Packages still unresolved: $UNRESOLVED" >> $BUILT_REPORT
|
||||
>&2 echo ""
|
||||
}
|
||||
|
||||
function check_all_explicit_deps_installed {
|
||||
|
||||
PKGS_TO_CHECK=" "
|
||||
while read PKG_TO_ADD
|
||||
do
|
||||
PKGS_TO_CHECK="$PKGS_TO_CHECK ${PKG_TO_ADD}"
|
||||
done < $DEPLISTFILE
|
||||
rpm -qp ${INSTALLDIR}/*.rpm --qf="%{name}\n" > $TMPFILE
|
||||
|
||||
echo "checking... $PKGS_TO_CHECK vs ${INSTALLED_PACKAGE}"
|
||||
|
||||
while read INSTALLED_PACKAGE
|
||||
do
|
||||
echo $PKGS_TO_CHECK | grep -q "${INSTALLED_PACKAGE}"
|
||||
if [ $? -eq 0 ]; then
|
||||
PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/^${INSTALLED_PACKAGE} //"`
|
||||
PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/ ${INSTALLED_PACKAGE} / /"`
|
||||
PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/ ${INSTALLED_PACKAGE}\$//"`
|
||||
PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/^${INSTALLED_PACKAGE}\$//"`
|
||||
fi
|
||||
done < $TMPFILE
|
||||
|
||||
if [ -z "$PKGS_TO_CHECK" ]; then
|
||||
>&2 echo "All explicitly specified packages resolved!"
|
||||
else
|
||||
>&2 echo "Could not resolve packages: $PKGS_TO_CHECK"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
if [ "x${ROOT}" == "x" ]; then
|
||||
ROOT=/localdisk/loadbuild/centos
|
||||
fi
|
||||
|
||||
ATTEMPTED=0
|
||||
DISCOVERED=0
|
||||
OUTPUT_DIR=${ROOT}/newDisk
|
||||
YUM=${ROOT}/yum.conf
|
||||
TMP_DIR=${ROOT}/tmp
|
||||
DEPLISTFILE=${ROOT}/deps.txt
|
||||
DEPLISTFILE_NEW=${ROOT}/deps_new.txt
|
||||
DEPDETAILLISTFILE=${ROOT}/deps_detail.txt
|
||||
INSTALLDIR=${ROOT}/newDisk/isolinux/Packages
|
||||
|
||||
BUILT_REPORT=${ROOT}/local.txt
|
||||
WARNINGS_REPORT=${ROOT}/warnings.txt
|
||||
LAST_TEST=${ROOT}/last_test.txt
|
||||
TMPFILE=${ROOT}/cgts_deps_tmp.txt
|
||||
TMPFILE1=${ROOT}/cgts_deps_tmp1.txt
|
||||
|
||||
touch "$BUILT_REPORT"
|
||||
touch "$WARNINGS_REPORT"
|
||||
|
||||
for i in "$@"
|
||||
do
|
||||
case $i in
|
||||
-d=*|--deps=*)
|
||||
DEPS="${i#*=}"
|
||||
shift # past argument=value
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
mkdir -p $TMP_DIR
|
||||
|
||||
rm -f "$DEPDETAILLISTFILE"
|
||||
# FIRST PASS we are being given a list of REQUIRED dependencies
|
||||
if [ "${DEPS}x" != "x" ]; then
|
||||
cat $DEPS | grep -v "^#" > $DEPLISTFILE
|
||||
install_deps $DEPLISTFILE
|
||||
if [ $? -ne 0 ]; then
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# check that we resolved them all
|
||||
check_all_explicit_deps_installed
|
||||
if [ $? -ne 0 ]; then
|
||||
>&2 echo "Error -- could not install all explicitly listed packages"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
ALL_RESOLVED=0
|
||||
|
||||
while [ $ALL_RESOLVED -eq 0 ]; do
|
||||
cp $DEPLISTFILE $DEPLISTFILE.old
|
||||
generate_dep_list
|
||||
if [ ! -s $DEPLISTFILE ]; then
|
||||
# no more dependencies!
|
||||
ALL_RESOLVED=1
|
||||
else
|
||||
DIFFLINES=`diff $DEPLISTFILE.old $DEPLISTFILE | wc -l`
|
||||
if [ $DIFFLINES -eq 0 ]; then
|
||||
>&2 echo "Warning: Infinite loop detected in dependency resolution. See $DEPLISTFILE for details -- exiting"
|
||||
>&2 echo "These RPMS had problems (likely version conflicts)"
|
||||
>&2 cat $DEPLISTFILE
|
||||
|
||||
echo "Warning: Infinite loop detected in dependency resolution See $DEPLISTFILE for details -- exiting" >> $WARNINGS_REPORT
|
||||
echo "These RPMS had problems (likely version conflicts)" >> $WARNINGS_REPORT
|
||||
cat $DEPLISTFILE >> $WARNINGS_REPORT
|
||||
|
||||
date > $LAST_TEST
|
||||
|
||||
rm -f $DEPLISTFILE.old
|
||||
exit 1 # nothing fixed
|
||||
fi
|
||||
install_deps $DEPLISTFILE
|
||||
if [ $? -ne 0 ]; then
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
exit 0
|
@ -1,22 +0,0 @@
|
||||
|
||||
[main]
|
||||
cachedir=/localdisk/loadbuild/jmckenna/centos/yum/cache
|
||||
keepcache=1
|
||||
debuglevel=2
|
||||
reposdir=/dev/null
|
||||
logfile=/localdisk/loadbuild/jmckenna/centos/yum/yum.log
|
||||
retries=20
|
||||
obsoletes=1
|
||||
gpgcheck=0
|
||||
assumeyes=1
|
||||
syslog_ident=mock
|
||||
syslog_device=
|
||||
|
||||
# repos
|
||||
[local-std]
|
||||
name=local-std
|
||||
baseurl=file:///localdisk/loadbuild/jmckenna/centos/srcs/results
|
||||
enabled=1
|
||||
skip_if_unavailable=1
|
||||
metadata_expire=0
|
||||
|
@ -1,55 +0,0 @@
|
||||
classify () {
|
||||
local pkg_dir="$1"
|
||||
|
||||
if [ -f $pkg_dir/centos/srpm_path ]; then
|
||||
# echo "srpm + patch: $(basename $(cat $pkg_dir/centos/srpm_path | head -n 1))"
|
||||
echo "srpm + patches"
|
||||
elif [ -f $pkg_dir/centos/*.spec ]; then
|
||||
if [ -f $pkg_dir/centos/build_srpm ]; then
|
||||
# echo "spec + custom_script: $pkg_dir"
|
||||
echo "spec + custom_script"
|
||||
elif [ -f $pkg_dir/centos/build_srpm.data ]; then
|
||||
local ALLOW_EMPTY_RPM=""
|
||||
local COPY_LIST=""
|
||||
local SRC_DIR=""
|
||||
local PKG_BASE="$pkg_dir"
|
||||
source $pkg_dir/centos/build_srpm.data
|
||||
|
||||
if [ "" != "$SRC_DIR" ] ; then
|
||||
# echo "spec + src_dir: $pkg_dir/$SRC_DIR"
|
||||
echo "spec + src_dir"
|
||||
elif [ "" != "$COPY_LIST" ] ; then
|
||||
local TARBALL=""
|
||||
for f in $COPY_LIST; do
|
||||
case $f in
|
||||
*.tar.gz) TARBALL=$f ;;
|
||||
*.tgz) TARBALL=$f ;;
|
||||
*.tar.bz2) TARBALL=$f ;;
|
||||
*.tar.xz) TARBALL=$f ;;
|
||||
*.tar) TARBALL=$f ;;
|
||||
esac
|
||||
done
|
||||
if [ "" != "$TARBALL" ]; then
|
||||
# echo "spec + tarball: $pkg_dir/$TARBALL"
|
||||
echo "spec + tarball"
|
||||
else
|
||||
# echo "spec + files: $pkg_dir"
|
||||
echo "spec + files"
|
||||
fi
|
||||
elif [ "$ALLOW_EMPTY_RPM" == "true" ] ; then
|
||||
# echo "spec + empty: $pkg_dir"
|
||||
echo "spec + empty"
|
||||
else
|
||||
# echo "spec + build_srpm.data + unknown: $pkg_dir"
|
||||
# cat $pkg_dir/centos/build_srpm.data
|
||||
echo "spec + build_srpm.data + unknown"
|
||||
fi
|
||||
else
|
||||
# echo "spec + unknown: $pkg_dir"
|
||||
echo "spec + unknown"
|
||||
fi
|
||||
else
|
||||
# echo "unknown: $pkg_dir"
|
||||
echo "unknown"
|
||||
fi
|
||||
}
|
@ -1,87 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
#
|
||||
# usage: create-yum-conf [<layer>]
|
||||
#
|
||||
|
||||
LAYER=${1:-$LAYER}
|
||||
|
||||
if [ "$MY_WORKSPACE" == "" ]; then
|
||||
echo "ERROR: MY_WORKSPACE not defined"
|
||||
exit 1;
|
||||
fi
|
||||
|
||||
if [ "$MY_REPO" == "" ]; then
|
||||
echo "ERROR: MY_REPO not defined"
|
||||
exit 1;
|
||||
fi
|
||||
|
||||
if [ "$MY_BUILD_ENVIRONMENT" == "" ]; then
|
||||
echo "ERROR: MY_BUILD_ENVIRONMENT not defined"
|
||||
exit 1;
|
||||
fi
|
||||
|
||||
if [ "$MY_BUILD_DIR" == "" ]; then
|
||||
echo "ERROR: MY_BUILD_DIR not defined"
|
||||
exit 1;
|
||||
fi
|
||||
|
||||
MY_YUM_CONF="$MY_WORKSPACE/yum.conf"
|
||||
YUM_DIR="$MY_WORKSPACE/yum"
|
||||
YUM_CACHE="$YUM_DIR/cache"
|
||||
|
||||
# For backward compatibility. Old repo location or new?
|
||||
CENTOS_REPO=${MY_REPO}/centos-repo
|
||||
if [ ! -d ${CENTOS_REPO} ]; then
|
||||
CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
|
||||
if [ ! -d ${CENTOS_REPO} ]; then
|
||||
echo "ERROR: directory ${MY_REPO}/centos-repo not found."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Try to find a layer specific mock.cfg.proto
|
||||
MOCK_CFG_PROTO="${CENTOS_REPO}/mock.cfg.${LAYER}.proto"
|
||||
if [ ! -f "$MOCK_CFG_PROTO" ]; then
|
||||
# Not present, Use default mock.cfg.proto
|
||||
MOCK_CFG_PROTO="${CENTOS_REPO}/mock.cfg.proto"
|
||||
fi
|
||||
|
||||
|
||||
if [ -f "$MOCK_CFG_PROTO" ]; then
|
||||
if [ -f "$MY_YUM_CONF" ]; then
|
||||
N=$(find $MOCK_CFG_PROTO $MY_REPO/build-tools/create-yum-conf -cnewer $MY_YUM_CONF | wc -l)
|
||||
if [ $N -gt 0 ]; then
|
||||
# New inputs, remove to force regeneration of yum.conf
|
||||
\rm -f "$MY_YUM_CONF"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ! -f "$MY_YUM_CONF" ]; then
|
||||
if [ -f "$MOCK_CFG_PROTO" ]; then
|
||||
mock_cfg_to_yum_conf.py "$MOCK_CFG_PROTO" > "$MY_YUM_CONF"
|
||||
sed -i "s%\[main\]%&\ncachedir=$YUM_CACHE%" "$MY_YUM_CONF"
|
||||
sed -i "s%logfile=.*%logfile=$YUM_DIR/yum.log%" "$MY_YUM_CONF"
|
||||
# eg: LOCAL_BASE/MY_BUILD_DIR => file:///MY_BUILD_DIR
|
||||
sed -i "s%LOCAL_BASE%file://%g" "$MY_YUM_CONF"
|
||||
sed -i "s%MIRROR_BASE%file:///import/mirrors%g" "$MY_YUM_CONF"
|
||||
sed -i "s%BUILD_ENV%$MY_BUILD_ENVIRONMENT%g" "$MY_YUM_CONF"
|
||||
# eg: file:///MY_BUILD_DIR => file:///localdisk/loadbuild/...
|
||||
sed -i "s%/MY_BUILD_DIR%$MY_BUILD_DIR%g" "$MY_YUM_CONF"
|
||||
sed -i "s%/MY_REPO_DIR%$MY_REPO%g" "$MY_YUM_CONF"
|
||||
# eg = MY_BUILD_DIR/xyz => /localdisk/loadbuild/.../xyz
|
||||
sed -i "s%MY_BUILD_DIR%$MY_BUILD_DIR%g" "$MY_YUM_CONF"
|
||||
sed -i "s%MY_REPO_DIR%$MY_REPO%g" "$MY_YUM_CONF"
|
||||
else
|
||||
echo "ERROR: Could not find yum.conf or MOCK_CFG_PROTO"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ! -d "$YUM_CACHE" ]; then
|
||||
mkdir -p "$YUM_CACHE"
|
||||
fi
|
||||
|
||||
echo "$MY_YUM_CONF"
|
||||
exit 0
|
@ -1,716 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
#
|
||||
# Copyright (c) 2018 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
#
|
||||
# Create an RPM dependency cache from the RPMS found in
|
||||
# 1) $MY_REPO/centos-repo
|
||||
# 2) $MY_WORKSPACE/$BUILD_TYPE/rpmbuild/
|
||||
#
|
||||
# Cache files are written to $MY_REPO/local-repo/dependancy-cache
|
||||
# unless an alternate path is supplied.
|
||||
#
|
||||
# The cache is a set of files that are easily digested by
|
||||
# common shell script tools. Each file has format
|
||||
# <rpm-name>;<comma-separated-list-of-rpm-names>
|
||||
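# For example (package names purely illustrative), a line in
# RPM-direct-requires might read:
#   bash;glibc,ncurses-libs,readline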
#
|
||||
# The files created are:
|
||||
# RPM-direct-descendants RPMS that have a direct Requires on X
|
||||
# RPM-transitive-descendants RPMS that have a possibly indirect need for X
|
||||
#
|
||||
# RPM-direct-requires RPMS directly Required by X
|
||||
# RPM-transitive-requires RPMS possibly indirectly Required by X
|
||||
#
|
||||
# SRPM-direct-descendants SRPMS whose RPMS have a direct Requires on RPMS built by X
|
||||
# SRPM-transitive-descendants SRPMS whose RPMS have a possibly indirect need for RPMS built by X
|
||||
#
|
||||
# SRPM-direct-requires SRPMS whose RPMS satisfy a direct BuildRequires of X
|
||||
# SRPM-transitive-requires SRPMS whose RPMS satisfy an indirect BuildRequires of X
|
||||
#
|
||||
# SRPM-direct-requires-rpm RPMS that satisfy a direct BuildRequires of X
|
||||
# SRPM-transitive-requires-rpm RPMS that satisfy an indirect BuildRequires of X
|
||||
#
|
||||
# rpm-to-srpm Map RPM back to the SRPM that created it
|
||||
# srpm-to-rpm Map a SRPM to the set of RPMS it builds
|
||||
#
|
||||
|
||||
import xml.etree.ElementTree as ET
|
||||
import fnmatch
|
||||
import os
|
||||
import shutil
|
||||
import gzip
|
||||
import sys
|
||||
import string
|
||||
from optparse import OptionParser
|
||||
|
||||
ns = { 'root': 'http://linux.duke.edu/metadata/common',
|
||||
'filelists': 'http://linux.duke.edu/metadata/filelists',
|
||||
'rpm': 'http://linux.duke.edu/metadata/rpm' }
|
||||
|
||||
build_types=['std', 'rt']
|
||||
rpm_types=['RPM', 'SRPM']
|
||||
default_arch = 'x86_64'
|
||||
default_arch_list = [ 'x86_64', 'noarch' ]
|
||||
default_arch_by_type = {'RPM': [ 'x86_64', 'noarch' ],
|
||||
'SRPM': [ 'src' ]
|
||||
}
|
||||
|
||||
repodata_dir="/export/jenkins/mirrors"
|
||||
if not os.path.isdir(repodata_dir):
|
||||
repodata_dir="/import/mirrors"
|
||||
if not os.path.isdir(repodata_dir):
|
||||
print("ERROR: directory not found %s" % repodata_dir)
|
||||
sys.exit(1)
|
||||
|
||||
old_cache_dir="%s/cgcs-tis-repo/dependancy-cache" % os.environ['MY_REPO']
|
||||
publish_cache_dir="%s/local-repo/dependancy-cache" % os.environ['MY_REPO']
|
||||
|
||||
workspace_repo_dirs={}
|
||||
for rt in rpm_types:
|
||||
workspace_repo_dirs[rt]={}
|
||||
for bt in build_types:
|
||||
workspace_repo_dirs[rt][bt]="%s/%s/rpmbuild/%sS" % (os.environ['MY_WORKSPACE'], bt, rt)
|
||||
|
||||
if not os.path.isdir(os.environ['MY_REPO']):
|
||||
print("ERROR: directory not found MY_REPO=%s" % os.environ['MY_REPO'])
|
||||
sys.exit(1)
|
||||
|
||||
centos_repo_dir="%s/centos-repo" % os.environ['MY_REPO']
|
||||
if not os.path.isdir(centos_repo_dir):
|
||||
# Test for the old path
|
||||
centos_repo_dir="%s/cgcs-centos-repo" % os.environ['MY_REPO']
|
||||
if not os.path.isdir(centos_repo_dir):
|
||||
# That doesn't exist either
|
||||
centos_repo_dir="%s/centos-repo" % os.environ['MY_REPO']
|
||||
print("ERROR: directory not found %s" % centos_repo_dir)
|
||||
sys.exit(1)
|
||||
|
||||
bin_rpm_mirror_roots = ["%s/Binary" % centos_repo_dir]
|
||||
src_rpm_mirror_roots = ["%s/Source" % centos_repo_dir]
|
||||
|
||||
for bt in build_types:
|
||||
bin_rpm_mirror_roots.append(workspace_repo_dirs['RPM'][bt])
|
||||
src_rpm_mirror_roots.append(workspace_repo_dirs['SRPM'][bt])
|
||||
|
||||
parser = OptionParser('create_dependancy_cache')
|
||||
parser.add_option('-c', '--cache_dir', action='store', type='string',
|
||||
dest='cache_dir', help='set cache directory')
|
||||
parser.add_option('-t', '--third_party_repo_dir', action='store',
|
||||
type='string', dest='third_party_repo_dir',
|
||||
help='set third party directory')
|
||||
(options, args) = parser.parse_args()
|
||||
|
||||
if options.cache_dir:
|
||||
publish_cache_dir = options.cache_dir
|
||||
|
||||
if options.third_party_repo_dir:
|
||||
third_party_repo_dir = options.third_party_repo_dir
|
||||
bin_rpm_mirror_roots.append(third_party_repo_dir)
|
||||
src_rpm_mirror_roots.append(third_party_repo_dir)
|
||||
if not os.path.isdir(third_party_repo_dir):
|
||||
print("ERROR: directory not found %s" % third_party_repo_dir)
|
||||
sys.exit(1)
|
||||
|
||||
# Create directory if required
|
||||
if not os.path.isdir(publish_cache_dir):
|
||||
if os.path.isdir(old_cache_dir):
|
||||
print("Relocating old dependency directory: %s -> %s" % (old_cache_dir, publish_cache_dir))
|
||||
os.makedirs(os.path.abspath(os.path.join(publish_cache_dir, os.pardir)))
|
||||
shutil.move(old_cache_dir, publish_cache_dir)
|
||||
else:
|
||||
print("Creating directory: %s" % publish_cache_dir)
|
||||
os.makedirs(publish_cache_dir, 0o755)
|
||||
|
||||
# The Main data structure
|
||||
pkg_data={}
|
||||
|
||||
for rpm_type in rpm_types:
|
||||
pkg_data[rpm_type]={}
|
||||
|
||||
# map provided_name -> pkg_name
|
||||
pkg_data[rpm_type]['providers']={}
|
||||
|
||||
# map pkg_name -> required_names ... could be a pkg, capability or file
|
||||
pkg_data[rpm_type]['requires']={}
|
||||
|
||||
# map file_name -> pkg_name
|
||||
pkg_data[rpm_type]['file_owners']={}
|
||||
|
||||
# map pkg_name -> file_name
|
||||
pkg_data[rpm_type]['files']={}
|
||||
|
||||
# map pkg_name -> required_pkg_names ... only pkg names, and only direct requirement
|
||||
pkg_data[rpm_type]['pkg_direct_requires']={}
|
||||
|
||||
# map pkg_name -> required_pkg_names ... only pkg names, but this is the transitive list of all requirements
|
||||
pkg_data[rpm_type]['pkg_transitive_requires']={}
|
||||
|
||||
# map pkg_name -> descendant_pkgs ... only packages that directly require this package
|
||||
pkg_data[rpm_type]['pkg_direct_descendants']={}
|
||||
|
||||
# map pkg_name -> descendant_pkgs ... packages that have a transitive requirement on this package
|
||||
pkg_data[rpm_type]['pkg_transitive_descendants']={}
|
||||
|
||||
# Map package name to a source rpm file name
|
||||
pkg_data[rpm_type]['sourcerpm']={}
|
||||
pkg_data[rpm_type]['binrpm']={}
|
||||
|
||||
# Map file name to package name
|
||||
pkg_data[rpm_type]['fn_to_name']={}
|
||||
|
||||
pkg_data['SRPM']['pkg_direct_requires_rpm']={}
|
||||
pkg_data['SRPM']['pkg_transitive_requires_rpm']={}
|
||||
|
||||
|
||||
# Return a list of file paths, starting in 'dir', matching 'pattern'
|
||||
# dir= directory to search under
|
||||
# pattern= search for file or directory matching pattern, wildcards allowed
|
||||
# recursive_depth= how many directory levels to descend before giving up
|
||||
def file_search(dir, pattern, recursive_depth=0):
|
||||
match_list = []
|
||||
new_depth = recursive_depth - 1
|
||||
# print "file_search(%s,%s,%s)" % (dir, pattern, recursive_depth)
|
||||
for file in os.listdir(dir):
|
||||
path = "%s/%s" % (dir, file)
|
||||
if fnmatch.fnmatch(file, pattern):
|
||||
print(path)
|
||||
match_list.append(path)
|
||||
elif (recursive_depth > 0) and os.path.isdir(path):
|
||||
sub_list = []
|
||||
sub_list = file_search(path, pattern, recursive_depth=new_depth)
|
||||
match_list.extend(sub_list)
|
||||
return match_list
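# Example (illustrative, hypothetical path): collect every 'repodata'
# directory up to three levels below a mirror root.
#   repodata_dirs = file_search('/path/to/mirror', 'repodata', recursive_depth=3)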
|
||||
|
||||
# Return the list of .../repodata/*primary.xml.gz files
|
||||
# rpm_type= 'RPM' or 'SRPM'
|
||||
# arch_list= e.g. [ 'x86_64', 'noarch' ], only relevant if rpm_type=='RPM'
|
||||
def get_repo_primary_data_list(rpm_type='RPM', arch_list=default_arch_list):
|
||||
rpm_repodata_roots = []
|
||||
repodata_list = []
|
||||
|
||||
if rpm_type == 'RPM':
|
||||
for d in bin_rpm_mirror_roots:
|
||||
if os.path.isdir(d):
|
||||
sub_list = file_search(d, 'repodata', 25)
|
||||
rpm_repodata_roots.extend(sub_list)
|
||||
elif rpm_type == 'SRPM':
|
||||
for d in src_rpm_mirror_roots:
|
||||
if os.path.isdir(d):
|
||||
sub_list = file_search(d, 'repodata', 5)
|
||||
rpm_repodata_roots.extend(sub_list)
|
||||
else:
|
||||
print("invalid rpm_type '%s', valid types are %s" % (rpm_type, str(rpm_types)))
|
||||
return repodata_list
|
||||
|
||||
for d in rpm_repodata_roots:
|
||||
sub_list = file_search(d, '*primary.xml.gz', 2)
|
||||
repodata_list.extend(sub_list)
|
||||
|
||||
return repodata_list
|
||||
|
||||
|
||||
# Return the list of .../repodata/*filelists.xml.gz files
|
||||
# rpm_type= 'RPM' or 'SRPM'
|
||||
# arch_list= e.g. [ 'x86_64', 'noarch' ], only relevant if rpm_type=='RPM'
|
||||
def get_repo_filelists_data_list(rpm_type='RPM', arch_list=default_arch_list):
|
||||
rpm_repodata_roots = []
|
||||
repodata_list = []
|
||||
|
||||
if rpm_type == 'RPM':
|
||||
for d in bin_rpm_mirror_roots:
|
||||
if os.path.isdir(d):
|
||||
sub_list = file_search(d, 'repodata', 25)
|
||||
rpm_repodata_roots.extend(sub_list)
|
||||
elif rpm_type == 'SRPM':
|
||||
for d in src_rpm_mirror_roots:
|
||||
if os.path.isdir(d):
|
||||
sub_list = file_search(d, 'repodata', 5)
|
||||
rpm_repodata_roots.extend(sub_list)
|
||||
else:
|
||||
print "invalid rpm_type '%s', valid types are %s" % (rpm_type, str(rpm_types))
|
||||
return repodata_list
|
||||
|
||||
for d in rpm_repodata_roots:
|
||||
sub_list = file_search(d, '*filelists.xml.gz', 2)
|
||||
repodata_list.extend(sub_list)
|
||||
|
||||
return repodata_list
|
||||
|
||||
|
||||
|
||||
# Process a list of repodata files (*filelists.xml.gz) and extract package data.
|
||||
# Data is saved to the global 'pkg_data'.
|
||||
def read_data_from_repodata_filelists_list(repodata_list, rpm_type='RPM', arch=default_arch):
|
||||
for repodata_path in repodata_list:
|
||||
read_data_from_filelists_xml_gz(repodata_path, rpm_type=rpm_type, arch=arch)
|
||||
|
||||
# Process a single repodata file (*filelists.xml.gz) and extract package data.
|
||||
# Data is saved to the global 'pkg_data'.
|
||||
def read_data_from_filelists_xml_gz(repodata_path, rpm_type='RPM', arch=default_arch):
|
||||
# print "repodata_path=%s" % repodata_path
|
||||
infile = gzip.open(repodata_path)
|
||||
root = ET.parse(infile).getroot()
|
||||
for pkg in root.findall('filelists:package', ns):
|
||||
name=pkg.get('name')
|
||||
pkg_arch=pkg.get('arch')
|
||||
|
||||
version=""
|
||||
release=""
|
||||
|
||||
if arch is not None:
|
||||
if pkg_arch is None:
|
||||
continue
|
||||
if pkg_arch != arch:
|
||||
continue
|
||||
|
||||
v=pkg.find('filelists:version', ns)
|
||||
if v is not None:
|
||||
version=v.get('ver')
|
||||
release=v.get('rel')
|
||||
else:
|
||||
print("%s: %s.%s has no 'filelists:version'" % (repodata_path, name, pkg_arch))
|
||||
|
||||
# print "%s %s %s %s " % (name, pkg_arch, version, release)
|
||||
|
||||
for f in pkg.findall('filelists:file', ns):
|
||||
fn=f.text
|
||||
# print " fn=%s -> plg=%s" % (fn, name)
|
||||
if not name in pkg_data[rpm_type]['files']:
|
||||
pkg_data[rpm_type]['files'][name]=[]
|
||||
pkg_data[rpm_type]['files'][name].append(fn)
|
||||
if not fn in pkg_data[rpm_type]['file_owners']:
|
||||
pkg_data[rpm_type]['file_owners'][fn]=[]
|
||||
pkg_data[rpm_type]['file_owners'][fn]=name
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
# Process a list of repodata files (*primary.xml.gz) and extract package data.
|
||||
# Data is saved to the global 'pkg_data'.
|
||||
def read_data_from_repodata_primary_list(repodata_list, rpm_type='RPM', arch=default_arch):
|
||||
for repodata_path in repodata_list:
|
||||
read_data_from_primary_xml_gz(repodata_path, rpm_type=rpm_type, arch=arch)
|
||||
|
||||
# Process a single repodata file (*primary.xml.gz) and extract package data.
|
||||
# Data is saved to the global 'pkg_data'.
|
||||
def read_data_from_primary_xml_gz(repodata_path, rpm_type='RPM', arch=default_arch):
|
||||
# print "repodata_path=%s" % repodata_path
|
||||
infile = gzip.open(repodata_path)
|
||||
root = ET.parse(infile).getroot()
|
||||
for pkg in root.findall('root:package', ns):
|
||||
name=pkg.find('root:name', ns).text
|
||||
pkg_arch=pkg.find('root:arch', ns).text
|
||||
version=""
|
||||
release=""
|
||||
license=""
|
||||
sourcerpm=""
|
||||
|
||||
if arch is not None:
|
||||
if pkg_arch is None:
|
||||
continue
|
||||
if pkg_arch != arch:
|
||||
continue
|
||||
|
||||
pkg_data[rpm_type]['providers'][name]=name
|
||||
pkg_data[rpm_type]['files'][name]=[]
|
||||
pkg_data[rpm_type]['requires'][name] = []
|
||||
pkg_data[rpm_type]['requires'][name].append(name)
|
||||
|
||||
url=pkg.find('root:url', ns).text
|
||||
v=pkg.find('root:version', ns)
|
||||
if v is not None:
|
||||
version=v.get('ver')
|
||||
release=v.get('rel')
|
||||
else:
|
||||
print("%s: %s.%s has no 'root:version'" % (repodata_path, name, pkg_arch))
|
||||
|
||||
fn="%s-%s-%s.%s.rpm" % (name, version, release, arch)
|
||||
pkg_data[rpm_type]['fn_to_name'][fn]=name
|
||||
|
||||
# SAL print "%s %s %s %s " % (name, pkg_arch, version, release)
|
||||
print("%s %s %s %s " % (name, pkg_arch, version, release))
|
||||
f=pkg.find('root:format', ns)
|
||||
if f is not None:
|
||||
license=f.find('rpm:license', ns).text
|
||||
sourcerpm=f.find('rpm:sourcerpm', ns).text
|
||||
if sourcerpm != "":
|
||||
pkg_data[rpm_type]['sourcerpm'][name] = sourcerpm
|
||||
# SAL print "--- requires ---"
|
||||
print("--- requires ---")
|
||||
r=f.find('rpm:requires', ns)
|
||||
if r is not None:
|
||||
for rr in r.findall('rpm:entry', ns):
|
||||
required_name=rr.get('name')
|
||||
# SAL print " %s" % required_name
|
||||
print " %s" % required_name
|
||||
pkg_data[rpm_type]['requires'][name].append(required_name)
|
||||
else:
|
||||
print("%s: %s.%s has no 'rpm:requires'" % (repodata_path, name, pkg_arch))
|
||||
# print "--- provides ---"
|
||||
p=f.find('rpm:provides', ns)
|
||||
if p is not None:
|
||||
for pp in p.findall('rpm:entry', ns):
|
||||
provided_name=pp.get('name')
|
||||
# print " %s" % provided_name
|
||||
if name == "kernel-rt" and provided_name in pkg_data[rpm_type]['providers'] and pkg_data[rpm_type]['providers'][provided_name] == "kernel":
|
||||
continue
|
||||
if name.startswith('kernel-rt'):
|
||||
alt_name=name.replace('kernel-rt', 'kernel')
|
||||
if provided_name in pkg_data[rpm_type]['providers'] and pkg_data[rpm_type]['providers'][provided_name] == alt_name:
|
||||
continue
|
||||
pkg_data[rpm_type]['providers'][provided_name]=name
|
||||
else:
|
||||
print("%s: %s.%s has no 'rpm:provides'" % (repodata_path, name, pkg_arch))
|
||||
# print "--- files ---"
|
||||
for fn in f.findall('root:file', ns):
|
||||
file_name=fn.text
|
||||
# print " %s" % file_name
|
||||
pkg_data[rpm_type]['files'][name].append(file_name)
|
||||
if name == "kernel-rt" and file_name in pkg_data[rpm_type]['file_owners'] and pkg_data[rpm_type]['file_owners'][file_name] == "kernel":
|
||||
continue
|
||||
if name.startswith('kernel-rt'):
|
||||
alt_name=name.replace('kernel-rt', 'kernel')
|
||||
if file_name in pkg_data[rpm_type]['file_owners'] and pkg_data[rpm_type]['file_owners'][file_name] == alt_name:
|
||||
continue
|
||||
pkg_data[rpm_type]['file_owners'][file_name]=name
|
||||
else:
|
||||
print("%s: %s.%s has no 'root:format'" % (repodata_path, name, pkg_arch))
|
||||
# print "%s %s %s %s %s" % (name, pkg_arch, version, release, license)
|
||||
infile.close()
|
||||
|
||||
def calulate_all_direct_requires_and_descendants(rpm_type='RPM'):
|
||||
# print "calulate_all_direct_requires_and_descendants rpm_type=%s" % rpm_type
|
||||
for name in pkg_data[rpm_type]['requires']:
|
||||
calulate_pkg_direct_requires_and_descendants(name, rpm_type=rpm_type)
|
||||
|
||||
def calulate_pkg_direct_requires_and_descendants(name, rpm_type='RPM'):
|
||||
print("%s needs:" % name)
|
||||
if not rpm_type in pkg_data:
|
||||
print("Error: unknown rpm_type '%s'" % rpm_type)
|
||||
return
|
||||
|
||||
if not name in pkg_data[rpm_type]['requires']:
|
||||
print("Note: No requires data for '%s'" % name)
|
||||
return
|
||||
|
||||
for req in pkg_data[rpm_type]['requires'][name]:
|
||||
pro = '???'
|
||||
if rpm_type == 'RPM':
|
||||
if req in pkg_data[rpm_type]['providers']:
|
||||
pro = pkg_data[rpm_type]['providers'][req]
|
||||
elif req in pkg_data[rpm_type]['file_owners']:
|
||||
pro = pkg_data[rpm_type]['file_owners'][req]
|
||||
else:
|
||||
pro = '???'
|
||||
print("package %s has unresolved requirement '%s'" % (name, req))
|
||||
else:
|
||||
# i.e. rpm_type == 'SRPM'
|
||||
rpm_pro = '???'
|
||||
if req in pkg_data['RPM']['providers']:
|
||||
rpm_pro = pkg_data['RPM']['providers'][req]
|
||||
elif req in pkg_data['RPM']['file_owners']:
|
||||
rpm_pro = pkg_data['RPM']['file_owners'][req]
|
||||
else:
|
||||
rpm_pro = '???'
|
||||
print("package %s has unresolved requirement '%s'" % (name, req))
|
||||
|
||||
if rpm_pro is not None and rpm_pro != '???':
|
||||
if not name in pkg_data[rpm_type]['pkg_direct_requires_rpm']:
|
||||
pkg_data[rpm_type]['pkg_direct_requires_rpm'][name] = []
|
||||
if not rpm_pro in pkg_data[rpm_type]['pkg_direct_requires_rpm'][name]:
|
||||
pkg_data[rpm_type]['pkg_direct_requires_rpm'][name].append(rpm_pro)
|
||||
|
||||
if rpm_pro in pkg_data['RPM']['sourcerpm']:
|
||||
fn = pkg_data['RPM']['sourcerpm'][rpm_pro]
|
||||
if fn in pkg_data['SRPM']['fn_to_name']:
|
||||
pro = pkg_data['SRPM']['fn_to_name'][fn]
|
||||
else:
|
||||
pro = '???'
|
||||
print("package %s requires srpm file name %s" % (name,fn))
|
||||
else:
|
||||
pro = '???'
|
||||
print("package %s requires rpm %s, but that rpm has no known srpm" % (name,rpm_pro))
|
||||
|
||||
if pro is not None and pro != '???':
|
||||
if not name in pkg_data[rpm_type]['pkg_direct_requires']:
|
||||
pkg_data[rpm_type]['pkg_direct_requires'][name] = []
|
||||
if not pro in pkg_data[rpm_type]['pkg_direct_requires'][name]:
|
||||
pkg_data[rpm_type]['pkg_direct_requires'][name].append(pro)
|
||||
if not pro in pkg_data[rpm_type]['pkg_direct_descendants']:
|
||||
pkg_data[rpm_type]['pkg_direct_descendants'][pro] = []
|
||||
if not name in pkg_data[rpm_type]['pkg_direct_descendants'][pro]:
|
||||
pkg_data[rpm_type]['pkg_direct_descendants'][pro].append(name)
|
||||
|
||||
print(" %s -> %s" % (req, pro))
|
||||
|
||||
|
||||
|
||||
def calulate_all_transitive_requires(rpm_type='RPM'):
|
||||
for name in pkg_data[rpm_type]['pkg_direct_requires']:
|
||||
calulate_pkg_transitive_requires(name, rpm_type=rpm_type)
|
||||
|
||||
def calulate_pkg_transitive_requires(name, rpm_type='RPM'):
|
||||
if not rpm_type in pkg_data:
|
||||
print("Error: unknown rpm_type '%s'" % rpm_type)
|
||||
return
|
||||
|
||||
if not name in pkg_data[rpm_type]['pkg_direct_requires']:
|
||||
print("Note: No direct_requires data for '%s'" % name)
|
||||
return
|
||||
|
||||
pkg_data[rpm_type]['pkg_transitive_requires'][name]=[]
|
||||
if rpm_type != 'RPM':
|
||||
pkg_data[rpm_type]['pkg_transitive_requires_rpm'][name]=[]
|
||||
unresolved = []
|
||||
unresolved.append(name)
|
||||
|
||||
while unresolved:
|
||||
n = unresolved.pop(0)
|
||||
# print "%s: remove %s" % (name, n)
|
||||
if rpm_type == 'RPM':
|
||||
direct_requires='pkg_direct_requires'
|
||||
transitive_requires='pkg_transitive_requires'
|
||||
else:
|
||||
direct_requires='pkg_direct_requires_rpm'
|
||||
transitive_requires='pkg_transitive_requires_rpm'
|
||||
if n in pkg_data[rpm_type][direct_requires]:
|
||||
for r in pkg_data[rpm_type][direct_requires][n]:
|
||||
if r != name:
|
||||
if not r in pkg_data[rpm_type][transitive_requires][name]:
|
||||
pkg_data[rpm_type][transitive_requires][name].append(r)
|
||||
if r in pkg_data['RPM']['pkg_transitive_requires']:
|
||||
for r2 in pkg_data['RPM']['pkg_transitive_requires'][r]:
|
||||
if r2 != name:
|
||||
if not r2 in pkg_data[rpm_type][transitive_requires][name]:
|
||||
pkg_data[rpm_type][transitive_requires][name].append(r2)
|
||||
else:
|
||||
if rpm_type == 'RPM':
|
||||
unresolved.append(r)
|
||||
else:
|
||||
print("WARNING: calulate_pkg_transitive_requires: can't append rpm to SRPM list, name=%s, r=%s" % (name, r))
|
||||
# print "%s: add %s" % (name, r)
|
||||
if rpm_type != 'RPM':
|
||||
for r in pkg_data[rpm_type]['pkg_transitive_requires_rpm'][name]:
|
||||
if r in pkg_data['RPM']['sourcerpm']:
|
||||
fn = pkg_data['RPM']['sourcerpm'][r]
|
||||
if fn in pkg_data['SRPM']['fn_to_name']:
|
||||
s = pkg_data['SRPM']['fn_to_name'][fn]
|
||||
pkg_data[rpm_type]['pkg_transitive_requires'][name].append(s)
|
||||
else:
|
||||
print("package %s requires srpm file name %s, but srpm name is not known" % (name, fn))
|
||||
else:
|
||||
print("package %s requires rpm %s, but that rpm has no known srpm" % (name, r))
|
||||
|
||||
def calulate_all_transitive_descendants(rpm_type='RPM'):
|
||||
for name in pkg_data[rpm_type]['pkg_direct_descendants']:
|
||||
calulate_pkg_transitive_descendants(name, rpm_type=rpm_type)
|
||||
|
||||
def calulate_pkg_transitive_descendants(name, rpm_type='RPM'):
|
||||
if not rpm_type in pkg_data:
|
||||
print("Error: unknown rpm_type '%s'" % rpm_type)
|
||||
return
|
||||
|
||||
if not name in pkg_data[rpm_type]['pkg_direct_descendants']:
|
||||
print("Note: No direct_requires data for '%s'" % name)
|
||||
return
|
||||
|
||||
pkg_data[rpm_type]['pkg_transitive_descendants'][name]=[]
|
||||
unresolved = []
|
||||
unresolved.append(name)
|
||||
|
||||
while unresolved:
|
||||
n = unresolved.pop(0)
|
||||
# print "%s: remove %s" % (name, n)
|
||||
if n in pkg_data[rpm_type]['pkg_direct_descendants']:
|
||||
for r in pkg_data[rpm_type]['pkg_direct_descendants'][n]:
|
||||
if r != name:
|
||||
if not r in pkg_data[rpm_type]['pkg_transitive_descendants'][name]:
|
||||
pkg_data[rpm_type]['pkg_transitive_descendants'][name].append(r)
|
||||
if r in pkg_data[rpm_type]['pkg_transitive_descendants']:
|
||||
for n2 in pkg_data[rpm_type]['pkg_transitive_descendants'][r]:
|
||||
if n2 != name:
|
||||
if not n2 in pkg_data[rpm_type]['pkg_transitive_descendants'][name]:
|
||||
pkg_data[rpm_type]['pkg_transitive_descendants'][name].append(n2)
|
||||
else:
|
||||
unresolved.append(r)
|
||||
# print "%s: add %s" % (name, r)
|
||||
|
||||
def create_dest_rpm_data():
|
||||
for name in sorted(pkg_data['RPM']['sourcerpm']):
|
||||
fn=pkg_data['RPM']['sourcerpm'][name]
|
||||
if fn in pkg_data['SRPM']['fn_to_name']:
|
||||
sname = pkg_data['SRPM']['fn_to_name'][fn]
|
||||
if not sname in pkg_data['SRPM']['binrpm']:
|
||||
pkg_data['SRPM']['binrpm'][sname]=[]
|
||||
pkg_data['SRPM']['binrpm'][sname].append(name)
|
||||
|
||||
def create_cache(cache_dir):
|
||||
for rpm_type in rpm_types:
|
||||
print("")
|
||||
print("==== %s ====" % rpm_type)
|
||||
print("")
|
||||
rpm_repodata_primary_list = get_repo_primary_data_list(rpm_type=rpm_type, arch_list=default_arch_by_type[rpm_type])
|
||||
for arch in default_arch_by_type[rpm_type]:
|
||||
read_data_from_repodata_primary_list(rpm_repodata_primary_list, rpm_type=rpm_type, arch=arch)
|
||||
rpm_repodata_filelists_list = get_repo_filelists_data_list(rpm_type=rpm_type, arch_list=default_arch_by_type[rpm_type])
|
||||
for arch in default_arch_by_type[rpm_type]:
|
||||
read_data_from_repodata_filelists_list(rpm_repodata_filelists_list, rpm_type=rpm_type, arch=arch)
|
||||
calulate_all_direct_requires_and_descendants(rpm_type=rpm_type)
|
||||
calulate_all_transitive_requires(rpm_type=rpm_type)
|
||||
calulate_all_transitive_descendants(rpm_type=rpm_type)
|
||||
|
||||
cache_name="%s/%s-direct-requires" % (cache_dir, rpm_type)
|
||||
f=open(cache_name, "w")
|
||||
for name in sorted(pkg_data[rpm_type]['pkg_direct_requires']):
|
||||
print("%s needs %s" % (name, pkg_data[rpm_type]['pkg_direct_requires'][name]))
|
||||
f.write("%s;" % name)
|
||||
first=True
|
||||
for req in sorted(pkg_data[rpm_type]['pkg_direct_requires'][name]):
|
||||
if first:
|
||||
first=False
|
||||
f.write("%s" % req)
|
||||
else:
|
||||
f.write(",%s" % req)
|
||||
f.write("\n")
|
||||
f.close()
|
||||
|
||||
cache_name="%s/%s-direct-descendants" % (cache_dir, rpm_type)
|
||||
f=open(cache_name, "w")
|
||||
for name in sorted(pkg_data[rpm_type]['pkg_direct_descendants']):
|
||||
print("%s informs %s" % (name, pkg_data[rpm_type]['pkg_direct_descendants'][name]))
|
||||
f.write("%s;" % name)
|
||||
first=True
|
||||
for req in sorted(pkg_data[rpm_type]['pkg_direct_descendants'][name]):
|
||||
if first:
|
||||
first=False
|
||||
f.write("%s" % req)
|
||||
else:
|
||||
f.write(",%s" % req)
|
||||
f.write("\n")
|
||||
f.close()
|
||||
|
||||
cache_name="%s/%s-transitive-requires" % (cache_dir, rpm_type)
|
||||
f=open(cache_name, "w")
|
||||
for name in sorted(pkg_data[rpm_type]['pkg_transitive_requires']):
|
||||
f.write("%s;" % name)
|
||||
first=True
|
||||
for req in sorted(pkg_data[rpm_type]['pkg_transitive_requires'][name]):
|
||||
if first:
|
||||
first=False
|
||||
f.write("%s" % req)
|
||||
else:
|
||||
f.write(",%s" % req)
|
||||
f.write("\n")
|
||||
f.close()
|
||||
|
||||
cache_name="%s/%s-transitive-descendants" % (cache_dir, rpm_type)
|
||||
f=open(cache_name, "w")
|
||||
for name in sorted(pkg_data[rpm_type]['pkg_transitive_descendants']):
|
||||
f.write("%s;" % name)
|
||||
first=True
|
||||
for req in sorted(pkg_data[rpm_type]['pkg_transitive_descendants'][name]):
|
||||
if first:
|
||||
first=False
|
||||
f.write("%s" % req)
|
||||
else:
|
||||
f.write(",%s" % req)
|
||||
f.write("\n")
|
||||
f.close()
|
||||
|
||||
if rpm_type != 'RPM':
|
||||
cache_name="%s/%s-direct-requires-rpm" % (cache_dir, rpm_type)
|
||||
f=open(cache_name, "w")
|
||||
for name in sorted(pkg_data[rpm_type]['pkg_direct_requires_rpm']):
|
||||
print("%s needs rpm %s" % (name, pkg_data[rpm_type]['pkg_direct_requires_rpm'][name]))
|
||||
f.write("%s;" % name)
|
||||
first=True
|
||||
for req in sorted(pkg_data[rpm_type]['pkg_direct_requires_rpm'][name]):
|
||||
if first:
|
||||
first=False
|
||||
f.write("%s" % req)
|
||||
else:
|
||||
f.write(",%s" % req)
|
||||
f.write("\n")
|
||||
f.close()
|
||||
|
||||
cache_name="%s/%s-transitive-requires-rpm" % (cache_dir, rpm_type)
|
||||
f=open(cache_name, "w")
|
||||
for name in sorted(pkg_data[rpm_type]['pkg_transitive_requires_rpm']):
|
||||
f.write("%s;" % name)
|
||||
first=True
|
||||
for req in sorted(pkg_data[rpm_type]['pkg_transitive_requires_rpm'][name]):
|
||||
if first:
|
||||
first=False
|
||||
f.write("%s" % req)
|
||||
else:
|
||||
f.write(",%s" % req)
|
||||
f.write("\n")
|
||||
f.close()
|
||||
|
||||
cache_name="%s/rpm-to-srpm" % cache_dir
|
||||
f=open(cache_name, "w")
|
||||
for name in sorted(pkg_data['RPM']['sourcerpm']):
|
||||
f.write("%s;" % name)
|
||||
fn=pkg_data['RPM']['sourcerpm'][name]
|
||||
if fn in pkg_data['SRPM']['fn_to_name']:
|
||||
sname = pkg_data['SRPM']['fn_to_name'][fn]
|
||||
f.write("%s" % sname)
|
||||
f.write("\n")
|
||||
f.close()
|
||||
|
||||
create_dest_rpm_data()
|
||||
cache_name="%s/srpm-to-rpm" % cache_dir
|
||||
f=open(cache_name, "w")
|
||||
for name in sorted(pkg_data['SRPM']['binrpm']):
|
||||
f.write("%s;" % name)
|
||||
first=True
|
||||
for bname in sorted(pkg_data['SRPM']['binrpm'][name]):
|
||||
if first:
|
||||
first=False
|
||||
f.write("%s" % bname)
|
||||
else:
|
||||
f.write(",%s" % bname)
|
||||
f.write("\n")
|
||||
f.close()
|
||||
|
||||
|
||||
|
||||
def test():
|
||||
for rpm_type in rpm_types:
|
||||
print("")
|
||||
print("==== %s ====" % rpm_type)
|
||||
print("")
|
||||
rpm_repodata_primary_list = get_repo_primary_data_list(rpm_type=rpm_type, arch_list=default_arch_by_type[rpm_type])
|
||||
for arch in default_arch_by_type[rpm_type]:
|
||||
read_data_from_repodata_primary_list(rpm_repodata_primary_list, rpm_type=rpm_type, arch=arch)
|
||||
rpm_repodata_filelists_list = get_repo_filelists_data_list(rpm_type=rpm_type, arch_list=default_arch_by_type[rpm_type])
|
||||
for arch in default_arch_by_type[rpm_type]:
|
||||
read_data_from_repodata_filelists_list(rpm_repodata_filelists_list, rpm_type=rpm_type, arch=arch)
|
||||
calulate_all_direct_requires_and_descendants(rpm_type=rpm_type)
|
||||
calulate_all_transitive_requires(rpm_type=rpm_type)
|
||||
calulate_all_transitive_descendants(rpm_type=rpm_type)
|
||||
|
||||
for name in pkg_data[rpm_type]['pkg_direct_requires']:
|
||||
print("%s needs %s" % (name, pkg_data[rpm_type]['pkg_direct_requires'][name]))
|
||||
|
||||
for name in pkg_data[rpm_type]['pkg_direct_descendants']:
|
||||
print("%s informs %s" % (name, pkg_data[rpm_type]['pkg_direct_descendants'][name]))
|
||||
|
||||
for name in pkg_data[rpm_type]['pkg_transitive_requires']:
|
||||
print("%s needs %s" % (name, pkg_data[rpm_type]['pkg_transitive_requires'][name]))
|
||||
print("")
|
||||
|
||||
for name in pkg_data[rpm_type]['pkg_transitive_descendants']:
|
||||
print("%s informs %s" % (name, pkg_data[rpm_type]['pkg_transitive_descendants'][name]))
|
||||
print("")
|
||||
|
||||
|
||||
if os.path.isdir(publish_cache_dir):
|
||||
create_cache(publish_cache_dir)
|
||||
else:
|
||||
print("ERROR: Directory not found '%s" % publish_cache_dir)
|
@ -1,277 +0,0 @@
|
||||
#!/bin/bash
|
||||
# set -x
|
||||
|
||||
#
|
||||
# Copyright (c) 2018-2020 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
source "$SRC_BASE/build-tools/spec-utils"
|
||||
source "$SRC_BASE/build-tools/srpm-utils"
|
||||
|
||||
CUR_DIR=`pwd`
|
||||
BUILD_DIR="$RPMBUILD_BASE"
|
||||
|
||||
if [ "x$DATA" == "x" ]; then
|
||||
echo "ERROR: default_build_srpm (${LINENO}): Environment variable 'DATA' not defined."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
srpm_source_build_data "$DATA" "$SRC_BUILD_TYPE" "$SRPM_OR_SPEC_PATH"
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "ERROR: default_build_srpm (${LINENO}): Failed to source build data from $DATA"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "x$PBR_VERSION" != "x" ] && [ "x$PBR_VERSION" != "xNA" ]; then
|
||||
VERSION=$PBR_VERSION
|
||||
fi
|
||||
|
||||
if [ "x$VERSION" == "x" ]; then
|
||||
for SPEC in `find $SPECS_BASE -name '*.spec' | sort -V`; do
|
||||
SPEC_PATH="$SPEC"
|
||||
|
||||
VERSION_DERIVED=`spec_evaluate '%{version}' "$SPEC_PATH" 2>> /dev/null`
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "ERROR: default_build_srpm (${LINENO}): '%{version}' not found in '$PKG_BASE/$SPEC_PATH'"
|
||||
VERSION_DERIVED=""
|
||||
fi
|
||||
|
||||
if [ "x$VERSION_DERIVED" != "x" ]; then
|
||||
if [ "x$VERSION" == "x" ]; then
|
||||
VERSION=$VERSION_DERIVED
|
||||
else
|
||||
if [ "x$SRC_DIR" != "x" ]; then
|
||||
echo "ERROR: default_build_srpm (${LINENO}): multiple spec files found, can't set VERSION automatically"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "x$VERSION" == "x" ]; then
|
||||
if [ -f $SRC_DIR/PKG-INFO ]; then
|
||||
VERSION=$(grep '^Version:' $SRC_DIR/PKG-INFO | awk -F ': ' '{print $2}' | sed -e 's/^[[:space:]]*//')
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "x$VERSION" != "x" ]; then
|
||||
echo "Derived VERSION=$VERSION"
|
||||
else
|
||||
echo "ERROR: default_build_srpm (${LINENO}): Failed to derive a good VERSION from SPEC file, and none provided."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "x$TAR_NAME" == "x" ]; then
|
||||
for SPEC in `find $SPECS_BASE -name '*.spec' | sort -V`; do
|
||||
SPEC_PATH="$SPEC"
|
||||
|
||||
SERVICE=`spec_find_global service "$SPEC_PATH" 2>> /dev/null`
|
||||
if [ $? -eq 0 ]; then
|
||||
if [ "x$TAR_NAME" == "x" ]; then
|
||||
TAR_NAME=$SERVICE
|
||||
else
|
||||
if [ "x$SRC_DIR" != "x" ]; then
|
||||
echo "ERROR: default_build_srpm (${LINENO}): multiple spec files found, can't set TAR_NAME automatically"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
else
|
||||
NAME=`spec_find_tag Name "$SPEC_PATH" 2>> /dev/null`
|
||||
if [ $? -eq 0 ]; then
|
||||
if [ "x$TAR_NAME" == "x" ]; then
|
||||
TAR_NAME=$NAME
|
||||
else
|
||||
if [ "x$SRC_DIR" != "x" ]; then
|
||||
echo "ERROR: default_build_srpm (${LINENO}): multiple spec files found, can't set TAR_NAME automatically"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
else
|
||||
echo "WARNING: default_build_srpm (${LINENO}): 'Name' not found in '$SPEC_PATH'"
|
||||
NAME=""
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "x$TAR_NAME" == "x" ]; then
|
||||
if [ -f $SRC_DIR/PKG-INFO ]; then
|
||||
TAR_NAME=$(grep '^Name:' $SRC_DIR/PKG-INFO | awk -F ': ' '{print $2}' | sed -e 's/^[[:space:]]*//')
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "x$TAR_NAME" != "x" ]; then
|
||||
echo "Derived TAR_NAME=$TAR_NAME"
|
||||
else
|
||||
echo "ERROR: default_build_srpm (${LINENO}): Failed to derive a good TAR_NAME from SPEC file, and none provided."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "x$TAR" == "x" ]; then
|
||||
TAR="$TAR_NAME-$VERSION.tar.gz"
|
||||
fi
|
||||
|
||||
SOURCE_PATH="$BUILD_DIR/SOURCES"
|
||||
TAR_PATH="$SOURCE_PATH/$TAR"
|
||||
STAGING=""
|
||||
|
||||
if [ "x$COPY_LIST_TO_TAR" != "x" ] || [ "x$EXCLUDE_LIST_FROM_TAR" != "x" ]; then
|
||||
STAGING="$BUILD_DIR/staging"
|
||||
mkdir -p $STAGING
|
||||
fi
|
||||
|
||||
mkdir -p "$BUILD_DIR/SRPMS"
|
||||
mkdir -p "$SOURCE_PATH"
|
||||
|
||||
if [ "x$SRC_DIR" == "x" -a "x$COPY_LIST" == "x" -a "$ALLOW_EMPTY_RPM" != "true" ]; then
|
||||
echo "ERROR: default_build_srpm (${LINENO}): '$PWD/$DATA' failed to provide at least one of 'SRC_DIR' or 'COPY_LIST'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "x$SRC_DIR" != "x" ]; then
|
||||
if [ ! -d "$SRC_DIR" ]; then
|
||||
echo "ERROR: default_build_srpm (${LINENO}): directory not found: '$SRC_DIR'"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "x$COPY_LIST" != "x" ]; then
|
||||
echo "COPY_LIST: $COPY_LIST"
|
||||
for p in $COPY_LIST; do
|
||||
# echo "COPY_LIST: $p"
|
||||
\cp -L -u -r -v $p $SOURCE_PATH
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "ERROR: default_build_srpm (${LINENO}): COPY_LIST: file not found: '$p'"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
if [ "x$STAGING" != "x" ]; then
|
||||
\cp -L -u -r -v $SRC_DIR $STAGING
|
||||
echo "COPY_LIST_TO_TAR: $COPY_LIST_TO_TAR"
|
||||
for p in $COPY_LIST_TO_TAR; do
|
||||
# echo "COPY_LIST_TO_TAR: $p"
|
||||
\cp -L -u -r -v $p $STAGING/$SRC_DIR
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "ERROR: default_build_srpm (${LINENO}): COPY_LIST_TO_TAR: file not found: '$p'"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
echo "EXCLUDE_LIST_FROM_TAR: $EXCLUDE_LIST_FROM_TAR"
|
||||
for p in $EXCLUDE_LIST_FROM_TAR; do
|
||||
# echo "EXCLUDE_LIST_FROM_TAR: $p"
|
||||
echo "rm -rf $STAGING/$SRC_DIR/$p"
|
||||
\rm -rf $STAGING/$SRC_DIR/$p
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "ERROR: default_build_srpm (${LINENO}): EXCLUDE_LIST_FROM_TAR: could not remove file: '$p'"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
fi
|
||||
|
||||
TRANSFORM=`echo "$SRC_DIR" | sed 's/^\./\\./' | sed 's:^/::' | sed 's#^.*/\.\./##'`
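# Illustrative example (assumed values, not from a real build): with
# SRC_DIR="stx-foo", TAR_NAME="foo" and VERSION="1.0", TRANSFORM resolves to
# "stx-foo", and the later tar --transform "s,^stx-foo,foo-1.0," renames the
# top-level directory inside the tarball to "foo-1.0", the layout rpmbuild
# expects. The sed chain above only normalizes SRC_DIR (escaping a leading
# dot and stripping a leading "/" or anything up to the last "../") before
# it is used in that regex.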
|
||||
|
||||
if [ "x$STAGING" != "x" ]; then
|
||||
pushd $STAGING
|
||||
fi
|
||||
|
||||
TAR_NEEDED=0
|
||||
if [ "x$SRC_DIR" != "x" ]; then
|
||||
echo "SRC_DIR=$SRC_DIR"
|
||||
if [ -f $TAR_PATH ]; then
|
||||
n=`find . -cnewer $TAR_PATH -and ! -path './.git*' \
|
||||
-and ! -path './build/*' \
|
||||
-and ! -path './.pc/*' \
|
||||
-and ! -path './patches/*' \
|
||||
-and ! -path "./$DISTRO/*" \
|
||||
-and ! -path './pbr-*.egg/*' \
|
||||
| wc -l`
|
||||
if [ $n -gt 0 ]; then
|
||||
TAR_NEEDED=1
|
||||
fi
|
||||
else
|
||||
TAR_NEEDED=1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ $TAR_NEEDED -gt 0 ]; then
|
||||
echo "Creating tar file: $TAR_PATH ..."
|
||||
echo "tar --exclude '.git*' --exclude 'build' --exclude='.pc' --exclude='patches' --exclude='$SRC_DIR/$DISTRO' --exclude='pbr-*.egg' --transform 's,^$TRANSFORM,$TAR_NAME-$VERSION,' -czf $TAR_PATH $SRC_DIR"
|
||||
tar --exclude '.git*' --exclude 'build' --exclude='.pc' --exclude='patches' --exclude="$SRC_DIR/$DISTRO" --exclude='pbr-*.egg' --transform "s,^$TRANSFORM,$TAR_NAME-$VERSION," -czf "$TAR_PATH" "$SRC_DIR"
|
||||
if [ $? -ne 0 ]; then
|
||||
if [ "x$STAGING" != "x" ]; then
|
||||
popd
|
||||
fi
|
||||
|
||||
echo "ERROR: default_build_srpm (${LINENO}): failed to create tar file, cmd: tar --exclude '.git*' --exclude 'build' --exclude='.pc' --exclude='patches' --exclude="$SRC_DIR/$DISTRO" --exclude='pbr-*.egg' --transform \"s,^$TRANSFORM,$TAR_NAME-$VERSION,\" -czf '$TAR_PATH' '$SRC_DIR'"
|
||||
exit 1
|
||||
fi
|
||||
echo "Created tar file: $TAR_PATH"
|
||||
else
|
||||
echo "Tar file not needed."
|
||||
fi
|
||||
|
||||
if [ "x$STAGING" != "x" ]; then
|
||||
popd
|
||||
fi
|
||||
|
||||
if [ ! -d $BUILD_DIR/SPECS ]; then
|
||||
echo "Spec directory '$BUILD_DIR/SPECS' does not exist"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ $(ls -1 $BUILD_DIR/SPECS/*.spec | wc -l) -eq 0 ]; then
|
||||
echo "No spec files found in spec directory '$BUILD_DIR/SPECS'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
for SPEC in `ls -1 $BUILD_DIR/SPECS`; do
|
||||
SPEC_PATH="$BUILD_DIR/SPECS/$SPEC"
|
||||
RELEASE=`spec_find_tag Release "$SPEC_PATH" 2>> /dev/null`
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "ERROR: default_build_srpm (${LINENO}): 'Release' not found in '$SPEC_PATH'"
|
||||
fi
|
||||
NAME=`spec_find_tag Name "$SPEC_PATH" 2>> /dev/null`
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "ERROR: default_build_srpm (${LINENO}): 'Name' not found in '$SPEC_PATH'"
|
||||
fi
|
||||
SRPM="$NAME-$VERSION-$RELEASE.src.rpm"
|
||||
SRPM_PATH="$BUILD_DIR/SRPMS/$SRPM"
|
||||
|
||||
spec_validate_tis_release $SPEC_PATH
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "TIS Validation of $SPEC_PATH failed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
BUILD_NEEDED=0
|
||||
if [ -f $SRPM_PATH ]; then
|
||||
n=`find . -cnewer $SRPM_PATH | wc -l`
|
||||
if [ $n -gt 0 ]; then
|
||||
BUILD_NEEDED=1
|
||||
fi
|
||||
else
|
||||
BUILD_NEEDED=1
|
||||
fi
|
||||
|
||||
if [ $BUILD_NEEDED -gt 0 ]; then
|
||||
echo "SPEC file: $SPEC_PATH"
|
||||
echo "SRPM build directory: $BUILD_DIR"
|
||||
echo "TIS_PATCH_VER: $TIS_PATCH_VER"
|
||||
echo "PBR_VERSION: $PBR_VERSION"
|
||||
|
||||
sed -i -e "1 i%define _tis_build_type $BUILD_TYPE" $SPEC_PATH
|
||||
sed -i -e "1 i%define tis_patch_ver $TIS_PATCH_VER" $SPEC_PATH
|
||||
sed -i -e "1 i%define pbr_version $PBR_VERSION" $SPEC_PATH
|
||||
rpmbuild -bs $SPEC_PATH --define="%_topdir $BUILD_DIR" --undefine=dist --define="_tis_dist .tis"
|
||||
else
|
||||
echo "SRPM build not needed"
|
||||
fi
|
||||
done
|
||||
|
||||
|
@ -1,59 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
for r in $(find $MY_WORKSPACE/*/rpmbuild/RPMS -name '*.rpm'); do
|
||||
f=$(basename $r)
|
||||
find $MY_WORKSPACE/export/dist/isolinux/Packages | grep $f >> /dev/null
|
||||
if [ $? -ne 0 ]; then
|
||||
continue
|
||||
fi
|
||||
n=$(rpm -q --qf='%{NAME}\n' -p $r)
|
||||
d=$(dirname $r)
|
||||
# echo "f=$f"
|
||||
for f in $(rpm -q -p -l $r | grep '[.]ko$' | head -n 1); do
|
||||
FOUND=0
|
||||
s=$(rpm -q --info -p $r | grep 'Source RPM :' | awk -F: '{print $2}' | tr -d '[[:space:]]')
|
||||
NAME=$(rpm -q --qf='%{NAME}\n' -p $d/$s)
|
||||
# echo "NAME=$NAME"
|
||||
for s2 in $(find $MY_WORKSPACE/*/rpmbuild/SRPMS -name "$NAME-[0-9]*.src.rpm"); do
|
||||
NAME2=$(rpm -q --qf='%{NAME}\n' -p $s2)
|
||||
# echo "NAME2=$NAME2"
|
||||
if [ "${NAME}" == "${NAME2}" ]; then
|
||||
echo $NAME | grep '[-]rt' >> /dev/null
|
||||
if [ $? -ne 0 ]; then
|
||||
echo $NAME
|
||||
FOUND=1
|
||||
break
|
||||
fi
|
||||
# SIMPLE_NAME=$(echo $NAME | sed 's#-kmod##' | sed 's#-kernel##' | sed 's#^kernel$#linux#' | sed 's#^kernel-rt$#linux-rt#')
|
||||
SIMPLE_NAME=$(echo $NAME | sed 's#^kernel$#linux#' | sed 's#^kernel-rt$#linux-rt#')
|
||||
# echo "SIMPLE_NAME=$SIMPLE_NAME"
|
||||
grep "[/]$SIMPLE_NAME$" $(for g in $(find $MY_REPO -type d -name .git); do d=$(dirname $g); find $d -name 'centos_pkg_dirs*'; done) >> /dev/null
|
||||
if [ $? -eq 0 ]; then
|
||||
echo $NAME
|
||||
FOUND=1
|
||||
break
|
||||
fi
|
||||
SIMPLE_NAME=$(echo $NAME | sed 's#-rt$##' )
|
||||
# echo "SIMPLE_NAME=$SIMPLE_NAME"
|
||||
grep "[/]$SIMPLE_NAME$" $(for g in $(find $MY_REPO -type d -name .git); do d=$(dirname $g); find $d -name 'centos_pkg_dirs*'; done) >> /dev/null
|
||||
if [ $? -eq 0 ]; then
|
||||
echo $SIMPLE_NAME
|
||||
FOUND=1
|
||||
break
|
||||
fi
|
||||
SIMPLE_NAME2=$(echo $SIMPLE_NAME | sed 's#-kmod##' )
|
||||
# echo "SIMPLE_NAME2=$SIMPLE_NAME2"
|
||||
grep "[/-]$SIMPLE_NAME2$" $(for g in $(find $MY_REPO -type d -name .git); do d=$(dirname $g); find $d -name 'centos_pkg_dirs*'; done) >> /dev/null
|
||||
if [ $? -eq 0 ]; then
|
||||
echo $SIMPLE_NAME
|
||||
FOUND=1
|
||||
break
|
||||
fi
|
||||
fi
|
||||
done
|
||||
if [ $FOUND -eq 1 ]; then
|
||||
break
|
||||
fi
|
||||
done
|
||||
# done
|
||||
done | sort --unique
|
@ -1,54 +0,0 @@
|
||||
#!/bin/bash
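# Note (added for illustration; the format is inferred from the parsing
# below): each 'srpm_path' file is expected to contain a single line such as
#   mirror:Source/foo-1.2-3.el7.src.rpm
# or repo:some/path/foo-1.2-3.el7.src.rpm (package names hypothetical); the
# optional prefix selects which root the remainder of the path is resolved
# against.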
|
||||
|
||||
for f in `find $MY_REPO -name srpm_path`; do
|
||||
orig_line=`cat $f`
|
||||
first=`echo $orig_line | awk -F : '{print $1}'`
|
||||
orig_path="/import/mirrors/$orig_line"
|
||||
if [ "$first" == "mirror" ]; then
|
||||
orig_path="/import/mirrors/"$(echo $orig_line | awk -F : '{print $2}');
|
||||
fi
|
||||
if [ "$first" == "repo" ]; then
|
||||
orig_path="$MY_REPO/"$(echo $orig_line | awk -F : '{print $2}')
|
||||
continue
|
||||
fi
|
||||
|
||||
if [ ! -f $orig_path ]; then
|
||||
echo "ERROR: bad srpm path: '$orig_path' derived from '$f'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
orig_dir=$(dirname $orig_path)
|
||||
repodata_dir=$orig_dir/repodata
|
||||
if [ ! -d $repodata_dir ]; then
|
||||
repodata_dir=$orig_dir/../repodata
|
||||
if [ ! -d $repodata_dir ]; then
|
||||
repodata_dir=$orig_dir/../../repodata
|
||||
if [ ! -d $repodata_dir ]; then
|
||||
echo "ERROR: couldn't find repodata for '$orig_path'"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# echo "'$orig_path' -> '$repodata_dir'"
|
||||
name=$(rpm -q --queryformat '%{NAME}\n' -p $orig_path 2>> /dev/null)
|
||||
version=$(rpm -q --queryformat '%{VERSION}\n' -p $orig_path 2>> /dev/null)
|
||||
release=$(rpm -q --queryformat '%{RELEASE}\n' -p $orig_path 2>> /dev/null)
|
||||
orig_name=$(basename $orig_path)
|
||||
best_name="$orig_name"
|
||||
for n in `find $orig_dir -name $name-*`; do
|
||||
if [ "$n" != "$orig_path" ]; then
|
||||
new_name=$(rpm -q --queryformat '%{NAME}\n' -p $n)
|
||||
if [ "$name" == "$new_name" ]; then
|
||||
rpmdev-vercmp $(basename $n) $best_name >> /dev/null
|
||||
if [ $? -eq 11 ]; then
|
||||
best_name=$(basename $n)
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
done
|
||||
if [ "$best_name" != "$orig_name" ]; then
|
||||
echo "$f: $orig_name ==> $best_name"
|
||||
fi
|
||||
done
|
||||
|
@ -39,14 +39,14 @@
|
||||
# list-of-image-record-files: one or more files containing image records
|
||||
#
|
||||
# e.g.
|
||||
# cat $MY_WORKSPACE/std/build-images/images-centos-stable-versioned.lst
|
||||
# docker.io/starlingx/stx-keystone-api-proxy:master-centos-stable-20200811T002300Z.0
|
||||
# docker.io/starlingx/stx-nova-client:master-centos-stable-20200811T002300Z.0
|
||||
# cat $MY_WORKSPACE/std/build-images/images-debian-stable-versioned.lst
|
||||
# docker.io/starlingx/stx-keystone-api-proxy:master-debian-stable-20200811T002300Z.0
|
||||
# docker.io/starlingx/stx-nova-client:master-debian-stable-20200811T002300Z.0
|
||||
# ...
|
||||
#
|
||||
# Sample usage:
|
||||
# helm_chart_modify.py <input-yaml-file> <output-yaml-file> \
|
||||
# $MY_WORKSPACE/std/build-images/images-centos-stable-versioned.lst
|
||||
# $MY_WORKSPACE/std/build-images/images-debian-stable-versioned.lst
|
||||
|
||||
import collections
|
||||
import sys
|
||||
|
@ -50,7 +50,7 @@ get_bsp_dir () {
|
||||
# Parameters:
|
||||
# build_target: One of 'iso', 'guest' ...
|
||||
# list_type: One of 'std', 'dev', 'layer'
|
||||
# distro: One of 'centos', ...
|
||||
# distro: One of 'debian', ...
|
||||
# layer: One of 'compiler', 'distro', 'flock', ...
|
||||
# Only required if list_type == layer
|
||||
#
|
||||
@ -68,7 +68,7 @@ image_inc_list () {
|
||||
if [ "${list_type}" = "layer" ]; then
|
||||
local required_layer_cfg_name="required_layer_${build_target}_inc.cfg"
|
||||
local layer_cfg_name="${distro}_build_layer.cfg"
|
||||
local root_dir="${MY_REPO}/../stx-tools/centos-mirror-tools/config/${distro}/${layer}"
|
||||
local root_dir="${MY_REPO}/../stx-tools/${distro}-mirror-tools/config/${distro}/${layer}"
|
||||
local layer_cfgs=""
|
||||
|
||||
layer_cfgs=$(find $(for x in $GIT_LIST; do echo $x/; done) -maxdepth 1 -name ${layer_cfg_name})
|
||||
|
@ -1,523 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
import csv
|
||||
import os
|
||||
import rpm
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import getopt
|
||||
|
||||
|
||||
class BinPackage(object):
|
||||
def __init__(self, path, ts):
|
||||
fdno = os.open(path, os.O_RDONLY)
|
||||
hdr = ts.hdrFromFdno(fdno)
|
||||
os.close(fdno)
|
||||
|
||||
self.source = hdr[rpm.RPMTAG_SOURCERPM]
|
||||
self.desc = hdr[rpm.RPMTAG_DESCRIPTION].replace('\n', ' ')
|
||||
self.dirname = os.path.dirname(path)
|
||||
self.filename = os.path.basename(path)
|
||||
self.path = path
|
||||
self.kernel_module = False
|
||||
self.name = hdr[rpm.RPMTAG_NAME]
|
||||
|
||||
# Does the package contain kernel modules?
|
||||
for filename in hdr[rpm.RPMTAG_BASENAMES]:
|
||||
assert isinstance(filename, basestring)
|
||||
if filename.endswith('.ko'):
|
||||
self.kernel_module = True
|
||||
break
|
||||
|
||||
|
||||
class SrcPackage(object):
|
||||
def __init__(self, path=None):
|
||||
self.bin_pkg = None
|
||||
self.original_src = None
|
||||
self.sha = 'SHA'
|
||||
if path is None:
|
||||
self.filename = None
|
||||
self.path = None
|
||||
else:
|
||||
self.filename = os.path.basename(path)
|
||||
self.path = path
|
||||
ts = rpm.TransactionSet()
|
||||
ts.setVSFlags(rpm._RPMVSF_NODIGESTS | rpm._RPMVSF_NOSIGNATURES)
|
||||
fdno = os.open(self.path, os.O_RDONLY)
|
||||
hdr = ts.hdrFromFdno(fdno)
|
||||
os.close(fdno)
|
||||
self.desc = hdr[rpm.RPMTAG_DESCRIPTION].replace('\n', ' ')
|
||||
self.version = hdr[rpm.RPMTAG_VERSION] + '-' + hdr[rpm.RPMTAG_RELEASE]
|
||||
self.licences = hdr[rpm.RPMTAG_LICENSE]
|
||||
self.name = hdr[rpm.RPMTAG_NAME]
|
||||
self.url = hdr[rpm.RPMTAG_URL]
|
||||
|
||||
self.modified = None
|
||||
self.kernel_module = False
|
||||
self.disclosed_by = 'Jason McKenna'
|
||||
self.shipped_as = 'Binary'
|
||||
self.origin = 'Unknown'
|
||||
self.notes = ''
|
||||
self.wrs = False
|
||||
|
||||
def __lt__(self, other):
|
||||
me = self.name.lower()
|
||||
them = other.name.lower()
|
||||
if me == them:
|
||||
return self.name < other.name
|
||||
else:
|
||||
return me < them
|
||||
|
||||
|
||||
class IPReport(object):
|
||||
__KNOWN_PATHS = [
|
||||
# CentOS 7.4
|
||||
['/import/mirrors/CentOS/7.4.1708/os/Source/SPackages',
|
||||
'http://vault.centos.org/7.4.1708/os/Source/SPackages'],
|
||||
['/import/mirrors/CentOS/vault.centos.org/7.4.1708/updates/Source/SPackages',
|
||||
'http://vault.centos.org/7.4.1708/updates/Source/SPackages'],
|
||||
['/import/mirrors/CentOS/vault.centos.org/7.4.1708/cloud/Source/openstack-newton/common',
|
||||
'http://vault.centos.org/7.4.1708/cloud/Source/openstack-newton/common'],
|
||||
['/import/mirrors/CentOS/vault.centos.org/7.4.1708/cloud/Source/openstack-newton',
|
||||
'http://vault.centos.org/7.4.1708/cloud/Source/openstack-newton'],
|
||||
['/import/mirrors/CentOS/vault.centos.org/7.4.1708/cloud/Source/openstack-mitaka/common',
|
||||
'http://vault.centos.org/7.4.1708/cloud/Source/openstack-mitaka/common'],
|
||||
['/import/mirrors/CentOS/vault.centos.org/7.4.1708/cloud/Source/openstack-mitaka',
|
||||
'http://vault.centos.org/7.4.1708/cloud/Source/openstack-mitaka'],
|
||||
['/import/mirrors/CentOS/7.4.1708/extras/Source/SPackages',
|
||||
'http://vault.centos.org/7.4.1708/extras/Source/SPackages'],
|
||||
# CentOS 7.3
|
||||
['/import/mirrors/CentOS/7.3.1611/os/Source/SPackages',
|
||||
'http://vault.centos.org/7.3.1611/os/Source/SPackages'],
|
||||
['/import/mirrors/CentOS/vault.centos.org/7.3.1611/updates/Source/SPackages',
|
||||
'http://vault.centos.org/7.3.1611/updates/Source/SPackages'],
|
||||
['/import/mirrors/CentOS/vault.centos.org/7.3.1611/cloud/Source/openstack-newton/common',
|
||||
'http://vault.centos.org/7.3.1611/cloud/Source/openstack-newton/common'],
|
||||
['/import/mirrors/CentOS/vault.centos.org/7.3.1611/cloud/Source/openstack-newton',
|
||||
'http://vault.centos.org/7.3.1611/cloud/Source/openstack-newton'],
|
||||
['/import/mirrors/CentOS/vault.centos.org/7.3.1611/cloud/Source/openstack-mitaka/common',
|
||||
'http://vault.centos.org/7.3.1611/cloud/Source/openstack-mitaka/common'],
|
||||
['/import/mirrors/CentOS/vault.centos.org/7.3.1611/cloud/Source/openstack-mitaka',
|
||||
'http://vault.centos.org/7.3.1611/cloud/Source/openstack-mitaka'],
|
||||
['/import/mirrors/CentOS/7.3.1611/extras/Source/SPackages',
|
||||
'http://vault.centos.org/7.3.1611/extras/Source/SPackages'],
|
||||
# CentOS 7.2
|
||||
['/import/mirrors/CentOS/7.2.1511/os/Source/SPackages', 'http://vault.centos.org/7.2.1511/os/Source/SPackages'],
|
||||
['/import/mirrors/CentOS/vault.centos.org/7.2.1511/updates/Source/SPackages',
|
||||
'http://vault.centos.org/7.2.1511/updates/Source/SPackages'],
|
||||
['/import/mirrors/CentOS/vault.centos.org/7.2.1511/cloud/Source/openstack-mitaka/common',
|
||||
'http://vault.centos.org/7.2.1511/cloud/Source/openstack-mitaka/common'],
|
||||
['/import/mirrors/CentOS/vault.centos.org/7.2.1511/cloud/Source/openstack-mitaka',
|
||||
'http://vault.centos.org/7.2.1511/cloud/Source/openstack-mitaka'],
|
||||
['/import/mirrors/CentOS/7.2.1511/extras/Source/SPackages',
|
||||
'http://vault.centos.org/7.2.1511/extras/Source/SPackages'],
|
||||
['/import/mirrors/CentOS/tis-r4-CentOS/newton/Source', 'Unknown'],
|
||||
['/import/mirrors/CentOS/tis-r4-CentOS/tis-r4-3rd-Party', 'Unknown']
|
||||
|
||||
]
|
||||
|
||||
def __init__(self, workspace=None, repo=None):
|
||||
self.workspace = None
|
||||
self.repo = None
|
||||
self.shipped_binaries = list()
|
||||
self.built_binaries = list()
|
||||
self.check_env()
|
||||
if workspace is not None:
|
||||
self.workspace = workspace
|
||||
if repo is not None:
|
||||
self.repo = repo
|
||||
|
||||
# Generate a list of binaries that we shipped
|
||||
for filename in os.listdir(self.workspace + '/export/dist/isolinux/Packages'):
|
||||
if filename.endswith('rpm'):
|
||||
self.shipped_binaries.append(filename)
|
||||
|
||||
# Generate a list of binaries that we built ourselves
|
||||
for build in ['rt', 'std']:
|
||||
for filename in os.listdir(self.workspace + '/' + build + '/rpmbuild/RPMS/'):
|
||||
if filename.endswith('rpm'):
|
||||
self.built_binaries.append(filename)
|
||||
|
||||
print('Looking up packages for which we have source...')
|
||||
self.original_src_pkgs = dict()
|
||||
self.build_original_src_pkgs()
|
||||
print('Looking up packages we built...')
|
||||
self.built_src_pkgs = dict()
|
||||
self.build_built_src_pkgs()
|
||||
print('Loading hard-coded source lookup tables...')
|
||||
self.hardcoded_lookup_dict = dict()
|
||||
self.build_hardcoded_lookup_dict()
|
||||
|
||||
def build_hardcoded_lookup_dict(self):
|
||||
with open(self.repo + '/build-tools/source_lookup.txt', 'r') as lookup_file:
|
||||
for line in lookup_file:
|
||||
line = line.rstrip()
|
||||
words = line.split()
|
||||
if (words is not None) and (len(words) >= 2):
|
||||
self.hardcoded_lookup_dict[words[1]] = (words[0], False)
|
||||
|
||||
with open(self.repo + '/build-tools/wrs_orig.txt', 'r') as lookup_file:
|
||||
for line in lookup_file:
|
||||
line = line.rstrip()
|
||||
words = line.split()
|
||||
if (words is not None) and (len(words) >= 1):
|
||||
self.hardcoded_lookup_dict[words[0]] = ('No download', True)
|
||||
|
||||
@staticmethod
|
||||
def path_to_origin(filepath):
|
||||
for path in IPReport.__KNOWN_PATHS:
|
||||
if filepath.startswith(path[0]) and (not path[1].lower().startswith('unknown')):
|
||||
return path[1] + '/' + os.path.basename(filepath)
|
||||
return 'Unknown'
|
||||
|
||||
def hardcoded_lookup(self, package_name):
|
||||
if package_name in self.hardcoded_lookup_dict.keys():
|
||||
return self.hardcoded_lookup_dict[package_name]
|
||||
return None, False
|
||||
|
||||
def check_env(self):
|
||||
if 'MY_WORKSPACE' in os.environ:
|
||||
self.workspace = os.environ['MY_WORKSPACE']
|
||||
else:
|
||||
print('Could not find $MY_WORKSPACE')
|
||||
raise IOError('Could not find $MY_WORKSPACE')
|
||||
|
||||
if 'MY_REPO' in os.environ:
|
||||
self.repo = os.environ['MY_REPO']
|
||||
else:
|
||||
print('Could not find $MY_REPO')
|
||||
raise IOError('Could not find $MY_REPO')
|
||||
|
||||
def do_bin_pkgs(self):
|
||||
print('Gathering binary package information')
|
||||
self.read_bin_pkgs()
|
||||
|
||||
def read_bin_pkgs(self):
|
||||
self.bin_pkgs = list()
|
||||
ts = rpm.TransactionSet()
|
||||
ts.setVSFlags(rpm._RPMVSF_NODIGESTS | rpm._RPMVSF_NOSIGNATURES)
|
||||
for filename in self.shipped_binaries:
|
||||
if filename.endswith('rpm'):
|
||||
bin_pkg = BinPackage(self.workspace + '/export/dist/isolinux/Packages/' + filename, ts)
|
||||
self.bin_pkgs.append(bin_pkg)
|
||||
|
||||
def do_src_report(self, copy_packages=False, do_wrs=True, delta_file=None, output_path=None, strip_unchanged=False):
|
||||
self.bin_to_src()
|
||||
self.src_pkgs.sort()
|
||||
|
||||
if delta_file is not None:
|
||||
self.delta(delta_file)
|
||||
|
||||
if output_path is None:
|
||||
output_path = self.workspace + '/export/ip_report'
|
||||
|
||||
# Create output dir (if required)
|
||||
if not os.path.exists(output_path):
|
||||
os.makedirs(output_path)
|
||||
|
||||
# Create paths for RPMs (if required)
|
||||
if copy_packages:
|
||||
if not os.path.exists(output_path + '/non_wrs'):
|
||||
shutil.rmtree(output_path + '/non_wrs', True)
|
||||
os.makedirs(output_path + '/non_wrs')
|
||||
if do_wrs:
|
||||
shutil.rmtree(output_path + '/wrs', True)
|
||||
os.makedirs(output_path + '/wrs')
|
||||
|
||||
with open(output_path + '/srcreport.csv', 'wb') as src_report_file:
|
||||
src_report_writer = csv.writer(src_report_file)
|
||||
|
||||
# Write header row
|
||||
src_report_writer.writerow(
|
||||
['Package File', 'File Name', 'Package Name', 'Version', 'SHA1', 'Disclosed By',
|
||||
'Description', 'Part Of (Runtime, Host, Both)', 'Modified (Yes, No)', 'Hardware Interfacing (Yes, No)',
|
||||
'License(s) Found', 'Package Download URL', 'Kernel module', 'Notes'])
|
||||
|
||||
for src_pkg in self.src_pkgs:
|
||||
if src_pkg.modified:
|
||||
modified_string = 'Yes'
|
||||
else:
|
||||
modified_string = 'No'
|
||||
if src_pkg.kernel_module:
|
||||
kmod_string = 'Yes'
|
||||
else:
|
||||
kmod_string = 'No'
|
||||
|
||||
# Copy the package and get the SHA
|
||||
if copy_packages:
|
||||
if src_pkg.wrs is False:
|
||||
shutil.copyfile(src_pkg.path, output_path + '/non_wrs/' + src_pkg.filename)
|
||||
shasumout = subprocess.check_output(
|
||||
['shasum', output_path + '/non_wrs/' + src_pkg.filename]).split()[0]
|
||||
src_pkg.sha = shasumout
|
||||
if strip_unchanged and (src_pkg.notes.lower().startswith('unchanged')):
|
||||
os.remove(output_path + '/non_wrs/' + src_pkg.filename)
|
||||
else:
|
||||
if do_wrs:
|
||||
shutil.copyfile(src_pkg.path, output_path + '/wrs/' + src_pkg.filename)
|
||||
shasumout = subprocess.check_output(
|
||||
['shasum', output_path + '/wrs/' + src_pkg.filename]).split()[0]
|
||||
src_pkg.sha = shasumout
|
||||
if strip_unchanged and (src_pkg.notes.lower().startswith('unchanged')):
|
||||
os.remove(output_path + '/wrs/' + src_pkg.filename)
|
||||
|
||||
if do_wrs or (src_pkg.wrs is False):
|
||||
src_report_writer.writerow(
|
||||
[src_pkg.filename, src_pkg.name, src_pkg.version, src_pkg.sha, src_pkg.disclosed_by,
|
||||
src_pkg.desc, 'Runtime', src_pkg.shipped_as, modified_string, 'No', src_pkg.licences,
|
||||
src_pkg.origin, kmod_string, src_pkg.notes])
|
||||
if 'unknown' in src_pkg.origin.lower():
|
||||
print(
|
||||
'Warning: Could not determine origin of ' + src_pkg.name + '. Please investigate/populate manually')
|
||||
|
||||
def bin_to_src(self):
|
||||
self.src_pkgs = list()
|
||||
src_pkg_names = list()
|
||||
for bin_pkg in self.bin_pkgs:
|
||||
if src_pkg_names.__contains__(bin_pkg.source):
|
||||
if bin_pkg.kernel_module:
|
||||
for src_pkg in self.src_pkgs:
|
||||
if src_pkg.filename == bin_pkg.source:
|
||||
src_pkg.kernel_module = True
|
||||
break
|
||||
|
||||
continue
|
||||
|
||||
# if we reach here, then the source package is not yet in our db.
|
||||
# we first search for the source package in the built-rpms
|
||||
if 'shim-signed' in bin_pkg.source:
|
||||
for tmp in self.built_src_pkgs:
|
||||
if 'shim-signed' in tmp:
|
||||
print('shim-signed hack -- ' + bin_pkg.source + ' to ' + tmp)
|
||||
bin_pkg.source = tmp
|
||||
break
|
||||
if 'shim-unsigned' in bin_pkg.source:
|
||||
for tmp in self.built_src_pkgs:
|
||||
if 'shim-0' in tmp:
|
||||
print('shim-unsigned hack -- ' + bin_pkg.source + ' to ' + tmp)
|
||||
bin_pkg.source = tmp
|
||||
break
|
||||
if 'grub2-efi-pxeboot' in bin_pkg.source:
|
||||
for tmp in self.built_src_pkgs:
|
||||
if 'grub2-2' in tmp:
|
||||
print('grub2-efi-pxeboot hack -- ' + bin_pkg.source + ' to ' + tmp)
|
||||
bin_pkg.source = tmp
|
||||
break
|
||||
|
||||
if bin_pkg.source in self.built_src_pkgs:
|
||||
src_pkg = self.built_src_pkgs[bin_pkg.source]
|
||||
src_pkg.modified = True
|
||||
|
||||
# First guess, we see if there's an original source with the source package name
|
||||
# (this is 99% of the cases)
|
||||
src_pkg_orig_name = src_pkg.name
|
||||
if src_pkg_orig_name in self.original_src_pkgs:
|
||||
src_pkg.original_src = self.original_src_pkgs[src_pkg_orig_name]
|
||||
src_pkg.origin = src_pkg.original_src.origin
|
||||
|
||||
else:
|
||||
src_pkg_path = self.locate_in_mirror(bin_pkg.source)
|
||||
if not os.path.isabs(src_pkg_path):
|
||||
continue
|
||||
src_pkg = SrcPackage(src_pkg_path)
|
||||
src_pkg.origin = IPReport.path_to_origin(src_pkg_path)
|
||||
src_pkg.modified = False
|
||||
|
||||
if bin_pkg.kernel_module:
|
||||
src_pkg.kernel_module = True
|
||||
|
||||
src_pkg_names.append(bin_pkg.source)
|
||||
self.src_pkgs.append(src_pkg)
|
||||
|
||||
if src_pkg.origin.lower() == 'unknown':
|
||||
if 'windriver' in src_pkg.licences.lower():
|
||||
src_pkg.origin = 'No download'
|
||||
else:
|
||||
if src_pkg.url is not None:
|
||||
src_pkg.origin = src_pkg.url
|
||||
|
||||
if 'unknown' in src_pkg.origin.lower():
|
||||
(orig, is_wrs) = self.hardcoded_lookup(src_pkg.name)
|
||||
if orig is not None:
|
||||
src_pkg.origin = orig
|
||||
src_pkg.wrs = is_wrs
|
||||
|
||||
if (src_pkg.origin.lower() == 'no download') and ('windriver' in src_pkg.licences.lower()):
|
||||
src_pkg.wrs = True
|
||||
|
||||
def locate_in_mirror(self, filename):
|
||||
""" takes an RPM filename and finds the full path of the file """
|
||||
|
||||
fullpath = None
|
||||
|
||||
# Old or new location of centos repo?
|
||||
if os.path.isdir(self.repo + '/centos-repo/'):
|
||||
filename = filename.replace('mirror:', self.repo + '/centos-repo/')
|
||||
elif os.path.isdir(self.repo + '/cgcs-centos-repo/'):
|
||||
filename = filename.replace('mirror:', self.repo + '/cgcs-centos-repo/')
|
||||
else:
|
||||
filename = filename.replace('mirror:', self.repo + '/centos-repo/')
|
||||
|
||||
filename = filename.replace('repo:', self.repo + '/')
|
||||
|
||||
# At this point, filename could be a complete path (incl symlink), or just a filename
|
||||
best_guess = filename
|
||||
filename = os.path.basename(filename)
|
||||
|
||||
for path in IPReport.__KNOWN_PATHS:
|
||||
if os.path.exists(path[0] + '/' + filename):
|
||||
fullpath = path[0] + '/' + filename
|
||||
break
|
||||
|
||||
if fullpath is not None:
|
||||
return fullpath
|
||||
else:
|
||||
return best_guess
|
||||
|
||||
def build_original_src_pkgs(self):
|
||||
for root, dirs, files in os.walk(self.repo):
|
||||
for name in files:
|
||||
if name == 'srpm_path':
|
||||
with open(os.path.join(root, 'srpm_path'), 'r') as srpm_path_file:
|
||||
original_srpm_file = srpm_path_file.readline().rstrip()
|
||||
original_src_pkg_path = self.locate_in_mirror(original_srpm_file)
|
||||
original_src_pkg = SrcPackage(original_src_pkg_path)
|
||||
original_src_pkg.origin = IPReport.path_to_origin(original_src_pkg_path)
|
||||
self.original_src_pkgs[original_src_pkg.name] = original_src_pkg
|
||||
|
||||
def build_built_src_pkgs(self):
|
||||
""" Create a dict of any source package that we built ourselves """
|
||||
for build in ['std', 'rt']:
|
||||
for root, dirs, files in os.walk(self.workspace + '/' + build + '/rpmbuild/SRPMS'):
|
||||
for name in files:
|
||||
if name.endswith('.src.rpm'):
|
||||
built_src_pkg = SrcPackage(os.path.join(root, name))
|
||||
self.built_src_pkgs[built_src_pkg.filename] = built_src_pkg
|
||||
|
||||
def delta(self, orig_report):
|
||||
if orig_report is None:
|
||||
return
|
||||
delta_src_pkgs = self.read_last_report(orig_report)
|
||||
|
||||
for pkg in self.src_pkgs:
|
||||
if pkg.name in delta_src_pkgs:
|
||||
old_pkg = delta_src_pkgs[pkg.name]
|
||||
if old_pkg.version == pkg.version:
|
||||
pkg.notes = 'Unchanged'
|
||||
else:
|
||||
pkg.notes = 'New version'
|
||||
else:
|
||||
pkg.notes = 'New package'
|
||||
|
||||
def read_last_report(self, orig_report):
|
||||
orig_pkg_dict = dict()
|
||||
with open(orig_report, 'rb') as orig_report_file:
|
||||
orig_report_reader = csv.reader(orig_report_file)
|
||||
doneHeader = False
|
||||
for row in orig_report_reader:
|
||||
if (not doneHeader) and ('package file name' in row[0].lower()):
|
||||
doneHeader = True
|
||||
continue
|
||||
doneHeader = True
|
||||
orig_pkg = SrcPackage()
|
||||
orig_pkg.filename = row[0]
|
||||
orig_pkg.name = row[1]
|
||||
orig_pkg.version = row[2]
|
||||
# sha = row[3]
|
||||
orig_pkg.disclosed_by = row[4]
|
||||
orig_pkg.desc = row[5]
|
||||
# runtime = row[6]
|
||||
orig_pkg.shipped_as = row[7]
|
||||
if row[8].lower() == 'yes':
|
||||
orig_pkg.modified = True
|
||||
else:
|
||||
orig_pkg.modified = False
|
||||
# hardware interfacing = row[9]
|
||||
orig_pkg.licences = row[10]
|
||||
orig_pkg.origin = row[11]
|
||||
if row[12].lower() == 'yes':
|
||||
orig_pkg.kernel_module = True
|
||||
else:
|
||||
orig_pkg.kernel_module = False
|
||||
orig_pkg_dict[orig_pkg.name] = orig_pkg
|
||||
|
||||
return orig_pkg_dict
|
||||
|
||||
|
||||
def main(argv):
|
||||
# handle command line arguments
|
||||
# -h/--help -- help
|
||||
# -n/--no-copy -- do not copy files (saves time)
|
||||
# -d/--delta= -- compare with an earlier report
|
||||
# -o/--output= -- output report/binaries to specified path
|
||||
# -w/--workspace= -- use specified workspace instead of $WORKSPACE
|
||||
# -r/--repo= -- use specified repo instead of $MY_REPO
|
||||
# -s -- strip (remove) unchanged packages from copy out directory
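# A hypothetical example invocation (file names are illustrative only):
#   ip_report.py --delta=last_ip_report.csv --output=/tmp/ip_report -s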
|
||||
|
||||
try:
|
||||
opts, args = getopt.getopt(argv, "hnd:o:w:r:s",
|
||||
["delta=", "help", "no-copy", "workspace=", "repo=", "output=", "--strip"])
|
||||
except getopt.GetoptError:
|
||||
# todo - output help
|
||||
sys.exit(2)
|
||||
delta_file = None
|
||||
do_copy = True
|
||||
workspace = None
|
||||
repo = None
|
||||
output_path = None
|
||||
strip_unchanged = False
|
||||
|
||||
for opt, arg in opts:
|
||||
if opt in ('-h', '--help'):
|
||||
print('usage:')
|
||||
print(' ip_report.py [options]')
|
||||
print(' Creates an IP report in $MY_WORKSPACE/export/ip_report ')
|
||||
print(' Source RPMs (both Wind River and non WR) are placed in subdirs within that path')
|
||||
print('')
|
||||
print('Options:')
|
||||
print(' -h/--help - this help')
|
||||
print(' -d <file>/--delta=<file> - create "notes" field, comparing report with a previous report')
|
||||
print(' -n/--no-copy - do not copy files into subdirs (this is faster, but means you')
|
||||
print(' don\'t get SHA sums for files)')
|
||||
print(' -w <path>/--workspace=<path> - use the specified path as workspace, instead of $MY_WORKSPACE')
|
||||
print(' -r <path>/--repo=<path> - use the specified path as repo, instead of $MY_REPO')
|
||||
print(' -o <path>/--output=<path> - output to specified path (instead of $MY_WORKSPACE/export/ip_report)')
|
||||
print(' -s/--strip - strip (remove) unchanged files if copied')
|
||||
exit()
|
||||
elif opt in ('-d', '--delta'):
|
||||
delta_file = os.path.normpath(arg)
|
||||
delta_file = os.path.expanduser(delta_file)
|
||||
if not os.path.exists(delta_file):
|
||||
print('Cannot locate ' + delta_file)
|
||||
exit(1)
|
||||
elif opt in ('-w', '--workspace'):
|
||||
workspace = os.path.normpath(arg)
|
||||
workspace = os.path.expanduser(workspace)
|
||||
elif opt in ('-r', '--repo'):
|
||||
repo = os.path.normpath(arg)
|
||||
repo = os.path.expanduser(repo)
|
||||
elif opt in ('-o', '--output'):
|
||||
output_path = os.path.normpath(arg)
|
||||
output_path = os.path.expanduser(output_path)
|
||||
elif opt in ('-n', '--no-copy'):
|
||||
do_copy = False
|
||||
elif opt in ('-s', '--strip'):
|
||||
strip_unchanged = True
|
||||
|
||||
print('Doing IP report')
|
||||
if delta_file is not None:
|
||||
print('Delta from ' + delta_file)
|
||||
else:
|
||||
print('No delta specified')
|
||||
ip_report = IPReport(workspace=workspace, repo=repo)
|
||||
|
||||
ip_report.do_bin_pkgs()
|
||||
ip_report.do_src_report(copy_packages=do_copy,
|
||||
delta_file=delta_file,
|
||||
output_path=output_path,
|
||||
strip_unchanged=strip_unchanged)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main(sys.argv[1:])
|
@ -1,343 +0,0 @@
|
||||
#!/bin/bash -e
|
||||
## this script is called by "update-pxe-network-installer" and must be run with sudo
|
||||
## created by Yong Hu (yong.hu@intel.com), 05/24/2018
|
||||
|
||||
function clean_rootfs {
|
||||
rootfs_dir=$1
|
||||
echo "--> remove old files in original rootfs"
|
||||
conf="$(ls ${rootfs_dir}/etc/ld.so.conf.d/kernel-*.conf)"
|
||||
echo "conf basename = $(basename $conf)"
|
||||
old_version="tbd"
|
||||
if [ -f $conf ]; then
|
||||
old_version="$(echo $(basename $conf) | rev | cut -d'.' -f2- | rev | cut -d'-' -f2-)"
|
||||
fi
|
||||
echo "old version is $old_version"
|
||||
# remove old files in original initrd.img
|
||||
# do this in chroot to avoid accidental wrong operations on the host root
|
||||
chroot $rootfs_dir /bin/bash -x <<EOF
|
||||
rm -rf ./boot/ ./etc/modules-load.d/
|
||||
if [ -n $old_version ] && [ -f ./etc/ld.so.conf.d/kernel-${old_version}.conf ]; then
|
||||
rm -rf ./etc/ld.so.conf.d/kernel-${old_version}.conf
|
||||
rm -rf ./lib/modules/${old_version}
|
||||
fi
|
||||
if [ -d ./usr/lib64/python2.7/site-packages/pyanaconda/ ];then
|
||||
rm -rf usr/lib64/python2.7/site-packages/pyanaconda/
|
||||
fi
|
||||
if [ -d ./usr/lib64/python2.7/site-packages/rpm/ ];then
|
||||
rm -rf usr/lib64/python2.7/site-packages/rpm/
|
||||
fi
|
||||
#find old .pyo files and delete them
|
||||
all_pyo="`find ./usr/lib64/python2.7/site-packages/pyanaconda/ usr/lib64/python2.7/site-packages/rpm/ -name *.pyo`"
|
||||
if [ -n "$all_pyo" ]; then
|
||||
for pyo in $all_pyo;do
|
||||
rm -f $pyo
|
||||
done
|
||||
fi
|
||||
exit
|
||||
EOF
|
||||
#back to previous folder
|
||||
}
|
||||
|
||||
|
||||
echo "This script makes new initrd.img, vmlinuz and squashfs.img."
|
||||
echo "NOTE: it has to be executed with *root*!"
|
||||
|
||||
if [ $# -lt 1 ];then
|
||||
echo "$0 <work_dir>"
|
||||
exit -1;
|
||||
fi
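# For reference, the checks below assume a work_dir laid out roughly as:
#   <work_dir>/orig/{initrd.img,squashfs.img}
#   <work_dir>/kernel-rpms/std/*.rpm   <work_dir>/firmware-rpms/std/*.rpm
#   <work_dir>/rootfs-rpms/*.rpm       <work_dir>/firmware-list (optional)
# with new-initrd.img, new-vmlinuz and new-squashfs.img written to <work_dir>/output/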
|
||||
|
||||
work_dir=$1
|
||||
output_dir=$work_dir/output
|
||||
if [ ! -d $output_dir ]; then
|
||||
mkdir -p $output_dir;
|
||||
fi
|
||||
|
||||
timestamp=$(date +%F_%H%M)
|
||||
|
||||
echo "---------------- start to make new initrd.img and vmlinuz -------------"
|
||||
ORIG_INITRD=$work_dir/orig/initrd.img
|
||||
if [ ! -f $ORIG_INITRD ];then
|
||||
echo "ERROR: $ORIG_INITRD does NOT exist!"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
kernel_rpms_dir=$work_dir/kernel-rpms
|
||||
if [ ! -d $kernel_rpms_dir ];then
|
||||
echo "ERROR: $kernel_rpms_dir does NOT exist!"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
firmware_rpms_dir=${work_dir}/firmware-rpms
|
||||
if [ ! -d ${firmware_rpms_dir} ];then
|
||||
echo "ERROR: ${firmware_rpms_dir} does NOT exist!"
|
||||
exit -1
|
||||
fi
|
||||
firmware_list_file=${work_dir}/firmware-list
|
||||
|
||||
|
||||
initrd_root=$work_dir/initrd.work
|
||||
if [ -d $initrd_root ];then
|
||||
rm -rf $initrd_root
|
||||
fi
|
||||
mkdir -p $initrd_root
|
||||
|
||||
cd $initrd_root
|
||||
# uncompress initrd.img
|
||||
echo "--> uncompress original initrd.img"
|
||||
/usr/bin/xzcat $ORIG_INITRD | cpio -i
|
||||
|
||||
echo "--> clean up $initrd_root"
|
||||
clean_rootfs $initrd_root
|
||||
|
||||
echo "--> extract files from new kernel and its modular rpms to initrd root"
|
||||
for kf in ${kernel_rpms_dir}/std/*.rpm ; do rpm2cpio $kf | cpio -idu; done
|
||||
|
||||
echo "--> extract files from new firmware rpms to initrd root"
|
||||
if [ -f ${firmware_list_file} ]; then
|
||||
echo "--> extract files from new firmware rpm to initrd root"
|
||||
firmware_list=`cat ${firmware_list_file}`
|
||||
for fw in ${firmware_rpms_dir}/std/*.rpm ; do rpm2cpio ${fw} | cpio -iduv ${firmware_list}; done
|
||||
fi
|
||||
|
||||
# by now new kernel and its modules exist!
|
||||
# find new kernel in /boot/vmlinuz-* or /lib/modules/*/vmlinuz
|
||||
echo "--> get new kernel image: vmlinuz"
|
||||
new_kernel="$(ls ./boot/vmlinuz-* 2>/dev/null || ls ./lib/modules/*/vmlinuz 2>/dev/null || true)"
|
||||
echo "New kernel: \"${new_kernel}\""
|
||||
if [ -f "${new_kernel}" ];then
|
||||
# copy out the new kernel
|
||||
if [ -f $output_dir/new-vmlinuz ]; then
|
||||
mv -f $output_dir/new-vmlinuz $output_dir/vmlinuz-backup-$timestamp
|
||||
fi
|
||||
cp -f $new_kernel $output_dir/new-vmlinuz
|
||||
|
||||
if echo "${new_kernel}" | grep -q '^\./boot/vmlinuz'; then
|
||||
kernel_name=$(basename $new_kernel)
|
||||
new_ver=$(echo $kernel_name | cut -d'-' -f2-)
|
||||
system_map="boot/System.map-${new_ver}"
|
||||
elif echo "${new_kernel}" | grep -q '^\./lib/modules/'; then
|
||||
new_ver="$(echo "${new_kernel}" | sed 's#^\./lib/modules/\([^/]\+\)/.*$#\1#')"
|
||||
system_map="lib/modules/${new_ver}/System.map"
|
||||
else
|
||||
echo "Unrecognized new kernel path: ${new_kernel}"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
if [ -z "${new_ver}" ]; then
|
||||
echo "Could not determine new kernel version"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
echo "New kernel version: ${new_ver}"
|
||||
|
||||
if ! [ -f "${system_map}" ]; then
|
||||
echo "Could not find System.map file at: ${system_map}"
|
||||
exit -1
|
||||
fi
|
||||
else
|
||||
echo "ERROR: new kernel is NOT found!"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
echo "-->check module dependencies in new initrd.img in chroot context"
|
||||
chroot $initrd_root /bin/bash -x <<EOF
|
||||
/usr/sbin/depmod -aeF "/${system_map}" "$new_ver"
|
||||
if [ $? == 0 ]; then echo "module dependencies are satisfied!" ; fi
|
||||
## Remove the biosdevname package!
|
||||
rm -f ./usr/lib/udev/rules.d/71-biosdevname.rules ./usr/sbin/biosdevname
|
||||
exit
|
||||
EOF
|
||||
|
||||
echo "-->patch usr/lib/net-lib.sh with IPv6 improvements from newer dracut"
|
||||
patch usr/lib/net-lib.sh <<EOF
|
||||
--- ../initrd.orig/usr/lib/net-lib.sh 2020-08-18 19:37:17.063163840 -0400
|
||||
+++ usr/lib/net-lib.sh 2020-08-19 09:47:15.237089800 -0400
|
||||
@@ -645,7 +645,8 @@
|
||||
timeout=\$((\$timeout*10))
|
||||
|
||||
while [ \$cnt -lt \$timeout ]; do
|
||||
- [ -z "\$(ip -6 addr show dev "\$1" scope link tentative)" ] \\
|
||||
+ [ -n "\$(ip -6 addr show dev "\$1" scope link)" ] \\
|
||||
+ && [ -z "\$(ip -6 addr show dev "\$1" scope link tentative)" ] \\
|
||||
&& return 0
|
||||
[ -n "\$(ip -6 addr show dev "\$1" scope link dadfailed)" ] \\
|
||||
&& return 1
|
||||
@@ -662,7 +663,9 @@
|
||||
timeout=\$((\$timeout*10))
|
||||
|
||||
while [ \$cnt -lt \$timeout ]; do
|
||||
- [ -z "\$(ip -6 addr show dev "\$1" tentative)" ] \\
|
||||
+ [ -n "\$(ip -6 addr show dev "\$1")" ] \\
|
||||
+ && [ -z "\$(ip -6 addr show dev "\$1" tentative)" ] \\
|
||||
+ && [ -n "\$(ip -6 route list proto ra dev "\$1" | grep ^default)" ] \\
|
||||
&& return 0
|
||||
[ -n "\$(ip -6 addr show dev "\$1" dadfailed)" ] \\
|
||||
&& return 1
|
||||
@@ -679,8 +682,9 @@
|
||||
timeout=\$((\$timeout*10))
|
||||
|
||||
while [ \$cnt -lt \$timeout ]; do
|
||||
- [ -z "\$(ip -6 addr show dev "\$1" tentative)" ] \\
|
||||
- && [ -n "\$(ip -6 route list proto ra dev "\$1")" ] \\
|
||||
+ [ -n "\$(ip -6 addr show dev "\$1")" ] \\
|
||||
+ && [ -z "\$(ip -6 addr show dev "\$1" tentative)" ] \\
|
||||
+ && [ -n "\$(ip -6 route list proto ra dev "\$1" | grep ^default)" ] \\
|
||||
&& return 0
|
||||
sleep 0.1
|
||||
cnt=\$((\$cnt+1))
|
||||
EOF
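# In short, the patched waits only succeed once an address is actually present
# (and, for the RA-based case, a default route learned via RA exists), rather
# than merely once no tentative addresses remain.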
|
||||
|
||||
echo "-->patch usr/lib/dracut/hooks/pre-trigger/03-lldpad.sh with rd.fcoe disabling support"
|
||||
patch usr/lib/dracut/hooks/pre-trigger/03-lldpad.sh <<EOF
|
||||
--- ../initrd.orig/usr/lib/dracut/hooks/pre-trigger/03-lldpad.sh 2021-05-12 16:32:44.007007124 -0400
|
||||
+++ usr/lib/dracut/hooks/pre-trigger/03-lldpad.sh 2021-05-12 16:35:31.321509139 -0400
|
||||
@@ -1,5 +1,10 @@
|
||||
#!/bin/bash
|
||||
|
||||
+if ! getargbool 0 rd.fcoe -d -n rd.nofcoe; then
|
||||
+ info "rd.fcoe=0: skipping lldpad activation"
|
||||
+ return 0
|
||||
+fi
|
||||
+
|
||||
# Note lldpad will stay running after switchroot, the system initscripts
|
||||
# are to kill it and start a new lldpad to take over. Data is transfered
|
||||
# between the 2 using a shm segment
|
||||
EOF
|
||||
|
||||
echo "-->patch usr/lib/dracut/hooks/cmdline/99-parse-fcoe.sh with rd.fcoe disabling support"
|
||||
patch usr/lib/dracut/hooks/cmdline/99-parse-fcoe.sh <<EOF
|
||||
--- ../initrd.orig/usr/lib/dracut/hooks/cmdline/99-parse-fcoe.sh 2021-05-12 16:32:44.008007121 -0400
|
||||
+++ usr/lib/dracut/hooks/cmdline/99-parse-fcoe.sh 2021-05-12 16:36:56.874254504 -0400
|
||||
@@ -20,6 +20,10 @@
|
||||
# If it's not set we don't continue
|
||||
[ -z "$fcoe" ] && return
|
||||
|
||||
+if ! getargbool 0 rd.fcoe -d -n rd.nofcoe; then
|
||||
+ info "rd.fcoe=0: skipping fcoe"
|
||||
+ return 0
|
||||
+fi
|
||||
|
||||
# BRCM: Later, should check whether bnx2x is loaded first before loading bnx2fc so do not load bnx2fc when there are no Broadcom adapters
|
||||
[ -e /sys/bus/fcoe/ctlr_create ] || modprobe -b -a fcoe || die "FCoE requested but kernel/initrd does not support FCoE"
|
||||
EOF
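# With both hooks patched, FCoE handling can be suppressed from the kernel
# command line, e.g. rd.fcoe=0 (the hooks log "rd.fcoe=0: skipping ..." and
# return early).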
|
||||
|
||||
echo "--> Rebuild the initrd"
|
||||
if [ -f $output_dir/new-initrd.img ]; then
|
||||
mv -f $output_dir/new-initrd.img $output_dir/initrd.img-backup-$timestamp
|
||||
fi
|
||||
find . | cpio -o -H newc | xz --check=crc32 --x86 --lzma2=dict=512KiB > $output_dir/new-initrd.img
|
||||
if [ $? != 0 ];then
|
||||
echo "ERROR: failed to create new initrd.img"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
cd $work_dir
|
||||
|
||||
if [ -f $output_dir/new-initrd.img ];then
|
||||
ls -l $output_dir/new-initrd.img
|
||||
else
|
||||
echo "ERROR: new-initrd.img is not generated!"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
if [ -f $output_dir/new-vmlinuz ];then
|
||||
ls -l $output_dir/new-vmlinuz
|
||||
else
|
||||
echo "ERROR: new-vmlinuz is not generated!"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
echo "---------------- start to make new squashfs.img -------------"
|
||||
ORIG_SQUASHFS=$work_dir/orig/squashfs.img
|
||||
if [ ! -f $ORIG_SQUASHFS ];then
|
||||
echo "ERROR: $ORIG_SQUASHFS does NOT exist!"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
rootfs_rpms_dir=$work_dir/rootfs-rpms
|
||||
if [ ! -d $rootfs_rpms_dir ];then
|
||||
echo "ERROR: $rootfs_rpms_dir does NOT exist!"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
# make sure squashfs.mnt is ready and unmounted
|
||||
if [ ! -d $work_dir/squashfs.mnt ];then
|
||||
mkdir -p $work_dir/squashfs.mnt
|
||||
else
|
||||
# in case it was mounted previously
|
||||
mnt_path=$(mount | grep "squashfs.mnt" | cut -d' ' -f3-3)
|
||||
if [ x"$mnt_path" != "x" ] && [ "$(basename $mnt_path)" == "squashfs.mnt" ];then
|
||||
umount $work_dir/squashfs.mnt
|
||||
fi
|
||||
fi
|
||||
|
||||
# make squashfs.work ready and umounted
|
||||
squashfs_root="$work_dir/squashfs.work"
|
||||
# Now mount the rootfs.img file:
|
||||
if [ ! -d $squashfs_root ];then
|
||||
mkdir -p $squashfs_root
|
||||
else
|
||||
# in case it was mounted previously
|
||||
mnt_path=$(mount | grep "$(basename $squashfs_root)" | cut -d' ' -f3-3)
|
||||
if [ x"$mnt_path" != "x" ] && [ "$(basename $mnt_path)" == "$(basename $squashfs_root)" ];then
|
||||
umount $squashfs_root
|
||||
fi
|
||||
fi
|
||||
|
||||
echo $ORIG_SQUASHFS
|
||||
mount -o loop -t squashfs $ORIG_SQUASHFS $work_dir/squashfs.mnt
|
||||
|
||||
if [ ! -d ./LiveOS ]; then
|
||||
mkdir -p ./LiveOS
|
||||
fi
|
||||
|
||||
echo "--> copy rootfs.img from original squashfs.img to LiveOS folder"
|
||||
cp -f ./squashfs.mnt/LiveOS/rootfs.img ./LiveOS/.
|
||||
|
||||
echo "--> done to copy rootfs.img, umount squashfs.mnt"
|
||||
umount ./squashfs.mnt
|
||||
|
||||
echo "--> mount rootfs.img into $squashfs_root"
|
||||
mount -o loop LiveOS/rootfs.img $squashfs_root
|
||||
|
||||
echo "--> clean up ./squashfs-rootfs from original squashfs.img in chroot context"
|
||||
clean_rootfs $squashfs_root
|
||||
|
||||
cd $squashfs_root
|
||||
echo "--> extract files from rootfs-rpms to squashfs root"
|
||||
for ff in $rootfs_rpms_dir/*.rpm ; do rpm2cpio $ff | cpio -idu; done
|
||||
|
||||
echo "--> extract files from kernel and its modular rpms to squashfs root"
|
||||
for kf in ${kernel_rpms_dir}/std/*.rpm ; do rpm2cpio $kf | cpio -idu; done
|
||||
|
||||
echo "-->check module dependencies in new squashfs.img in chroot context"
|
||||
#we are using the same new kernel-xxx.rpm, so the $new_ver is the same
|
||||
chroot $squashfs_root /bin/bash -x <<EOF
|
||||
/usr/sbin/depmod -aeF "/${system_map}" "$new_ver"
|
||||
if [ $? == 0 ]; then echo "module dependencies are satisfied!" ; fi
|
||||
## Remove the biosdevname package!
|
||||
rm -f ./usr/lib/udev/rules.d/71-biosdevname.rules ./usr/sbin/biosdevname
|
||||
exit
|
||||
EOF
|
||||
|
||||
# come back to the original work dir
|
||||
cd $work_dir
|
||||
|
||||
echo "--> unmount $squashfs_root"
|
||||
umount $squashfs_root
|
||||
#rename the old version
|
||||
if [ -f $output_dir/new-squashfs.img ]; then
|
||||
mv -f $output_dir/new-squashfs.img $output_dir/squashfs.img-backup-$timestamp
|
||||
fi
|
||||
|
||||
echo "--> make the new squashfs image"
|
||||
mksquashfs LiveOS $output_dir/new-squashfs.img -keep-as-directory -comp xz -b 1M
|
||||
if [ $? == 0 ];then
|
||||
ls -l $output_dir/new-squashfs.img
|
||||
else
|
||||
echo "ERROR: failed to make a new squashfs.img"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
echo "--> done successfully!"
|
@ -1,31 +0,0 @@
|
||||
|
||||
#
|
||||
# this makefile is used by the build-iso process to add file signature to all rpms
|
||||
#
|
||||
# it requires a private key, passed as the variable KEY
|
||||
|
||||
PKGS_LIST := $(wildcard *.rpm)
|
||||
|
||||
# we need to skip the signature of some packages that
|
||||
# might be installed in file systems that do not support extended attributes
|
||||
# in the case of shim- and grub2-efi-, the UEFI configuration installs them in a VFAT file system
|
||||
PKGS_TO_SKIP := $(wildcard grub2-efi-[0-9]*.x86_64.rpm grub2-efi-x64-[0-9]*.x86_64.rpm shim-[0-9]*.x86_64.rpm shim-x64-[0-9]*.x86_64.rpm shim-ia32-[0-9]*.x86_64.rpm)
|
||||
|
||||
PKGS_TO_SIGN = $(filter-out $(PKGS_TO_SKIP),$(PKGS_LIST))
|
||||
|
||||
define _pkg_sign_tmpl
|
||||
|
||||
_sign_$1 :
|
||||
@ rpmsign --signfiles --fskpath=$(KEY) $1
|
||||
@ chown mockbuild $1
|
||||
@ chgrp users $1
|
||||
|
||||
sign : _sign_$1
|
||||
|
||||
endef
|
||||
|
||||
sign :
|
||||
@echo signed all packages
|
||||
|
||||
$(foreach file,$(PKGS_TO_SIGN),$(eval $(call _pkg_sign_tmpl,$(file))))
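# Illustrative invocation (the key path is an example only):
#   make -f <this-makefile> KEY=/path/to/signing.key sign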
|
||||
|
@ -1,114 +0,0 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
|
||||
#
|
||||
# Copyright (c) 2018 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
#
|
||||
# The following tries to choose the best mockchain-parallel-* implementation
|
||||
# to use, based on the version of /usr/bin/mockchain
|
||||
#
|
||||
# We want to use a compatible API, and to use the same python version.
|
||||
#
|
||||
|
||||
interpreter_path () {
|
||||
local path=${1}
|
||||
if [ ! -f ${path} ]; then
|
||||
return 1
|
||||
fi
|
||||
readlink -f $(head -n 1 ${path} | sed 's/^#!//' | awk '{ print $1 }' )
|
||||
}
|
||||
|
||||
get__version__ () {
|
||||
local path=${1}
|
||||
local var=""
|
||||
if [ ! -f ${path} ]; then
|
||||
return 1
|
||||
fi
|
||||
if file ${path} | grep -q 'Python script'; then
|
||||
ver=$(grep __VERSION__= ${path} | cut -d '=' -f 2 | sed 's/"//g')
|
||||
else
|
||||
ver=$(${path} --version 2> /dev/null)
|
||||
fi
|
||||
echo $ver
|
||||
}
|
||||
|
||||
VC_LESS_THAN=0
|
||||
VC_EQUAL=1
|
||||
VC_GREATER_THAN=2
|
||||
ver_comp () {
|
||||
local v1=${1}
|
||||
local v2=${2}
|
||||
local v_greater=""
|
||||
|
||||
if [ "${v1}" == "${v2}" ]; then
|
||||
echo $VC_EQUAL
|
||||
return
|
||||
fi
|
||||
|
||||
v_greater=$((echo ${v1}; echo ${v2}) | sort -rV | head -n 1)
|
||||
if [ "${v1}" == "${v_greater}" ]; then
|
||||
echo $VC_GREATER_THAN
|
||||
return
|
||||
fi
|
||||
|
||||
echo $VC_LESS_THAN
|
||||
}
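# Illustrative use of ver_comp (version strings are examples only):
#   [ "$(ver_comp 1.3.4 2.6)" -eq "$VC_LESS_THAN" ]    # true: 1.3.4 sorts before 2.6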
|
||||
|
||||
MOCKCHAIN_PATH="/usr/bin/mockchain"
|
||||
MOCKCHAIN_PARALLEL_PATH_ROOT="${MY_REPO}/build-tools/mockchain-parallel"
|
||||
DEFAULT_MOCKCHAIN_PARALLEL_PATH="${MOCKCHAIN_PARALLEL_PATH_ROOT}-1.3.4"
|
||||
|
||||
MOCKCHAIN_INTERPRETER_PATH=$(interpreter_path ${MOCKCHAIN_PATH})
|
||||
MOCKCHAIN_VER=$(get__version__ ${MOCKCHAIN_PATH})
|
||||
if [ -z "${MOCKCHAIN_VER}" ]; then
|
||||
MOCKCHAIN_VER=$(rpm -q --queryformat '%{VERSION}' mock)
|
||||
if [ -z "${MOCKCHAIN_VER}" ]; then
|
||||
echo "Error: Failed to determine version of '${MOCKCHAIN_PATH}'"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
BEST_VER=""
|
||||
BEST_MOCKCHAIN_PARALLEL_PATH=""
|
||||
|
||||
for MOCKCHAIN_PARALLEL_PATH in $(ls -1 ${MOCKCHAIN_PARALLEL_PATH_ROOT}-*); do
|
||||
MOCKCHAIN_PARALLEL_VER=$(get__version__ ${MOCKCHAIN_PARALLEL_PATH})
|
||||
if [ -z "${MOCKCHAIN_PARALLEL_VER}" ]; then
|
||||
echo "Warning: Failed to determine version of '${MOCKCHAIN_PARALLEL_PATH}'"
|
||||
continue
|
||||
fi
|
||||
COMP=$(ver_comp "${MOCKCHAIN_VER}" "${MOCKCHAIN_PARALLEL_VER}")
|
||||
echo $MOCKCHAIN_PARALLEL_PATH $MOCKCHAIN_PARALLEL_VER $COMP
|
||||
if [ $COMP -eq $VC_EQUAL ]; then
|
||||
BEST_VER=${MOCKCHAIN_PARALLEL_VER}
|
||||
BEST_MOCKCHAIN_PARALLEL_PATH=${MOCKCHAIN_PARALLEL_PATH}
|
||||
break
|
||||
fi
|
||||
if [ $COMP -gt $VC_EQUAL ]; then
|
||||
if [ "${BEST_VER}" == "" ]; then
|
||||
BEST_VER=${MOCKCHAIN_PARALLEL_VER}
|
||||
BEST_MOCKCHAIN_PARALLEL_PATH=${MOCKCHAIN_PARALLEL_PATH}
|
||||
continue
|
||||
fi
|
||||
|
||||
COMP=$(ver_comp ${MOCKCHAIN_PARALLEL_VER} ${BEST_VER})
|
||||
if [ $COMP -gt $VC_EQUAL ]; then
|
||||
BEST_VER=${MOCKCHAIN_PARALLEL_VER}
|
||||
BEST_MOCKCHAIN_PARALLEL_PATH=${MOCKCHAIN_PARALLEL_PATH}
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
# run the selected script with the same interpreter as /usr/bin/mockchain
MOCKCHAIN_PARALLEL_INTERPRETER_PATH=${MOCKCHAIN_INTERPRETER_PATH}
|
||||
MOCKCHAIN_PARALLEL_PATH=${BEST_MOCKCHAIN_PARALLEL_PATH}
|
||||
|
||||
if [ -z "${MOCKCHAIN_PARALLEL_PATH}" ]; then
|
||||
MOCKCHAIN_PARALLEL_PATH="${DEFAULT_MOCKCHAIN_PARALLEL_PATH}"
|
||||
fi
|
||||
|
||||
echo "PYTHONDONTWRITEBYTECODE=true exec ${MOCKCHAIN_PARALLEL_INTERPRETER_PATH} ${MOCKCHAIN_PARALLEL_PATH} $@"
|
||||
PYTHONDONTWRITEBYTECODE=true exec ${MOCKCHAIN_PARALLEL_INTERPRETER_PATH} ${MOCKCHAIN_PARALLEL_PATH} "$@"
|
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,205 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
#
|
||||
# Copyright (c) 2018-2020 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
# This script modifies a mock configuration file (typically $MY_BUILD_CFG)
|
||||
# to add build time environment variables to the mock environment (things
|
||||
# like what branch we're building on, etc).
|
||||
#
|
||||
# For reasons of security, the host environment variables cannot normally be
|
||||
# passed through to the mock environment, so this script sets the variables
|
||||
# to literal values.
|
||||
#
|
||||
# usage: modify-build-cfg [file.cfg] [<layer>]
|
||||
#
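# For illustration, the script leaves literal assignments like the following in
# the mock config (values are examples only):
#   config_opts['environment']['BUILD_BY'] = 'jenkins'
#   config_opts['environment']['BUILD_DATE'] = '2024-05-01 12:00:00 +0000'
#   config_opts['environment']['WRS_GIT_BRANCH'] = 'master'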
|
||||
|
||||
MODIFY_BUILD_CFG_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )"
|
||||
|
||||
# Set PKG_MANAGER for our build environment.
|
||||
source "${MODIFY_BUILD_CFG_DIR}/pkg-manager-utils.sh"
|
||||
|
||||
LAYER=${2:-$LAYER}
|
||||
|
||||
# For backward compatibility. Old repo location or new?
|
||||
CENTOS_REPO=${MY_REPO}/centos-repo
|
||||
if [ ! -d ${CENTOS_REPO} ]; then
|
||||
CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
|
||||
if [ ! -d ${CENTOS_REPO} ]; then
|
||||
echo "ERROR: directory ${MY_REPO}/centos-repo not found."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Preferred python
|
||||
if rpm -q --whatprovides --quiet python3; then
|
||||
PYTHON_PKG=python3
|
||||
else
|
||||
PYTHON_PKG=python2
|
||||
fi
|
||||
|
||||
# Try to find a layer specific mock.cfg.proto
|
||||
MOCK_CFG_PROTO="${CENTOS_REPO}/mock.cfg.${LAYER}.proto"
|
||||
echo "==== Try MOCK_CFG_PROTO=$MOCK_CFG_PROTO ===="
|
||||
if [ ! -f "$MOCK_CFG_PROTO" ]; then
|
||||
# Not present, Use default mock.cfg.proto
|
||||
MOCK_CFG_PROTO="${CENTOS_REPO}/mock.cfg.proto"
|
||||
fi
|
||||
|
||||
echo "==== Use MOCK_CFG_PROTO=$MOCK_CFG_PROTO ===="
|
||||
if [ ! -f "$MOCK_CFG_PROTO" ]; then
|
||||
echo "ERROR: Couldn't find mock config prototype at '$MOCK_CFG_PROTO'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "${1}x" == "x" ]; then
|
||||
FILE=$MY_BUILD_CFG
|
||||
else
|
||||
FILE=$1
|
||||
fi
|
||||
|
||||
if [ -f $MOCK_CFG_PROTO ]; then
|
||||
if [ -f $FILE ]; then
|
||||
NEWER=$(find "$MOCK_CFG_PROTO" -newer "$FILE")
|
||||
if [ "x$NEWER" != "x" ]; then
|
||||
\rm -f -v "$FILE"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ! -f $FILE ]; then
|
||||
if [ -z $MY_BUILD_ENVIRONMENT ] || [ -z $MY_BUILD_DIR ] || [ -z $MY_REPO ]; then
|
||||
echo "Can't create $FILE without MY_BUILD_ENVIRONMENT, MY_BUILD_DIR and MY_REPO environment variables"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Recreating $FILE"
|
||||
\cp -f -v "$MOCK_CFG_PROTO" "$FILE"
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Couldn't find config file '$FILE', nor construct it from '$MOCK_CFG_PROTO'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# eg: LOCAL_BASE/MY_BUILD_DIR => http://127.0.0.1:8088/MY_BUILD_DIR
|
||||
sed -i "s%LOCAL_BASE%http://127.0.0.1:8088%g" "$FILE"
|
||||
sed -i "s%MIRROR_BASE%http://127.0.0.1:8088%g" "$FILE"
|
||||
sed -i "s%BUILD_ENV%$MY_BUILD_ENVIRONMENT%g" "$FILE"
|
||||
# eg http://127.0.0.1:8088/MY_BUILD_DIR => http://127.0.0.1:8088/localdisk/loadbuild/...
|
||||
sed -i "s%/MY_BUILD_DIR%$MY_BUILD_DIR_TOP%g" "$FILE"
|
||||
sed -i "s%/MY_REPO_DIR%$MY_REPO%g" "$FILE"
|
||||
# eg = MY_BUILD_DIR/xyz => /localdisk/loadbuild/.../xyz
|
||||
sed -i "s%MY_BUILD_DIR%$MY_BUILD_DIR_TOP%g" "$FILE"
|
||||
sed -i "s%MY_REPO_DIR%$MY_REPO%g" "$FILE"
|
||||
|
||||
# Disable all local-* repos for the build-types other than the current one
|
||||
for bt in std rt; do
|
||||
if [ "$bt" != "$BUILD_TYPE" ]; then
|
||||
# Use the range of lines starting with pattern [local-$bt] until the next line starting with []
|
||||
sed -i "/^\[local-$bt\]/,/^\[/ s/enabled=1/enabled=0/" $FILE
|
||||
sed -i "/^\[StxCentos7Distro-$bt\]/,/^\[/ s/enabled=1/enabled=0/" $FILE
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
|
||||
# Add environment variables to mock config if they don't exist
|
||||
grep -q "config_opts\['environment'\]\['BUILD_BY'\]" $FILE || \
|
||||
echo "config_opts['environment']['BUILD_BY']" >> $FILE
|
||||
|
||||
grep -q "config_opts\['environment'\]\['BUILD_DATE'\]" $FILE || \
|
||||
echo "config_opts['environment']['BUILD_DATE']" >> $FILE
|
||||
|
||||
grep -q "config_opts\['environment'\]\['REPO'\]" $FILE || \
|
||||
echo "config_opts['environment']['REPO']" >> $FILE
|
||||
|
||||
grep -q "config_opts\['environment'\]\['WRS_GIT_BRANCH'\]" $FILE || \
|
||||
echo "config_opts['environment']['WRS_GIT_BRANCH']" >> $FILE
|
||||
|
||||
grep -q "config_opts\['environment'\]\['CGCS_GIT_BRANCH'\]" $FILE || \
|
||||
echo "config_opts['environment']['CGCS_GIT_BRANCH']" >> $FILE
|
||||
|
||||
if [ -z $FORMAL_BUILD ]; then
|
||||
grep -q "config_opts\['macros'\]\['%_no_cgcs_license_check'\] = '1'" $FILE || \
|
||||
echo "config_opts['macros']['%_no_cgcs_license_check'] = '1'" >> $FILE
|
||||
else
|
||||
sed -i "/config_opts\['macros'\]\['%_no_cgcs_license_check'\] = '1'/d" $FILE
|
||||
fi
|
||||
|
||||
grep -q "config_opts\['macros'\]\['%_tis_build_type'\] = '$BUILD_TYPE'" $FILE || \
|
||||
echo "config_opts['macros']['%_tis_build_type'] = '$BUILD_TYPE'" >> $FILE
|
||||
|
||||
if [ -f /usr/lib64/nosync/nosync.so ]; then
|
||||
grep -q "config_opts\['nosync'\] = True" $FILE || \
|
||||
echo "config_opts['nosync'] = True" >> $FILE
|
||||
fi
|
||||
|
||||
NETWORK_PKGS=""
|
||||
if [ "containers" == "$BUILD_TYPE" ]; then
|
||||
NETWORK_PKGS="bind-utils"
|
||||
fi
|
||||
|
||||
BUILD_PKGS=''
|
||||
if [ "${PKG_MANAGER}" == "yum" ]; then
|
||||
BUILD_PKGS='@buildsys-build'
|
||||
elif [ "${PKG_MANAGER}" == "dnf" ]; then
|
||||
# buildsys-build group was dropped when CentOS 8 switched to dnf.
|
||||
# We must list all the members plus a few new ones (fedpkg-minimal, epel-rpm-macros).
|
||||
BUILD_PKGS='bash bzip2 coreutils cpio diffutils epel-release epel-rpm-macros fedpkg-minimal findutils gawk gcc gcc-c++ grep gzip info make patch redhat-rpm-config redhat-release rpm-build sed shadow-utils tar unzip util-linux which xz'
|
||||
fi
|
||||
|
||||
STX_PKGS='pigz lbzip2 bash'
|
||||
|
||||
PKGS="${BUILD_PKGS} ${STX_PKGS} ${PKG_MANAGER} ${PYTHON_PKG} ${NETWORK_PKGS}"
|
||||
|
||||
grep -q "config_opts\['chroot_setup_cmd'\] = 'install ${PKGS}'" $FILE || \
|
||||
echo "config_opts['chroot_setup_cmd'] = 'install ${PKGS}'" >> $FILE
|
||||
|
||||
# Special case for containers.
|
||||
# rpmbuild_networking is required for invoking helm commands within mock
|
||||
# building containers requires the std repo to be enabled.
|
||||
if [ "containers" == "$BUILD_TYPE" ]; then
|
||||
grep -q "config_opts\['rpmbuild_networking'\] = True" $FILE || \
|
||||
echo "config_opts['rpmbuild_networking'] = True" >> $FILE
|
||||
|
||||
grep -q "config_opts\['use_host_resolv'\] = True" $FILE || \
|
||||
echo "config_opts['use_host_resolv'] = True" >> $FILE
|
||||
|
||||
sed -i "/^\[local-std\]/,/^\[/ s/enabled=0/enabled=1/" $FILE
|
||||
fi
|
||||
|
||||
#
|
||||
# Read macros from tis.macros to add to the build config file,
|
||||
# for use in RPM spec files
|
||||
#
|
||||
RPM_MACROS=$MY_REPO/build-tools/tis.macros
|
||||
sed 's/#.*//' $RPM_MACROS | grep '=' | while IFS='=' read name value; do
|
||||
# Check if the entry already exists. If so, go to next line
|
||||
grep -q "^config_opts\['macros'\]\['${name}'\] = '${value}'$" $FILE && continue
|
||||
|
||||
# Update or add the entry
|
||||
grep -q "^config_opts\['macros'\]\['${name}'\]" $FILE
|
||||
if [ $? -eq 0 ]; then
|
||||
sed -i -r "s#^(config_opts\['macros'\]\['${name}'\]).*#\1 = '${value}'#" $FILE
|
||||
else
|
||||
echo "config_opts['macros']['${name}'] = '${value}'" >> $FILE
|
||||
fi
|
||||
done
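# For example (hypothetical macro name), a tis.macros line of the form
#   %__my_macro=some-value
# ends up in the mock config as
#   config_opts['macros']['%__my_macro'] = 'some-value'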
|
||||
|
||||
# okay, now we have lines for each env var. Generate the correct values
|
||||
|
||||
BUILD_DATE=`date "+%F %T %z"`
|
||||
CGCS_GIT_BRANCH=`cd $MY_REPO/stx/; git rev-parse --abbrev-ref HEAD`
|
||||
WRS_GIT_BRANCH=`cd $MY_REPO; git rev-parse --abbrev-ref HEAD`
|
||||
REPO=$MY_REPO
|
||||
|
||||
# Finally, our good friend sed will place the values in the mock config file
|
||||
sed -i \
|
||||
-e "s#config_opts\['environment'\]\['BUILD_BY'\].*#config_opts\['environment'\]\['BUILD_BY'\] = '$USER'#" \
|
||||
-e "s#config_opts\['environment'\]\['BUILD_DATE'\].*#config_opts\['environment'\]\['BUILD_DATE'\] = '$BUILD_DATE'#" \
|
||||
-e "s#config_opts\['environment'\]\['REPO'\].*#config_opts\['environment'\]\['REPO'\] = '$REPO'#" \
|
||||
-e "s#config_opts\['environment'\]\['WRS_GIT_BRANCH'\].*#config_opts\['environment'\]\['WRS_GIT_BRANCH'\] = '$WRS_GIT_BRANCH'#" \
|
||||
-e "s#config_opts\['environment'\]\['CGCS_GIT_BRANCH'\].*#config_opts\['environment'\]\['CGCS_GIT_BRANCH'\] = '$CGCS_GIT_BRANCH'#" \
|
||||
$FILE
|
@ -1,427 +0,0 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright (c) 2018-2020 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# Utility for adding patches to an unpatched ISO
|
||||
#
|
||||
|
||||
source "$(dirname $0)/image-utils.sh"
|
||||
|
||||
if [ -z "${MY_REPO}" ]; then
|
||||
echo "Required environment variable MY_REPO is not set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z "${MY_WORKSPACE}" ]; then
|
||||
echo "Required environment variable MY_WORKSPACE is not set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
STX_DIR=${MY_REPO}/stx
|
||||
SETUP_PATCH_REPO=${STX_DIR}/update/extras/scripts/setup_patch_repo.sh
|
||||
if [ ! -x ${SETUP_PATCH_REPO} ]; then
|
||||
echo "Cannot find or execute ${SETUP_PATCH_REPO}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Create temp dir if necessary
|
||||
export TMPDIR="$MY_WORKSPACE/tmp"
|
||||
mkdir -p $TMPDIR
|
||||
|
||||
REPO_UPGRADES_DIR=${STX_DIR}/metal/bsp-files/upgrades
|
||||
RELEASE_INFO="$(get_release_info)"
|
||||
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "ERROR: failed to find a release info file."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
PLATFORM_RELEASE=$(source $RELEASE_INFO && echo $PLATFORM_RELEASE)
|
||||
|
||||
function usage() {
|
||||
echo ""
|
||||
echo "Usage: "
|
||||
echo " $(basename $0) -i <input bootimage.iso> -o <output bootimage.iso> [ -u ] <patch> ..."
|
||||
echo " -i <file>: Specify input ISO file"
|
||||
echo " -o <file>: Specify output ISO file"
|
||||
echo " -u : Update with upgrades files from ${REPO_UPGRADES_DIR}"
|
||||
echo ""
|
||||
}
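# Illustrative invocation (file names are examples only):
#   <this-script> -i bootimage.iso -o bootimage-patched.iso -u MY_PATCH.patch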
|
||||
|
||||
function extract_pkg_from_patch_repo() {
|
||||
local repodir=${BUILDDIR}/patches
|
||||
local pkgname=$1
|
||||
local pkgfile=$(repoquery --disablerepo=* --repofrompath local,${repodir} --enablerepo=local --location -q ${pkgname})
|
||||
if [ -z "${pkgfile}" ]; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
rpm2cpio ${pkgfile/file://} | cpio -idmv
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to extract $pkgname files from ${pkgfile/file://}"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
declare INPUT_ISO=
|
||||
declare OUTPUT_ISO=
|
||||
declare ORIG_PWD=$PWD
|
||||
declare DO_UPGRADES=1
|
||||
|
||||
while getopts "i:o:u" opt; do
|
||||
case $opt in
|
||||
i)
|
||||
INPUT_ISO=$OPTARG
|
||||
;;
|
||||
o)
|
||||
OUTPUT_ISO=$OPTARG
|
||||
;;
|
||||
u)
|
||||
DO_UPGRADES=0
|
||||
;;
|
||||
*)
|
||||
usage
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [ -z "$INPUT_ISO" -o -z "$OUTPUT_ISO" ]; then
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -f ${INPUT_ISO} ]; then
|
||||
echo "Input file does not exist: ${INPUT_ISO}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -f ${OUTPUT_ISO} ]; then
|
||||
echo "Output file already exists: ${OUTPUT_ISO}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
shift $((OPTIND-1))
|
||||
|
||||
if [ $# -le 0 ]; then
|
||||
usage
|
||||
exit
|
||||
fi
|
||||
|
||||
for pf in $@; do
|
||||
if [ ! -f $pf ]; then
|
||||
echo "Patch file $pf does not exist"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ ! $pf =~ \.patch$ ]]; then
|
||||
echo "Specified file $pf does not have .patch extension"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
declare MNTDIR=
|
||||
declare BUILDDIR=
|
||||
declare WORKDIR=
|
||||
|
||||
function check_requirements {
|
||||
local -a required_utils=(
|
||||
rsync
|
||||
mkisofs
|
||||
isohybrid
|
||||
implantisomd5
|
||||
)
|
||||
if [ $UID -ne 0 ]; then
|
||||
# If running as non-root user, additional utils are required
|
||||
required_utils+=(
|
||||
guestmount
|
||||
guestunmount
|
||||
)
|
||||
fi
|
||||
|
||||
local -i missing=0
|
||||
|
||||
for req in ${required_utils[@]}; do
|
||||
which ${req} >&/dev/null
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Unable to find required utility: ${req}" >&2
|
||||
let missing++
|
||||
fi
|
||||
done
|
||||
|
||||
if [ ${missing} -gt 0 ]; then
|
||||
echo "One or more required utilities are missing. Aborting..." >&2
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
function mount_iso {
|
||||
if [ $UID -eq 0 ]; then
|
||||
# Mount the ISO
|
||||
mount -o loop ${INPUT_ISO} ${MNTDIR}
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to mount ${INPUT_ISO}" >&2
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
# As non-root user, mount the ISO using guestmount
|
||||
guestmount -a ${INPUT_ISO} -m /dev/sda1 --ro ${MNTDIR}
|
||||
rc=$?
|
||||
if [ $rc -ne 0 ]; then
|
||||
# Add a retry
|
||||
echo "Call to guestmount failed with rc=$rc. Retrying once..."
|
||||
|
||||
guestmount -a ${INPUT_ISO} -m /dev/sda1 --ro ${MNTDIR}
|
||||
rc=$?
|
||||
if [ $rc -ne 0 ]; then
|
||||
echo "Call to guestmount failed with rc=$rc. Aborting..."
|
||||
exit $rc
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function unmount_iso {
|
||||
if [ $UID -eq 0 ]; then
|
||||
umount ${MNTDIR}
|
||||
else
|
||||
guestunmount ${MNTDIR}
|
||||
fi
|
||||
rmdir ${MNTDIR}
|
||||
}
|
||||
|
||||
function cleanup() {
|
||||
if [ -n "$MNTDIR" -a -d "$MNTDIR" ]; then
|
||||
unmount_iso
|
||||
fi
|
||||
|
||||
if [ -n "$BUILDDIR" -a -d "$BUILDDIR" ]; then
|
||||
\rm -rf $BUILDDIR
|
||||
fi
|
||||
|
||||
if [ -n "$WORKDIR" -a -d "$WORKDIR" ]; then
|
||||
\rm -rf $WORKDIR
|
||||
fi
|
||||
}
|
||||
|
||||
trap cleanup EXIT
|
||||
|
||||
MNTDIR=$(mktemp -d -p $PWD patchiso_mnt_XXXXXX)
|
||||
if [ -z "${MNTDIR}" -o ! -d ${MNTDIR} ]; then
|
||||
echo "Failed to create mntdir. Aborting..."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
BUILDDIR=$(mktemp -d -p $PWD patchiso_build_XXXXXX)
|
||||
if [ -z "${BUILDDIR}" -o ! -d ${BUILDDIR} ]; then
|
||||
echo "Failed to create builddir. Aborting..."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Mount the ISO
|
||||
mount_iso
|
||||
|
||||
rsync -a ${MNTDIR}/ ${BUILDDIR}/
|
||||
rc=$?
|
||||
if [ $rc -ne 0 ]; then
|
||||
echo "Call to rsync ISO content. Aborting..."
|
||||
exit $rc
|
||||
fi
|
||||
|
||||
unmount_iso
|
||||
|
||||
# Setup the patch repo
|
||||
${SETUP_PATCH_REPO} -o ${BUILDDIR}/patches $@
|
||||
rc=$?
|
||||
if [ $rc -ne 0 ]; then
|
||||
echo "Call to $(basename ${SETUP_PATCH_REPO}) failed with rc=$rc. Aborting..."
|
||||
exit $rc
|
||||
fi
|
||||
|
||||
# Look for components that need modification
|
||||
#extract_pkg_from_patch_repo
|
||||
WORKDIR=$(mktemp -d -p $PWD patchiso_work_XXXXXX)
|
||||
if [ -z "${WORKDIR}" -o ! -d ${WORKDIR} ]; then
|
||||
echo "Failed to create workdir. Aborting..."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
\cd ${WORKDIR}
|
||||
\mkdir extract
|
||||
\cd extract
|
||||
|
||||
# Changes to copied files here must also be reflected in build-iso
|
||||
|
||||
extract_pkg_from_patch_repo platform-kickstarts
|
||||
if [ $? -eq 0 ]; then
|
||||
# Replace files
|
||||
\rm -f ${BUILDDIR}/*ks.cfg &&
|
||||
\cp --preserve=all var/www/pages/feed/rel-*/*.cfg ${BUILDDIR}/ &&
|
||||
\cp --preserve=all ${BUILDDIR}/controller_ks.cfg ${BUILDDIR}/ks.cfg
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to copy extracted kickstarts"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
\cd ${WORKDIR}
|
||||
\rm -rf extract
|
||||
|
||||
\mkdir extract
|
||||
\cd extract
|
||||
extract_pkg_from_patch_repo platform-kickstarts-pxeboot
|
||||
if [ $? -eq 0 ]; then
|
||||
# Replace files
|
||||
\rm -f ${BUILDDIR}/var/pxeboot/pxeboot_controller.cfg \
|
||||
${BUILDDIR}/var/pxeboot/pxeboot_smallsystem.cfg \
|
||||
${BUILDDIR}/var/pxeboot/pxeboot_smallsystem_lowlatency.cfg &&
|
||||
\cp --preserve=all pxeboot/* ${BUILDDIR}/var/pxeboot/
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to copy extracted pxeboot kickstarts"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
\cd ${WORKDIR}
|
||||
\rm -rf extract
|
||||
|
||||
\mkdir extract
|
||||
\cd extract
|
||||
extract_pkg_from_patch_repo pxe-network-installer
|
||||
if [ $? -eq 0 ]; then
|
||||
# Replace files
|
||||
\rm -f ${BUILDDIR}/pxeboot/pxelinux.0 \
|
||||
${BUILDDIR}/pxeboot/menu.c32 \
|
||||
${BUILDDIR}/pxeboot/chain.c32 &&
|
||||
\cp --preserve=all var/pxeboot/pxelinux.0 var/pxeboot/menu.c32 var/pxeboot/chain.c32 ${BUILDDIR}/pxeboot/
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: Could not copy all files from installer"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
\rm -f ${BUILDDIR}/LiveOS/squashfs.img &&
|
||||
\cp --preserve=all var/www/pages/feed/rel-*/LiveOS/squashfs.img ${BUILDDIR}/LiveOS/
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: Could not copy squashfs from LiveOS"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Replace vmlinuz and initrd.img with our own pre-built ones
|
||||
\rm -f \
|
||||
${BUILDDIR}/vmlinuz \
|
||||
${BUILDDIR}/images/pxeboot/vmlinuz \
|
||||
${BUILDDIR}/initrd.img \
|
||||
${BUILDDIR}/images/pxeboot/initrd.img &&
|
||||
\cp --preserve=all var/pxeboot/rel-*/installer-bzImage_1.0 \
|
||||
${BUILDDIR}/vmlinuz &&
|
||||
\cp --preserve=all var/pxeboot/rel-*/installer-bzImage_1.0 \
|
||||
${BUILDDIR}/images/pxeboot/vmlinuz &&
|
||||
\cp --preserve=all var/pxeboot/rel-*/installer-intel-x86-64-initrd_1.0 \
|
||||
${BUILDDIR}/initrd.img &&
|
||||
\cp --preserve=all var/pxeboot/rel-*/installer-intel-x86-64-initrd_1.0 \
|
||||
${BUILDDIR}/images/pxeboot/initrd.img
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: Failed to copy installer images"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
\cd ${WORKDIR}
|
||||
\rm -rf extract
|
||||
|
||||
\mkdir extract
|
||||
\cd extract
|
||||
extract_pkg_from_patch_repo grub2-efi-x64-pxeboot
|
||||
if [ $? -eq 0 ]; then
|
||||
# Replace files
|
||||
\rm -f ${BUILDDIR}/var/pxeboot/EFI/grubx64.efi &&
|
||||
\cp --preserve=all pxeboot/EFI/grubx64.efi ${BUILDDIR}/var/pxeboot/EFI/
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: Failed to copy grub2-efi-x64-pxeboot files"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
\cd ${WORKDIR}
|
||||
\rm -rf extract
|
||||
|
||||
\mkdir extract
|
||||
\cd extract
|
||||
extract_pkg_from_patch_repo grub2-common
|
||||
if [ $? -eq 0 ]; then
|
||||
# Replace files
|
||||
for f in usr/lib/grub/x86_64-efi/*; do
|
||||
f_base=$(basename $f)
|
||||
\rm -f ${BUILDDIR}/var/pxeboot/EFI/$f_base &&
|
||||
\cp --preserve=all ${f} ${BUILDDIR}/var/pxeboot/EFI/
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: Failed to copy grub2-common files"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
fi
|
||||
\cd ${WORKDIR}
|
||||
\rm -rf extract
|
||||
|
||||
\mkdir extract
|
||||
\cd extract
|
||||
extract_pkg_from_patch_repo grub2-efi-x64-modules
|
||||
if [ $? -eq 0 ]; then
|
||||
# Replace files
|
||||
for f in usr/lib/grub/x86_64-efi/*; do
|
||||
f_base=$(basename $f)
|
||||
\rm -f ${BUILDDIR}/var/pxeboot/EFI/$f_base &&
|
||||
\cp --preserve=all ${f} ${BUILDDIR}/var/pxeboot/EFI/
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: Failed to copy grub2-efi-x64-modules files"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
fi
|
||||
\cd ${WORKDIR}
|
||||
\rm -rf extract
|
||||
|
||||
\cd ${ORIG_PWD}
|
||||
|
||||
if [ ${DO_UPGRADES} -eq 0 ]; then
|
||||
# Changes to copied files here must also be reflected in build-iso
|
||||
|
||||
echo "Updating upgrade support files"
|
||||
ISO_UPGRADES_DIR="${BUILDDIR}/upgrades"
|
||||
\rm -rf ${ISO_UPGRADES_DIR}
|
||||
\mkdir ${ISO_UPGRADES_DIR}
|
||||
\cp ${REPO_UPGRADES_DIR}/* ${ISO_UPGRADES_DIR}
|
||||
sed -i "s/xxxSW_VERSIONxxx/${PLATFORM_RELEASE}/g" ${ISO_UPGRADES_DIR}/metadata.xml
|
||||
chmod +x ${ISO_UPGRADES_DIR}/*.sh
|
||||
# Write the version out (used in upgrade scripts - this is the same as SW_VERSION)
|
||||
echo "VERSION=$PLATFORM_RELEASE" > ${ISO_UPGRADES_DIR}/version
|
||||
fi
|
||||
|
||||
# Rebuild the ISO
|
||||
mkisofs -o ${OUTPUT_ISO} \
|
||||
-R -D -A 'oe_iso_boot' -V 'oe_iso_boot' \
|
||||
-quiet \
|
||||
-b isolinux.bin -c boot.cat -no-emul-boot \
|
||||
-boot-load-size 4 -boot-info-table \
|
||||
-eltorito-alt-boot \
|
||||
-e images/efiboot.img \
|
||||
-no-emul-boot \
|
||||
${BUILDDIR}
|
||||
|
||||
isohybrid --uefi ${OUTPUT_ISO}
|
||||
implantisomd5 ${OUTPUT_ISO}
|
||||
|
||||
# Sign the .iso with the developer private key
|
||||
# Signing with the formal key is only to be done for customer release
|
||||
# and is a manual step afterwards, as with the GA ISO
|
||||
openssl dgst -sha256 \
|
||||
-sign ${MY_REPO}/build-tools/signing/dev-private-key.pem \
|
||||
-binary \
|
||||
-out ${OUTPUT_ISO/%.iso/.sig} \
|
||||
${OUTPUT_ISO}
|
||||
rc=$?
|
||||
if [ $rc -ne 0 ]; then
|
||||
echo "Call to $(basename ${SETUP_PATCH_REPO}) failed with rc=$rc. Aborting..."
|
||||
exit $rc
|
||||
fi
|
||||
|
||||
echo "Patched ISO: ${OUTPUT_ISO}"
|
||||
|
@ -1,140 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
#
|
||||
# Start an edit session for packages to be upgraded - pre upgrade version
|
||||
#
|
||||
|
||||
# For backward compatibility. Old repo location or new?
|
||||
CENTOS_REPO=${MY_REPO}/centos-repo
|
||||
if [ ! -d ${CENTOS_REPO} ]; then
|
||||
CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
|
||||
if [ ! -d ${CENTOS_REPO} ]; then
|
||||
echo "ERROR: directory ${MY_REPO}/centos-repo not found."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
usage () {
|
||||
echo ""
|
||||
echo "Step 1: Start an edit session for packages to be upgraded - pre upgrade version"
|
||||
echo ""
|
||||
echo "Usage: "
|
||||
echo " patch_rebase_1 [--origin_branch <branch>] [--working_branch <branch>] [--upversion_data <file>]"
|
||||
echo ""
|
||||
echo "Assumes $(basename ${CENTOS_REPO}) already has a working_branch commit that sets the new symlinks."
|
||||
echo ""
|
||||
echo "The upversion_data file has data on all the src.rpm being updated in the format:"
|
||||
echo " export UPVERSION_DATA=$MY_WORKSPACE/upversion.log"
|
||||
echo " PKG=lighttpd"
|
||||
echo " OLD_SRC_RPM=lighttpd-1.4.41-1.el7.src.rpm"
|
||||
echo " NEW_SRC_RPM=lighttpd-1.4.41-2.el7.src.rpm"
|
||||
echo " SRPM_PATH=$MY_REPO/stx/integ/extended/lighttpd/centos/srpm_path"
|
||||
echo " echo \"\$PKG#\$SRPM_PATH##\$OLD_SRC_RPM#\$NEW_SRC_RPM\" > UPVERSION_DATA"
|
||||
echo ""
|
||||
}
|
||||
|
||||
TEMP=`getopt -o h --long origin_branch:,working_branch:,upversion_data:,help -n 'test.sh' -- "$@"`
|
||||
eval set -- "$TEMP"
|
||||
|
||||
ORIGIN_BRANCH=""
|
||||
WORKING_BRANCH=""
|
||||
UPVERSION_LOG=""
|
||||
HELP=0
|
||||
|
||||
while true ; do
|
||||
case "$1" in
|
||||
--origin_branch) shift ; ORIGIN_BRANCH="$1" ; shift ;;
|
||||
--working_branch) shift ; WORKING_BRANCH="$1" ; shift ;;
|
||||
--upversion_data) shift ; UPVERSION_LOG="$1" ; shift ;;
|
||||
-h|--help) HELP=1 ; shift ;;
|
||||
--) shift ; break ;;
|
||||
*) usage; exit 1 ;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [ $HELP -eq 1 ]; then
|
||||
usage
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "$UPVERSION_LOG" == "" ]; then
|
||||
UPVERSION_LOG=$UPVERSION_DATA
|
||||
fi
|
||||
|
||||
if [ "$UPVERSION_LOG" == "" ]; then
|
||||
echo "ERROR: please specify location of upversion data"
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -f "$UPVERSION_LOG" ]; then
|
||||
echo "File not found: '$UPVERSION_LOG'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$ORIGIN_BRANCH" == "" ] && [ "$WORKING_BRANCH" == "" ]; then
|
||||
ORIGIN_BRANCH=$PATCH_SOURCE_BRANCH
|
||||
WORKING_BRANCH=$MY_PATCH_BRANCH
|
||||
fi
|
||||
|
||||
if [ "$ORIGIN_BRANCH" == "" ] && [ "$WORKING_BRANCH" == "" ]; then
|
||||
ORIGIN_BRANCH=$SOURCE_BRANCH
|
||||
WORKING_BRANCH=$MY_BRANCH
|
||||
fi
|
||||
|
||||
if [ "$ORIGIN_BRANCH" == "" ]; then
|
||||
echo "ERROR: please specify a origin branch"
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$WORKING_BRANCH" == "" ]; then
|
||||
echo "ERROR: please specify a working branch"
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# One step back to see the old symlinks
|
||||
cd ${CENTOS_REPO}
|
||||
git checkout $WORKING_BRANCH
|
||||
if [ $? != 0 ]; then
|
||||
echo "ERROR: Can't checkout branch '$WORKING_BRANCH' in directory '$(pwd)'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
git checkout HEAD^
|
||||
|
||||
FAILED=""
|
||||
for dat in $(cat $UPVERSION_LOG); do
|
||||
name=$(echo $dat | awk -F '#' '{print $1}')
|
||||
srpm_path=$(echo $dat | awk -F '#' '{print $2}')
|
||||
old_src_rpm=$(echo $dat | awk -F '#' '{print $4}')
|
||||
new_src_rpm=$(echo $dat | awk -F '#' '{print $5}')
|
||||
|
||||
echo "$name $old_src_rpm $new_src_rpm"
|
||||
|
||||
build-pkgs --edit --clean $name
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "ERROR: failed cmd 'build-pkgs --edit --clean $name'"
|
||||
FAILED="$name $FAILED"
|
||||
break
|
||||
fi
|
||||
echo "$? <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"
|
||||
build-pkgs --edit $name
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "ERROR: failed cmd 'build-pkgs --edit $name'"
|
||||
FAILED="$name $FAILED"
|
||||
break
|
||||
fi
|
||||
echo "$? <=<=<=<=<=<=<=<=<=<=<=<=<=<=<=<="
|
||||
done
|
||||
|
||||
cd ${CENTOS_REPO}
|
||||
git checkout $WORKING_BRANCH
|
||||
|
||||
if [ "$FAILED" != "" ]; then
|
||||
echo "Failed build-pkgs --edit for ... $FAILED"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
@ -1,158 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
#
|
||||
# Update srpm_path for packages to be upgraded
|
||||
#
|
||||
|
||||
# For backward compatibility. Old repo location or new?
|
||||
CENTOS_REPO=${MY_REPO}/centos-repo
|
||||
if [ ! -d ${CENTOS_REPO} ]; then
|
||||
CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
|
||||
if [ ! -d ${CENTOS_REPO} ]; then
|
||||
echo "ERROR: directory ${MY_REPO}/centos-repo not found."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
usage () {
|
||||
echo ""
|
||||
echo "Step 2: Update srpm_path for packages to be upgraded"
|
||||
echo ""
|
||||
echo "Usage: "
|
||||
echo " patch_rebase_2 [--origin_branch <branch>] [--working_branch <branch>] [--upversion_data <file>]"
|
||||
echo ""
|
||||
echo "Assumes $(basename ${CENTOS_REPO}) already has a working_branch commit that sets the new symlinks."
|
||||
echo ""
|
||||
echo "The upversion_data file has data on all the src.rpm being updated in the format:"
|
||||
echo " export UPVERSION_DATA=$MY_WORKSPACE/upversion.log"
|
||||
echo " PKG=lighttpd"
|
||||
echo " OLD_SRC_RPM=lighttpd-1.4.41-1.el7.src.rpm"
|
||||
echo " NEW_SRC_RPM=lighttpd-1.4.41-2.el7.src.rpm"
|
||||
echo " SRPM_PATH=$MY_REPO/stx/integ/extended/lighttpd/centos/srpm_path"
|
||||
echo " echo \"\$PKG#\$SRPM_PATH##\$OLD_SRC_RPM#\$NEW_SRC_RPM\" > UPVERSION_DATA"
|
||||
echo ""
|
||||
}
|
||||
|
||||
|
||||
TEMP=`getopt -o h --long origin_branch:,working_branch:,upversion_data:,help -n 'test.sh' -- "$@"`
|
||||
eval set -- "$TEMP"
|
||||
|
||||
ORIGIN_BRANCH=""
|
||||
WORKING_BRANCH=""
|
||||
UPVERSION_LOG=""
|
||||
HELP=0
|
||||
|
||||
while true ; do
|
||||
case "$1" in
|
||||
--origin_branch) shift ; ORIGIN_BRANCH="$1" ; shift ;;
|
||||
--working_branch) shift ; WORKING_BRANCH="$1" ; shift ;;
|
||||
--upversion_data) shift ; UPVERSION_LOG="$1" ; shift ;;
|
||||
-h|--help) HELP=1 ; shift ;;
|
||||
--) shift ; break ;;
|
||||
*) usage; exit 1 ;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [ $HELP -eq 1 ]; then
|
||||
usage
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "$UPVERSION_LOG" == "" ]; then
|
||||
UPVERSION_LOG=$UPVERSION_DATA
|
||||
fi
|
||||
|
||||
if [ "$UPVERSION_LOG" == "" ]; then
|
||||
echo "ERROR: please specify location of upversion data"
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -f "$UPVERSION_LOG" ]; then
|
||||
echo "File not found: '$UPVERSION_LOG'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$ORIGIN_BRANCH" == "" ] && [ "$WORKING_BRANCH" == "" ]; then
|
||||
ORIGIN_BRANCH=$PATCH_SOURCE_BRANCH
|
||||
WORKING_BRANCH=$MY_PATCH_BRANCH
|
||||
fi
|
||||
|
||||
if [ "$ORIGIN_BRANCH" == "" ] && [ "$WORKING_BRANCH" == "" ]; then
|
||||
ORIGIN_BRANCH=$SOURCE_BRANCH
|
||||
WORKING_BRANCH=$MY_BRANCH
|
||||
fi
|
||||
|
||||
if [ "$ORIGIN_BRANCH" == "" ]; then
|
||||
echo "ERROR: please specify a origin branch"
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$WORKING_BRANCH" == "" ]; then
|
||||
echo "ERROR: please specify a working branch"
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# One step back to see the old symlinks
|
||||
cd $MY_REPO
|
||||
|
||||
FAILED=""
|
||||
for dat in $(cat $UPVERSION_LOG); do
|
||||
name=$(echo $dat | awk -F '#' '{print $1}')
|
||||
srpm_path=$(echo $dat | awk -F '#' '{print $2}')
|
||||
old_src_rpm=$(echo $dat | awk -F '#' '{print $4}')
|
||||
new_src_rpm=$(echo $dat | awk -F '#' '{print $5}')
|
||||
|
||||
(
|
||||
cd $(dirname $srpm_path)
|
||||
CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
|
||||
if [ "$CURRENT_BRANCH" != "$WORKING_BRANCH" ]; then
|
||||
git checkout $WORKING_BRANCH
|
||||
if [ $? -ne 0 ]; then
|
||||
git checkout $ORIGIN_BRANCH
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "ERROR: Can't checkout branch '$ORIGIN_BRANCH' in directory '$(pwd)'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
git checkout -b $WORKING_BRANCH
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "ERROR: failed to 'git checkout -b $WORKING_BRANCH' from '$(pwd)'"
|
||||
exit 1
|
||||
else
|
||||
echo "created branch '$WORKING_BRANCH' at '$(pwd)'"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
sed -i "s#$old_src_rpm#$new_src_rpm#" $srpm_path
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "ERROR: sed failed '$old_src_rpm' -> '$new_src_rpm'"
|
||||
exit 1
|
||||
else
|
||||
echo "updated $srpm_path: '$old_src_rpm' -> '$new_src_rpm'"
|
||||
fi
|
||||
|
||||
exit 0
|
||||
)
|
||||
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "ERROR: failed while working on package '$name' at '$srpm_path'"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
echo ""
|
||||
for d in $(for dat in $(cat $UPVERSION_LOG); do srpm_path=$(echo $dat | awk -F '#' '{print $2}'); ( cd $(dirname $srpm_path); git rev-parse --show-toplevel ); done | sort --unique); do
|
||||
(
|
||||
cd $d
|
||||
echo "cd $d"
|
||||
for f in $(git status --porcelain | grep 'srpm_path$' | awk '{print $2}'); do
|
||||
echo "git add $f";
|
||||
done
|
||||
echo "git commit -m 'srpm_path updates for patch $PATCH_ID'"
|
||||
)
|
||||
done
|
||||
echo ""
|
@ -1,129 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
#
|
||||
# Start an edit session for packages to be upgraded - post upgrade version
|
||||
#
|
||||
|
||||
# For backward compatibility. Old repo location or new?
|
||||
CENTOS_REPO=${MY_REPO}/centos-repo
|
||||
if [ ! -d ${CENTOS_REPO} ]; then
|
||||
CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
|
||||
if [ ! -d ${CENTOS_REPO} ]; then
|
||||
echo "ERROR: directory ${MY_REPO}/centos-repo not found."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
usage () {
|
||||
echo ""
|
||||
echo "Step 3: Start an edit session for packages to be upgraded - post upgrade version"
|
||||
echo ""
|
||||
echo "Usage: "
|
||||
echo " patch_rebase_3 [--origin_branch <branch>] [--working_branch <branch>] [--upversion_data <file>]"
|
||||
echo ""
|
||||
echo "Assumes $(basename ${CENTOS_REPO}) already has a working_branch commit that sets the new symlinks."
|
||||
echo ""
|
||||
echo "The upversion_data file has data on all the src.rpm being updated in the format:"
|
||||
echo " export UPVERSION_DATA=$MY_WORKSPACE/upversion.log"
|
||||
echo " PKG=lighttpd"
|
||||
echo " OLD_SRC_RPM=lighttpd-1.4.41-1.el7.src.rpm"
|
||||
echo " NEW_SRC_RPM=lighttpd-1.4.41-2.el7.src.rpm"
|
||||
echo " SRPM_PATH=$MY_REPO/stx/integ/extended/lighttpd/centos/srpm_path"
|
||||
echo " echo \"\$PKG#\$SRPM_PATH##\$OLD_SRC_RPM#\$NEW_SRC_RPM\" > UPVERSION_DATA"
|
||||
echo ""
|
||||
}
|
||||
|
||||
|
||||
TEMP=`getopt -o h --long origin_branch:,working_branch:,upversion_data:,help -n 'test.sh' -- "$@"`
|
||||
eval set -- "$TEMP"
|
||||
|
||||
ORIGIN_BRANCH=""
|
||||
WORKING_BRANCH=""
|
||||
UPVERSION_LOG=""
|
||||
HELP=0
|
||||
|
||||
while true ; do
|
||||
case "$1" in
|
||||
--origin_branch) shift ; ORIGIN_BRANCH="$1" ; shift ;;
|
||||
--working_branch) shift ; WORKING_BRANCH="$1" ; shift ;;
|
||||
--upversion_data) shift ; UPVERSION_LOG="$1" ; shift ;;
|
||||
-h|--help) HELP=1 ; shift ;;
|
||||
--) shift ; break ;;
|
||||
*) usage; exit 1 ;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [ $HELP -eq 1 ]; then
|
||||
usage
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "$UPVERSION_LOG" == "" ]; then
|
||||
UPVERSION_LOG=$UPVERSION_DATA
|
||||
fi
|
||||
|
||||
if [ "$UPVERSION_LOG" == "" ]; then
|
||||
echo "ERROR: please specify location of upversion data"
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -f "$UPVERSION_LOG" ]; then
|
||||
echo "File not found: '$UPVERSION_LOG'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$ORIGIN_BRANCH" == "" ] && [ "$WORKING_BRANCH" == "" ]; then
|
||||
ORIGIN_BRANCH=$PATCH_SOURCE_BRANCH
|
||||
WORKING_BRANCH=$MY_PATCH_BRANCH
|
||||
fi
|
||||
|
||||
if [ "$ORIGIN_BRANCH" == "" ] && [ "$WORKING_BRANCH" == "" ]; then
|
||||
ORIGIN_BRANCH=$SOURCE_BRANCH
|
||||
WORKING_BRANCH=$MY_BRANCH
|
||||
fi
|
||||
|
||||
if [ "$ORIGIN_BRANCH" == "" ]; then
|
||||
echo "ERROR: please specify a origin branch"
|
||||
usage
|
||||
exit 1
|
||||
fi

if [ "$WORKING_BRANCH" == "" ]; then
    echo "ERROR: please specify a working branch"
    usage
    exit 1
fi

# One step back to see the old symlinks
cd ${CENTOS_REPO}
git checkout $WORKING_BRANCH
if [ $? != 0 ]; then
    echo "ERROR: Can't checkout branch '$WORKING_BRANCH' in directory '${CENTOS_REPO}'"
    exit 1
fi

FAILED=""
for dat in $(cat $UPVERSION_LOG); do
    name=$(echo $dat | awk -F '#' '{print $1}')
    srpm_path=$(echo $dat | awk -F '#' '{print $2}')
    old_src_rpm=$(echo $dat | awk -F '#' '{print $4}')
    new_src_rpm=$(echo $dat | awk -F '#' '{print $5}')

    echo "$name $old_src_rpm $new_src_rpm"

    build-pkgs --edit $name --no-meta-patch
    if [ $? -ne 0 ]; then
        echo "ERROR: failed cmd 'build-pkgs --edit $name'"
        FAILED="$name $FAILED"
        break
    fi
    echo "$? <=<=<=<=<=<=<=<=<=<=<=<=<=<=<=<="
done

if [ "$FAILED" != "" ]; then
    echo "Failed build-pkgs --edit for ... $FAILED"
    exit 1
fi
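
Each record in the upversion data is '#'-delimited; field 3 is intentionally empty because the record is written with a double '##'. A minimal sketch of how the loop above splits one record (record value illustrative only):

    dat='lighttpd#$MY_REPO/stx/integ/extended/lighttpd/centos/srpm_path##lighttpd-1.4.41-1.el7.src.rpm#lighttpd-1.4.41-2.el7.src.rpm'
    echo $dat | awk -F '#' '{print $1}'    # lighttpd
    echo $dat | awk -F '#' '{print $2}'    # $MY_REPO/stx/integ/extended/lighttpd/centos/srpm_path
    echo $dat | awk -F '#' '{print $4}'    # lighttpd-1.4.41-1.el7.src.rpm
    echo $dat | awk -F '#' '{print $5}'    # lighttpd-1.4.41-2.el7.src.rpm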

@ -1,413 +0,0 @@
#!/bin/bash

#
# Migrate Titanium Cloud patches to the new package version
#

# For backward compatibility. Old repo location or new?
CENTOS_REPO=${MY_REPO}/centos-repo
if [ ! -d ${CENTOS_REPO} ]; then
    CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
    if [ ! -d ${CENTOS_REPO} ]; then
        echo "ERROR: directory ${MY_REPO}/centos-repo not found."
        exit 1
    fi
fi

usage () {
    echo ""
    echo "Step 4: Migrate Titanium Cloud patches to the new package version"
    echo ""
    echo "Usage: "
    echo "   patch_rebase_4 [--origin_branch <branch>] [--working_branch <branch>] [--upversion_data <file>]"
    echo ""
    echo "Assumes $(basename ${CENTOS_REPO}) already has a working_branch commit that sets the new symlinks."
    echo ""
    echo "The upversion_data file has data on all the src.rpm being updated in the format:"
    echo "   export UPVERSION_DATA=$MY_WORKSPACE/upversion.log"
    echo "   PKG=lighttpd"
    echo "   OLD_SRC_RPM=lighttpd-1.4.41-1.el7.src.rpm"
    echo "   NEW_SRC_RPM=lighttpd-1.4.41-2.el7.src.rpm"
    echo "   SRPM_PATH=$MY_REPO/stx/integ/extended/lighttpd/centos/srpm_path"
    echo "   echo \"\$PKG#\$SRPM_PATH##\$OLD_SRC_RPM#\$NEW_SRC_RPM\" > UPVERSION_DATA"
    echo ""
}

TEMP=`getopt -o h --long origin_branch:,working_branch:,upversion_data:,help -n 'test.sh' -- "$@"`
eval set -- "$TEMP"

ORIGIN_BRANCH=""
WORKING_BRANCH=""
UPVERSION_LOG=""
HELP=0

while true ; do
    case "$1" in
        --origin_branch) shift ; ORIGIN_BRANCH="$1" ; shift ;;
        --working_branch) shift ; WORKING_BRANCH="$1" ; shift ;;
        --upversion_data) shift ; UPVERSION_LOG="$1" ; shift ;;
        -h|--help) HELP=1 ; shift ;;
        --) shift ; break ;;
        *) usage; exit 1 ;;
    esac
done

if [ $HELP -eq 1 ]; then
    usage
    exit 0
fi

if [ "$UPVERSION_LOG" == "" ]; then
    UPVERSION_LOG=$UPVERSION_DATA
fi

if [ "$UPVERSION_LOG" == "" ]; then
    echo "ERROR: please specify location of upversion data"
    usage
    exit 1
fi

if [ ! -f "$UPVERSION_LOG" ]; then
    echo "File not found: '$UPVERSION_LOG'"
    exit 1
fi

if [ "$ORIGIN_BRANCH" == "" ] && [ "$WORKING_BRANCH" == "" ]; then
    ORIGIN_BRANCH=$PATCH_SOURCE_BRANCH
    WORKING_BRANCH=$MY_PATCH_BRANCH
fi

if [ "$ORIGIN_BRANCH" == "" ] && [ "$WORKING_BRANCH" == "" ]; then
    ORIGIN_BRANCH=$SOURCE_BRANCH
    WORKING_BRANCH=$MY_BRANCH
fi

if [ "$ORIGIN_BRANCH" == "" ]; then
    echo "ERROR: please specify an origin branch"
    usage
    exit 1
fi

if [ "$WORKING_BRANCH" == "" ]; then
    echo "ERROR: please specify a working branch"
    usage
    exit 1
fi

if [ "$DISPLAY" == "" ]; then
    echo "ERROR: X-Windows 'DISPLAY' variable not set. This script needs to open pop-up windows."
    usage
    exit 1
fi
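
This step drives gitk, meld and xterm, so it must run with a usable X display. When the build host is remote, one hedged option (host name illustrative) is to forward X over SSH before running the script:

    ssh -X builder@build-host    # hypothetical host; ssh -X sets DISPLAY in the remote session
    echo $DISPLAY                # should now be non-empty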

# One step back to see the old symlinks
cd ${CENTOS_REPO}
git checkout $WORKING_BRANCH
if [ $? != 0 ]; then
    echo "ERROR: Can't checkout branch '$WORKING_BRANCH' in directory '${CENTOS_REPO}'"
    exit 1
fi

FAILED=""
build_types="std rt"
for dat in $(cat $UPVERSION_LOG); do
    (
    name=$(echo $dat | awk -F '#' '{print $1}')
    srpm_path=$(echo $dat | awk -F '#' '{print $2}')
    old_src_rpm=$(echo $dat | awk -F '#' '{print $4}')
    new_src_rpm=$(echo $dat | awk -F '#' '{print $5}')

    PKG_DIR=$(dirname $(dirname $srpm_path))
    OLD_BRANCH=$(echo $old_src_rpm | sed 's#[.]src[.]rpm$##')
    NEW_BRANCH=$(echo $new_src_rpm | sed 's#[.]src[.]rpm$##')

    WORK_META_DIR=""
    for dd in $build_types; do
        WORK_META_DIR=$MY_WORKSPACE/$dd/srpm_work/$name/rpmbuild
        echo "WORK_META_DIR=$WORK_META_DIR"
        if [ -d $WORK_META_DIR ]; then
            break;
        else
            WORK_META_DIR=""
        fi
    done
    if [ "$WORK_META_DIR" == "" ]; then
        echo "ERROR: failed to find srpm_work directory for '$name'"
        exit 1
    fi
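
A hedged illustration of the directory this loop probes, assuming the lighttpd example above and a 'std' build: $MY_WORKSPACE/std/srpm_work/lighttpd/rpmbuild. These srpm_work trees are expected to have been created by the earlier 'build-pkgs --edit' pass (Step 3). The branch names are simply the src.rpm names with the suffix stripped:

    echo lighttpd-1.4.41-1.el7.src.rpm | sed 's#[.]src[.]rpm$##'    # -> lighttpd-1.4.41-1.el7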

    # WORK_SRC_DIR=$(dirname $(find $MY_WORKSPACE/srpm_work/$name/gits/ -type d -name .git))
    NEW_WORK_SRC_DIR=""
    OLD_WORK_SRC_DIR=""
    for dd in $build_types; do
        for g in $(find $MY_WORKSPACE/$dd/srpm_work/$name/gits/ -type d -name .git); do
            d=$(dirname $g)
            if [ -d $d ]; then
                cd $d;
                git tag | grep pre_wrs_ >> /dev/null
                if [ $? -ne 0 ]; then
                    continue
                fi
                git checkout $OLD_BRANCH 2>> /dev/null
                if [ $? -eq 0 ]; then
                    OLD_WORK_SRC_DIR=$d
                fi
                git checkout $NEW_BRANCH 2>> /dev/null
                if [ $? -eq 0 ]; then
                    NEW_WORK_SRC_DIR=$d
                fi
            fi
        done
    done
    if [ "$WORK_META_DIR" == "" ]; then
        echo "ERROR: failed to find srpm_work directory for '$name'"
        exit 1
    fi

    echo "$name $old_src_rpm $new_src_rpm"
    echo "PKG_DIR=$PKG_DIR"
    echo "OLD_BRANCH=$OLD_BRANCH"
    echo "NEW_BRANCH=$NEW_BRANCH"
    echo "WORK_META_DIR=$WORK_META_DIR"
    echo "OLD_WORK_SRC_DIR=$OLD_WORK_SRC_DIR"
    echo "NEW_WORK_SRC_DIR=$NEW_WORK_SRC_DIR"
    echo ""

    (
    cd $WORK_META_DIR
    if [ $? -ne 0 ]; then
        echo "ERROR: failed to cd to WORK_META_DIR=$WORK_META_DIR"
        exit 1
    fi
    echo "--- old meta git log (oldest to newest) ---"
    git checkout $OLD_BRANCH
    if [ $? -ne 0 ]; then
        echo "ERROR: failed to git checkout OLD_BRANCH=$OLD_BRANCH"
        exit 1
    fi
    git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%ci) %C(bold blue)<%an>%Creset' --abbrev-commit | tac
    PATCH_COMMIT_LIST=$(git log --graph --pretty=format:'%h -%d %s (%ci) <%an>' --abbrev-commit | tac | grep WRS: | grep -v 'WRS: COPY_LIST content' | awk '{ print $2 }')
    echo "--- new meta git log (oldest to newest) ---"
    git checkout $NEW_BRANCH
    if [ $? -ne 0 ]; then
        echo "ERROR: failed to git checkout NEW_BRANCH=$NEW_BRANCH"
        exit 1
    fi
    git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%ci) %C(bold blue)<%an>%Creset' --abbrev-commit | tac
    REFERENCE_COMMIT=$(git log --graph --pretty=format:'%h -%d %s (%ci) <%an>' --abbrev-commit | head -n 1 | awk '{ print $2 }')
    echo ""

    for COMMIT in ${PATCH_COMMIT_LIST}; do
        echo "git cherry-pick $COMMIT"
        git cherry-pick "$COMMIT"
        if [ $? -ne 0 ]; then
            echo "WARNING: 'git cherry-pick $COMMIT' found merge conflicts. Please fix these files"
            git status --porcelain | grep '^UU ' | awk '{ print $2}'
            echo "pwd=$(pwd)"
            # gitk &
            echo "git mergetool --no-prompt"
            git mergetool --no-prompt
            # for FILE_NAME in $(git status --porcelain | grep '^UU ' | awk '{ print $2}'); do
            #    xterm -e "vi $FILE_NAME -c '/[<=>][<=>][<=>][<=>]'"
            #    if [ $? -ne 0 ]; then
            #        echo "ERROR: problem launching editor on "
            #        exit 1
            #    fi
            # done
            echo "git cherry-pick --continue"
            git cherry-pick --continue
        fi
    done

    PATCH_LIST=$(git format-patch -n $REFERENCE_COMMIT)
    if [ $? -ne 0 ]; then
        echo "ERROR: failed to git format-patch -n REFERENCE_COMMIT=$REFERENCE_COMMIT"
        exit 1
    fi
    for PATCH_FILE in ${PATCH_LIST}; do
        PATCH_TARGET=$(echo $PATCH_FILE | sed 's/^[0-9][0-9][0-9][0-9]-WRS-//' | sed 's/.patch$//')
        echo "$PATCH_FILE -> $PATCH_TARGET"
        N=$(find "$PKG_DIR/centos/meta_patches" -name "$PATCH_TARGET*" | wc -l)
        if [ $N -eq 1 ]; then
            PATCH_DEST=$(find "$PKG_DIR/centos/meta_patches" -name "$PATCH_TARGET*")
            echo "cp -f $PATCH_FILE $PATCH_DEST"
            \cp -f $PATCH_FILE $PATCH_DEST
            if [ $? -ne 0 ]; then
                echo "ERROR: copy failed $WORK_META_DIR/$PATCH_FILE -> $PATCH_DEST"
                exit 1
            fi
        else
            echo "ERROR: Don't know what destination file name to use for patch '$WORK_META_DIR/$PATCH_FILE' derived from commit $COMMIT, and to be copied to '$PKG_DIR/centos/meta_patches'"
        fi
    done

    echo ""
    echo ""
    )
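
The meta-patch block above relies on a naming convention: a 'git format-patch' file such as 0001-WRS-spec-include-TiS-changes.patch (name illustrative) is reduced to its summary and matched against the existing files under $PKG_DIR/centos/meta_patches. A minimal sketch of the transform:

    echo 0001-WRS-spec-include-TiS-changes.patch | sed 's/^[0-9][0-9][0-9][0-9]-WRS-//' | sed 's/.patch$//'
    # -> spec-include-TiS-changes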

    if [ $? -ne 0 ]; then
        FAILED=$name
        break
    fi

    (
    echo "--- old git log (oldest to newest) ---"
    cd $OLD_WORK_SRC_DIR
    if [ $? -ne 0 ]; then
        echo "ERROR: failed to cd to OLD_WORK_SRC_DIR=$OLD_WORK_SRC_DIR"
        exit 1
    fi

    git checkout $OLD_BRANCH
    if [ $? -ne 0 ]; then
        echo "ERROR: failed to git checkout OLD_BRANCH=$OLD_BRANCH in directory '$OLD_WORK_SRC_DIR'"
        exit 1
    fi

    git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%ci) %C(bold blue)<%an>%Creset' --abbrev-commit | tac
    PATCH_COMMIT_LIST=$(git log --graph --pretty=format:'%h -%d %s (%ci) <%an>' --abbrev-commit | tac | grep WRS: | grep -v 'WRS: COPY_LIST content' | awk '{ print $2 }')

    echo "--- new git log (oldest to newest) ---"
    cd $NEW_WORK_SRC_DIR
    if [ $? -ne 0 ]; then
        echo "ERROR: failed to cd to NEW_WORK_SRC_DIR=$NEW_WORK_SRC_DIR"
        exit 1
    fi

    git checkout $NEW_BRANCH
    if [ $? -ne 0 ]; then
        echo "ERROR: failed to git checkout NEW_BRANCH=$NEW_BRANCH in directory '$NEW_WORK_SRC_DIR'"
        exit 1
    fi

    git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%ci) %C(bold blue)<%an>%Creset' --abbrev-commit | tac
    REFERENCE_COMMIT=$(git log --graph --pretty=format:'%h -%d %s (%ci) <%an>' --abbrev-commit | head -n 1 | awk '{ print $2 }')
    echo ""

    if [ "$OLD_WORK_SRC_DIR" == "$NEW_WORK_SRC_DIR" ]; then
        for COMMIT in ${PATCH_COMMIT_LIST}; do
            echo "git cherry-pick $COMMIT"
            git cherry-pick "$COMMIT"
            if [ $? -ne 0 ]; then
                echo "WARNING: 'git cherry-pick $COMMIT' found merge conflicts. Please fix these files"
                git status --porcelain | grep '^UU ' | awk '{ print $2}'
                echo "pwd=$(pwd)"
                # gitk &
                echo "git mergetool --no-prompt"
                git mergetool --no-prompt
                # for FILE_NAME in $(git status --porcelain | grep '^UU ' | awk '{ print $2}'); do
                #    xterm -e "vi $FILE_NAME -c '/[<=>][<=>][<=>][<=>]'"
                #    if [ $? -ne 0 ]; then
                #        echo "ERROR: problem launching editor on "
                #        exit 1
                #    fi
                # done
                echo "git cherry-pick --continue"
                git cherry-pick --continue
            fi
        done
    else
        cd $OLD_WORK_SRC_DIR
        PATCH_LIST=$(git format-patch -n pre_wrs_$OLD_BRANCH)
        if [ $? -ne 0 ]; then
            echo "ERROR: failed to git format-patch -n REFERENCE_COMMIT=pre_wrs_$OLD_BRANCH"
            exit 1
        fi
        cd $NEW_WORK_SRC_DIR
        for PATCH_FILE in ${PATCH_LIST}; do
            cat $OLD_WORK_SRC_DIR/$PATCH_FILE | patch -p1
            if [ $? -ne 0 ]; then
                for REJECT in $(find . -name '*.rej'); do
                    FILE_NAME=$(echo $REJECT | sed 's#.rej$##')
                    cd $OLD_WORK_SRC_DIR
                    gitk $FILE_NAME &
                    cd $NEW_WORK_SRC_DIR
                    if [ -f $FILE_NAME ] && [ -f $FILE_NAME.orig ]; then
                        \cp -f $FILE_NAME.orig $FILE_NAME
                        xterm -e "vi $FILE_NAME $REJECT"
                        rm -f $REJECT
                        rm -f $FILE_NAME.orig
                    fi
                done
            fi

            git add --all
            MSG=$(echo $PATCH_FILE | sed 's/^[0-9][0-9][0-9][0-9]-WRS-//' | sed 's/.patch$//')
            git commit -m "WRS: $MSG"
        done

    fi

    PATCH_LIST=$(git format-patch -n $REFERENCE_COMMIT)
    if [ $? -ne 0 ]; then
        echo "ERROR: failed to git format-patch -n REFERENCE_COMMIT=$REFERENCE_COMMIT"
        exit 1
    fi
    for PATCH_FILE in ${PATCH_LIST}; do
        PATCH_TARGET=$(echo $PATCH_FILE | sed 's/^[0-9][0-9][0-9][0-9]-WRS-Patch[0-9]*-//' | sed 's/^[0-9][0-9][0-9][0-9]-WRS-Patch//' | sed 's/.patch$//')
        echo "$PATCH_FILE -> $PATCH_TARGET"
        PKG_PATCH_DIR="$PKG_DIR/centos/patches"
        N=0
        if [ -d "$PKG_PATCH_DIR" ]; then
            N=$(find "$PKG_PATCH_DIR" -name "$PATCH_TARGET*" | grep -v '[/]meta_patches[/]' | wc -l)
        fi
        if [ $N -ne 1 ]; then
            PKG_PATCH_DIR="$PKG_DIR"
            if [ -d "$PKG_PATCH_DIR" ]; then
                N=$(find "$PKG_PATCH_DIR" -name "$PATCH_TARGET*" | grep -v '[/]meta_patches[/]' | wc -l)
            fi
        fi
        echo "N=$N"
        echo "PKG_PATCH_DIR=$PKG_PATCH_DIR"

        if [ $N -eq 1 ]; then
            PATCH_DEST=$(find "$PKG_PATCH_DIR" -name "$PATCH_TARGET*" | grep -v '[/]meta_patches[/]')
            echo "meld $PATCH_FILE -> $PATCH_DEST"
            meld $PATCH_FILE $PATCH_DEST
            if [ $? -ne 0 ]; then
                echo "ERROR: meld failed $WORK_SRC_DIR/$PATCH_FILE -> $PATCH_DEST"
                exit 1
            fi
        else
            echo "ERROR: Don't know what destination file name to use for patch '$OLD_WORK_SRC_DIR/$PATCH_FILE', and to be copied to '$PKG_PATCH_DIR'"
        fi
    done
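
Source patches follow a similar convention but may carry a 'PatchN-' component; a hedged example of the name reduction used in the loop above (file name illustrative):

    echo 0003-WRS-Patch3-fix-build-flags.patch | sed 's/^[0-9][0-9][0-9][0-9]-WRS-Patch[0-9]*-//' | sed 's/^[0-9][0-9][0-9][0-9]-WRS-Patch//' | sed 's/.patch$//'
    # -> fix-build-flags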

    echo ""
    echo ""
    )

    if [ $? -ne 0 ]; then
        FAILED=$name
        break
    fi

    )

done

if [ "$FAILED" != "" ]; then
    echo "Failed for ... $FAILED"
    exit 1
fi

echo ""
for d in $(for dat in $(cat $UPVERSION_LOG); do srpm_path=$(echo $dat | awk -F '#' '{print $2}'); ( cd $(dirname $srpm_path); git rev-parse --show-toplevel ); done | sort --unique); do
    (
    cd $d
    echo "cd $d"
    for f in $(git status --porcelain | awk '{print $2}'); do
        echo "git add $f";
    done
    if [ "$PATCH_ID" == "" ]; then
        echo "git commit -m 'rebased patches'"
    else
        echo "git commit -m 'rebased patches for patch $PATCH_ID'"
    fi
    )
done
echo ""
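
Note that this final loop only prints the suggested commands; the user is expected to review and run them by hand. A hedged example of the kind of output produced (repo and file paths illustrative):

    cd $MY_REPO/stx/integ
    git add extended/lighttpd/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch
    git commit -m 'rebased patches'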

@ -1,39 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE comps PUBLIC "-//Red Hat, Inc.//DTD Comps info//EN" "comps.dtd">
<comps>
  <group>
    <id>buildsys-build</id>
    <name>Buildsystem building group</name>
    <description/>
    <default>false</default>
    <uservisible>false</uservisible>
    <packagelist>
      <packagereq type="mandatory">bash</packagereq>
      <packagereq type="mandatory">bzip2</packagereq>
      <packagereq type="mandatory">coreutils</packagereq>
      <packagereq type="mandatory">cpio</packagereq>
      <packagereq type="mandatory">diffutils</packagereq>
      <packagereq type="mandatory">epel-release</packagereq>
      <packagereq type="mandatory">epel-rpm-macros</packagereq>
      <packagereq type="mandatory">findutils</packagereq>
      <packagereq type="mandatory">gawk</packagereq>
      <packagereq type="mandatory">gcc</packagereq>
      <packagereq type="mandatory">gcc-c++</packagereq>
      <packagereq type="mandatory">grep</packagereq>
      <packagereq type="mandatory">gzip</packagereq>
      <packagereq type="mandatory">hostname</packagereq>
      <packagereq type="mandatory">info</packagereq>
      <packagereq type="mandatory">make</packagereq>
      <packagereq type="mandatory">patch</packagereq>
      <packagereq type="mandatory">redhat-rpm-config</packagereq>
      <packagereq type="mandatory">rpm-build</packagereq>
      <packagereq type="mandatory">sed</packagereq>
      <packagereq type="mandatory">shadow-utils</packagereq>
      <packagereq type="mandatory">tar</packagereq>
      <packagereq type="mandatory">unzip</packagereq>
      <packagereq type="mandatory">util-linux-ng</packagereq>
      <packagereq type="mandatory">which</packagereq>
      <packagereq type="mandatory">xz</packagereq>
    </packagelist>
  </group>
</comps>
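
This comps group defines the packages pulled into the CentOS mock build root. A hedged example of how such a group file is typically attached to repository metadata (path illustrative):

    createrepo --groupfile comps.xml /path/to/local-repo
    # mock's chroot_setup_cmd commonly installs the 'buildsys-build' group from this metadata
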
@ -1 +0,0 @@
mock.cfg.centos7.all.proto