jenkins-pipelines/pipelines/monolithic.Jenkinsfile
Davlet Panech bba927d30c Separate building & starting of containers
Separate the build step from startup for environment containers. Build
step is very slow, and we want to be able to start the containers
without rebuilding when troubleshooting Jenkins jobs.

TESTS
==============================
Run both steps from Jenkins

Story: 2010226
Task: 49212

Depends-On: https://review.opendev.org/c/starlingx/tools/+/902792
Signed-off-by: Davlet Panech <davlet.panech@windriver.com>
Change-Id: I3462de0a9550310c931cbed2555de1b3a6bc53f9
2023-12-06 14:32:34 -05:00


// vim: syn=groovy
//
// Copyright (c) 2022 Wind River Systems, Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
library "common@${params.JENKINS_SCRIPTS_BRANCH}"
PROPS = null
IMG_PARAMS = null
IMAGES_FAILED = false
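//
// Note: helpers such as copyCurrentParams(), setBuildDescr() and
// saveCurrentJenkinsBuildInfo(), as well as Constants.SCRIPTS_DIR, are presumably
// provided by the shared "common" library loaded above.
//
// Globals shared across stages:
//   PROPS          -- build configuration filled in by loadEnv()
//   IMG_PARAMS     -- extra parameters passed to docker-image-related sub-jobs
//   IMAGES_FAILED  -- set to true when an image sub-job fails and errors are suppressed
//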
// Parse "KEY=VALUE" lines (e.g. the output of print-config.sh) into a map,
// skipping blank and comment-only lines.
def parseProps (text) {
    def x = [:]
    for (line in text.split (/\n+/)) {
        if (line.matches (/\s*(?:#.*)?/)) {
            continue
        }
        def parts = line.split ("=", 2)
        def key = parts[0]
        def value = parts[1]
        x."${key}" = value
    }
    return x
}
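// Example (illustrative values only): given print-config.sh output such as
//
//   # build configuration
//   BUILD_OUTPUT_HOME_URL=https://build.example.com/outputs/2023-12-06_14-30-00
//   PUBLISH_URL=https://build.example.com/publish/2023-12-06_14-30-00
//   BUILD_REMOTE_CLI=true
//
// parseProps() skips the comment line and returns the map
//   [BUILD_OUTPUT_HOME_URL: 'https://...', PUBLISH_URL: 'https://...', BUILD_REMOTE_CLI: 'true'].
// All values are strings; loadEnv() below converts BUILD_REMOTE_CLI to a boolean.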
// Load build configuration from $BUILD_HOME and from print-config.sh,
// save it in the global PROPS, and return whether a build is required.
def loadEnv() {
    def data = [:]
    data.NEED_BUILD = false
    data.SUPPRESS_DOCKER_IMAGE_BUILD_ERRORS = params.SUPPRESS_DOCKER_IMAGE_BUILD_ERRORS
    ws(params.BUILD_HOME) {
        if (fileExists ("NEED_BUILD")) {
            data.NEED_BUILD = true
        }
    }
    final String configText = sh (script: "${Constants.SCRIPTS_DIR}/print-config.sh", returnStdout: true)
    final props = parseProps (configText)
    data.BUILD_OUTPUT_HOME_URL = props.BUILD_OUTPUT_HOME_URL
    data.PUBLISH_URL = props.PUBLISH_URL
    data.BUILD_REMOTE_CLI = props.BUILD_REMOTE_CLI == "true"
    PROPS = data
    return data.NEED_BUILD
}
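// NEED_BUILD is a marker file under $BUILD_HOME, created by the "create-changelog"
// sub-job (see the INIT stage below), presumably when there is something new to build.
// For troubleshooting one could force the flag manually on the build host, e.g.:
//
//   touch "$BUILD_HOME/NEED_BUILD"
//
// (hypothetical example; the FORCE_BUILD job parameter presumably serves the same purpose)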
// Map a sub-job name to its full Jenkins path: sub-jobs are expected to live
// in a "parts/" sub-folder of the folder containing the current job.
def partJobName (name) {
    final String folder = env.JOB_NAME.replaceAll (/(.*\/).+$/, '$1')
    if (folder == env.JOB_NAME) {
        error "This job must be in a Jenkins folder!"
    }
    return "/" + folder + "parts/" + name
}
def runPart (name, params = [], propagate = true) {
    // Check out the sub-job's Jenkinsfile at the same commit as the current
    // build's Jenkinsfile, by passing our GIT_COMMIT as its JENKINS_SCRIPTS_BRANCH.
    final gitRef = string (name: 'JENKINS_SCRIPTS_BRANCH', value: env.GIT_COMMIT)
    return build (
        job: partJobName (name),
        parameters: copyCurrentParams() + [ gitRef ] + params,
        propagate: propagate
    )
}
def runImagesPart (name, params = []) {
    // Optionally ignore docker image-related errors: in that case we prevent
    // sub-jobs from raising exceptions and failing the current build. Instead
    // we note that an image-related job has failed and skip all subsequent
    // image-related jobs.
    if (PROPS.SUPPRESS_DOCKER_IMAGE_BUILD_ERRORS) {
        if (!IMAGES_FAILED) {
            final jobName = partJobName (name)
            final res = runPart (name, IMG_PARAMS + params, false).result
            if (res == 'ABORTED') {
                // FIXME: make current build ABORTED here
                error ("child job ${jobName} aborted")
            }
            if (res == 'SUCCESS' || res == 'UNSTABLE') {
                return true
            }
            print ("*** ERROR: child job ${jobName} failed!")
            IMAGES_FAILED = true
        }
        return false
    }
    // Otherwise, just call the sub-job normally, i.e. its failure
    // will propagate to the current build.
    runPart (name, IMG_PARAMS + params)
    return true
}
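// For example, with SUPPRESS_DOCKER_IMAGE_BUILD_ERRORS enabled, a failed "build-wheels"
// sub-job sets IMAGES_FAILED instead of throwing; the later image-related stages are
// then skipped (their "when" clauses check IMAGES_FAILED), the overall build can still
// finish successfully, and printBuildFooter() adds a warning to the build footer.
// With the flag disabled, the same failure would propagate and fail this build right away.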
def printBuildFooter() {
    if (PROPS) {
        String msg = ""
        msg += "\n"
        msg += "========================================\n"
        msg += "\n"
        if (PROPS.NEED_BUILD) {
            msg += "Build output: ${PROPS.BUILD_OUTPUT_HOME_URL}\n"
            if (PROPS.PUBLISH_URL) {
                msg += "Publish output: ${PROPS.PUBLISH_URL}\n"
            }
            if (IMAGES_FAILED) {
                msg += "\n"
                msg += "WARNING:\n"
                msg += "WARNING: docker images build attempted, but failed!\n"
                msg += "WARNING: see log output above\n"
                msg += "WARNING:\n"
            }
        }
        else {
            echo "*** NO CHANGES - BUILD NOT REQUIRED"
        }
        msg += "\n"
        msg += "========================================\n"
        msg += "\n"
        echo (msg)
    }
}
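// With NEED_BUILD set, the footer looks roughly like this (URLs are illustrative only):
//
//   ========================================
//
//   Build output: https://build.example.com/outputs/2023-12-06_14-30-00
//   Publish output: https://build.example.com/publish/2023-12-06_14-30-00
//
//   ========================================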
setBuildDescr()
pipeline {
    agent any
    options {
        timestamps()
    }
    parameters {
        string (
            name: 'MASTER_JOB_NAME'
        )
        string (
            name: 'MASTER_BUILD_NUMBER'
        )
        string (
            name: 'BUILD_HOME'
        )
        string (
            name: 'TIMESTAMP'
        )
        string (
            name: 'PUBLISH_TIMESTAMP'
        )
        booleanParam (
            name: 'REBUILD_BUILDER_IMAGES'
        )
        booleanParam (
            name: 'BUILDER_USE_DOCKER_CACHE'
        )
        booleanParam (
            name: 'REFRESH_SOURCE'
        )
        booleanParam (
            name: 'BUILD_PACKAGES'
        )
        string (
            name: 'BUILD_PACKAGES_LIST'
        )
        booleanParam (
            name: 'PKG_REUSE'
        )
        booleanParam (
            name: 'BUILD_ISO'
        )
        booleanParam (
            name: 'BUILD_RT'
        )
        booleanParam (
            name: 'DRY_RUN'
        )
        booleanParam (
            name: 'SHELL_XTRACE'
        )
        booleanParam (
            name: 'CLEAN_PACKAGES'
        )
        booleanParam (
            name: 'CLEAN_ISO'
        )
        booleanParam (
            name: 'CLEAN_REPOMGR'
        )
        booleanParam (
            name: 'CLEAN_DOWNLOADS'
        )
        booleanParam (
            name: 'CLEAN_DOCKER'
        )
        booleanParam (
            name: 'FORCE_BUILD'
        )
        booleanParam (
            name: 'BUILD_HELM_CHARTS'
        )
        booleanParam (
            name: 'FORCE_BUILD_WHEELS'
        )
        booleanParam (
            name: 'BUILD_DOCKER_BASE_IMAGE'
        )
        booleanParam (
            name: 'BUILD_DOCKER_IMAGES'
        )
        string (
            name: 'DOCKER_IMAGE_LIST'
        )
        booleanParam (
            name: 'PUSH_DOCKER_IMAGES'
        )
        booleanParam (
            name: 'SUPPRESS_DOCKER_IMAGE_BUILD_ERRORS',
            defaultValue: false
        )
        booleanParam (
            name: 'IMPORT_BUILD'
        )
        string (
            name: 'IMPORT_BUILD_DIR'
        )
        booleanParam (
            name: 'USE_DOCKER_CACHE'
        )
        string (
            name: 'JENKINS_SCRIPTS_BRANCH'
        )
        text (
            name: 'PATCH_LIST',
            defaultValue: '-',
            description: '''\
<pre><code>List of Gerrit URLs to apply before running the build, one per line "[PATH] URL REF", eg:
https://review.opendev.org/starlingx/config refs/changes/71/859571/4
https://review.opendev.org/starlingx/stx-puppet refs/changes/75/859575/1
https://review.opendev.org/starlingx/tools refs/changes/76/859576/2
or with paths relative to repo root:
cgcs-root/stx/config https://review.opendev.org/starlingx/config refs/changes/71/859571/4
cgcs-root/stx/stx-puppet https://review.opendev.org/starlingx/stx-puppet refs/changes/75/859575/1
stx-tools https://review.opendev.org/starlingx/tools refs/changes/76/859576/2
</code></pre>
'''
        )
    }
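    // A hypothetical sketch of how an upstream "master" job might trigger this pipeline;
    // the job path, BUILD_HOME and parameter values below are placeholders, not taken
    // from this repo:
    //
    //   build job: 'monolithic', parameters: [
    //       string (name: 'MASTER_JOB_NAME', value: env.JOB_NAME),
    //       string (name: 'MASTER_BUILD_NUMBER', value: env.BUILD_NUMBER),
    //       string (name: 'BUILD_HOME', value: '/localdisk/designer/jenkins/monolithic'),
    //       string (name: 'JENKINS_SCRIPTS_BRANCH', value: 'master'),
    //       booleanParam (name: 'BUILD_PACKAGES', value: true),
    //       booleanParam (name: 'BUILD_ISO', value: true)
    //   ]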
    stages {
        stage('INIT') {
            steps {
                script {
                    // Initialize BUILD_HOME, create build.conf & stx.conf
                    runPart ("init-env")
                    // Update source tree
                    runPart ("clone-source")
                    // Create BUILD & stx.conf
                    runPart ("configure-build")
                    // Stop containers before updating the source tree
                    runPart ("stop-containers")
                    // Create changelog, LAST_COMMITS, NEED_BUILD etc
                    runPart ("create-changelog")
                    // Is a build required?
                    if (loadEnv()) {
                        IMG_PARAMS = [ string (name: 'BUILD_STREAM', value: 'stable') ]
                    }
                    else {
                        println "*** NO CHANGES, BUILD NOT REQUIRED ***"
                    }
                }
            }
        }
        // This stage runs only if a build is required
        stage('X0') {
            when { expression { PROPS.NEED_BUILD } }
            stages {
                stage('PREPARE') {
                    steps {
                        // Login to host's docker
                        runPart ("host-docker-login")
                        // Delete or keep packages, aptly state, etc depending on build params
                        runPart ("clean-build")
                        // Build & start containers
                        runPart ("build-env-containers")
                        runPart ("start-containers")
                        // Login to docker early to catch login errors
                        runPart ("docker-login")
                    }
                }
                // Populate mirror/
                stage('DOWNLOAD') {
                    steps {
                        runPart ("download-prerequisites")
                    }
                }
                // Build packages
                stage('PACKAGES') {
                    when { expression { params.BUILD_PACKAGES } }
                    steps {
                        runPart ("build-packages")
                        runPart ("publish-packages")
                    }
                }
                // Generate initial helm charts. We will re-generate them after
                // building docker images, if requested, in order to replace
                // image tags with those of locally-built images.
                stage('HELM:initial') {
                    when { expression { params.BUILD_HELM_CHARTS } }
                    steps {
                        runPart ("build-helm-charts", IMG_PARAMS)
                        runPart ("publish-helm-charts", IMG_PARAMS)
                    }
                }
// Build ISO & images in parallel
stage('X1') { parallel {
stage('ISO') {
when { expression { params.BUILD_ISO } }
steps { script {
runPart ("build-iso")
runPart ("publish-iso")
sh ("BUILD_STATUS=success ${Constants.SCRIPTS_DIR}/create-latest-iso-symlinks.sh")
} }
} // stage('ISO')
stage('IMAGES') {
when { expression { params.BUILD_DOCKER_BASE_IMAGE || params.BUILD_DOCKER_IMAGES } }
stages {
stage('IMAGES:base') {
when { expression { ! IMAGES_FAILED && params.BUILD_DOCKER_BASE_IMAGE } }
steps { script {
runImagesPart ("build-docker-base")
} }
}
stage('IMAGES:wheels') {
when { expression { ! IMAGES_FAILED && params.BUILD_DOCKER_IMAGES } }
steps { script {
runImagesPart ("build-wheels")
runImagesPart ("publish-wheels")
} }
}
stage('IMAGES:images') {
when { expression { ! IMAGES_FAILED && params.BUILD_DOCKER_IMAGES } }
steps { script {
runImagesPart ("build-docker-images")
} }
}
stage('IMAGES:helm') {
// Rebuild helm charts even if image builds failed.
// This will record any images that were built sucessfully in the helm charts
when { expression { params.BUILD_DOCKER_IMAGES && params.BUILD_HELM_CHARTS } }
steps { script {
runPart ("build-helm-charts", IMG_PARAMS)
runPart ("publish-helm-charts", IMG_PARAMS)
} }
}
stage('IMAGES:symlinks') {
// Create the symlink even if some images failed.
// FIXME: remove all logic re publishing failed docker builds
// once all images have been fixed for Debian
// when { expression { ! IMAGES_FAILED } }
steps { script {
// copy image lists to publish root and create the "latest_docker_image_build" symlinks
// in publish and archive roots
sh ("BUILD_STATUS=success ${Constants.SCRIPTS_DIR}/create-latest-containers-symlinks.sh")
} }
}
} // stages
} // stage('IMAGES')
stage('remote-cli') {
when { expression { PROPS.BUILD_REMOTE_CLI } }
steps {
runPart ("build-remote-cli")
}
}
stage('export-dir') { steps {
runPart ("build-export-dir")
} }
} }// stage('X1')
            } // stages
            post {
                always {
                    echo "build result: ${currentBuild.result}"
                    runPart ("stop-containers")
                    runPart ("archive-misc") // archive anything we may have missed
                    saveCurrentJenkinsBuildInfo() // save this job's build number on disk (for publish-logs)
                }
                success {
                    // Copy LAST_COMMITS to the archive root & update the "latest_build" symlink in
                    // both the archive and publish roots
                    sh ("BUILD_STATUS=success ${Constants.SCRIPTS_DIR}/create-latest-symlinks.sh")
                    printBuildFooter() // print archive & publish URLs
                    runPart ("publish-logs") // publish this job's Jenkins log
                }
                unsuccessful {
                    sh ("BUILD_STATUS=fail ${Constants.SCRIPTS_DIR}/create-latest-symlinks.sh")
                    runPart ("publish-logs") // publish this job's Jenkins log
                }
            }
        } // stage('X0')
    } // stages
}