Update Debian build system to support Kubernetes

Extends the build tools to add support for Kubernetes. For Kubernetes,
it supports a single-node cluster and host-path volumes.

New environment variables:
- PROJECT: build project name
- STX_PLATFORM: minikube or kubernetes
- STX_BUILD_CPUS: replaces MINIKUBECPUS
- STX_K8S_NAMESPACE: kubernetes namespace name
* Default values are set to minikube, more details added in
import-stx.README.

To deploy stx on k8s, follow the steps below:

- create k8s namespace
- export PROJECT, included to support multiproject environments it is
used by the new default build home and also for docker tagging.
- export STX_BUILD_HOME, e.g: /localdisk/user/$PROJECT
- export STX_K8S_NAMESPACE="namespace_name"
- export STX_PLATFORM="kubernetes"
- export KUBECONFIG to your kubernetes config file
- STX_BUILD_CPUS replaces MINIKUBECPUS, this variable is used
by build-pkgs parallel jobs ($MAX_CPUS)
- Create your build home $STX_BUILD_HOME
- Init repo & repo sync
- source import-stx, check the env variables
- stx-init-env
- stx control status/start/stop/enter

Test Plan:

Pass: Create env on minikube
Pass: Create env on Kubernetes
Pass: Apply patch on current minikube env and continue to work on the
environment without issues
Pass: build package on Debian
Pass: build Debian image

Story: 2009812
Task: 44391

Signed-off-by: Luis Sampaio <luis.sampaio@windriver.com>
Change-Id: I7b760fbf1454f6aa90dd93dd9ff3a61d5fbd1b5c
This commit is contained in:
Luis Sampaio 2022-02-11 11:05:12 -08:00
parent a334124287
commit 52ef35d1bf
24 changed files with 652 additions and 359 deletions

View File

@ -1,25 +1,6 @@
# bash # bash
if [ -z "$MINIKUBE_HOME" ];then notice_warn () {
MINIKUBE_HOME=$HOME
else
if [ ! -d "$MINIKUBE_HOME" ]; then
echo "The directory defined by \$MINIKUBE_HOME doesn't exist"
return 1
fi
fi
FSTYPE=$(stat -f -L -c %T $MINIKUBE_HOME)
if [ x"$FSTYPE" == x"nfs" ]; then
echo ""
echo "Warning: stx minikube doesn't allow \$MINIKUBE_HOME or \$HOME directory as nfs mount point!!!"
echo " Please set non-nfs MINIKUBE_HOME with the command 'export MINIKUBE_HOME=XXX/YYY'"
echo ""
unset MINIKUBE_HOME
return 1
fi
notice_warn() {
local tty_on tty_off local tty_on tty_off
if [[ -t 2 ]] ; then if [[ -t 2 ]] ; then
tty_on=$'\033[1;33m' tty_on=$'\033[1;33m'
@ -28,18 +9,6 @@ notice_warn() {
echo >&2 "${tty_on}$*${tty_off}" echo >&2 "${tty_on}$*${tty_off}"
} }
export PRJDIR=$(pwd)
export PATH=$PRJDIR/stx/bin:$PATH
export MINIKUBECPUS=${MINIKUBECPUS:-6}
export MINIKUBEMEMORY=${MINIKUBEMEMORY:-16000}
export MINIKUBENAME=${MINIKUBENAME:-minikube-$USER-upstream}
export KUBECONFIG=$MINIKUBE_HOME/.kube/config
export STX_BUILD_HOME="${STX_BUILD_HOME:-/localdisk/$USER}"
if [ ! -f "stx.conf" ]; then
cp stx.conf.sample stx.conf
fi
number_of_users () { number_of_users () {
local count local count
@ -68,34 +37,125 @@ sqrt () {
echo -e "sqrt($1)" | bc -q -i | head -2 | tail -1 echo -e "sqrt($1)" | bc -q -i | head -2 | tail -1
} }
# Consider many users are just working with code and not actually building.
NUM_USERS=$(sqrt $(number_of_users)) if [ -z "$PROJECT" ]; then
ABSOLUTE_MAX_CPUS=$(($(number_of_cpus)/$NUM_USERS)) notice_warn "\$PROJECT needs to be defined, this will be your project name."
MAX_CPUS=$(number_of_cpus) notice_warn "It will be used on the docker image tagging to support multiusers."
if [ "$MAX_CPUS" == "" ] || [ "$MAX_CPUS" == "0" ]; then return 1
MAX_CPUS=1
fi fi
if [ $MAX_CPUS -gt $ABSOLUTE_MAX_CPUS ]; then # Host side path, exports STX lib to user's PATH
MAX_CPUS=$ABSOLUTE_MAX_CPUS export PRJDIR=$(pwd)
export PATH=$PRJDIR/stx/bin:$PATH
# Used by helm/stx-init to tag the user images
DOCKER_TAG_VERSION="v0.1.0"
export DOCKER_TAG_LOCAL="${USER}-${PROJECT}-${DOCKER_TAG_VERSION}"
# Platform 'minikube' or 'kubernetes'
export STX_PLATFORM="${STX_PLATFORM:-minikube}"
# Max cpus for the build parallel jobs, replaces MINIKUBECPUS env var
export STX_BUILD_CPUS=${STX_BUILD_CPUS:-6}
STX_BUILD_HOME_DEFAULT_v1="/localdisk/$USER"
STX_BUILD_HOME_DEFAULT_v2="/localdisk/designer/$USER/$PROJECT"
if [ ! -f "stx.conf" ]; then
cp stx.conf.sample stx.conf
fi fi
if [ $MINIKUBECPUS -gt $MAX_CPUS ]; then # Platform specifics
notice_warn "MINIKUBECPUS setting:$MINIKUBECPUS is more than MAX_CPUS: $MAX_CPUS." if [ "$STX_PLATFORM" = "minikube" ]; then
notice_warn "Limit the minikube cluster with MAX_CPUS." # MINIKUBE Settings
if [ -z "$STX_BUILD_HOME" ]; then
# Verify default build home
if [ -d "${STX_BUILD_HOME_DEFAULT_v1}/localdisk/designer/$USER" ]; then
STX_BUILD_HOME="${STX_BUILD_HOME_DEFAULT_v1}"
else
STX_BUILD_HOME="${STX_BUILD_HOME_DEFAULT_v2}"
fi
export STX_BUILD_HOME
fi
export MINIKUBECPUS=$MAX_CPUS if [ -z "$MINIKUBE_HOME" ]; then
fi MINIKUBE_HOME=$HOME
else
MAX_MEMORY=`expr $(cat /proc/meminfo |grep MemTotal | awk '{print $2}') / 1024` if [ ! -d "$MINIKUBE_HOME" ]; then
if [ "$MAX_MEMORY" == "" ] || [ "$MAX_MEMORY" == "0" ]; then echo "The directory defined by \$MINIKUBE_HOME doesn't exist"
MAX_MEMORY=2048 return 1
fi fi
fi
if [ $MINIKUBEMEMORY -gt $MAX_MEMORY ]; then
FSTYPE=$(stat -f -L -c %T $MINIKUBE_HOME)
notice_warn "MINIKUBEMEMORY setting:$MINIKUBEMEMORY is more than system MAX_MEMORY: $MAX_MEMORY M." if [ x"$FSTYPE" == x"nfs" ]; then
notice_warn "Limit the minikube cluster with MAX_MEMORY." echo ""
echo "Warning: stx minikube doesn't allow \$MINIKUBE_HOME or \$HOME directory as nfs mount point!!!"
export MINIKUBEMEMORY=$MAX_MEMORY echo " Please set non-nfs MINIKUBE_HOME with the command 'export MINIKUBE_HOME=XXX/YYY'"
echo ""
unset MINIKUBE_HOME
return 1
fi
export MINIKUBEMEMORY=${MINIKUBEMEMORY:-16000}
export MINIKUBENAME=${MINIKUBENAME:-minikube-$USER-upstream}
export KUBECONFIG=$MINIKUBE_HOME/.kube/config
# Consider many users are just working with code and not actually building.
NUM_USERS=$(sqrt $(number_of_users))
ABSOLUTE_MAX_CPUS=$(($(number_of_cpus)/$NUM_USERS))
MAX_CPUS=$(number_of_cpus)
if [ "$MAX_CPUS" == "" ] || [ "$MAX_CPUS" == "0" ]; then
MAX_CPUS=1
fi
if [ $MAX_CPUS -gt $ABSOLUTE_MAX_CPUS ]; then
MAX_CPUS=$ABSOLUTE_MAX_CPUS
fi
if [ $STX_BUILD_CPUS -gt $MAX_CPUS ]; then
notice_warn "\$STX_BUILD_CPUS setting:$STX_BUILD_CPUS is more than MAX_CPUS: $MAX_CPUS."
notice_warn "Limit the minikube cluster with MAX_CPUS."
export STX_BUILD_CPUS=$MAX_CPUS
fi
MAX_MEMORY=`expr $(cat /proc/meminfo |grep MemTotal | awk '{print $2}') / 1024`
if [ "$MAX_MEMORY" == "" ] || [ "$MAX_MEMORY" == "0" ]; then
MAX_MEMORY=2048
fi
if [ $MINIKUBEMEMORY -gt $MAX_MEMORY ]; then
notice_warn "MINIKUBEMEMORY setting:$MINIKUBEMEMORY is more than system MAX_MEMORY: $MAX_MEMORY M."
notice_warn "Limit the minikube cluster with MAX_MEMORY."
export MINIKUBEMEMORY=$MAX_MEMORY
fi
elif [ "$STX_PLATFORM" = "kubernetes" ]; then
# Host side path STX_BUILD_HOME
export STX_BUILD_HOME="${STX_BUILD_HOME:-${STX_BUILD_HOME_DEFAULT_v2}}"
if [ -z "$STX_K8S_NAMESPACE" ]; then
notice_warn "\$STX_K8S_NAMESPACE needs to be defined, this will be your namespace name"
return 1
fi
if ! kubectl get namespace 2>/dev/null | grep -q $STX_K8S_NAMESPACE; then
notice_warn "namespace $STX_K8S_NAMESPACE not found"
return 1
fi
if [ -z "$KUBECONFIG" ]; then
# Kubeconfig default location inside STX_BUILD_HOME
export KUBECONFIG=$STX_BUILD_HOME/.kube/config
fi
if [ ! -f "$KUBECONFIG" ]; then
notice_warn "KUBECONFIG: $KUBECONFIG not found"
notice_warn "Fix the kube config and try again."
return 1
fi
else
notice_warn "\$STX_PLATFORM not specified, valid options are: 'minikube' or 'kubernetes'"
return 1
fi fi

70
import-stx.README Normal file
View File

@ -0,0 +1,70 @@
import-stx: Import StarlingX build environment and stx tool
For more information: https://wiki.openstack.org/wiki/StarlingX/DebianBuildEnvironment
###############################################
# STX Environment variables
###############################################
$PROJECT
* Required
* Project name, used by docker image tagging for multiuser/multiproject environments. It is also used in the
default STX_BUILD_HOME path.
$STX_PLATFORM
* Required for Kubernetes.
* Platforms supported: "minikube" and "kubernetes"
* Default value: "minikube"
$STX_BUILD_HOME
* Optional
* Default value: "/localdisk/designer/$USER/$PROJECT"
* Working directory for the build.
$STX_BUILD_CPUS
* Optional
* Default value: 6
* Number of cpus that build-pkgs can use for parallel jobs, in a minikube environment this option is also used
to set the minikube cluster cpus.
###############################################
# Minikube only
###############################################
$MINIKUBENAME
* Optional
* Default value: minikube-$USER-upstream
* Sets the minikube cluster profile name.
$MINIKUBE_HOME
* Optional
* Default value: $HOME
* Sets the path for the .minikube and .kube directories that minikube uses for state/configuration.
$MINIKUBEMEMORY
* Optional
* Default value: 16000
* Amount of memory available for the minikube cluster.
###############################################
# Kubernetes only
###############################################
$STX_K8S_NAMESPACE
* Required
* Kubernetes namespace name for your project, currently each namespace can host 1 stx deployment. Namespace must
be created and access setup prior to sourcing import-stx and starting your build environment.
$KUBECONFIG
* Optional
* Default value: $STX_BUILD_HOME/.kube/config
* Path to your Kubernetes config file.

View File

@ -29,19 +29,18 @@ notice() {
echo >&2 "${tty_on}$*${tty_off}" echo >&2 "${tty_on}$*${tty_off}"
} }
PROGNAME=$(basename "$0") PROGNAME=$(basename "$0")
MINIKUBE=minikube MINIKUBE=minikube
HELM=helm HELM=helm
DOCKER=docker DOCKER=docker
PYTHON3=python3 PYTHON3=python3
KUBECTL=kubectl
DOCKER_PREFIX="starlingx/" DOCKER_PREFIX="starlingx/"
DOCKER_IMAGES="stx-builder stx-pkgbuilder stx-lat-tool stx-aptly" DOCKER_IMAGES="stx-builder stx-pkgbuilder stx-lat-tool stx-aptly"
DOCKER_TAG="master-debian-latest" DOCKER_TAG="master-debian-latest"
DOCKER_TAG_LOCAL="v0.1.0"
BUILD_DOCKER=0 BUILD_DOCKER=0
DELETE_MINIKUBE=0 DELETE_ENV=0
RESTART_MINIKUBE=0 RESTART_MINIKUBE=0
CLEAN_CONFIG=0 CLEAN_CONFIG=0
@ -96,7 +95,7 @@ while true ; do
shift shift
;; ;;
--nuke) --nuke)
DELETE_MINIKUBE=1 DELETE_ENV=1
shift shift
;; ;;
--) --)
@ -114,19 +113,30 @@ done
[[ "$#" -le 0 ]] || cmdline_error "too many arguments" [[ "$#" -le 0 ]] || cmdline_error "too many arguments"
# make sure required programs are installed # make sure required programs are installed
if ! command -v $MINIKUBE &> /dev/null; then if [ "$STX_PLATFORM" = "minikube" ]; then
echo >&2 "Command $MINIKUBE could not be found." if ! command -v "$MINIKUBE" &> /dev/null; then
echo >&2 "Please install it as https://minikube.sigs.k8s.io/docs/start/" echo >&2 "Command $MINIKUBE could not be found."
echo "" echo >&2 "Please install it as https://minikube.sigs.k8s.io/docs/start/"
exit 1 echo ""
exit 1
fi
fi fi
if ! command -v $HELM &> /dev/null; then if [ "$STX_PLATFORM" = "kubernetes" ]; then
if ! command -v "$KUBECTL" &> /dev/null; then
echo >&2 "Command $KUBECTL could not be found."
echo >&2 "Please install and configure kubectl."
echo ""
exit 1
fi
fi
if ! command -v "$HELM" &> /dev/null; then
echo >&2 "Command $HELM could not be found." echo >&2 "Command $HELM could not be found."
echo >&2 "Please install it as https://helm.sh/" echo >&2 "Please install it as https://helm.sh/"
echo "" echo ""
exit 1 exit 1
fi fi
if ! command -v $DOCKER &> /dev/null; then if ! command -v "$DOCKER" &> /dev/null; then
echo >&2 "Command $DOCKER could not be found. Please install it." echo >&2 "Command $DOCKER could not be found. Please install it."
echo >&2 "" echo >&2 ""
exit 1 exit 1
@ -171,55 +181,64 @@ if [[ ! -d "$STX_BUILD_HOME/mirrors/starlingx" ]] ; then
mkdir -p $STX_BUILD_HOME/mirrors/starlingx || exit 1 mkdir -p $STX_BUILD_HOME/mirrors/starlingx || exit 1
fi fi
# --nuke: just delete the cluster and exit
if [[ $DELETE_MINIKUBE -eq 1 ]] ; then
if minikube_exists ; then
notice "Deleting minikube cluster \`$MINIKUBENAME'"
$MINIKUBE delete -p "$MINIKUBENAME" || exit 1
fi
exit 0
fi
# Stop minikube if necessary if [ "$STX_PLATFORM" = "minikube" ]; then
WANT_START_MINIKUBE=0 # MINIKUBE
if [[ $RESTART_MINIKUBE -eq 1 ]] ; then # --nuke: just delete the cluster and exit
if minikube_started ; then if [[ $DELETE_ENV -eq 1 ]] ; then
notice "Stopping minikube cluster \`$MINIKUBENAME'" if minikube_exists ; then
$MINIKUBE stop -p $MINIKUBENAME notice "Deleting minikube cluster \`$MINIKUBENAME'"
if minikube_started ; then $MINIKUBE delete -p "$MINIKUBENAME" || exit 1
echo >&2 "minikube container $MINIKUBENAME exist!"
echo >&2 "And the command 'minikube -p $MINIKUBENAME stop' failed. The reason may be"
echo >&2 "the current MINIKUBE_HOME/HOME is not the same as the $MINIKUBENAME"
echo >&2 "Please change the MINIKUBE_HOME/HOME directory to the previous value"
echo >&2 "then re-execute this script"
exit 1
fi fi
exit 0
fi
# Stop minikube if necessary
WANT_START_MINIKUBE=0
if [[ $RESTART_MINIKUBE -eq 1 ]] ; then
if minikube_started ; then
notice "Stopping minikube cluster \`$MINIKUBENAME'"
$MINIKUBE stop -p $MINIKUBENAME
if minikube_started ; then
echo >&2 "minikube container $MINIKUBENAME exist!"
echo >&2 "And the command 'minikube -p $MINIKUBENAME stop' failed. The reason may be"
echo >&2 "the current MINIKUBE_HOME/HOME is not the same as the $MINIKUBENAME"
echo >&2 "Please change the MINIKUBE_HOME/HOME directory to the previous value"
echo >&2 "then re-execute this script"
exit 1
fi
fi
WANT_START_MINIKUBE=1
elif ! minikube_started ; then
WANT_START_MINIKUBE=1
fi
# Start minikube
if [[ $WANT_START_MINIKUBE -eq 1 ]] ; then
notice "Starting minikube cluster \`$MINIKUBENAME'"
$MINIKUBE start --driver=docker -p $MINIKUBENAME \
--cpus=$STX_BUILD_CPUS \
--memory=$MINIKUBEMEMORY \
--mount=true \
--mount-string="$STX_BUILD_HOME:/workspace" \
|| exit 1
fi
# Record the project environment variables
echo "The last minikube cluster startup date: `date`" > minikube_history.log
echo "MINIKUBE_HOME: $MINIKUBE_HOME" >> minikube_history.log
echo "MINIKUBENAME: $MINIKUBENAME" >> minikube_history.log
echo "STX_BUILD_HOME: $STX_BUILD_HOME" >> minikube_history.log
# Import minikube's docker environment
eval $(minikube -p $MINIKUBENAME docker-env)
elif [ "$STX_PLATFORM" = "kubernetes" ]; then
if [[ $DELETE_ENV -eq 1 ]] ; then
notice "--nuke not supported for Kubernetes platform"
fi fi
WANT_START_MINIKUBE=1
elif ! minikube_started ; then
WANT_START_MINIKUBE=1
fi fi
# Start minikube
if [[ $WANT_START_MINIKUBE -eq 1 ]] ; then
notice "Starting minikube cluster \`$MINIKUBENAME'"
$MINIKUBE start --driver=docker -p $MINIKUBENAME \
--cpus=$MINIKUBECPUS \
--memory=$MINIKUBEMEMORY \
--mount=true \
--mount-string="$STX_BUILD_HOME:/workspace" \
|| exit 1
fi
# Record the project environment variables
echo "The last minikube cluster startup date: `date`" > minikube_history.log
echo "MINIKUBE_HOME: $MINIKUBE_HOME" >> minikube_history.log
echo "MINIKUBENAME: $MINIKUBENAME" >> minikube_history.log
echo "STX_BUILD_HOME: $STX_BUILD_HOME" >> minikube_history.log
# Import minikube's docker environment
eval $(minikube -p $MINIKUBENAME docker-env)
# Build container images # Build container images
if [[ $BUILD_DOCKER -eq 1 ]] ; then if [[ $BUILD_DOCKER -eq 1 ]] ; then
notice "Building docker images" notice "Building docker images"
@ -240,3 +259,4 @@ notice "Restarting pods"
stx control stop || exit 1 stx control stop || exit 1
stx control start || exit 1 stx control start || exit 1
notice "Run 'stx control status' to check the pod startup status"

View File

@ -1,98 +0,0 @@
# Copyright (c) 2021 Wind River Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from stx import utils # pylint: disable=E0611
import subprocess
import sys
logger = logging.getLogger('STX-Command')
utils.set_logger(logger)
def check_prjdir_env():
prjdir_value = os.getenv('PRJDIR', '')
if not prjdir_value:
logger.warning('Please source the file stx-init-env to export the \
PRJDIR variable.')
logger.warning('If the minikube had already started, please source \
the file import-stx instead.')
sys.exit(0)
def get_pods_info():
'''Get all pods information of the stx building tools.'''
cmd = 'minikube -p $MINIKUBENAME kubectl -- get pods '
logger.info('stx-tools pods list:')
subprocess.check_call(cmd, shell=True)
def get_deployment_info():
'''Get all deployment information of the stx building tools.'''
cmd = 'minikube -p $MINIKUBENAME kubectl -- get deployment'
logger.info('stx-tools deployments list:')
subprocess.check_call(cmd, shell=True)
def get_helm_info():
'''Get the helm list information of the stx building tools.'''
cmd = 'helm ls'
logger.info('helm list:\n')
subprocess.check_call(cmd, shell=True)
def get_pod_name(dockername):
'''get the detailed pod name from the four pods.'''
cmd = 'minikube -p $MINIKUBENAME kubectl -- get pods | grep Running| \
grep stx-' + dockername + ' | awk \'{print $1}\' '
output = subprocess.check_output(cmd, shell=True)
podname = str(output.decode('utf8').strip())
return podname
def helm_release_exists(projectname):
'''Check if the helm release exists'''
cmd = 'helm ls | grep ' + projectname
ret = subprocess.getoutput(cmd)
if ret:
return True
else:
return False
def generatePrefixCommand(podname, command, enableuser):
'''Generate the command executed in the host'''
prefix_exec_cmd = 'minikube -p $MINIKUBENAME kubectl -- exec -ti '
builder_exec_cmd = prefix_exec_cmd + podname
prefix_bash_cmd = ' -- bash -l -c '
prefix_bash_with_user_cmd = ' -- bash -l -c \'sudo -u ${MYUNAME} bash \
--rcfile /home/$MYUNAME/userenv -i -c '
builder_exec_bash_cmd = builder_exec_cmd + prefix_bash_cmd
builder_exec_bash_with_user_cmd = builder_exec_cmd + \
prefix_bash_with_user_cmd
if enableuser:
cmd = builder_exec_bash_with_user_cmd + command
else:
cmd = builder_exec_bash_cmd + command
return cmd

115
stx/lib/stx/config.py Normal file
View File

@ -0,0 +1,115 @@
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
from stx import stx_configparser
from stx import utils
logger = logging.getLogger('STX-Config')
utils.set_logger(logger)
def require_env(var):
    """Fetch a required environment variable or abort with guidance.

    :param var: name of the environment variable to look up
    :returns: the variable's string value
    :raises LookupError: when the variable is absent from the environment
    """
    value = os.getenv(var)
    if value is not None:
        return value
    # Tell the user exactly how to set up the environment before failing.
    logger.error(f'{var} not found in the environment')
    logger.error(
        'Please source the file \'import-stx\' to define the ' +
        f'{var} variable and execute \'stx-init-env\' to start builder pods')
    raise LookupError(f'{var} not found in the environment!')
class Config:
    """Configuration interface.

    This class provides a read-only interface to project configuration.

    Usage
    =====
    ::

      from stx import config

      # load once
      config = Config().load()

      # use this instance throughout the app
      value = config.get('section', 'key')

      # returns "minikube -p $PROFILE kubectl -n $NAMESPACE --"
      # or similar
      # kubectl_command = config.kubectl()
    """

    def __init__(self):
        """Construct an empty instance; must call "load" explicitly before using"""
        self.prjdir = require_env('PRJDIR')
        self.config_filename = os.path.join(self.prjdir, 'stx.conf')
        # Any value other than 'minikube' selects the plain-kubernetes path.
        self.use_minikube = os.getenv('STX_PLATFORM', 'minikube') == 'minikube'
        # Initialize BOTH platform attributes up front: _init_kubectl_cmd()
        # unconditionally tests self.k8s_namespace, which previously was never
        # assigned in minikube mode and raised AttributeError on load().
        self.minikube_profile = None
        self.k8s_namespace = None
        if self.use_minikube:
            self.minikube_profile = require_env('MINIKUBENAME')
        else:
            self.k8s_namespace = os.getenv('STX_K8S_NAMESPACE')
        self.build_home = require_env('STX_BUILD_HOME')
        self.docker_tag = require_env('DOCKER_TAG_LOCAL')
        self.kubectl_cmd = None
        self.helm_cmd = None

    def load(self):
        """Load stx.conf and derive the kubectl/helm command prefixes"""
        self.data = stx_configparser.STXConfigParser(self.config_filename)
        self._init_kubectl_cmd()
        return self

    def get(self, section, key):
        """Get a config value"""
        assert self.data
        return self.data.getConfig(section, key)

    def impl(self):
        """Internal object that stores configuration"""
        return self.data

    # NOTE(review): this method is shadowed by the instance attribute
    # 'self.prjdir' assigned in __init__, so it is unreachable through an
    # instance; kept only for interface compatibility.
    def prjdir(self):
        """Path of starlingx/tools checkout"""
        return self.prjdir

    def kubectl(self):
        """Returns the command for invoking kubectl"""
        assert self.data
        return self.kubectl_cmd

    def helm(self):
        """Returns the command for invoking helm"""
        assert self.data
        return self.helm_cmd

    def _init_kubectl_cmd(self):
        # helm
        self.helm_cmd = 'helm'
        # kubectl: on minikube, go through the profile's bundled kubectl
        if self.use_minikube:
            self.kubectl_cmd = f'minikube -p {self.minikube_profile} kubectl --'
        else:
            self.kubectl_cmd = 'kubectl'
        # Kubernetes namespace: scope both tools when a namespace is set
        if self.k8s_namespace:
            self.kubectl_cmd += f' -n {self.k8s_namespace}'
            self.helm_cmd += f' -n {self.k8s_namespace}'

91
stx/lib/stx/k8s.py Normal file
View File

@ -0,0 +1,91 @@
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from stx import utils # pylint: disable=E0611
import subprocess
logger = logging.getLogger('STX-k8s')
utils.set_logger(logger)
class KubeHelper:
    """Common k8s commands"""

    def __init__(self, config):
        """Constructor.

        :param config: an instance of stx.config.Config providing the
                       kubectl()/helm() command prefixes
        """
        self.config = config

    def get_pods_info(self):
        '''Get all pods information of the stx building tools.'''
        logger.info('stx-tools pods list:')
        subprocess.check_call(self.config.kubectl() + ' get pods ',
                              shell=True)

    def get_deployment_info(self):
        '''Get all deployment information of the stx building tools.'''
        logger.info('stx-tools deployments list:')
        subprocess.check_call(self.config.kubectl() + ' get deployment',
                              shell=True)

    def get_helm_info(self):
        '''Get the helm list information of the stx building tools.'''
        logger.info('helm list:\n')
        subprocess.check_call(self.config.helm() + ' ls', shell=True)

    def get_pod_name(self, dockername):
        '''get the detailed pod name from the four pods.'''
        # Shell pipeline: filter Running pods matching the docker name and
        # print the first column (the pod name).
        pipeline = (self.config.kubectl() + ' get pods | grep Running | ' +
                    'grep stx-' + dockername + ' | awk \'{print $1}\' ')
        raw = subprocess.check_output(pipeline, shell=True)
        return str(raw.decode('utf8').strip())

    def helm_release_exists(self, projectname):
        '''Check if the helm release exists'''
        listing = subprocess.getoutput(self.config.helm() + ' ls | grep ' +
                                       projectname)
        return bool(listing)

    def generatePrefixCommand(self, podname, command, enableuser):
        '''Generate the command executed in the host'''
        exec_prefix = self.config.kubectl() + ' exec -ti ' + podname
        plain_shell = ' -- bash -l -c '
        user_shell = ' -- bash -l -c \'sudo -u ${MYUNAME} bash \
--rcfile /home/$MYUNAME/userenv -i -c '
        if enableuser:
            return exec_prefix + user_shell + command
        return exec_prefix + plain_shell + command

View File

@ -18,7 +18,7 @@ import logging
import subprocess import subprocess
import sys import sys
from stx import command # pylint: disable=E0611 from stx.k8s import KubeHelper
from stx import utils # pylint: disable=E0611 from stx import utils # pylint: disable=E0611
STX_BUILD_TYPES = ['rt', 'std'] STX_BUILD_TYPES = ['rt', 'std']
@ -28,7 +28,9 @@ STX_LAYERS = ['distro', 'flock']
class HandleBuildTask: class HandleBuildTask:
'''Handle the task for the build sub-command''' '''Handle the task for the build sub-command'''
def __init__(self): def __init__(self, config):
self.config = config
self.k8s = KubeHelper(config)
self.logger = logging.getLogger('STX-Build') self.logger = logging.getLogger('STX-Build')
utils.set_logger(self.logger) utils.set_logger(self.logger)
@ -125,7 +127,7 @@ class HandleBuildTask:
self.logger.setLevel(args.loglevel) self.logger.setLevel(args.loglevel)
podname = command.get_pod_name('builder') podname = self.k8s.get_pod_name('builder')
if not podname: if not podname:
self.logger.error('The builder container does not exist, ' + self.logger.error('The builder container does not exist, ' +
'so please use the control module to start.') 'so please use the control module to start.')
@ -135,7 +137,7 @@ class HandleBuildTask:
bashcmd = "\'find /home/${MYUNAME}/prepare-build.done " bashcmd = "\'find /home/${MYUNAME}/prepare-build.done "
bashcmd += "&>/dev/null\'" bashcmd += "&>/dev/null\'"
cmd = command.generatePrefixCommand(podname, bashcmd, 0) cmd = self.k8s.generatePrefixCommand(podname, bashcmd, 0)
ret = subprocess.call(cmd, shell=True) ret = subprocess.call(cmd, shell=True)
if ret != 0: if ret != 0:
@ -148,7 +150,7 @@ class HandleBuildTask:
'***********************************') '***********************************')
sys.exit(1) sys.exit(1)
prefix_cmd = command.generatePrefixCommand(podname, '', 1) prefix_cmd = self.k8s.generatePrefixCommand(podname, '', 1)
if args.build_task == 'image': if args.build_task == 'image':
cmd = self.buildImageCMD(args, prefix_cmd) cmd = self.buildImageCMD(args, prefix_cmd)

View File

@ -22,7 +22,7 @@ from stx import helper # pylint: disable=E0611
from stx import utils # pylint: disable=E0611 from stx import utils # pylint: disable=E0611
import sys import sys
logger = logging.getLogger('STX-Config') logger = logging.getLogger('STX-Config-Parser')
utils.set_logger(logger) utils.set_logger(logger)
@ -128,8 +128,8 @@ class STXConfigParser:
class HandleConfigTask: class HandleConfigTask:
'''Handle the task for the config sub-command''' '''Handle the task for the config sub-command'''
def __init__(self): def __init__(self, config):
self.stxconfig = STXConfigParser() self.stxconfig = config.impl()
def handleShow(self): def handleShow(self):
self.stxconfig.showAll() self.stxconfig.showAll()

View File

@ -21,9 +21,8 @@ import subprocess
import sys import sys
import time import time
from stx import command # pylint: disable=E0611
from stx import helper # pylint: disable=E0611 from stx import helper # pylint: disable=E0611
from stx import stx_configparser # pylint: disable=E0611 from stx.k8s import KubeHelper
from stx import utils # pylint: disable=E0611 from stx import utils # pylint: disable=E0611
helmchartdir = 'stx/stx-build-tools-chart/stx-builder' helmchartdir = 'stx/stx-build-tools-chart/stx-builder'
@ -32,9 +31,10 @@ helmchartdir = 'stx/stx-build-tools-chart/stx-builder'
class HandleControlTask: class HandleControlTask:
'''Handle the task for the control sub-command''' '''Handle the task for the control sub-command'''
def __init__(self): def __init__(self, config):
self.stxconfig = stx_configparser.STXConfigParser() self.config = config
self.projectname = self.stxconfig.getConfig('project', 'name') self.k8s = KubeHelper(config)
self.projectname = self.config.get('project', 'name')
self.logger = logging.getLogger('STX-Control') self.logger = logging.getLogger('STX-Control')
utils.set_logger(self.logger) utils.set_logger(self.logger)
@ -46,9 +46,9 @@ class HandleControlTask:
remote_cmd = ' -- bash /etc/pulp/changepasswd' remote_cmd = ' -- bash /etc/pulp/changepasswd'
pulpname = ' stx-pulp' pulpname = ' stx-pulp'
while count: while count:
podname = command.get_pod_name(pulpname) podname = self.k8s.get_pod_name(pulpname)
if podname: if podname:
cmd = 'minikube -p $MINIKUBENAME kubectl -- exec -ti ' cmd = self.config.kubectl() + ' exec -ti '
cmd = cmd + podname + remote_cmd cmd = cmd + podname + remote_cmd
subprocess.call(cmd, shell=True) subprocess.call(cmd, shell=True)
count = 0 count = 0
@ -63,31 +63,30 @@ class HandleControlTask:
def finish_configure(self): def finish_configure(self):
'''Before starting, we need to finish the setup''' '''Before starting, we need to finish the setup'''
max_cpus = os.environ['MINIKUBECPUS'] max_cpus = os.environ['STX_BUILD_CPUS']
projectname = self.stxconfig.getConfig('project', 'name') projectname = self.config.get('project', 'name')
builder_uid = self.stxconfig.getConfig('builder', 'uid') builder_uid = self.config.get('builder', 'uid')
builder_myuname = self.stxconfig.getConfig('builder', 'myuname') builder_myuname = self.config.get('builder', 'myuname')
builder_release = self.stxconfig.getConfig('builder', 'release') builder_release = self.config.get('builder', 'release')
builder_dist = self.stxconfig.getConfig('builder', 'dist') builder_dist = self.config.get('builder', 'dist')
builder_stx_dist = self.stxconfig.getConfig('builder', 'stx_dist') builder_stx_dist = self.config.get('builder', 'stx_dist')
builder_debfullname = self.stxconfig.getConfig('builder', builder_debfullname = self.config.get('builder', 'debfullname')
'debfullname') builder_debemail = self.config.get('builder', 'debemail')
builder_debemail = self.stxconfig.getConfig('builder', 'debemail') repomgr_type = self.config.get('repomgr', 'type')
repomgr_type = self.stxconfig.getConfig('repomgr', 'type') gituser = self.config.get('project', 'gituser')
gituser = self.stxconfig.getConfig('project', 'gituser') gitemail = self.config.get('project', 'gitemail')
gitemail = self.stxconfig.getConfig('project', 'gitemail') proxy = self.config.get('project', 'proxy')
proxy = self.stxconfig.getConfig('project', 'proxy') proxyserver = self.config.get('project', 'proxyserver')
proxyserver = self.stxconfig.getConfig('project', 'proxyserver') proxyport = self.config.get('project', 'proxyport')
proxyport = self.stxconfig.getConfig('project', 'proxyport') buildbranch = self.config.get('project', 'buildbranch')
buildbranch = self.stxconfig.getConfig('project', 'buildbranch') manifest = self.config.get('project', 'manifest')
manifest = self.stxconfig.getConfig('project', 'manifest') cengnurl = self.config.get('repomgr', 'cengnurl')
cengnurl = self.stxconfig.getConfig('repomgr', 'cengnurl') cengnstrategy = self.config.get('repomgr', 'cengnstrategy')
cengnstrategy = self.stxconfig.getConfig('repomgr', 'cengnstrategy') sourceslist = self.config.get('repomgr', 'sourceslist')
sourceslist = self.stxconfig.getConfig('repomgr', 'sourceslist') deblist = self.config.get('repomgr', 'deblist')
deblist = self.stxconfig.getConfig('repomgr', 'deblist') dsclist = self.config.get('repomgr', 'dsclist')
dsclist = self.stxconfig.getConfig('repomgr', 'dsclist') ostree_osname = self.config.get('project', 'ostree_osname')
ostree_osname = self.stxconfig.getConfig('project', 'ostree_osname')
if sourceslist: if sourceslist:
if not (deblist or dsclist): if not (deblist or dsclist):
self.logger.warning('*************************************\ self.logger.warning('*************************************\
@ -98,7 +97,7 @@ when sourceslist is enabled!!!')
*********************************') *********************************')
sys.exit(1) sys.exit(1)
repomgr_type = self.stxconfig.getConfig('repomgr', 'type') repomgr_type = self.config.get('repomgr', 'type')
if repomgr_type not in ('aptly', 'pulp'): if repomgr_type not in ('aptly', 'pulp'):
self.logger.warning('Repomgr type only supports [aptly] or [pulp],\ self.logger.warning('Repomgr type only supports [aptly] or [pulp],\
please modify the value with config command!!!') please modify the value with config command!!!')
@ -183,9 +182,16 @@ stx-pkgbuilder/configmap/')
return repomgr_type return repomgr_type
def handleStartTask(self, projectname): def handleStartTask(self, projectname):
cmd = 'helm install ' + projectname + ' ' + helmchartdir cmd = self.config.helm() + ' install ' + projectname + ' ' + helmchartdir \
+ ' --set global.image.tag=' + self.config.docker_tag
if not self.config.use_minikube:
# Override hostDir for k8s local host mount
# need to review this to support multi node (PV/PVCs)
cmd += ' --set global.hostDir=' + self.config.build_home
self.logger.debug('Execute the helm start command: %s', cmd) self.logger.debug('Execute the helm start command: %s', cmd)
helm_status = command.helm_release_exists(self.projectname) helm_status = self.k8s.helm_release_exists(self.projectname)
if helm_status: if helm_status:
self.logger.warning('The helm release %s already exists - nothing to do', self.logger.warning('The helm release %s already exists - nothing to do',
projectname) projectname)
@ -196,9 +202,9 @@ stx-pkgbuilder/configmap/')
self.configurePulp() self.configurePulp()
def handleStopTask(self, projectname): def handleStopTask(self, projectname):
helm_status = command.helm_release_exists(self.projectname) helm_status = self.k8s.helm_release_exists(self.projectname)
if helm_status: if helm_status:
cmd = 'helm uninstall ' + projectname cmd = self.config.helm() + ' uninstall ' + projectname
self.logger.debug('Execute the helm stop command: %s', cmd) self.logger.debug('Execute the helm stop command: %s', cmd)
subprocess.check_call(cmd, shell=True) subprocess.check_call(cmd, shell=True)
else: else:
@ -206,11 +212,10 @@ stx-pkgbuilder/configmap/')
projectname) projectname)
def handleUpgradeTask(self, projectname): def handleUpgradeTask(self, projectname):
command.check_prjdir_env()
self.finish_configure() self.finish_configure()
helm_status = command.helm_release_exists(self.projectname) helm_status = self.k8s.helm_release_exists(self.projectname)
if helm_status: if helm_status:
cmd = 'helm upgrade ' + projectname + ' ' + helmchartdir cmd = self.config.helm() + ' upgrade ' + projectname + ' ' + helmchartdir
self.logger.debug('Execute the upgrade command: %s', cmd) self.logger.debug('Execute the upgrade command: %s', cmd)
subprocess.call(cmd, shell=True, cwd=os.environ['PRJDIR']) subprocess.call(cmd, shell=True, cwd=os.environ['PRJDIR'])
else: else:
@ -221,7 +226,7 @@ stx-pkgbuilder/configmap/')
def handleEnterTask(self, args): def handleEnterTask(self, args):
default_docker = 'builder' default_docker = 'builder'
container_list = ['builder', 'pkgbuilder', 'repomgr', 'lat'] container_list = ['builder', 'pkgbuilder', 'repomgr', 'lat']
prefix_exec_cmd = 'minikube -p $MINIKUBENAME kubectl -- exec -ti ' prefix_exec_cmd = self.config.kubectl() + ' exec -ti '
if args.dockername: if args.dockername:
if args.dockername not in container_list: if args.dockername not in container_list:
@ -230,7 +235,7 @@ argument. eg: %s \n', container_list)
sys.exit(1) sys.exit(1)
default_docker = args.dockername default_docker = args.dockername
podname = command.get_pod_name(default_docker) podname = self.k8s.get_pod_name(default_docker)
if podname: if podname:
if default_docker == 'builder': if default_docker == 'builder':
cmd = prefix_exec_cmd + podname cmd = prefix_exec_cmd + podname
@ -251,7 +256,7 @@ enter has been started!!!\n')
def handleControl(self, args): def handleControl(self, args):
self.logger.setLevel(args.loglevel) self.logger.setLevel(args.loglevel)
projectname = self.stxconfig.getConfig('project', 'name') projectname = self.config.get('project', 'name')
if not projectname: if not projectname:
projectname = 'stx' projectname = 'stx'
@ -268,9 +273,9 @@ enter has been started!!!\n')
self.handleEnterTask(args) self.handleEnterTask(args)
elif args.ctl_task == 'status': elif args.ctl_task == 'status':
command.get_helm_info() self.k8s.get_helm_info()
command.get_deployment_info() self.k8s.get_deployment_info()
command.get_pods_info() self.k8s.get_pods_info()
else: else:
self.logger.error('Control module doesn\'t support your \ self.logger.error('Control module doesn\'t support your \

View File

@ -15,7 +15,7 @@
import argparse import argparse
import logging import logging
from stx import command # pylint: disable=E0611 from stx import config
from stx import stx_build # pylint: disable=E0611 from stx import stx_build # pylint: disable=E0611
from stx import stx_configparser # pylint: disable=E0611 from stx import stx_configparser # pylint: disable=E0611
from stx import stx_control # pylint: disable=E0611 from stx import stx_control # pylint: disable=E0611
@ -34,10 +34,11 @@ class CommandLine:
'''Handles parsing the commandline parameters for stx tool''' '''Handles parsing the commandline parameters for stx tool'''
def __init__(self): def __init__(self):
command.check_prjdir_env() self.config = config.Config().load()
self.handleconfig = stx_configparser.HandleConfigTask() self.handleconfig = stx_configparser.HandleConfigTask(self.config)
self.handlecontrol = stx_control.HandleControlTask() self.handlecontrol = stx_control.HandleControlTask(self.config)
self.handlebuild = stx_build.HandleBuildTask() self.handlebuild = stx_build.HandleBuildTask(self.config)
self.handlerepomgr = stx_repomgr.HandleRepomgrTask(self.config)
self.parser = self.parseCommandLine() self.parser = self.parseCommandLine()
def parseCommandLine(self): def parseCommandLine(self):
@ -132,7 +133,7 @@ delete_pkg ]')
help='[ list|download|sync|mirror|clean|\ help='[ list|download|sync|mirror|clean|\
remove_repo|upload_pkg|delete_pkg ]: \ remove_repo|upload_pkg|delete_pkg ]: \
Execute the management task.\n\n') Execute the management task.\n\n')
repo_subparser.set_defaults(handle=stx_repomgr.handleRepomgr) repo_subparser.set_defaults(handle=self.handlerepomgr.handleCommand)
parser.add_argument('-d', '--debug', parser.add_argument('-d', '--debug',
help='Enable debug output\n\n', help='Enable debug output\n\n',

View File

@ -13,32 +13,37 @@
# limitations under the License. # limitations under the License.
import logging import logging
from stx import command # pylint: disable=E0611 from stx.k8s import KubeHelper
from stx import utils # pylint: disable=E0611 from stx import utils # pylint: disable=E0611
import subprocess import subprocess
logger = logging.getLogger('STX-Repomgr') logger = logging.getLogger('STX-Repomgr')
utils.set_logger(logger) utils.set_logger(logger)
def handleRepomgr(args): class HandleRepomgrTask:
'''Sync the repo '''
logger.setLevel(args.loglevel) def __init__(self, config):
logger.debug('Execute the repomgr command: [%s]', args.repomgr_task) self.config = config
self.k8s = KubeHelper(config)
podname = command.get_pod_name('builder') def handleCommand(self, args):
if not podname: '''Sync the repo '''
logger.error('The builder container does not exist, so please \
consider to use the control module')
prefix_cmd = command.generatePrefixCommand(podname, '', 1) logger.setLevel(args.loglevel)
cmd = prefix_cmd + '"repo_manage.py ' + args.repomgr_task + '"\'' logger.debug('Execute the repomgr command: [%s]', args.repomgr_task)
logger.debug('Manage the repo with the command [%s]', cmd)
try: podname = self.k8s.get_pod_name('builder')
subprocess.check_call(cmd, shell=True) if not podname:
except subprocess.CalledProcessError as exc: logger.error('The builder container does not exist, so please \
raise Exception('Failed to manage the repo with the command [%s].\n \ consider to use the control module')
Returncode: %s' % (cmd, exc.returncode))
prefix_cmd = self.k8s.generatePrefixCommand(podname, '', 1)
cmd = prefix_cmd + '"repo_manage.py ' + args.repomgr_task + '"\''
logger.debug('Manage the repo with the command [%s]', cmd)
try:
subprocess.check_call(cmd, shell=True)
except subprocess.CalledProcessError as exc:
raise Exception('Failed to manage the repo with the command [%s].\n \
Returncode: %s' % (cmd, exc.returncode))

View File

@ -31,7 +31,7 @@ spec:
- name: {{ .Chart.Name }} - name: {{ .Chart.Name }}
securityContext: securityContext:
{{- toYaml .Values.securityContext | nindent 12 }} {{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }} imagePullPolicy: {{ .Values.image.pullPolicy }}
tty: true tty: true
volumeMounts: volumeMounts:
@ -44,7 +44,7 @@ spec:
volumes: volumes:
- name: {{ .Values.volumes.name }} - name: {{ .Values.volumes.name }}
hostPath: hostPath:
path: {{ .Values.volumes.hostPath.path }} path: {{ .Values.global.hostDir }}{{ .Values.volumes.hostPath.path }}
{{- with .Values.nodeSelector }} {{- with .Values.nodeSelector }}
nodeSelector: nodeSelector:

View File

@ -22,7 +22,7 @@ volumeMounts:
volumes: volumes:
name: shared-workspace name: shared-workspace
hostPath: hostPath:
path: /workspace/aptly path: /aptly
podAnnotations: {} podAnnotations: {}

View File

@ -31,7 +31,7 @@ spec:
- name: {{ .Chart.Name }} - name: {{ .Chart.Name }}
securityContext: securityContext:
{{- toYaml .Values.securityContext | nindent 12 }} {{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }} imagePullPolicy: {{ .Values.image.pullPolicy }}
tty: true tty: true
volumeMounts: volumeMounts:
@ -46,11 +46,12 @@ spec:
volumes: volumes:
- name: {{ .Values.volumes.name }} - name: {{ .Values.volumes.name }}
hostPath: hostPath:
path: {{ .Values.volumes.hostPath.path }} path: {{ .Values.global.hostDir }}{{ .Values.volumes.hostPath.path }}
- name: {{ .Values.volumes.entropyname }} - name: {{ .Values.volumes.entropyname }}
hostPath: hostPath:
path: {{ .Values.volumes.entropyhostPath.path }} path: {{ .Values.volumes.entropyhostPath.path }}
{{- with .Values.nodeSelector }} {{- with .Values.nodeSelector }}
nodeSelector: nodeSelector:
{{- toYaml . | nindent 8 }} {{- toYaml . | nindent 8 }}

View File

@ -20,7 +20,7 @@ volumeMounts:
volumes: volumes:
name: latd-shared-workspace name: latd-shared-workspace
hostPath: hostPath:
path: /workspace/localdisk path: /localdisk
entropyname: entropydevice entropyname: entropydevice
entropyhostPath: entropyhostPath:
path: /dev/urandom path: /dev/urandom

View File

@ -2,6 +2,6 @@
apiVersion: v1 apiVersion: v1
kind: ConfigMap kind: ConfigMap
metadata: metadata:
name: {{ .Values.volumes.configmapname }} name: {{ .Values.volumes.envsetup.configMapName }}
data: data:
{{ (.Files.Glob "configmap/stx*").AsConfig | indent 2 }} {{ (.Files.Glob "configmap/stx*").AsConfig | indent 2 }}

View File

@ -31,30 +31,30 @@ spec:
- name: {{ .Chart.Name }} - name: {{ .Chart.Name }}
securityContext: securityContext:
{{- toYaml .Values.securityContext | nindent 12 }} {{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }} imagePullPolicy: {{ .Values.image.pullPolicy }}
tty: true tty: true
volumeMounts: volumeMounts:
- name: {{ .Values.volumeMounts.name }} - name: {{ .Values.volumeMounts.workspace.name }}
mountPath: {{ .Values.volumeMounts.mountPath}} mountPath: {{ .Values.volumeMounts.workspace.mountPath}}
- name: {{ .Values.volumeMounts.envsetupname }} - name: {{ .Values.volumeMounts.envsetup.name }}
mountPath: {{ .Values.volumeMounts.envsetupmountPath}} mountPath: {{ .Values.volumeMounts.envsetup.mountPath }}
- name: {{ .Values.volumeMounts.mirrorName }} - name: {{ .Values.volumeMounts.mirror.name }}
mountPath: {{ .Values.volumeMounts.mirrorMountPath }} mountPath: {{ .Values.volumeMounts.mirror.mountPath }}
resources: resources:
{{- toYaml .Values.resources | nindent 12 }} {{- toYaml .Values.resources | nindent 12 }}
volumes: volumes:
- name: {{ .Values.volumes.name }} - name: {{ .Values.volumes.workspace.name }}
hostPath: hostPath:
path: {{ .Values.volumes.hostPath.path }} path: {{ .Values.global.hostDir }}{{ .Values.volumes.workspace.hostPath.path }}
- name: {{ .Values.volumes.envsetupname }} - name: {{ .Values.volumes.envsetup.name }}
configMap: configMap:
name: {{ .Values.volumes.configmapname }} name: {{ .Values.volumes.envsetup.configMapName }}
- name: {{ .Values.volumes.mirrorName }} - name: {{ .Values.volumes.mirror.name }}
hostPath: hostPath:
path: {{ .Values.volumes.mirrorHostPath.path }} path: {{ .Values.global.hostDir }}{{ .Values.volumes.mirror.hostPath.path }}
{{- with .Values.nodeSelector }} {{- with .Values.nodeSelector }}
nodeSelector: nodeSelector:

View File

@ -12,22 +12,28 @@ image:
tag: "v0.1.0" tag: "v0.1.0"
volumeMounts: volumeMounts:
name: shared-workspace workspace:
mountPath: /localdisk name: shared-workspace
envsetupname: env-setting mountPath: /localdisk
envsetupmountPath: /usr/local/bin/stx envsetup:
mirrorName: mirror name: env-setting
mirrorMountPath: /import/mirrors/starlingx mountPath: /usr/local/bin/stx
mirror:
name: mirror
mountPath: /import/mirrors/starlingx
volumes: volumes:
name: shared-workspace workspace:
hostPath: name: shared-workspace
path: /workspace/localdisk hostPath:
envsetupname: env-setting path: /localdisk
configmapname: pkgbuilder envsetup:
mirrorName: mirror name: env-setting
mirrorHostPath: configMapName: pkgbuilder
path: /workspace/mirrors/starlingx mirror:
name: mirror
hostPath:
path: /mirrors/starlingx
imagePullSecrets: [] imagePullSecrets: []
nameOverride: "" nameOverride: ""

View File

@ -31,7 +31,7 @@ spec:
- name: {{ .Chart.Name }} - name: {{ .Chart.Name }}
securityContext: securityContext:
{{- toYaml .Values.securityContext | nindent 12 }} {{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }} imagePullPolicy: {{ .Values.image.pullPolicy }}
tty: true tty: true
volumeMounts: volumeMounts:
@ -53,13 +53,13 @@ spec:
name: {{ .Values.volumes.configmapname }} name: {{ .Values.volumes.configmapname }}
- name: {{ .Values.volumes.storagename }} - name: {{ .Values.volumes.storagename }}
hostPath: hostPath:
path: {{ .Values.volumes.storagehostPath.path }} path: {{ .Values.global.hostDir }}{{ .Values.volumes.storagehostPath.path }}
- name: {{ .Values.volumes.pgsqlname }} - name: {{ .Values.volumes.pgsqlname }}
hostPath: hostPath:
path: {{ .Values.volumes.pgsqlhostPath.path }} path: {{ .Values.global.hostDir }}{{ .Values.volumes.pgsqlhostPath.path }}
- name: {{ .Values.volumes.containersname }} - name: {{ .Values.volumes.containersname }}
hostPath: hostPath:
path: {{ .Values.volumes.containershostPath.path }} path: {{ .Values.global.hostDir }}{{ .Values.volumes.containershostPath.path }}
{{- with .Values.nodeSelector }} {{- with .Values.nodeSelector }}
nodeSelector: nodeSelector:

View File

@ -26,13 +26,13 @@ volumes:
configmapname: pulp configmapname: pulp
storagename: pulp-storage storagename: pulp-storage
storagehostPath: storagehostPath:
path: /workspace/pulp/pulp-storage path: /pulp/pulp-storage
pgsqlname: pulp-pgsql pgsqlname: pulp-pgsql
pgsqlhostPath: pgsqlhostPath:
path: /workspace/pulp/pulp-pgsql path: /pulp/pulp-pgsql
containersname: pulp-containers containersname: pulp-containers
containershostPath: containershostPath:
path: /workspace/pulp/pulp-containers path: /pulp/pulp-containers
imagePullSecrets: [] imagePullSecrets: []
nameOverride: "" nameOverride: ""

View File

@ -2,6 +2,6 @@
apiVersion: v1 apiVersion: v1
kind: ConfigMap kind: ConfigMap
metadata: metadata:
name: {{ .Values.volumes.configmapname }} name: {{ .Values.volumes.userSetting.configMapName }}
data: data:
{{ (.Files.Glob "configmap/stx*").AsConfig | indent 2 }} {{ (.Files.Glob "configmap/stx*").AsConfig | indent 2 }}

View File

@ -31,30 +31,30 @@ spec:
- name: {{ .Chart.Name }} - name: {{ .Chart.Name }}
securityContext: securityContext:
{{- toYaml .Values.securityContext | nindent 12 }} {{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }} imagePullPolicy: {{ .Values.image.pullPolicy }}
tty: true tty: true
volumeMounts: volumeMounts:
- name: {{ .Values.volumeMounts.name }} - name: {{ .Values.volumeMounts.workspace.name }}
mountPath: {{ .Values.volumeMounts.mountPath}} mountPath: {{ .Values.volumeMounts.workspace.mountPath}}
- name: {{ .Values.volumeMounts.usersetupname }} - name: {{ .Values.volumeMounts.userSetting.name }}
mountPath: {{ .Values.volumeMounts.usersetupmountPath}} mountPath: {{ .Values.volumeMounts.userSetting.mountPath}}
- name: {{ .Values.volumeMounts.mirrorName }} - name: {{ .Values.volumeMounts.mirror.name }}
mountPath: {{ .Values.volumeMounts.mirrorMountPath }} mountPath: {{ .Values.volumeMounts.mirror.mountPath }}
resources: resources:
{{- toYaml .Values.resources | nindent 12 }} {{- toYaml .Values.resources | nindent 12 }}
volumes: volumes:
- name: {{ .Values.volumes.name }} - name: {{ .Values.volumes.workspace.name }}
hostPath: hostPath:
path: {{ .Values.volumes.hostPath.path }} path: {{ .Values.global.hostDir }}{{ .Values.volumes.workspace.hostPath.path }}
- name: {{ .Values.volumes.usersetupname }} - name: {{ .Values.volumes.userSetting.name }}
configMap: configMap:
name: {{ .Values.volumes.configmapname }} name: {{ .Values.volumes.userSetting.configMapName }}
- name: {{ .Values.volumes.mirrorName }} - name: {{ .Values.volumes.mirror.name }}
hostPath: hostPath:
path: {{ .Values.volumes.mirrorHostPath.path }} path: {{ .Values.global.hostDir }}{{ .Values.volumes.mirror.hostPath.path }}
{{- with .Values.nodeSelector }} {{- with .Values.nodeSelector }}
nodeSelector: nodeSelector:

View File

@ -8,26 +8,41 @@ replicaCount: 1
image: image:
repository: stx-builder repository: stx-builder
pullPolicy: IfNotPresent pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "v0.1.0" global:
# This is the host source directory, all the other directories are under it
# On minikube this is the /workspace, on K8s we can customize to any host directory
# In the future we should move to PV/PVCs
hostDir: /workspace
image:
# Overrides the image tag whose default is the chart appVersion.
# Setting as global so all dependencies can use the same TAG
# Not sure if this is the best way to handle this
tag: "v.0.1.0"
volumeMounts: volumeMounts:
name: shared-workspace workspace:
mountPath: /localdisk name: shared-workspace
usersetupname: user-setting mountPath: /localdisk
usersetupmountPath: /usr/local/bin/stx userSetting:
mirrorName: mirror name: user-setting
mirrorMountPath: /import/mirrors/starlingx mountPath: /usr/local/bin/stx
mirror:
name: mirror
mountPath: /import/mirrors/starlingx
volumes: volumes:
name: shared-workspace workspace:
hostPath: name: shared-workspace
path: /workspace/localdisk hostPath:
usersetupname: user-setting path: /localdisk
configmapname: builder userSetting:
mirrorName: mirror name: user-setting
mirrorHostPath: configMapName: builder
path: /workspace/mirrors/starlingx mirror:
name: mirror
hostPath:
path: /mirrors/starlingx
imagePullSecrets: [] imagePullSecrets: []
nameOverride: "" nameOverride: ""

View File

@ -37,12 +37,12 @@ To start a fresh source tree:
repo init -u https://opendev.org/starlingx/manifest.git -m default.xml repo init -u https://opendev.org/starlingx/manifest.git -m default.xml
repo sync repo sync
To download the sources & 3rd-party to local mirror:
downloader -b -s
To build all packages: To build all packages:
build-pkgs -a | build-pkgs -p <packageA,packageB...> build-pkgs -a | build-pkgs -p <packageA,packageB...>
To fill local binary repo:
debdownloader <path binary package list>
To make image: To make image:
build-image [ -t std|rt ] build-image [ -t std|rt ]