3f4058b566
Builds generated by Jenkins are identified by a timestamp, set in the
environment, such as TIMESTAMP=2024-06-12_19-00-08. Yet the ISO file
created by build-image is named after the clock value at the point of
creation, i.e., a different timestamp. This patch renames the ISO to
match the overall build timestamp, if TIMESTAMP is set in the
environment.

EXAMPLE
==========================
* Original file created by LAT SDK is similar to:
    starlingx-intel-x86-64-20240613180213-cd.iso
* With TIMESTAMP=2024-06-12_19-00-08 it's renamed to:
    starlingx-intel-x86-64-2024-06-12_19-00-08-cd.iso
  along with the signatures and symlinks

TESTS
==========================
* Call build-image with TIMESTAMP set and make sure the ISO file,
  the signature and the symlinks are generated correctly
* Call build-image w/o TIMESTAMP and check that the files are not
  renamed

Story: 2010055
Task: 50358

Signed-off-by: Davlet Panech <davlet.panech@windriver.com>
Change-Id: I184f6a4f79688e0c8a3029d2aafa22c383b5a524
855 lines
32 KiB
Python
Executable File
#!/usr/bin/python3

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (C) 2021-2022 Wind River Systems, Inc.

import argparse
import discovery
import getopt
import logging
import os
from pathlib import Path
import re
import repo_manage
import shutil
import signal
import subprocess
import sys
import time
import utils
import yaml

STX_DEFAULT_DISTRO = discovery.STX_DEFAULT_DISTRO
ALL_LAYERS = discovery.get_all_layers(distro=STX_DEFAULT_DISTRO)
ALL_BUILD_TYPES = discovery.get_all_build_types(distro=STX_DEFAULT_DISTRO)

LAT_ROOT = '/localdisk'
REPO_ALL = 'deb-merge-all'
REPO_BINARY = 'deb-local-binary'
REPO_BUILD = 'deb-local-build'
DEB_CONFIG_DIR = 'stx-tools/debian-mirror-tools/config/'
PKG_LIST_DIR = os.path.join(os.environ.get('MY_REPO_ROOT_DIR'), DEB_CONFIG_DIR)
CERT_FILE = 'cgcs-root/public-keys/TiBoot.crt'
CERT_PATH = os.path.join(os.environ.get('MY_REPO_ROOT_DIR'), CERT_FILE)
IMAGE_LAYERS_FILE = 'cgcs-root/build-tools/stx/image-layers.conf'
IMAGE_LAYERS_PATH = os.path.join(
    os.environ.get('MY_REPO_ROOT_DIR'),
    IMAGE_LAYERS_FILE
)
img_pkgs = []
kernel_type = 'std'
stx_std_kernel = 'linux-image-5.10.0-6-amd64-unsigned'
stx_rt_kernel = 'linux-rt-image-5.10.0-6-rt-amd64-unsigned'
WAIT_TIME_BEFORE_CHECKING_LOG = 2
# The max timeout value to wait for LAT to output the build log
MAX_WAIT_LAT_TIME = 300

pkg_version_mapping = {}
binary_repositories = []

logger = logging.getLogger('build-image')
utils.set_logger(logger)


def merge_local_repos(repomgr):
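    """Merge the local build repo (deb-local-build) and the binary repos
    into the `deb-merge-all` snapshot via the repo manager; returns True
    on success.
    """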
    logger.debug('Calls repo manager to create/update the snapshot %s which is merged from local repositories', REPO_ALL)
    # The build repository (deb-local-build) has a higher priority than
    # the binary repositories (deb-local-binary-*) for `repomgr` to
    # select packages.
    try:
        pubname = repomgr.merge(REPO_ALL, ','.join([REPO_BUILD, *binary_repositories]))
    except Exception as e:
        logger.error(str(e))
        logger.error('Exception when repo_manager creates/updates snapshot %s', REPO_ALL)
        return False
    if pubname:
        logger.debug('repo manager successfully created/updated snapshot %s', REPO_ALL)
    else:
        logger.debug('repo manager failed to create/update snapshot %s', REPO_ALL)
        return False
    return True


def update_debootstrap_mirror(img_yaml):
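    """Point `debootstrap-mirror` in the LAT yaml at the merged local repo
    (when it currently names REPO_ALL) or at the DEBIAN_SNAPSHOT mirror;
    returns True on success.
    """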
    repomgr_url = os.environ.get('REPOMGR_DEPLOY_URL')
    if not repomgr_url:
        logger.error('REPOMGR_DEPLOY_URL is not set in the environment')
        return False

    try:
        with open(img_yaml) as f:
            yaml_doc = yaml.safe_load(f)
        if not yaml_doc['debootstrap-mirror']:
            logger.warning("There is no debootstrap-mirror in %s", img_yaml)
        else:
            mirror = yaml_doc['debootstrap-mirror']
            if mirror == REPO_ALL:
                yaml_doc['debootstrap-mirror'] = os.path.join(repomgr_url, REPO_ALL)
            else:
                yaml_doc['debootstrap-mirror'] = os.environ.get('DEBIAN_SNAPSHOT')
        with open(img_yaml, 'w') as f:
            yaml.safe_dump(yaml_doc, f, default_flow_style=False, sort_keys=False)
        logger.debug('Updating %s, setting debootstrap_mirror to %s', img_yaml, yaml_doc['debootstrap-mirror'])
        return True
    except IOError as e:
        logger.error(str(e))
        logger.debug('Failed to update the debootstrap mirror in %s', img_yaml)
        return False


def update_ostree_osname(img_yaml):
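    """Set `ostree.ostree_osname` in the LAT yaml from the OSTREE_OSNAME
    environment variable; returns False if the variable is unset.
    """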
    ostree_osname = os.environ.get('OSTREE_OSNAME')
    if ostree_osname is None:
        return False

    try:
        with open(img_yaml) as f:
            yaml_doc = yaml.safe_load(f)
        yaml_doc['ostree']['ostree_osname'] = ostree_osname
        with open(img_yaml, 'w') as f:
            yaml.safe_dump(yaml_doc, f, default_flow_style=False, sort_keys=False)
    except IOError as e:
        logger.error(str(e))
        return False

    logger.debug(' '.join(['Update', img_yaml, 'to update the ostree_osname']))
    return True


def change_default_kernel(img_yaml, ktype):
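    """Switch `default-kernel` in the LAT yaml to the requested kernel
    type ('std' or 'rt'), chosen from the `multiple-kernels` list.
    Returns True on success (or if already correct), False otherwise.
    """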
    rt_kernel = std_kernel = None
    try:
        with open(img_yaml) as f:
            yaml_doc = yaml.safe_load(f)
        multi_kernels = yaml_doc["multiple-kernels"].split(" ")
        default_kernel = yaml_doc["default-kernel"]
        if len(multi_kernels) == 1:
            return False
        for kernel in multi_kernels:
            if re.search("-rt-", kernel):
                rt_kernel = kernel
            else:
                std_kernel = kernel
        if ktype == "rt":
            if re.search("-rt-", default_kernel):
                return True
            elif rt_kernel is not None:
                yaml_doc["default-kernel"] = rt_kernel
            else:
                logger.error(f"No rt kernel is found in {multi_kernels}")
                return False
        elif ktype == "std":
            if not re.search("-rt-", default_kernel):
                return True
            elif std_kernel is not None:
                yaml_doc["default-kernel"] = std_kernel
            else:
                logger.error(f"No std kernel is found in {multi_kernels}")
                return False

        logger.debug(f'Set default kernel as {yaml_doc["default-kernel"]}')
        try:
            with open(img_yaml, 'w') as f:
                yaml.safe_dump(yaml_doc, f, default_flow_style=False, sort_keys=False)
        except IOError as e:
            logger.error(str(e))
            return False

    except IOError as e:
        logger.error(str(e))
        return False

    return True


def replace_in_yaml(dst_yaml, field, field_type, src_str, dst_str):
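    """Replace src_str with dst_str in the given field of dst_yaml.

    field_type selects the edit mode: 'yaml_string' (exact value match),
    'yaml_list' (substring replace in each item), or 'yaml_list_suffix'
    (append dst_str to items containing src_str). Returns True on success.
    """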
    logger.debug("Start to replace %s in field %s of yaml %s", src_str, field, dst_yaml)

    try:
        with open(dst_yaml) as f:
            main_doc = yaml.safe_load(f)
    except Exception as e:
        logger.error(str(e))
        logger.error("Failed to open the yaml file %s", dst_yaml)
        return False
    else:
        if field_type == 'yaml_string':
            string_orig = main_doc[field]
            if not string_orig:
                logger.error("Failed to find the field %s", field)
                return False
            if not string_orig == src_str:
                logger.error("Found field %s, but the value %s does not match target %s", field, string_orig, src_str)
                return False
            main_doc[field] = dst_str
            logger.debug("Successfully updated the field %s with %s", field, dst_str)
        elif field_type == 'yaml_list':
            list_new = []
            list_orig = main_doc[field]
            if not list_orig:
                logger.error("Failed to find the field %s", field)
                return False
            for item in list_orig:
                list_new.append(item.replace(src_str, dst_str))
            main_doc[field] = list_new
            logger.debug("Successfully updated the value %s of field %s with %s", src_str, field, dst_str)
        elif field_type == 'yaml_list_suffix':
            list_new = []
            list_orig = main_doc[field]
            if not list_orig:
                logger.error("Failed to find the field %s", field)
                return False
            for item in list_orig:
                if src_str in item:
                    if '=' in item:
                        logger.error("Package version is defined, can't be appended with suffix %s", dst_str)
                        return False
                    list_new.append(item.strip() + dst_str)
                else:
                    list_new.append(item)
            main_doc[field] = list_new
            logger.debug("Successfully updated %s in field %s with %s suffix", src_str, field, dst_str)

    try:
        with open(dst_yaml, 'w') as f:
            yaml.safe_dump(main_doc, f, default_flow_style=False, sort_keys=False)
    except Exception as e:
        logger.error(str(e))
        logger.error("Failed to write to %s", dst_yaml)
        return False
    logger.info("Successfully updated %s", dst_yaml)
    return True


def update_rt_kernel_in_main_yaml(main_yaml):
    return replace_in_yaml(main_yaml, 'rootfs-pre-scripts', 'yaml_list', stx_std_kernel, stx_rt_kernel)


def update_rt_kernel_in_initramfs_yaml(initramfs_yaml):
    ret = False

    # Update the names of the kernel module packages
    for layer in ALL_LAYERS:
        pkg_dirs = discovery.package_dir_list(distro=STX_DEFAULT_DISTRO, layer=layer, build_type='rt')
        if not pkg_dirs:
            continue
        for pkg_dir in pkg_dirs:
            pkg_name = discovery.package_dir_to_package_name(pkg_dir, STX_DEFAULT_DISTRO)
            if pkg_name and pkg_name != 'linux-rt':
                if replace_in_yaml(initramfs_yaml, 'packages', 'yaml_list_suffix', pkg_name, '-rt'):
                    logger.debug("RT Initramfs: Updated %s with rt suffix", pkg_name)
                else:
                    logger.debug("RT Initramfs: Failed to update %s with rt suffix", pkg_name)
                    return ret
    ret = replace_in_yaml(initramfs_yaml, 'packages', 'yaml_list', stx_std_kernel, stx_rt_kernel)
    return ret


def include_initramfs(img_yaml, ramfs_yaml_path):
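    """Register the initramfs yaml as the image's contained sub-image by
    overwriting the first `system[0]['contains']` entry in the LAT yaml.
    """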
    if not os.path.exists(img_yaml):
        logger.error("LAT yaml file %s does not exist", img_yaml)
        return False
    try:
        with open(img_yaml) as f:
            yaml_doc = yaml.safe_load(f)
        yaml_doc['system'][0]['contains'][0] = ramfs_yaml_path

        with open(img_yaml, 'w') as f:
            yaml.safe_dump(yaml_doc, f, default_flow_style=False, sort_keys=False)
    except Exception as e:
        logger.error(str(e))
        logger.error("Failed to add %s to %s", ramfs_yaml_path, img_yaml)
        return False

    logger.debug("Successfully included %s in %s", ramfs_yaml_path, img_yaml)
    return True


def feed_lat_src_repos(img_yaml, repo_url):
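    """Append the given apt source lines ('deb <url> <suite> <component>'
    strings) to the yaml's `package_feeds` list, de-duplicated and sorted.
    """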
    if not os.path.exists(img_yaml):
        logger.error(' '.join(['LAT yaml file', img_yaml, 'does not exist']))
        return False

    with open(img_yaml) as f:
        yaml_doc = yaml.safe_load(f)
    yaml_doc['package_feeds'].extend(repo_url)
    yaml_doc['package_feeds'] = list(set(yaml_doc['package_feeds']))
    yaml_doc['package_feeds'].sort()

    with open(img_yaml, 'w') as f:
        yaml.safe_dump(yaml_doc, f, default_flow_style=False, sort_keys=False)

    logger.debug(' '.join(['Update', img_yaml, 'to feed repos']))
    return True


def add_lat_packages(img_yaml, packages):
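    """Merge the verified packages plus every layer's ISO package list
    into the yaml's `packages` field, de-duplicated and sorted.
    """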
    if not os.path.exists(img_yaml):
        logger.error(' '.join(['LAT yaml file', img_yaml, 'does not exist']))
        return False

    with open(img_yaml) as f:
        yaml_doc = yaml.safe_load(f)
    yaml_doc['packages'].extend(packages)

    for build_type in ALL_BUILD_TYPES:
        pkgs = discovery.package_iso_list(distro=STX_DEFAULT_DISTRO, layer="all", build_type=build_type)
        yaml_doc['packages'].extend(pkgs)

    yaml_doc['packages'] = list(set(yaml_doc['packages']))
    yaml_doc['packages'].sort()

    with open(img_yaml, 'w') as f:
        yaml.safe_dump(yaml_doc, f, default_flow_style=False, sort_keys=False)

    logger.debug(' '.join(['Update', img_yaml, 'to add packages']))
    return True


def check_base_os_binaries(repomgr):
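    """Check that every package listed in debian-image.inc exists in the
    local binary repositories; returns True when all are found.
    """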
    base_bins_list = os.path.join(os.environ.get('MY_REPO_ROOT_DIR'),
                                  'cgcs-root/build-tools/stx/debian-image.inc')
    if not os.path.exists(base_bins_list):
        logger.error(' '.join(['Base OS packages list', base_bins_list,
                               'does not exist']))
        return False

    results = verify_pkgs_in_repo(repomgr, binary_repositories, base_bins_list)
    if results:
        logger.error("====OS binaries checking fail:")
        for deb in results:
            logger.error(deb)
        logger.error("====OS binaries missing end====\n")
        return False
    logger.info("====All OS binary packages are ready ====\n")
    return True


def check_stx_binaries(repomgr, btype='std'):
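    """Check that the packages in os-<btype>.lst exist in the local
    binary repositories; a missing list file is treated as success.
    """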
    stx_bins_list = ''.join([PKG_LIST_DIR, '/debian/distro/os-', btype,
                             '.lst'])
    if not os.path.exists(stx_bins_list):
        logger.warning(' '.join(['STX binary packages list', stx_bins_list,
                                 'does not exist']))
        # Assume no such list here means ok
        return True

    results = verify_pkgs_in_repo(repomgr, binary_repositories, stx_bins_list)
    if results:
        logger.error("====STX binaries checking fail:")
        for deb in results:
            logger.error(deb)
        logger.error("====STX binaries missing end====\n")
        return False
    logger.info("====All STX binary packages are ready ====\n")
    return True


def check_stx_patched(repomgr, btype='std'):
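    """Check that the packages in stx-<btype>.lst exist in the local
    build repository (deb-local-build).
    """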
    stx_patched_list = ''.join([PKG_LIST_DIR, '/debian/distro/stx-', btype,
                                '.lst'])
    if not os.path.exists(stx_patched_list):
        logger.warning(' '.join(['STX patched packages list', stx_patched_list,
                                 'does not exist']))
        return False

    results = verify_pkgs_in_repo(repomgr, [REPO_BUILD], stx_patched_list)
    if results:
        logger.error("====STX patched packages checking fail:")
        for deb in results:
            logger.error(deb)
        logger.error("====STX patched packages missing end====\n")
        return False
    logger.info("====All STX patched packages are ready ====\n")
    return True


def verify_pkgs_in_repo(repomgr, repo_names, pkg_list_path):
    """Verify if packages exist in one (or more) repositories.

    :param repomgr: A RepoMgr instance.
    :param repo_names: The list of repositories to query.
    :param pkg_list_path: The path to the file listing the packages to be
                          checked.
    :returns: list -- The list of packages that could not be found.
    """

    failed_pkgs = []
    with open(pkg_list_path, 'r') as flist:
        lines = list(line for line in (lpkg.strip() for lpkg in flist) if line)
        for pkg in lines:
            pkg = pkg.strip()
            if pkg.startswith('#'):
                continue
            pname_parts = pkg.split()
            name = pname_parts[0]

            found = False
            for i, repo_name in enumerate(repo_names):
                if len(pname_parts) > 1:
                    version = pname_parts[1]
                    pkg_name = ''.join([name, '_', version])
                    if repomgr.search_pkg(repo_name, name, version):
                        found = True
                        if repo_name != REPO_BUILD:
                            if name not in pkg_version_mapping:
                                pkg_version_mapping[name] = [version]
                            else:
                                if version not in pkg_version_mapping[name]:
                                    failed_pkgs.append(pkg_name)
                                    logger.error(
                                        f"Multiple versions found for `{name}`: "
                                        f"{pkg_version_mapping[name]}"
                                    )

                        img_pkgs.append(''.join([name, '=', version]))
                        logger.debug(''.join(['Found package:name=', name,
                                              ' version=', version]))

                    # If after processing the last repository the package was
                    # still not found, mark it as missing.
                    if not found and i == len(repo_names) - 1:
                        logger.debug(' '.join([pkg_name,
                                               'is missing in local binary repo']))
                        failed_pkgs.append(pkg_name)
                else:
                    if repomgr.search_pkg(repo_name, name, None, True):
                        found = True
                        img_pkgs.append(name)
                        logger.debug(''.join(['Found package with name:', name]))

                    # If after processing the last repository the package was
                    # still not found, mark it as missing.
                    if not found and i == len(repo_names) - 1:
                        failed_pkgs.append(name)

    return failed_pkgs


def stop_latd():
    os.system("latc stop")
    time.sleep(2)

    cmd = 'latc status'
    try:
        status = subprocess.check_output(cmd, shell=True).decode()
    except Exception as e:
        logger.error(str(e))
    else:
        if status:
            if 'idle' in status:
                logger.info("Successfully stopped latd")
                return
    logger.info("Failed to stop latd, you may have to log in to pkgbuilder to kill it")


def is_latd_running():
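    """Return True if `latc status` reports 'lat_status: busy'."""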
    running = False
    cmd = 'latc status'
    try:
        status = subprocess.check_output(cmd, shell=True).decode()
    except Exception as e:
        logger.error(str(e))
    else:
        if status:
            if 'lat_status: idle' in status:
                logger.debug("latd is idle")
            else:
                if 'lat_status: busy' in status:
                    logger.debug("latd is running")
                    running = True
    return running


def user_signal_handler(signum, frame):
    stop_latd()
    sys.exit(1)


def user_register_signals():
    signal.signal(signal.SIGINT, user_signal_handler)
    signal.signal(signal.SIGHUP, user_signal_handler)
    signal.signal(signal.SIGTERM, user_signal_handler)
    signal.signal(signal.SIGPIPE, user_signal_handler)


def get_iso_name(iso_name_prefix: str, timestamp: str = None) -> str:
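    """Compose the ISO base name (no extension): '<prefix>-<timestamp>-cd'
    when a timestamp is given, '<prefix>-cd' otherwise.
    """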
    if timestamp is not None:
        return '%s-%s-cd' % (iso_name_prefix, timestamp)
    return '%s-cd' % iso_name_prefix


def rename_iso(deploy_dir: str, iso_name_prefix: str, timestamp: str) -> None:
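    """Rename the LAT-generated ISO so its embedded timestamp matches the
    build timestamp, and re-point the '<prefix>-cd.iso' symlink at the
    renamed file. A '<old name>.RENAMED.txt' marker is left behind.

    The symlink is expected to point at '<prefix>-<old_timestamp>-cd.iso';
    anything else is logged and left alone.
    """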
    iso_name = get_iso_name(iso_name_prefix)
    iso_basename = '%s.iso' % iso_name
    iso_file = '%s/%s' % (deploy_dir, iso_basename)
    iso_name_regex = re.compile('^%s-[0-9]{4,}.*[.]iso$' % re.escape(iso_name_prefix))

    if not os.path.islink(iso_file):
        if not os.path.exists(iso_file):
            logger.warning('%s: file not found', iso_file)
        else:
            logger.warning('%s: expecting a symlink', iso_file)
        return

    def check_iso_file_name(file_name: str) -> bool:
        if not iso_name_regex.match(file_name):
            logger.warning('failed to rename %s: unexpected file name pattern', file_name)
            return False
        return True

    def create_renamed_info_file(iso_dir, iso_basename, new_iso_basename) -> None:
        info_file = '%s/%s.RENAMED.txt' % (iso_dir, iso_basename)
        with open(info_file, 'w') as f:
            print('File %s renamed to %s by build-image script' % (iso_basename, new_iso_basename), file=f)

    iso_target = os.readlink(iso_file)
    if os.path.isabs(iso_target):
        real_iso_file = iso_target
    else:
        real_iso_file = os.path.join(os.path.dirname(iso_file), iso_target)

    # make sure target exists
    if not os.path.exists(real_iso_file):
        raise RuntimeError('%s: broken symlink' % iso_file)

    real_iso_dir = os.path.dirname(real_iso_file)        # .../deploy
    real_iso_basename = os.path.basename(real_iso_file)  # starlingx-intel-x86-64-${OLD_TIMESTAMP}-cd.iso

    # reject unexpected file names
    if not check_iso_file_name(real_iso_basename):
        return

    new_real_iso_basename = '%s.iso' % get_iso_name(iso_name_prefix, timestamp)  # starlingx-intel-x86-64-${TIMESTAMP}-cd.iso
    new_real_iso_file = os.path.join(real_iso_dir, new_real_iso_basename)  # .../deploy/starlingx-intel-x86-64-${TIMESTAMP}-cd.iso

    # basename already correct, bail out
    if real_iso_basename == new_real_iso_basename:
        logger.debug('ISO file name is already correct: %s', real_iso_basename)
        return

    # if the original symlink had a directory component, keep it in the new symlink
    if iso_target.find('/') != -1:
        new_iso_target = os.path.join(os.path.dirname(iso_target), new_real_iso_basename)
    else:
        new_iso_target = new_real_iso_basename

    # Rename and re-link
    Path(iso_file).unlink(missing_ok=True)           # remove symlink
    Path(new_real_iso_file).unlink(missing_ok=True)  # remove new filename (rename target)
    os.rename(real_iso_file, new_real_iso_file)      # rename old to new name
    os.symlink(new_iso_target, iso_file)             # re-create symlink

    # create XYZ.iso.RENAMED.txt
    create_renamed_info_file(real_iso_dir, real_iso_basename, new_real_iso_basename)

    logger.info('renamed %s/{%s => %s}', real_iso_dir, real_iso_basename, new_real_iso_basename)


def sign_iso_dev(deploy_dir: str, iso_name_prefix: str) -> None:
    '''
    Sign the .iso file with the developer key
    deploy_dir: ISO directory
    iso_name_prefix: prefix for ISO files, e.g. starlingx-intel-x86-64
    '''
    logger.info("Trying to sign iso image with developer key")
    key_path = os.path.join(os.environ.get('MY_REPO'), 'build-tools/signing/dev-private-key.pem')
    iso_name = get_iso_name(iso_name_prefix)

    iso_file = f'{deploy_dir}/{iso_name}.iso'
    sig_file = f'{deploy_dir}/{iso_name}.sig'

    # call realpath to make sure it exists and there are no symlink loops
    realpath_cmd = f'realpath -e {iso_file}'
    subprocess.run(realpath_cmd, shell=True, check=True)

    # if the ISO file is a symlink, create the signature of the symlink's target
    if os.path.islink(iso_file):
        # get the iso_file's target -- we will create the .sig file next to it
        iso_target = os.readlink(iso_file)
        sig_target = re.sub(r'[.]iso$', '', iso_target) + '.sig'
        if os.path.isabs(iso_target):
            real_iso_file = iso_target
            real_sig_file = sig_target
        else:
            real_iso_file = os.path.join(os.path.dirname(iso_file), iso_target)
            real_sig_file = os.path.join(os.path.dirname(sig_file), sig_target)
    else:
        real_iso_file = iso_file
        real_sig_file = sig_file

    # create the signature
    sign_cmd = f'sudo openssl dgst -sha256 -sign {key_path} -binary -out {real_sig_file} {real_iso_file}'
    logger.info("running: %s", sign_cmd)
    ret = subprocess.call(sign_cmd, shell=True)
    if ret != 0:
        raise Exception("Error while signing the image")

    # the ISO is a symlink => create the matching .sig link
    if os.path.islink(iso_file):
        if os.path.exists(sig_file):
            os.remove(sig_file)
        os.symlink(sig_target, sig_file)

    logger.info("Image signed %s", real_iso_file)


def get_binary_repositories(config: str):
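    """Parse the image-layers config file and return the list of binary
    repositories (deb-local-binary[-<layer>]) that contribute binaries
    to the ISO; exits on an unreadable file or an unknown layer name.
    """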
    # The binary repository of the `common` layer is always present.
    repositories = [REPO_BINARY]

    layers = []

    logger.info(f"Processing config file `{config}`...")
    try:
        with open(config, "r") as f:
            layers = f.readlines()
    except IOError as e:
        logger.error(f"Unable to process config file `{config}`.")
        logger.error(str(e))
        sys.exit(1)

    for layer in layers:
        # Ignore comments and whitespace-only lines.
        if not layer.strip() or layer.startswith("#"):
            continue

        # Check if it's a valid layer.
        layer = layer.strip().lower()
        if layer in ALL_LAYERS:
            repository = f"{REPO_BINARY}-{layer}"
            repositories.append(repository)
            logger.info(
                f"Added binary repository for layer `{layer}`: {repository}"
            )
        else:
            logger.error(
                f"Unable to add binary repository for layer `{layer}`. "
                f"The layer must be one of {ALL_LAYERS}."
            )
            sys.exit(1)
    logger.info("Processing complete.")
    return repositories


def post_process(deploy_dir: str, yaml_file: str, sign: bool)->None:
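    """Post-build steps: derive the ISO name prefix (name + machine) from
    the LAT yaml, rename the ISO to match $TIMESTAMP if it is set, and
    optionally sign the result with the developer key.
    """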
    # find out the ISO file name prefix
    logger.debug('reading %s', yaml_file)
    with open(yaml_file) as f:
        yaml_doc = yaml.safe_load(f)
    assert yaml_doc['name']
    assert yaml_doc['machine']
    iso_name_prefix = yaml_doc['name'] + '-' + yaml_doc['machine']

    # rename the ISO if necessary
    new_timestamp = os.getenv('TIMESTAMP')
    if new_timestamp:
        #new_timestamp = re.sub(r'[^a-zA-Z0-9]+', '', new_timestamp)
        rename_iso(deploy_dir, iso_name_prefix, new_timestamp)

    # sign the ISO with the developer key
    if sign:
        sign_iso_dev(deploy_dir, iso_name_prefix)


if __name__ == "__main__":
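
    # Main flow: check that all required packages exist in the local
    # repos, prepare the LAT yaml configs, run the LAT build via latc,
    # follow its log, then post-process (rename/sign) the resulting ISO.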
    parser = argparse.ArgumentParser(
        description="build-image helper",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    kernel_types = parser.add_mutually_exclusive_group()
    kernel_types.add_argument('--std', help="build standard image",
                              action='store_true')
    kernel_types.add_argument('--rt', help="build rt image",
                              action='store_true')
    parser.add_argument('-c', '--clean', help="(DEPRECATED) Start a fresh image build",
                        default=True, action='store_true')
    parser.add_argument('-k', '--keep', help="Keep the current environment " +
                        "(ostree, deploy), mainly used for patching",
                        default=False, action='store_true')
    parser.add_argument('--no-sign', action='store_true',
                        default=False, help="Don't sign ISO at the end")
    parser.add_argument(
        '--image-layers-file',
        help=(
            "The absolute path of the configuration file that lists "
            "the layers that contribute binaries to the ISO"
        ),
        type=str,
        default=IMAGE_LAYERS_PATH
    )
    args = parser.parse_args()
    if args.rt:
        kernel_type = 'rt'
    else:
        kernel_type = 'std'

    user_register_signals()

    rmg_logger = logging.getLogger('repo_manager')
    utils.set_logger(rmg_logger)
    repo_manager = repo_manage.RepoMgr('aptly', os.environ.get('REPOMGR_URL'),
                                       '/tmp/', os.environ.get('REPOMGR_ORIGIN'),
                                       rmg_logger)

    # Upload the build repository (deb-local-build) to `aptly`
    # and create a repository URL for it.
    repo_manager.upload_pkg(REPO_BUILD, None)
    build_repository_url = "deb {}{} bullseye main".format(
        os.environ.get("REPOMGR_DEPLOY_URL"),
        REPO_BUILD
    )

    # Get the binary repositories that contribute binaries to the ISO.
    binary_repositories = get_binary_repositories(args.image_layers_file)
    binary_repositories_urls = []

    # Upload the binary repositories (deb-local-binary-*) to `aptly`
    # and create repository URLs for them.
    for binary_repository in binary_repositories:
        repo_manager.upload_pkg(binary_repository, None)
        binary_repositories_urls.append(
            "deb {}{} bullseye main".format(
                os.environ.get("REPOMGR_DEPLOY_URL"),
                binary_repository
            )
        )

    logger.info("\n")
    logger.info("=====Build Image start ......")
    logger.info("checking OS binary packages ......")
    base_bins_ready = check_base_os_binaries(repo_manager)

    logger.info("\nchecking STX binary packages ......")
    stx_bins_ready = check_stx_binaries(repo_manager, "std")

    logger.info("\nchecking STX patched packages ......")
    stx_patched_ready = check_stx_patched(repo_manager, "std")

    if not base_bins_ready or not stx_bins_ready or not stx_patched_ready:
        logger.error("Failed to prepare for the image build")
        sys.exit(1)

    base_yaml = os.path.join(PKG_LIST_DIR, 'debian/common/base-bullseye.yaml')
    base_initramfs_yaml = os.path.join(PKG_LIST_DIR, 'debian/common/base-initramfs-bullseye.yaml')
    os.environ["WORKSPACE_DIR"] = LAT_ROOT
    lat_yaml = os.path.join(LAT_ROOT, "lat.yaml")
    lat_initramfs_yaml = os.path.join(LAT_ROOT, "lat-initramfs.yaml")

    for yaml_file in (base_yaml, base_initramfs_yaml):
        if not os.path.exists(yaml_file):
            logger.error(' '.join(['Base yaml file', yaml_file, 'does not exist']))
            sys.exit(1)

    if not os.path.exists(LAT_ROOT):
        os.makedirs(LAT_ROOT)

    try:
        shutil.copyfile(base_yaml, lat_yaml)
        shutil.copyfile(base_initramfs_yaml, lat_initramfs_yaml)
    except IOError as e:
        logger.error(str(e))
        logger.error('Failed to copy yaml files to %s', LAT_ROOT)
        sys.exit(1)

    include_initramfs(lat_yaml, lat_initramfs_yaml)

    if merge_local_repos(repo_manager):
        if update_debootstrap_mirror(lat_yaml):
            logger.debug("Debootstrap switches to mirror %s in %s", REPO_ALL, lat_yaml)
        if update_debootstrap_mirror(lat_initramfs_yaml):
            logger.debug("Debootstrap switches to mirror %s in %s", REPO_ALL, lat_initramfs_yaml)

    for yaml_file in (lat_yaml, lat_initramfs_yaml):
        if not feed_lat_src_repos(
            yaml_file,
            [
                *binary_repositories_urls,
                build_repository_url
            ]
        ):
            logger.error(' '.join(['Failed to set local repos to', yaml_file]))
            sys.exit(1)
        else:
            logger.info(' '.join(['Successfully set local repos to', yaml_file]))

    update_ostree_osname(lat_yaml)

    if not change_default_kernel(lat_yaml, kernel_type):
        logger.error("Failed to change the default boot kernel")
        sys.exit(1)

    ret = 1
    if not add_lat_packages(lat_yaml, img_pkgs):
        logger.error("Failed to add packages into the image YAML config")
        sys.exit(ret)

    os.system('sudo rm -rf ' + LAT_ROOT + '/workdir')
    os.system('sudo rm -rf ' + LAT_ROOT + '/sub_workdir')
    if not args.keep:
        os.system('sudo rm -rf ' + LAT_ROOT + '/deploy')

    # Prepare the boot cert
    os.system('sudo mkdir ' + LAT_ROOT + '/CERTS > /dev/null 2>&1')
    os.system('sudo cp ' + CERT_PATH + ' ' + LAT_ROOT + '/CERTS/')

    os.system(' '.join(['latc --file=' + lat_yaml, 'build']))
    lat_log = os.path.join(LAT_ROOT, "log/log.appsdk")
    time.sleep(WAIT_TIME_BEFORE_CHECKING_LOG)
    time_counter = 0
    latd_run = False
    while not os.path.exists(lat_log):
        latd_run = is_latd_running()
        if not latd_run or time_counter >= MAX_WAIT_LAT_TIME:
            break
        time.sleep(1)
        time_counter += 1
    if not os.path.exists(lat_log):
        if time_counter >= MAX_WAIT_LAT_TIME:
            logger.error('The wait for %s has timed out', lat_log)
            logger.error('There is an issue with latd, please check')
        else:
            if not latd_run:
                logger.error('latd is not running, please check')
        sys.exit(ret)
    else:
        log_printer = subprocess.Popen("tail -f " + lat_log,
                                       stdout=subprocess.PIPE, shell=True,
                                       universal_newlines=True)
        while log_printer.poll() is None:
            line = log_printer.stdout.readline()
            line = line.strip()
            if line:
                print(line)
                if "ERROR: " in line:
                    logger.info("Failed to build image, check the log %s", lat_log)
                    break
                if "DEBUG: Deploy ovmf.qcow2" in line:
                    logger.info("build-image successfully done, check the output in %s", LAT_ROOT)
                    ret = 0
                    break

    # stop latd
    stop_latd()

    deploy_dir = '%s/deploy' % LAT_ROOT
    subprocess.run('sudo chown -R ${USER}: "%s"' % deploy_dir, shell=True, check=True)

    if ret == 0:
        post_process(deploy_dir, lat_yaml, not args.no_sign)

    sys.exit(ret)