Set up controller and agent processes for "software"

Current Status:
- CLI commands
- API endpoints
- Controller process
- Agent process
- Logging

Test Plan:
PASS: Build and install ISO
PASS: software-agent and software-controller services are active post unlock
PASS: tox

Story: 2010676
Task: 47817
Signed-off-by: Jessica Castelino <jessica.castelino@windriver.com>
Change-Id: I394780ce40fee398c4eacb3aacb575a03ff93332

This commit is contained in:
parent ba472d6ede
commit 965a6d3639
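
A quick post-unlock spot-check matching the test plan above (a sketch; the unit names are the ones installed by this change):

    # Verify both new services are active after unlock
    systemctl is-active software-controller-daemon.service software-agent.service
    # Tail recent controller logs if anything reports inactive
    journalctl -u software-controller-daemon --since "10 min ago" | tail -n 20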
@@ -51,6 +51,8 @@
    name: stx-software-tox-py39
    parent: tox-py39
    nodeset: debian-bullseye
    required-projects:
      - starlingx/config
    files:
      - software/*
    vars:
@@ -62,6 +64,8 @@
    name: stx-software-tox-pylint
    parent: tox
    nodeset: debian-bullseye
    required-projects:
      - starlingx/config
    files:
      - software/*
    vars:
@@ -2,6 +2,7 @@
export DH_VERBOSE = 1
export PYBUILD_NAME = software
export PBR_VERSION=1.0.0
PMONDIR := ${ROOT}/usr/share/starlingx/pmon.d

ROOT := $(CURDIR)/debian/tmp

@@ -12,5 +13,48 @@ override_dh_install:
	python3 setup.py install -f --install-layout=deb --root=$(ROOT)
	python3 setup.py bdist_wheel --universal -d $(CURDIR)/debian/$(PYBUILD_NAME)-wheels/usr/share/python-wheels
	install -d -m 755 $(ROOT)/usr/bin
	install -d -m 755 $(ROOT)/usr/sbin
	install -d -m 755 $(ROOT)/run
	install -d -m 755 $(ROOT)/usr/share/bash-completion/completions
	install -m 755 -d ${ROOT}/etc/goenabled.d
	install -m 755 -d ${ROOT}/etc/init.d
	install -m 755 -d ${ROOT}/etc/logrotate.d
	install -m 755 -d ${ROOT}/etc/software
	install -m 755 -d ${ROOT}/etc/software/software-scripts
	install -m 755 -d ${ROOT}/lib/systemd/system
	install -m 755 -d ${PMONDIR}
	install -m 500 service-files/software-controller-daemon-init.sh \
		${ROOT}/etc/init.d/software-controller-daemon
	install -m 500 service-files/software-agent-init.sh \
		${ROOT}/etc/init.d/software-agent
	install -m 500 service-files/software-init.sh \
		${ROOT}/etc/init.d/software
	install -m 500 service-files/software-controller-init.sh \
		${ROOT}/etc/init.d/software-controller
	install -m 600 service-files/software.conf \
		${ROOT}/etc/software/software.conf
	install -m 644 service-files/policy.json \
		${ROOT}/etc/software/policy.json
	install -m 444 service-files/pmon-software-controller-daemon.conf \
		${PMONDIR}/software-controller-daemon.conf
	install -m 444 service-files/pmon-software-agent.conf \
		${PMONDIR}/software-agent.conf
	install -m 444 service-files/*.service \
		${ROOT}/lib/systemd/system
	install -m 444 service-files/software.completion \
		${ROOT}/usr/share/bash-completion/completions/software
	install -m 400 service-files/software-functions \
		${ROOT}/etc/software/software-functions
	install -m 444 service-files/software-tmpdirs.conf \
		${ROOT}/run/software-tmpdirs.conf
	install -m 500 service-files/run-software-scripts \
		${ROOT}/usr/sbin/run-software-scripts
	install -m 500 service-files/software-controller-daemon-restart \
		${ROOT}/usr/sbin/software-controller-daemon-restart
	install -m 500 service-files/software-agent-restart \
		${ROOT}/usr/sbin/software-agent-restart
	install -m 555 service-files/software_check_goenabled.sh \
		${ROOT}/etc/goenabled.d/software_check_goenabled.sh
	install -m 444 service-files/software.logrotate \
		${ROOT}/etc/logrotate.d/software
	dh_install
@@ -1,2 +1,11 @@
-/usr/bin
-/usr/lib/python*/dist-packages/*
+etc/goenabled.d
+etc/init.d
+etc/logrotate.d
+etc/software
+lib/systemd/system
+run/software-tmpdirs.conf
+usr/bin
+usr/lib/python*/dist-packages/*
+usr/sbin
+usr/share/
@@ -31,7 +31,7 @@ extension-pkg-allow-list=
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code. (This is an alternative name to extension-pkg-allow-list
# for backward compatibility.)
-extension-pkg-whitelist=
+extension-pkg-whitelist=lxml

# Return non-zero exit code if any of these messages/categories are detected,
# even if score is above --fail-under value. Syntax same as enable. Messages

@@ -420,7 +420,57 @@ confidence=HIGH,
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use "--disable=all --enable=classes
# --disable=W".
-disable=
+# -Conventions-
+# C0103 invalid-name
+# C0114 missing-module-docstring
+# C0115 missing-class-docstring
+# C0116 missing-function-docstring
+# C0201 consider-iterating-dictionary
+# C0206 consider-using-dict-items
+# C0209 consider-using-f-string
+# C2801 unnecessary-dunder-call
+# C0301 line-too-long
+# C0302 too-many-lines
+# C0325 superfluous-parens
+# C0411 wrong-import-order
+# C0413 wrong-import-position
+# C0415 import-outside-toplevel
+# -Refactoring-
+# R0205 useless-object-inheritance
+# R0402 consider-using-from-import
+# R0801 Similar lines in x files
+# R0902 too-many-instance-attributes
+# R0903 too-few-public-methods
+# R0904 too-many-public-methods
+# R0911 too-many-return-statements
+# R0912 too-many-branches
+# R0913 too-many-arguments
+# R0914 too-many-locals
+# R0915 too-many-statements
+# R1702 too-many-nested-blocks
+# R1705 no-else-return
+# R1714 consider-using-in
+# R1715 consider-using-get
+# R1722 consider-using-sys-exit
+# R1724 no-else-continue
+# R1725 super-with-arguments
+# R1732 consider-using-with
+# R1735 use-dict-literal
+# -Warnings-
+# W0107 unnecessary-pass
+# W0602 global-variable-not-assigned
+# W0603 global-statement
+# W0703 broad-except
+# W0707 raise-missing-from
+# W0719 broad-exception-raised
+# W1505 deprecated-method
+# W1514 unspecified-encoding
+# W3101 missing-timeout
+disable= C0103,C0114,C0115,C0116,C0201,C0206,C0209,C2801,
+         C0301,C0302,C0325,C0411,C0413,C0415,
+         R0205,R0402,R0801,R0902,R0903,R0904,R0911,
+         R0912,R0913,R0914,R0915,R1702,R1705,R1714,
+         R1715,R1722,R1724,R1725,R1732,R1735,
+         W0107,W0602,W0603,W0703,W0707,W0719,W1514,W3101

# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option

@@ -547,7 +597,7 @@ contextmanager-decorators=contextlib.contextmanager
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
-generated-members=
+generated-members=sh

# Tells whether to warn about missing members when the owner of the attribute
# is inferred to be None.
@@ -1,3 +1,12 @@
keystoneauth1
keystonemiddleware
lxml
oslo.config
oslo.log
oslo.policy
oslo.serialization
netaddr
pecan
pycryptodomex
requests_toolbelt
sh
WebOb
software/service-files/pmon-software-agent.conf (new file, 19 lines)
@@ -0,0 +1,19 @@
[process]
process = software-agent
pidfile = /var/run/software-agent.pid
script = /etc/init.d/software-agent
style = lsb             ; ocf or lsb
severity = major        ; Process failure severity
                        ;   critical : host is failed
                        ;   major    : host is degraded
                        ;   minor    : log is generated
restarts = 3            ; Number of back-to-back unsuccessful restarts before severity assertion
interval = 5            ; Number of seconds to wait between back-to-back unsuccessful restarts
debounce = 20           ; Number of seconds the process needs to run before declaring
                        ; it as running O.K. after a restart.
                        ; Time after which back-to-back restart count is cleared.
startuptime = 10        ; Seconds to wait after process start before starting the debounce monitor
mode = passive          ; Monitoring mode: passive (default) or active
                        ; passive: process death monitoring (default: always)
                        ; active : heartbeat monitoring, i.e. request / response messaging
software/service-files/pmon-software-controller-daemon.conf (new file, 19 lines)
@@ -0,0 +1,19 @@
[process]
process = software-controller-daemon
pidfile = /var/run/software-controller-daemon.pid
script = /etc/init.d/software-controller-daemon
style = lsb             ; ocf or lsb
severity = major        ; Process failure severity
                        ;   critical : host is failed
                        ;   major    : host is degraded
                        ;   minor    : log is generated
restarts = 3            ; Number of back-to-back unsuccessful restarts before severity assertion
interval = 5            ; Number of seconds to wait between back-to-back unsuccessful restarts
debounce = 20           ; Number of seconds the process needs to run before declaring
                        ; it as running O.K. after a restart.
                        ; Time after which back-to-back restart count is cleared.
startuptime = 10        ; Seconds to wait after process start before starting the debounce monitor
mode = passive          ; Monitoring mode: passive (default) or active
                        ; passive: process death monitoring (default: always)
                        ; active : heartbeat monitoring, i.e. request / response messaging
software/service-files/policy.json (new file, 2 lines)
@@ -0,0 +1,2 @@
{
}
software/service-files/run-software-scripts (new file, 59 lines)
@@ -0,0 +1,59 @@
#!/bin/bash
#
# Copyright (c) 2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

. /etc/software/software-functions

declare SCRIPTS=$(find $PATCH_SCRIPTDIR -type f -executable | sort)
# grep -c '^' counts lines and yields 0 on empty input
declare -i NUM_SCRIPTS=$(echo -n "$SCRIPTS" | grep -c '^')

if [ $NUM_SCRIPTS -eq 0 ]
then
    loginfo "No in-service patch scripts found."
    exit 0
fi

loginfo "Running $NUM_SCRIPTS in-service patch scripts"

declare SCRIPTLOG=/var/log/software-insvc.log
cat <<EOF >>$SCRIPTLOG
############################################################
`date "+%FT%T.%3N"`: Running $NUM_SCRIPTS in-service patch scripts:

$SCRIPTS

############################################################
EOF

declare -i FAILURES=0
for cmd in $SCRIPTS
do
    cat <<EOF >>$SCRIPTLOG
############################################################
`date "+%FT%T.%3N"`: Running $cmd

EOF

    bash -x $cmd >>$SCRIPTLOG 2>&1
    rc=$?
    if [ $rc -ne $PATCH_STATUS_OK ]
    then
        let FAILURES++
    fi
    cat <<EOF >>$SCRIPTLOG
`date "+%FT%T.%3N"`: Completed running $cmd (rc=$rc)
############################################################

EOF
done

cat <<EOF >>$SCRIPTLOG

`date "+%FT%T.%3N"`: Completed running scripts with $FAILURES failures
############################################################
EOF

exit $FAILURES
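
For illustration, staging a script where run-software-scripts will pick it up (a sketch; the script name is hypothetical, and PATCH_SCRIPTDIR resolves to /run/software/software-scripts per software-functions):

    # Stage a hypothetical in-service patch script, then run the batch
    install -m 0755 example-fixup.sh /run/software/software-scripts/
    /usr/sbin/run-software-scripts
    echo "failed scripts: $?"   # the exit status is the failure count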
software/service-files/setup_software_repo (new executable file, 141 lines)
@@ -0,0 +1,141 @@
#!/usr/bin/env python

"""
Copyright (c) 2023 Wind River Systems, Inc.

SPDX-License-Identifier: Apache-2.0

"""

import getopt
import os
import platform
import shutil
import subprocess
import sys
import tempfile

import software.software_functions as sf
import software.release_verify as pv
import software.constants as constants

import logging
logging.getLogger('main_logger')
logging.basicConfig(level=logging.INFO)

# Override the pv.dev_certificate_marker so we can verify signatures off-box
software_bindir = os.path.dirname(os.path.abspath(sys.argv[0]))
dev_cert_path = os.path.abspath(os.path.join(software_bindir, '../../enable-dev-patch/enable-dev-patch/dev_certificate_enable.bin'))

pv.dev_certificate_marker = dev_cert_path


def usage():
    print("Usage: %s -o <repodir> <patch> ..." % os.path.basename(sys.argv[0]))
    exit(1)


def main():
    try:
        opts, remainder = getopt.getopt(sys.argv[1:],
                                        'o:',
                                        ['output='])
    except getopt.GetoptError:
        usage()

    output = None

    for opt, arg in opts:
        if opt == "--output" or opt == '-o':
            output = arg

    if output is None:
        usage()

    sw_version = os.environ['PLATFORM_RELEASE']

    allpatches = sf.PatchData()

    output = os.path.abspath(output)

    pkgdir = os.path.join(output, 'Packages')
    datadir = os.path.join(output, 'metadata')
    committed_dir = os.path.join(datadir, 'committed')

    if os.path.exists(output):
        # Check to see if the expected structure already exists,
        # maybe we're appending a patch.
        if not os.path.exists(committed_dir) or not os.path.exists(pkgdir):
            print("Packages or metadata dir missing from existing %s. Aborting..." % output)
            exit(1)

        # Load the existing metadata
        allpatches.load_all_metadata(committed_dir, constants.COMMITTED)
    else:
        os.mkdir(output, 0o755)
        os.mkdir(datadir, 0o755)
        os.mkdir(committed_dir, 0o755)
        os.mkdir(pkgdir, 0o755)

    # Save the current directory, so we can chdir back after
    orig_wd = os.getcwd()

    tmpdir = None
    try:
        for p in remainder:
            fpath = os.path.abspath(p)

            # Create a temporary working directory
            tmpdir = tempfile.mkdtemp(prefix="patchrepo_")

            # Change to the tmpdir
            os.chdir(tmpdir)

            print("Parsing %s" % fpath)
            sf.PatchFile.read_patch(fpath)

            thispatch = sf.PatchData()
            patch_id = thispatch.parse_metadata("metadata.xml", constants.COMMITTED)

            if patch_id in allpatches.metadata:
                print("Skipping %s as it's already in the repo" % patch_id)
                # Change back to original working dir
                os.chdir(orig_wd)

                shutil.rmtree(tmpdir)
                tmpdir = None

                continue

            patch_sw_version = thispatch.query_line(patch_id, 'sw_version')
            if patch_sw_version != sw_version:
                raise Exception("%s is for release %s, not %s" % (patch_id, patch_sw_version, sw_version))

            # Move the metadata to the "committed" dir, and the deb packages to the Packages dir
            shutil.move('metadata.xml', os.path.join(committed_dir, "%s-metadata.xml" % patch_id))
            for f in thispatch.query_line(patch_id, 'contents'):
                shutil.move(f, pkgdir)

            allpatches.add_patch(patch_id, thispatch)

            # Change back to original working dir
            os.chdir(orig_wd)

            shutil.rmtree(tmpdir)
            tmpdir = None
    except:
        if tmpdir is not None:
            # Change back to original working dir
            os.chdir(orig_wd)

            shutil.rmtree(tmpdir)
            tmpdir = None
        raise

    allpatches.gen_release_groups_xml(sw_version, output)

    # Purge unneeded deb pkgs
    keep = {}


if __name__ == "__main__":
    sys.exit(main())
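
Hypothetical invocation (the release value and patch file name are placeholders; the script reads PLATFORM_RELEASE from the environment):

    export PLATFORM_RELEASE=23.09
    setup_software_repo -o ./repo EXAMPLE-PATCH-0001.patch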
software/service-files/software-agent-init.sh (new executable file, 94 lines)
@@ -0,0 +1,94 @@
#!/bin/sh
#
# Copyright (c) 2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

# chkconfig: 345 26 30

### BEGIN INIT INFO
# Provides:          software-agent
# Required-Start:    $syslog
# Required-Stop:     $syslog
# Default-Start:     2 3 5
# Default-Stop:      0 1 6
# Short-Description: software-agent
# Description:       Provides the Unified Software Management Agent Daemon
### END INIT INFO

DESC="software-agent"
DAEMON="/usr/bin/software-agent"
PIDFILE="/var/run/software-agent.pid"
PATCH_INSTALLING_FILE="/var/run/patch_installing"

start()
{
    if [ -e $PIDFILE ]; then
        PIDDIR=/proc/$(cat $PIDFILE)
        if [ -d ${PIDDIR} ]; then
            echo "$DESC already running."
            exit 1
        else
            echo "Removing stale PID file $PIDFILE"
            rm -f $PIDFILE
        fi
    fi

    echo -n "Starting $DESC..."

    start-stop-daemon --start --quiet --background \
        --pidfile ${PIDFILE} --make-pidfile --exec ${DAEMON}

    if [ $? -eq 0 ]; then
        echo "done."
    else
        echo "failed."
    fi
}

stop()
{
    if [ -f $PATCH_INSTALLING_FILE ]; then
        echo "Patches are installing. Waiting for install to complete."
        while [ -f $PATCH_INSTALLING_FILE ]; do
            # Verify the agent is still running
            pid=$(cat $PATCH_INSTALLING_FILE)
            cat /proc/$pid/cmdline 2>/dev/null | grep -q $DAEMON
            if [ $? -ne 0 ]; then
                echo "Patch agent not running."
                break
            fi
            sleep 1
        done
        echo "Continuing with shutdown."
    fi

    echo -n "Stopping $DESC..."
    start-stop-daemon --stop --quiet --pidfile $PIDFILE
    if [ $? -eq 0 ]; then
        echo "done."
    else
        echo "failed."
    fi
    rm -f $PIDFILE
}

case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    restart|force-reload)
        stop
        start
        ;;
    *)
        echo "Usage: $0 {start|stop|force-reload|restart}"
        exit 1
        ;;
esac

exit 0
software/service-files/software-agent-restart (new file, 20 lines)
@@ -0,0 +1,20 @@
#!/bin/bash
#
# Copyright (c) 2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

. /etc/software/software-functions

#
# Triggering a restart of the software daemons is done by
# creating a flag file and letting the daemon handle the restart.
#
loginfo "Requesting restart of software-agent"

restart_software_agent_flag="/run/software/.restart.software-agent"
touch $restart_software_agent_flag

exit 0
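
The consumer side of this handshake lives in the agent daemon itself; as a rough bash sketch of the same pattern (illustrative only, not the daemon's actual implementation):

    # Poll for the restart flag and act on it
    flag="/run/software/.restart.software-agent"
    while sleep 5; do
        if [ -f "$flag" ]; then
            rm -f "$flag"
            /etc/init.d/software-agent restart
        fi
    done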
software/service-files/software-agent.service (new file, 16 lines)
@@ -0,0 +1,16 @@
[Unit]
Description=Unified Software Management Agent
After=syslog.target network-online.target software.service
Before=pmon.service

[Service]
Type=forking
User=root
ExecStart=/etc/init.d/software-agent start
ExecStop=/etc/init.d/software-agent stop
ExecReload=/etc/init.d/software-agent restart
PIDFile=/var/run/software-agent.pid

[Install]
WantedBy=multi-user.target
software/service-files/software-controller-daemon-init.sh (new executable file, 78 lines)
@@ -0,0 +1,78 @@
#!/bin/sh
#
# Copyright (c) 2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

# chkconfig: 345 25 30

### BEGIN INIT INFO
# Provides:          software-controller-daemon
# Required-Start:    $syslog
# Required-Stop:     $syslog
# Default-Start:     2 3 5
# Default-Stop:      0 1 6
# Short-Description: software-controller-daemon
# Description:       Provides the Unified Software Patch Controller Daemon
### END INIT INFO

DESC="software-controller-daemon"
DAEMON="/usr/bin/software-controller-daemon"
PIDFILE="/var/run/software-controller-daemon.pid"

start()
{
    if [ -e $PIDFILE ]; then
        PIDDIR=/proc/$(cat $PIDFILE)
        if [ -d ${PIDDIR} ]; then
            echo "$DESC already running."
            exit 1
        else
            echo "Removing stale PID file $PIDFILE"
            rm -f $PIDFILE
        fi
    fi

    echo -n "Starting $DESC..."

    start-stop-daemon --start --quiet --background \
        --pidfile ${PIDFILE} --make-pidfile --exec ${DAEMON}

    if [ $? -eq 0 ]; then
        echo "done."
    else
        echo "failed."
    fi
}

stop()
{
    echo -n "Stopping $DESC..."
    start-stop-daemon --stop --quiet --pidfile $PIDFILE
    if [ $? -eq 0 ]; then
        echo "done."
    else
        echo "failed."
    fi
    rm -f $PIDFILE
}

case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    restart|force-reload)
        stop
        start
        ;;
    *)
        echo "Usage: $0 {start|stop|force-reload|restart}"
        exit 1
        ;;
esac

exit 0
software/service-files/software-controller-daemon-restart (new file, 20 lines)
@@ -0,0 +1,20 @@
#!/bin/bash
#
# Copyright (c) 2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

. /etc/software/software-functions

#
# Triggering a restart of the software daemons is done by
# creating a flag file and letting the daemon handle the restart.
#
loginfo "Requesting restart of software-controller"

restart_software_controller_flag="/run/software/.restart.software-controller"
touch $restart_software_controller_flag

exit 0
software/service-files/software-controller-daemon.service (new file, 16 lines)
@@ -0,0 +1,16 @@
[Unit]
Description=Unified Software Management Controller Daemon
After=syslog.target network-online.target software.service software-controller.service
Before=pmon.service

[Service]
Type=forking
User=root
ExecStart=/etc/init.d/software-controller-daemon start
ExecStop=/etc/init.d/software-controller-daemon stop
ExecReload=/etc/init.d/software-controller-daemon restart
PIDFile=/var/run/software-controller-daemon.pid

[Install]
WantedBy=multi-user.target
software/service-files/software-controller-init.sh (new file, 92 lines)
@@ -0,0 +1,92 @@
#!/bin/bash
#
# Copyright (c) 2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# StarlingX Patching Controller setup
# chkconfig: 345 20 24
# description: CGCS Patching Controller init script

### BEGIN INIT INFO
# Provides:          software-controller
# Required-Start:    $syslog
# Required-Stop:     $syslog
# Default-Start:     2 3 5
# Default-Stop:      0 1 6
# Short-Description: software-controller
# Description:       Provides the Unified Software Management Controller Daemon
### END INIT INFO

. /usr/bin/tsconfig

NAME=$(basename $0)

REPO_ID=updates
REPO_ROOT=/var/www/pages/${REPO_ID}
REPO_DIR=${REPO_ROOT}/rel-${SW_VERSION}
GROUPS_FILE=$REPO_DIR/comps.xml
PATCHING_DIR=/opt/software

logfile=/var/log/software.log

function LOG {
    logger "$NAME: $*"
    echo "`date "+%FT%T.%3N"`: $NAME: $*" >> $logfile
}

function LOG_TO_FILE {
    echo "`date "+%FT%T.%3N"`: $NAME: $*" >> $logfile
}

function do_setup {
    # Does the repo exist?
    if [ ! -d $REPO_DIR ]; then
        LOG "Creating repo."
        mkdir -p $REPO_DIR

        # The original CentOS code would create the groups and call createrepo
        # todo(jcasteli): determine if the ostree code needs a setup also
    fi

    if [ ! -d $PATCHING_DIR ]; then
        LOG "Creating $PATCHING_DIR"
        mkdir -p $PATCHING_DIR
    fi

    # If we can ping the active controller, sync the repos
    LOG_TO_FILE "ping -c 1 -w 1 controller"
    ping -c 1 -w 1 controller >> $logfile 2>&1 || ping6 -c 1 -w 1 controller >> $logfile 2>&1
    if [ $? -ne 0 ]; then
        LOG "Cannot ping controller. Nothing to do"
        return 0
    fi

    # Sync the software dir
    LOG_TO_FILE "rsync -acv --delete rsync://controller/software/ ${PATCHING_DIR}/"
    rsync -acv --delete rsync://controller/software/ ${PATCHING_DIR}/ >> $logfile 2>&1

    # Sync the repo dir
    LOG_TO_FILE "rsync -acv --delete rsync://controller/repo/ ${REPO_ROOT}/"
    rsync -acv --delete rsync://controller/repo/ ${REPO_ROOT}/ >> $logfile 2>&1
}

case "$1" in
    start)
        do_setup
        ;;
    status)
        ;;
    stop)
        # Nothing to do here
        ;;
    restart)
        do_setup
        ;;
    *)
        echo "Usage: $0 {status|start|stop|restart}"
        exit 1
esac

exit 0
software/service-files/software-controller.service (new file, 14 lines)
@@ -0,0 +1,14 @@
[Unit]
Description=Unified Software Management Controller
After=syslog.service network-online.target software.service
Before=software-agent.service software-controller-daemon.service

[Service]
Type=oneshot
User=root
ExecStart=/etc/init.d/software-controller start
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
software/service-files/software-functions (new file, 52 lines)
@@ -0,0 +1,52 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

#
# This bash source file provides variables and functions that
# may be used by in-service patch scripts.
#

# Source platform.conf, for nodetype and subfunctions
. /etc/platform/platform.conf

declare PATCH_SCRIPTDIR=/run/software/software-scripts
declare PATCH_FLAGDIR=/run/software/software-flags
declare -i PATCH_STATUS_OK=0
declare -i PATCH_STATUS_FAILED=1

declare logfile=/var/log/software.log
declare NAME=$(basename $0)

function loginfo()
{
    echo "`date "+%FT%T.%3N"`: $NAME: $*" >> $logfile
}

function is_controller()
{
    [[ $nodetype == "controller" ]]
}

function is_worker()
{
    [[ $nodetype == "worker" ]]
}

function is_storage()
{
    [[ $nodetype == "storage" ]]
}

function is_cpe()
{
    [[ $nodetype == "controller" && $subfunction =~ worker ]]
}

function is_locked()
{
    test -f /var/run/.node_locked
}
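
A minimal in-service patch script built on these helpers might look like this (a sketch; the service name is a placeholder):

    #!/bin/bash
    . /etc/software/software-functions

    # Only act on controllers; reload a hypothetical service
    if is_controller; then
        loginfo "Reloading example.service after patch"
        systemctl reload example.service || exit $PATCH_STATUS_FAILED
    fi

    exit $PATCH_STATUS_OK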
software/service-files/software-init.sh (new file, 180 lines)
@@ -0,0 +1,180 @@
#!/bin/bash
#
# Copyright (c) 2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# Unified Software Management
# chkconfig: 345 20 23
# description: StarlingX Unified Software Management init script

### BEGIN INIT INFO
# Provides:          software
# Required-Start:    $syslog
# Required-Stop:     $syslog
# Default-Start:     2 3 5
# Default-Stop:      0 1 6
# Short-Description: software
# Description:       Provides the Unified Software Management component
### END INIT INFO

NAME=$(basename $0)

. /usr/bin/tsconfig
. /etc/platform/platform.conf

logfile=/var/log/software.log
patch_failed_file=/var/run/patch_install_failed
patched_during_init=/etc/software/.patched_during_init

# If the system has never been bootstrapped, system_mode is not set.
# Treat a non-bootstrapped system as simplex,
# and manually manage lighttpd, etc.
if [ "${system_mode}" = "" ]; then
    system_mode="simplex"
fi

function LOG_TO_FILE {
    echo "`date "+%FT%T.%3N"`: $NAME: $*" >> $logfile
}

function check_for_rr_patch {
    if [ -f /var/run/node_is_patched_rr ]; then
        if [ ! -f ${patched_during_init} ]; then
            echo
            echo "Node has been patched and requires an immediate reboot."
            echo
            LOG_TO_FILE "Node has been patched, with reboot-required flag set. Rebooting"
            touch ${patched_during_init}
            /sbin/reboot
        else
            echo
            echo "Node has been patched during init a second consecutive time. Skipping reboot due to possible error"
            echo
            LOG_TO_FILE "Node has been patched during init a second consecutive time. Skipping reboot due to possible error"
            touch ${patch_failed_file}
            rm -f ${patched_during_init}
            exit 1
        fi
    else
        rm -f ${patched_during_init}
    fi
}

function check_install_uuid {
    # Check whether our installed load matches the active controller
    CONTROLLER_UUID=`curl -sf http://controller:${http_port}/feed/rel-${SW_VERSION}/install_uuid`
    if [ $? -ne 0 ]; then
        if [ "$HOSTNAME" = "controller-1" ]; then
            # If we're on controller-1, controller-0 may not have the install_uuid
            # matching this release, if we're in an upgrade. If the file doesn't exist,
            # bypass this check
            return 0
        fi

        LOG_TO_FILE "Unable to retrieve installation uuid from active controller"
        echo "Unable to retrieve installation uuid from active controller"
        return 1
    fi

    if [ "$INSTALL_UUID" != "$CONTROLLER_UUID" ]; then
        LOG_TO_FILE "This node is running a different load than the active controller and must be reinstalled"
        echo "This node is running a different load than the active controller and must be reinstalled"
        return 1
    fi

    return 0
}

# Check for installation failure
if [ -f /etc/platform/installation_failed ] ; then
    LOG_TO_FILE "/etc/platform/installation_failed flag is set. Aborting."
    echo "$(basename $0): Detected installation failure. Aborting."
    exit 1
fi

# For AIO-SX, abort if config is not yet applied and this is running in init
if [ "${system_mode}" = "simplex" -a ! -f ${INITIAL_CONTROLLER_CONFIG_COMPLETE} -a "$1" = "start" ]; then
    LOG_TO_FILE "Config is not yet applied. Skipping init patching"
    exit 0
fi

# If the management interface is bonded, it may take some time
# before communications can be properly set up.
# Allow up to $DELAY_SEC seconds to reach the controller.
DELAY_SEC=120
START=`date +%s`
FOUND=0
while [ $(date +%s) -lt $(( ${START} + ${DELAY_SEC} )) ]; do
    LOG_TO_FILE "Waiting for controller to be pingable"
    ping -c 1 controller > /dev/null 2>&1 || ping6 -c 1 controller > /dev/null 2>&1
    if [ $? -eq 0 ]; then
        LOG_TO_FILE "controller is pingable"
        FOUND=1
        break
    fi
    sleep 1
done

if [ ${FOUND} -eq 0 ]; then
    # 'controller' is not available, just exit
    LOG_TO_FILE "Unable to contact active controller (controller). Boot will continue."
    exit 1
fi

RC=0
case "$1" in
    start)
        if [ "${system_mode}" = "simplex" ]; then
            # On a simplex CPE, we need to launch the http server first,
            # before we can do the patch installation
            LOG_TO_FILE "***** Launching lighttpd *****"
            /etc/init.d/lighttpd start

            LOG_TO_FILE "***** Starting patch operation *****"
            /usr/sbin/software-agent --install 2>>$logfile
            if [ -f ${patch_failed_file} ]; then
                RC=1
                LOG_TO_FILE "***** Patch operation failed *****"
            fi
            LOG_TO_FILE "***** Finished patch operation *****"

            LOG_TO_FILE "***** Shutting down lighttpd *****"
            /etc/init.d/lighttpd stop
        else
            check_install_uuid
            if [ $? -ne 0 ]; then
                # The INSTALL_UUID doesn't match the active controller, so exit
                exit 1
            fi

            LOG_TO_FILE "***** Starting patch operation *****"
            /usr/sbin/software-agent --install 2>>$logfile
            if [ -f ${patch_failed_file} ]; then
                RC=1
                LOG_TO_FILE "***** Patch operation failed *****"
            fi
            LOG_TO_FILE "***** Finished patch operation *****"
        fi

        check_for_rr_patch
        ;;
    stop)
        # Nothing to do here
        ;;
    restart)
        LOG_TO_FILE "***** Starting patch operation *****"
        /usr/sbin/software-agent --install 2>>$logfile
        if [ -f ${patch_failed_file} ]; then
            RC=1
            LOG_TO_FILE "***** Patch operation failed *****"
        fi
        LOG_TO_FILE "***** Finished patch operation *****"
        ;;
    *)
        echo "Usage: $0 {start|stop|restart}"
        exit 1
esac

exit $RC
software/service-files/software-tmpdirs.conf (new file, 2 lines)
@@ -0,0 +1,2 @@
d /run/software 0700 root root -
software/service-files/software.completion (new file, 153 lines)
@@ -0,0 +1,153 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

#
# This file provides bash-completion functionality for
# the unified software management CLI
#

function _sw()
{
    COMPREPLY=()
    local cur="${COMP_WORDS[COMP_CWORD]}"
    local prev="${COMP_WORDS[COMP_CWORD-1]}"
    local subcommand=${COMP_WORDS[1]}

    #
    # The available software subcommands
    #
    local subcommands="
        release upload
        release upload-dir
        release delete
        release list
        release show
        deploy create
        deploy list
        deploy precheck
        deploy start
        deploy host
        deploy query
        deploy activate
        deploy complete
        deploy abort
        deploy host-rollback
        is-applied
        is-available
        report-app-dependencies
        query-app-dependencies
        what-requires
    "
    if [ -f /etc/platform/.initial_config_complete ]; then
        # Post-config, so the host-install commands are accessible
        subcommands="${subcommands} deploy host"
    else
        # Pre-config, so the install-local command is accessible
        subcommands="${subcommands} install-local"
    fi

    # Appends the '/' when completing dir names
    set mark-directories on

    if [ $COMP_CWORD -gt 1 ]; then
        #
        # Complete the arguments to the subcommands.
        #
        case "$subcommand" in
            apply|delete|show|what-requires|is-applied|is-available)
                # Query the list of known patches
                local patches=$(software completion patches 2>/dev/null)
                COMPREPLY=( $(compgen -W "${patches}" -- ${cur}) )
                return 0
                ;;
            remove)
                # Query the list of known patches
                local patches=$(software completion patches 2>/dev/null)
                COMPREPLY=( $(compgen -W "--skipappcheck ${patches}" -- ${cur}) )
                return 0
                ;;
            host-install|host-install-async|drop-host)
                if [ "${prev}" = "${subcommand}" -o "${prev}" = "--force" ]; then
                    # Query the list of known hosts
                    local names=$(software completion hosts 2>/dev/null)
                    COMPREPLY=( $(compgen -W "${names}" -- ${cur}) )
                else
                    # Only one host can be specified, so no more completion
                    COMPREPLY=( $(compgen -- ${cur}) )
                fi
                return 0
                ;;
            upload)
                # Allow dirs and files with .patch extension for completion
                COMPREPLY=( $(compgen -f -o plusdirs -X '!*.patch' -- ${cur}) )
                return 0
                ;;
            upload-dir)
                # Allow dirs only for completion
                COMPREPLY=( $(compgen -d -- ${cur}) )
                return 0
                ;;
            query)
                if [ "${prev}" = "--release" ]; then
                    # If --release has been specified, provide installed releases for completion
                    local releases=$(/bin/ls -d /var/www/pages/feed/rel-* 2>/dev/null | sed 's#/var/www/pages/feed/rel-##')
                    COMPREPLY=( $(compgen -W "${releases}" -- ${cur}) )
                else
                    # --release is the only completion option for query
                    COMPREPLY=( $(compgen -W "--release" -- ${cur}) )
                fi
                return 0
                ;;
            query-hosts|install-local)
                # These subcommands have no options/arguments
                COMPREPLY=( $(compgen -- ${cur}) )
                return 0
                ;;
            query-dependencies)
                # Query the list of known patches
                local patches=$(software completion patches 2>/dev/null)
                COMPREPLY=( $(compgen -W "--recursive ${patches}" -- ${cur}) )
                return 0
                ;;
            commit)
                if [ "${prev}" = "--release" ]; then
                    # If --release has been specified, provide installed releases for completion
                    local releases=$(/bin/ls -d /var/www/pages/feed/rel-* 2>/dev/null | sed 's#/var/www/pages/feed/rel-##')
                    COMPREPLY=( $(compgen -W "${releases}" -- ${cur}) )
                else
                    # Query the list of known patches
                    local patches=$(software completion patches 2>/dev/null)
                    COMPREPLY=( $(compgen -W "--all --dry-run --release ${patches}" -- ${cur}) )
                fi
                return 0
                ;;
            report-app-dependencies)
                if [ "${prev}" = "${subcommand}" ]; then
                    COMPREPLY=( $(compgen -W "--app" -- ${cur}) )
                elif [ "${prev}" = "--app" ]; then
                    COMPREPLY=
                else
                    local patches=$(software completion patches 2>/dev/null)
                    COMPREPLY=( $(compgen -W "${patches}" -- ${cur}) )
                fi
                return 0
                ;;
            query-app-dependencies)
                return 0
                ;;
            *)
                ;;
        esac
    fi

    # Provide subcommands for completion
    COMPREPLY=($(compgen -W "${subcommands}" -- ${cur}))
    return 0
}

# Bind the above function to the software CLI
complete -F _sw -o filenames software
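
To pick the completions up in an already-running shell rather than waiting for a new login:

    source /usr/share/bash-completion/completions/software
    # then typing "software dep<Tab>" completes the "deploy" subcommands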
software/service-files/software.conf (new file, 7 lines)
@@ -0,0 +1,7 @@
[runtime]
controller_multicast = 239.1.1.3
agent_multicast = 239.1.1.4
api_port = 5493
controller_port = 5494
agent_port = 5495
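
Once the daemons are running, the configured ports should show up as bound sockets (a rough check; whether each port is TCP or UDP depends on the daemon using it, so this matches both):

    ss -tulnp | grep -E ':(5493|5494|5495)\b'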
software/service-files/software.logrotate (new file, 15 lines)
@@ -0,0 +1,15 @@
/var/log/software.log
/var/log/software-api.log
/var/log/software-insvc.log
{
    nodateext
    size 10M
    start 1
    rotate 10
    missingok
    notifempty
    compress
    delaycompress
    copytruncate
}
software/service-files/software.service (new file, 15 lines)
@@ -0,0 +1,15 @@
[Unit]
Description=Unified Software Management
After=syslog.target network-online.target
Before=software-agent.service

[Service]
Type=oneshot
User=root
ExecStart=/etc/init.d/software start
RemainAfterExit=yes
StandardOutput=journal+console
StandardError=journal+console

[Install]
WantedBy=multi-user.target
software/service-files/software_check_goenabled.sh (new file, 28 lines)
@@ -0,0 +1,28 @@
#!/bin/bash
#
# Copyright (c) 2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

# Software "goenabled" check.
# If a minor software release version has been applied on this node,
# it is now out-of-date and should be rebooted.

NAME=$(basename $0)
SYSTEM_CHANGED_FLAG=/var/run/node_is_patched

logfile=/var/log/software.log

function LOG {
    logger "$NAME: $*"
    echo "`date "+%FT%T.%3N"`: $NAME: $*" >> $logfile
}

if [ -f $SYSTEM_CHANGED_FLAG ]; then
    LOG "Node has been patched. Failing goenabled check."
    exit 1
fi

exit 0
@@ -24,8 +24,9 @@ packages =

[entry_points]
console_scripts =
-    software = software.cmd.shell:main
-    software-api = software.cmd.api:main
+    software = software.software_client:main
+    software-controller-daemon = software.software_controller:main
+    software-agent = software.software_agent:main

[wheel]
universal = 1
@@ -17,7 +17,7 @@ def get_pecan_config():
    cfg_dict = {
        # todo(abailey): add server defaults to config
        "server": {
-            "port": "5490",
+            "port": "5496",
            "host": "127.0.0.1"
        },
        "app": {
@@ -52,3 +52,12 @@ def setup_app(pecan_config=None):
        guess_content_type_from_ext=pecan_config.app.guess_content_type_from_ext
    )
    return app


class VersionSelectorApplication(object):
    def __init__(self):
        pc = get_pecan_config()
        self.v1 = setup_app(pecan_config=pc)

    def __call__(self, environ, start_response):
        return self.v1(environ, start_response)
@@ -6,6 +6,36 @@ SPDX-License-Identifier: Apache-2.0
"""
from pecan import expose

from software.exceptions import PatchError
from software.software_controller import pc


class PatchAPIController(object):

    @expose('json')
    @expose('query.xml', content_type='application/xml')
    def host_install_async(self, *args):
        if len(list(args)) == 0:
            return dict(error="Host must be specified for install")
        force = False
        if len(list(args)) > 1 and 'force' in list(args)[1:]:
            force = True

        try:
            result = pc.patch_host_install(list(args)[0], force, async_req=True)
        except PatchError as e:
            return dict(error="Error: %s" % str(e))

        return result

    @expose('json')
    def is_applied(self, *args):
        return pc.is_applied(list(args))

    @expose('json')
    def is_available(self, *args):
        return pc.is_available(list(args))


class RootController:
    """pecan REST API root"""
@@ -14,4 +44,7 @@ class RootController:

    @expose('json')
    def index(self):
        """index for the root"""
-        return {}
+        return "Unified Software Management API, Available versions: /v1"

+    patch = PatchAPIController()
+    v1 = PatchAPIController()
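
A hypothetical smoke test against the pecan app above (host and port follow the "server" config in this change, 127.0.0.1:5496; the patch ID is a placeholder and the URL layout assumes pecan's default controller routing):

    curl -s http://127.0.0.1:5496/            # root index: version banner
    curl -s http://127.0.0.1:5496/v1/is_applied/EXAMPLE-PATCH-0001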
software/software/authapi/__init__.py (new executable file, 25 lines)
@@ -0,0 +1,25 @@
# Copyright (c) 2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

from oslo_config import cfg

API_SERVICE_OPTS = [
    cfg.StrOpt('auth_api_bind_ip',
               default=None,
               help='IP for the authenticated Unified Software Management API server to bind to'),
    cfg.IntOpt('auth_api_port',
               default=5497,
               help='The port for the authenticated Unified Software Management API server'),
    cfg.IntOpt('api_limit_max',
               default=1000,
               help='the maximum number of items returned in a single '
                    'response from a collection resource')
]

CONF = cfg.CONF
opt_group = cfg.OptGroup(name='api',
                         title='Options for the patch-api service')
CONF.register_group(opt_group)
CONF.register_opts(API_SERVICE_OPTS)
software/software/authapi/acl.py (new executable file, 27 lines)
@@ -0,0 +1,27 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

"""Access Control Lists (ACLs) that control access to the API server."""
from software.authapi import auth_token

OPT_GROUP_NAME = 'keystone_authtoken'
OPT_GROUP_PROVIDER = 'keystonemiddleware.auth_token'


def install(app, conf, public_routes):
    """Install ACL check on application.

    :param app: A WSGI application.
    :param conf: Settings. Must include OPT_GROUP_NAME section.
    :param public_routes: The list of the routes which will be allowed
                          access without authentication.
    :return: The same WSGI application with ACL installed.

    """
    keystone_config = dict(conf.get(OPT_GROUP_NAME))
    return auth_token.AuthTokenMiddleware(app,
                                          conf=keystone_config,
                                          public_api_routes=public_routes)
software/software/authapi/app.py (new executable file, 74 lines)
@@ -0,0 +1,74 @@
"""
Copyright (c) 2023 Wind River Systems, Inc.

SPDX-License-Identifier: Apache-2.0

"""

from oslo_config import cfg
import pecan

from software.authapi import acl
from software.authapi import config
from software.authapi import hooks
from software.authapi import policy

auth_opts = [
    cfg.StrOpt('auth_strategy',
               default='keystone',
               help='Method to use for auth: noauth or keystone.'),
]

CONF = cfg.CONF
CONF.register_opts(auth_opts)


def get_pecan_config():
    # Set up the pecan configuration
    filename = config.__file__.replace('.pyc', '.py')
    return pecan.configuration.conf_from_file(filename)


def setup_app(pecan_config=None, extra_hooks=None):
    policy.init()

    app_hooks = [hooks.ConfigHook(),
                 hooks.ContextHook(pecan_config.app.acl_public_routes),
                 ]
    if extra_hooks:
        app_hooks.extend(extra_hooks)

    if not pecan_config:
        pecan_config = get_pecan_config()

    if pecan_config.app.enable_acl:
        app_hooks.append(hooks.AccessPolicyHook())

    pecan.configuration.set_config(dict(pecan_config), overwrite=True)

    app = pecan.make_app(
        pecan_config.app.root,
        static_root=pecan_config.app.static_root,
        template_path=pecan_config.app.template_path,
        debug=False,
        force_canonical=getattr(pecan_config.app, 'force_canonical', True),
        hooks=app_hooks,
        guess_content_type_from_ext=False,  # Avoid mime-type lookup
    )

    # config_parser must contain the keystone_auth
    if pecan_config.app.enable_acl:
        CONF.import_group(acl.OPT_GROUP_NAME, acl.OPT_GROUP_PROVIDER)
        return acl.install(app, CONF, pecan_config.app.acl_public_routes)

    return app


class VersionSelectorApplication(object):
    def __init__(self):
        pc = get_pecan_config()
        pc.app.enable_acl = (CONF.auth_strategy == 'keystone')
        self.v1 = setup_app(pecan_config=pc)

    def __call__(self, environ, start_response):
        return self.v1(environ, start_response)
software/software/authapi/auth_token.py (new executable file, 44 lines)
@@ -0,0 +1,44 @@
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from keystonemiddleware import auth_token
from software import utils


class AuthTokenMiddleware(auth_token.AuthProtocol):
    """A wrapper on Keystone auth_token middleware.

    Does not perform verification of authentication tokens
    for public routes in the API.

    """
    def __init__(self, app, conf, public_api_routes=None):
        if public_api_routes is None:
            public_api_routes = []

        self.public_api_routes = set(public_api_routes)

        super(AuthTokenMiddleware, self).__init__(app, conf)

    def __call__(self, env, start_response):
        path = utils.safe_rstrip(env.get('PATH_INFO'), '/')

        if path in self.public_api_routes:
            return self.app(env, start_response)  # pylint: disable=no-member

        return super(AuthTokenMiddleware, self).__call__(env, start_response)  # pylint: disable=too-many-function-args
software/software/authapi/config.py (new executable file, 23 lines)
@@ -0,0 +1,23 @@
"""
Copyright (c) 2023 Wind River Systems, Inc.

SPDX-License-Identifier: Apache-2.0

"""

# Server Specific Configurations
server = {
    'port': '5497',
    'host': '0.0.0.0'
}

# Pecan Application Configurations
app = {
    'root': 'software.api.controllers.root.RootController',
    'modules': ['software.api'],
    'static_root': '%(confdir)s/public',
    'template_path': '%(confdir)s/../templates',
    'debug': False,
    'enable_acl': True,
    'acl_public_routes': [],
}
software/software/authapi/context.py (new file, 40 lines)
@@ -0,0 +1,40 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

from oslo_context import context


# Patching only calls into FM, so only the FM service type
# needs to be preserved in the service catalog
REQUIRED_SERVICE_TYPES = ('faultmanagement',)


class RequestContext(context.RequestContext):
    """Extends security contexts from the OpenStack common library."""

    def __init__(self, is_public_api=False, service_catalog=None, **kwargs):
        """Stores several additional request parameters:
        """
        super(RequestContext, self).__init__(**kwargs)
        self.is_public_api = is_public_api
        if service_catalog:
            # Only include required parts of service_catalog
            self.service_catalog = [s for s in service_catalog
                                    if s.get('type') in REQUIRED_SERVICE_TYPES]
        else:
            # if list is empty or none
            self.service_catalog = []

    def to_dict(self):
        value = super(RequestContext, self).to_dict()
        value.update({'is_public_api': self.is_public_api,
                      'project_name': self.project_name,
                      'service_catalog': self.service_catalog})
        return value


def make_context(*args, **kwargs):
    return RequestContext(*args, **kwargs)
134
software/software/authapi/hooks.py
Executable file
134
software/software/authapi/hooks.py
Executable file
@ -0,0 +1,134 @@
|
||||
# -*- encoding: utf-8 -*-
|
||||
#
|
||||
# Copyright © 2012 New Dream Network, LLC (DreamHost)
|
||||
#
|
||||
# Author: Doug Hellmann <doug.hellmann@dreamhost.com> # noqa: H105
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from oslo_config import cfg
from oslo_serialization import jsonutils
from pecan import hooks
from webob import exc

from software.authapi.context import RequestContext
from software.authapi import policy
from software import utils


class ConfigHook(hooks.PecanHook):
    """Attach the config object to the request so controllers can get to it."""

    def before(self, state):
        state.request.cfg = cfg.CONF


class ContextHook(hooks.PecanHook):
    """Configures a request context and attaches it to the request.

    The following HTTP request headers are used:

    X-User-Id or X-User:
        Used for context.user_id.

    X-Tenant-Id or X-Tenant:
        Used for context.tenant.

    X-Auth-Token:
        Used for context.auth_token.

    X-Roles:
        Used for setting the context.is_admin flag to either True or False.
        The flag is set to True if X-Roles contains either an administrator
        or admin substring. Otherwise it is set to False.

    X-Project-Name:
        Used for context.project_name.

    """
    def __init__(self, public_api_routes):
        self.public_api_routes = public_api_routes
        super(ContextHook, self).__init__()

    def before(self, state):
        user_id = state.request.headers.get('X-User-Id')
        user_id = state.request.headers.get('X-User', user_id)
        tenant = state.request.headers.get('X-Tenant-Id')
        tenant = state.request.headers.get('X-Tenant', tenant)
        project_name = state.request.headers.get('X-Project-Name')
        domain_id = state.request.headers.get('X-User-Domain-Id')
        domain_name = state.request.headers.get('X-User-Domain-Name')
        auth_token = state.request.headers.get('X-Auth-Token', None)
        roles = state.request.headers.get('X-Roles', '').split(',')
        catalog_header = state.request.headers.get('X-Service-Catalog')
        service_catalog = None
        if catalog_header:
            try:
                service_catalog = jsonutils.loads(catalog_header)
            except ValueError:
                raise exc.HTTPInternalServerError(
                    'Invalid service catalog json.')

        credentials = {
            'project_name': project_name,
            'roles': roles
        }
        is_admin = policy.authorize('admin_in_system_projects', {},
                                    credentials, do_raise=False)

        path = utils.safe_rstrip(state.request.path, '/')
        is_public_api = path in self.public_api_routes

        state.request.context = RequestContext(
            auth_token=auth_token,
            user=user_id,
            tenant=tenant,
            domain_id=domain_id,
            domain_name=domain_name,
            is_admin=is_admin,
            is_public_api=is_public_api,
            project_name=project_name,
            roles=roles,
            service_catalog=service_catalog)


class AccessPolicyHook(hooks.PecanHook):
    """Verify that the user has the needed credentials
    to execute the action.
    """
    def before(self, state):
        context = state.request.context
        if not context.is_public_api:
            controller = state.controller.__self__
            if hasattr(controller, 'enforce_policy'):
                try:
                    controller_method = state.controller.__name__
                    controller.enforce_policy(controller_method, state.request)
                except Exception:
                    raise exc.HTTPForbidden()
            else:
                method = state.request.method
                if method == 'GET':
                    has_api_access = policy.authorize(
                        'reader_in_system_projects', {},
                        context.to_dict(), do_raise=False)
                else:
                    has_api_access = policy.authorize(
                        'admin_in_system_projects', {},
                        context.to_dict(), do_raise=False)
                if not has_api_access:
                    raise exc.HTTPForbidden()
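These hooks are attached when the authapi service builds its Pecan application. The wiring below is an editor's sketch only: the root controller path and the public_api_routes value are hypothetical, and the real wiring is done in the authapi app setup elsewhere in this change.

    import pecan

    from software.authapi.hooks import AccessPolicyHook, ConfigHook, ContextHook

    # Hooks run in order: config first, then context, then access policy
    app = pecan.make_app(
        'software.authapi.controllers.root.RootController',
        hooks=[ConfigHook(),
               ContextHook(public_api_routes=['/']),
               AccessPolicyHook()])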
83
software/software/authapi/policy.py
Executable file
@ -0,0 +1,83 @@
#
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

"""Policy Engine For Unified Software Management."""

from oslo_config import cfg
from oslo_policy import policy


base_rules = [
    policy.RuleDefault('admin_in_system_projects',
                       'role:admin and (project_name:admin or ' +
                       'project_name:services)',
                       description='Admin user in system projects.'),
    policy.RuleDefault('reader_in_system_projects',
                       'role:reader and (project_name:admin or ' +
                       'project_name:services)',
                       description='Reader user in system projects.'),
    policy.RuleDefault('default', 'rule:admin_in_system_projects',
                       description='Default rule.'),
]

CONF = cfg.CONF
_ENFORCER = None


def init(policy_file=None, rules=None,
         default_rule=None, use_conf=True, overwrite=True):
    """Init an Enforcer class.

    oslo.policy supports changing policy rules dynamically:
    policy.enforce will reload the policy rules if it detects
    that the policy files have been touched.

    :param policy_file: Custom policy file to use, if none is
                        specified, ``conf.policy_file`` will be
                        used.
    :param rules: Default dictionary / Rules to use. It will be
                  considered just in the first instantiation. If
                  :meth:`load_rules` with ``force_reload=True``,
                  :meth:`clear` or :meth:`set_rules` with
                  ``overwrite=True`` is called this will be overwritten.
    :param default_rule: Default rule to use, conf.default_rule will
                         be used if none is specified.
    :param use_conf: Whether to load rules from cache or config file.
    :param overwrite: Whether to overwrite existing rules when reloading
                      rules from the config file.
    """
    global _ENFORCER
    if not _ENFORCER:
        # https://docs.openstack.org/oslo.policy/latest/user/usage.html
        _ENFORCER = policy.Enforcer(CONF,
                                    policy_file=policy_file,
                                    rules=rules,
                                    default_rule=default_rule,
                                    use_conf=use_conf,
                                    overwrite=overwrite)
        _ENFORCER.register_defaults(base_rules)
    return _ENFORCER


def authorize(rule, target, creds, do_raise=True):
    """A wrapper around 'authorize' from 'oslo_policy.policy'."""
    init()
    return _ENFORCER.authorize(rule, target, creds, do_raise=do_raise)
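A quick illustration of how these rules combine with the credentials dictionary built in ContextHook (editor's sketch; the project and role values are hypothetical):

    from software.authapi import policy

    creds = {'project_name': 'admin', 'roles': ['admin', 'member']}
    # True: role:admin with project_name:admin satisfies admin_in_system_projects
    print(policy.authorize('admin_in_system_projects', {}, creds, do_raise=False))

    creds = {'project_name': 'demo', 'roles': ['reader']}
    # False: project_name:demo is not one of the system projects
    print(policy.authorize('reader_in_system_projects', {}, creds, do_raise=False))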
171
software/software/base.py
Normal file
@ -0,0 +1,171 @@
"""
Copyright (c) 2023 Wind River Systems, Inc.

SPDX-License-Identifier: Apache-2.0

"""

import socket
import struct
import subprocess
import sys
import time

import software.utils as utils
import software.software_config as cfg
import software.constants as constants
from software.software_functions import LOG


class PatchService(object):
    def __init__(self):
        self.sock_out = None
        self.sock_in = None
        self.service_type = None
        self.port = None
        self.mcast_addr = None
        self.socket_lock = None

    def update_config(self):
        # Implemented in subclass
        pass

    def socket_lock_acquire(self):
        pass

    def socket_lock_release(self):
        pass

    def setup_socket_ipv4(self):
        mgmt_ip = cfg.get_mgmt_ip()
        if mgmt_ip is None:
            # Don't set up the socket unless we have a mgmt ip
            return None

        self.update_config()

        interface_addr = socket.inet_pton(socket.AF_INET, mgmt_ip)

        # Close sockets, if necessary
        for s in [self.sock_out, self.sock_in]:
            if s is not None:
                s.close()

        self.sock_out = socket.socket(socket.AF_INET,
                                      socket.SOCK_DGRAM)
        self.sock_in = socket.socket(socket.AF_INET,
                                     socket.SOCK_DGRAM)

        self.sock_out.setblocking(0)
        self.sock_in.setblocking(0)

        self.sock_out.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock_in.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

        self.sock_in.bind(('', self.port))

        if self.mcast_addr:
            # These options are for outgoing multicast messages
            self.sock_out.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, interface_addr)
            self.sock_out.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 1)
            # Since only the controllers are sending to this address,
            # we want the loopback so the local agent can receive it
            self.sock_out.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 1)

            # Register the multicast group
            group = socket.inet_pton(socket.AF_INET, self.mcast_addr)
            mreq = struct.pack('=4s4s', group, interface_addr)

            self.sock_in.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)

        return self.sock_in

    def setup_socket_ipv6(self):
        mgmt_ip = cfg.get_mgmt_ip()
        if mgmt_ip is None:
            # Don't set up the socket unless we have a mgmt ip
            return None

        self.update_config()

        # Close sockets, if necessary
        for s in [self.sock_out, self.sock_in]:
            if s is not None:
                s.close()

        self.sock_out = socket.socket(socket.AF_INET6,
                                      socket.SOCK_DGRAM)
        self.sock_in = socket.socket(socket.AF_INET6,
                                     socket.SOCK_DGRAM)

        self.sock_out.setblocking(0)
        self.sock_in.setblocking(0)

        self.sock_out.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock_in.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

        self.sock_out.bind((mgmt_ip, 0))
        self.sock_in.bind(('', self.port))

        if self.mcast_addr:
            # These options are for outgoing multicast messages
            mgmt_ifindex = utils.if_nametoindex(cfg.get_mgmt_iface())
            self.sock_out.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_IF, mgmt_ifindex)
            self.sock_out.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, 1)
            # Since only the controllers are sending to this address,
            # we want the loopback so the local agent can receive it
            self.sock_out.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_LOOP, 1)

            # Register the multicast group
            if_index_packed = struct.pack('I', mgmt_ifindex)
            group = socket.inet_pton(socket.AF_INET6, self.mcast_addr) + if_index_packed
            self.sock_in.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, group)

        return self.sock_in

    def setup_socket(self):
        self.socket_lock_acquire()

        try:
            sock_in = None
            if utils.get_management_version() == constants.ADDRESS_VERSION_IPV6:
                sock_in = self.setup_socket_ipv6()
            else:
                sock_in = self.setup_socket_ipv4()
            self.socket_lock_release()
            return sock_in
        except Exception:
            LOG.exception("Failed to setup socket")

        # Close sockets, if necessary
        for s in [self.sock_out, self.sock_in]:
            if s is not None:
                s.close()

        self.socket_lock_release()

        return None

    def audit_socket(self):
        if not self.mcast_addr:
            # Multicast address not configured, therefore nothing to do
            return

        # Ensure multicast address is still allocated
        cmd = "ip maddr show %s | awk 'BEGIN {ORS=\"\"}; {if ($2 == \"%s\") print $2}'" % \
              (cfg.get_mgmt_iface(), self.mcast_addr)
        try:
            result = subprocess.check_output(cmd, shell=True).decode(sys.stdout.encoding)

            if result == self.mcast_addr:
                return
        except subprocess.CalledProcessError as e:
            LOG.error("Command output: %s", e.output)
            return

        # Close the socket and set it up again
        LOG.info("Detected missing multicast addr (%s). Reconfiguring", self.mcast_addr)
        while self.setup_socket() is None:
            LOG.info("Unable to setup sockets. Waiting to retry")
            time.sleep(5)
        LOG.info("Multicast address reconfigured")
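The socket returned by setup_socket() is non-blocking, so consumers multiplex it with select. A minimal receive loop might look like this (editor's illustration; svc stands in for a configured PatchService subclass, and the real loops live in the agent and controller daemons):

    import json
    import select

    sock_in = svc.setup_socket()
    while True:
        rlist, _, _ = select.select([sock_in], [], [], 5.0)
        if sock_in in rlist:
            data, addr = sock_in.recvfrom(1024)
            msg = json.loads(data)
            # dispatch on msg['msgtype'], as the daemons do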
51
software/software/certificates.py
Normal file
@ -0,0 +1,51 @@
"""
Copyright (c) 2023 Wind River Systems, Inc.

SPDX-License-Identifier: Apache-2.0

"""

dev_certificate = b"""-----BEGIN CERTIFICATE-----
MIIDejCCAmKgAwIBAgICEAQwDQYJKoZIhvcNAQELBQAwQjELMAkGA1UEBhMCQ0Ex
EDAOBgNVBAgMB09udGFyaW8xITAfBgNVBAoMGFdpbmQgUml2ZXIgU3lzdGVtcywg
SW5jLjAeFw0xNzA4MTgxNDM3MjlaFw0yNzA4MTYxNDM3MjlaMEExCzAJBgNVBAYT
AkNBMRAwDgYDVQQIDAdPbnRhcmlvMSAwHgYDVQQKDBdXaW5kIFJpdmVyIFN5c3Rl
bXMsIEluYzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALcs0/Te6x69
lxQOxudrF+uSC5F9r5bKUnZNWUKHyXKlN4SzZgWGs+fb/DqXIm7piuoQ6GH7GEQd
BEN1j/bwp30LZlv0Ur+8jhCvEdqsIP3vUXfv7pv0bomVs0Q8ZRI/FYZhjxYlyFKr
gZFV9WPP8S9SwfClHjaYRUudvwvjHHnnnkZ9blVFbXU0Xe83A8fWd0HNqAU1TlmK
4CeSi4FI4aRKiXJnOvgv2UoJMI57rBIVKYRUH8uuFpPofOwjOM/Rd6r3Ir+4/CX6
+/NALOBIEN6M05ZzoiyiH8NHELknQBqzNs0cXObJWpaSinAOcBnPCc7DNRwgQzjR
SdcE9FG1+LcCAwEAAaN7MHkwCQYDVR0TBAIwADAsBglghkgBhvhCAQ0EHxYdT3Bl
blNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFDRbal2KxU0hQyv4
MVnWrW96+aWoMB8GA1UdIwQYMBaAFJaLO1x8+jti7V6pLGbUyqpy0M36MA0GCSqG
SIb3DQEBCwUAA4IBAQBmcPFZzEoPtuMPCFvJ/0cmngp8yvCGxWz3JEDkdGYSCVGs
TG5e9DeltaHOk6yLvZSRY1so30GQnyB9q8v4DwEGVslKg8u9w/WEU81wl6Q2FZ5s
XRP6TASQ0Lbg9e4b3bnTITJJ8jT/zF29NaohgC2fg0UwVuldZLfa7FihJB4//OC1
UdNEcmdqTVRqN2oco1n3ZUWKXvG2AvGsoiqu+lsWX1MXacoFvJexSACLrUvOoXMW
i38Ofp7XMCAm3rM0cXv7Uc9WCrgnTWbEvDgjGfRAmcM9moWGoWX6E46Xkojpkfle
Ss6CHAMK42aZ/+MWQlZEzNK49PtomGMjn5SuoK8u
-----END CERTIFICATE-----"""

formal_certificate = b"""-----BEGIN CERTIFICATE-----
MIIDezCCAmOgAwIBAgICEAMwDQYJKoZIhvcNAQELBQAwQjELMAkGA1UEBhMCQ0Ex
EDAOBgNVBAgMB09udGFyaW8xITAfBgNVBAoMGFdpbmQgUml2ZXIgU3lzdGVtcywg
SW5jLjAeFw0xNzA4MTgxNDM1MTJaFw0yNzA4MTYxNDM1MTJaMEIxCzAJBgNVBAYT
AkNBMRAwDgYDVQQIDAdPbnRhcmlvMSEwHwYDVQQKDBhXaW5kIFJpdmVyIFN5c3Rl
bXMsIEluYy4wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC+0fS8ybg8
M37lW+lcR9LmQAR2zUJdbnl2L0fj3W/7W+PMm3mJWeQDTf19wf+qHHrgEkjxGp10
BSXWZYdPyCdOjAay/Ew1s/waFeAQZpf4vv/9D1Y/4sVkqct9ibo5NVgvVsjqKVnX
IVhyzHlhBSUqYhZlS/SOx8JcLQWSUMJoP2XR4Tv28xIXi0Fuyp8QBwUmSwmvfPy4
0yxzfON/b8kHld5aTY353KLXh/5YWsn1zRlOYfS1OuJk4LGjm6HvmZtxPNUZk4vI
NA24rH4FKkuxyM3x8aPi3LE4G6GSrJDuNi28xzOj864rlFoyLODy/mov1YMR/g4k
d3mG6UbRckPxAgMBAAGjezB5MAkGA1UdEwQCMAAwLAYJYIZIAYb4QgENBB8WHU9w
ZW5TU0wgR2VuZXJhdGVkIENlcnRpZmljYXRlMB0GA1UdDgQWBBTjyMN/AX07rEmB
6sz6pnyt/m+eSzAfBgNVHSMEGDAWgBSWiztcfPo7Yu1eqSxm1MqqctDN+jANBgkq
hkiG9w0BAQsFAAOCAQEASpyCu/adGTvNjyy/tV+sL/kaVEKLA7q36HUrzQkTjMPX
y8L8PVZoeWprkz7cvYTyHmVTPLBvFkGEFVn8LWi9fTTp/UrHnxw6fvb+V78mOypi
4A1aU9+dh3L6arpd4jZ4hDiLhEClesGCYVTVBdsrh3zSOc51nT4hosyBVpRd/VgQ
jhGJBBMEXASZceady4ajK5jnR3wF8oW/he4NYF97qh8WWKVsIYbwgLS0rT58q7qq
vpjPxMOahUdACkyPyt/XJICTlkanVD7KgG3oLWpc+3FWPHGr+F7mspPLZqUcEFDV
bGF+oDJ7p/tqHsNvPlRDVGqh0QdiAkKeS/SJC9jmAw==
-----END CERTIFICATE-----
"""
@ -18,7 +18,7 @@ from software.api.app import setup_app
LOG = logging.getLogger(__name__)

# todo(abailey): these need to be part of config
API_PORT = 5490
API_PORT = 5496
# Limit socket blocking to 5 seconds to allow for thread to shutdown
API_SOCKET_TIMEOUT = 5.0

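The hunk above changes the default API port from 5490 to 5496; per the todo, these literals are meant to migrate into configuration eventually. One possible shape, purely as an editor's sketch using oslo.config (not part of this change; the option and group names are hypothetical):

    from oslo_config import cfg

    api_opts = [
        cfg.PortOpt('api_port', default=5496,
                    help='Port the software REST API listens on'),
    ]
    cfg.CONF.register_opts(api_opts, group='api')
    API_PORT = cfg.CONF.api.api_port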
56
software/software/constants.py
Normal file
@ -0,0 +1,56 @@
"""
Copyright (c) 2023 Wind River Systems, Inc.

SPDX-License-Identifier: Apache-2.0

"""

import os
try:
    # The tsconfig module is only available at runtime
    import tsconfig.tsconfig as tsc

    INITIAL_CONFIG_COMPLETE_FLAG = os.path.join(
        tsc.PLATFORM_CONF_PATH, ".initial_config_complete")
except Exception:
    pass

CLI_OPT_ALL = '--all'
CLI_OPT_DRY_RUN = '--dry-run'
CLI_OPT_RECURSIVE = '--recursive'
CLI_OPT_RELEASE = '--release'

ADDRESS_VERSION_IPV4 = 4
ADDRESS_VERSION_IPV6 = 6
CONTROLLER_FLOATING_HOSTNAME = "controller"

AVAILABLE = 'Available'
APPLIED = 'Applied'
PARTIAL_APPLY = 'Partial-Apply'
PARTIAL_REMOVE = 'Partial-Remove'
COMMITTED = 'Committed'
UNKNOWN = 'n/a'

STATUS_OBSOLETE = 'OBS'
STATUS_RELEASED = 'REL'
STATUS_DEVELOPEMENT = 'DEV'

PATCH_AGENT_STATE_IDLE = "idle"
PATCH_AGENT_STATE_INSTALLING = "installing"
PATCH_AGENT_STATE_INSTALL_FAILED = "install-failed"
PATCH_AGENT_STATE_INSTALL_REJECTED = "install-rejected"

PATCH_STORAGE_DIR = "/opt/software"

OSTREE_REF = "starlingx"
OSTREE_REMOTE = "debian"
FEED_OSTREE_BASE_DIR = "/var/www/pages/feed"
SYSROOT_OSTREE = "/sysroot/ostree/repo"
OSTREE_BASE_DEPLOYMENT_DIR = "/ostree/deploy/debian/deploy/"
PATCH_SCRIPTS_STAGING_DIR = "/var/www/pages/updates/software-scripts"

LOOPBACK_INTERFACE_NAME = "lo"

SEMANTIC_PREAPPLY = 'pre-apply'
SEMANTIC_PREREMOVE = 'pre-remove'
SEMANTIC_ACTIONS = [SEMANTIC_PREAPPLY, SEMANTIC_PREREMOVE]
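The path constants above compose into concrete repo locations elsewhere in the tree; for example (editor's illustration mirroring ostree_utils.get_feed_latest_commit below; the version string is hypothetical):

    from software import constants

    patch_sw_version = "22.06"
    repo_path = "%s/rel-%s/ostree_repo" % (constants.FEED_OSTREE_BASE_DIR,
                                           patch_sw_version)
    # -> /var/www/pages/feed/rel-22.06/ostree_repo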
67
software/software/exceptions.py
Normal file
@ -0,0 +1,67 @@
"""
Copyright (c) 2023 Wind River Systems, Inc.

SPDX-License-Identifier: Apache-2.0

"""


class PatchError(Exception):
    """Base class for software exceptions."""

    def __init__(self, message=None):
        super(PatchError, self).__init__(message)
        self.message = message

    def __str__(self):
        return self.message or ""


class MetadataFail(PatchError):
    """Metadata error."""
    pass


class ContentFail(PatchError):
    """Content handling error."""
    pass


class OSTreeTarFail(PatchError):
    """OSTree Tarball error."""
    pass


class OSTreeCommandFail(PatchError):
    """OSTree Commands error."""
    pass


class SemanticFail(PatchError):
    """Semantic check error."""
    pass


class RepoFail(PatchError):
    """Repo error."""
    pass


class PatchFail(PatchError):
    """General patching error."""
    pass


class PatchValidationFailure(PatchError):
    """Patch validation error."""
    pass


class PatchMismatchFailure(PatchError):
    """Patch validation error."""
    pass


class PatchInvalidRequest(PatchError):
    """Invalid API request."""
    pass
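Since everything above derives from PatchError, callers can catch the base class when any software failure should be handled uniformly (editor's sketch):

    from software import ostree_utils
    from software.exceptions import PatchError

    try:
        commit = ostree_utils.get_sysroot_latest_commit()
    except PatchError as e:
        # OSTreeCommandFail and the other subclasses all land here
        print("software operation failed: %s" % e)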
66
software/software/messages.py
Normal file
@ -0,0 +1,66 @@
"""
Copyright (c) 2023 Wind River Systems, Inc.

SPDX-License-Identifier: Apache-2.0

"""

from software.software_functions import LOG

PATCHMSG_UNKNOWN = 0
PATCHMSG_HELLO = 1
PATCHMSG_HELLO_ACK = 2
PATCHMSG_SYNC_REQ = 3
PATCHMSG_SYNC_COMPLETE = 4
PATCHMSG_HELLO_AGENT = 5
PATCHMSG_HELLO_AGENT_ACK = 6
PATCHMSG_QUERY_DETAILED = 7
PATCHMSG_QUERY_DETAILED_RESP = 8
PATCHMSG_AGENT_INSTALL_REQ = 9
PATCHMSG_AGENT_INSTALL_RESP = 10
PATCHMSG_DROP_HOST_REQ = 11
PATCHMSG_SEND_LATEST_FEED_COMMIT = 12

PATCHMSG_STR = {
    PATCHMSG_UNKNOWN: "unknown",
    PATCHMSG_HELLO: "hello",
    PATCHMSG_HELLO_ACK: "hello-ack",
    PATCHMSG_SYNC_REQ: "sync-req",
    PATCHMSG_SYNC_COMPLETE: "sync-complete",
    PATCHMSG_HELLO_AGENT: "hello-agent",
    PATCHMSG_HELLO_AGENT_ACK: "hello-agent-ack",
    PATCHMSG_QUERY_DETAILED: "query-detailed",
    PATCHMSG_QUERY_DETAILED_RESP: "query-detailed-resp",
    PATCHMSG_AGENT_INSTALL_REQ: "agent-install-req",
    PATCHMSG_AGENT_INSTALL_RESP: "agent-install-resp",
    PATCHMSG_DROP_HOST_REQ: "drop-host-req",
    PATCHMSG_SEND_LATEST_FEED_COMMIT: "send-latest-feed-commit",
}


class PatchMessage(object):
    def __init__(self, msgtype=PATCHMSG_UNKNOWN):
        self.msgtype = msgtype
        self.msgversion = 1
        self.message = {}

    def decode(self, data):
        if 'msgtype' in data:
            self.msgtype = data['msgtype']
        if 'msgversion' in data:
            self.msgversion = data['msgversion']

    def encode(self):
        self.message['msgtype'] = self.msgtype
        self.message['msgversion'] = self.msgversion

    def data(self):
        return {'msgtype': self.msgtype}

    def msgtype_str(self):
        if self.msgtype in PATCHMSG_STR:
            return PATCHMSG_STR[self.msgtype]
        return "invalid-type"

    def handle(self, sock, addr):  # pylint: disable=unused-argument
        LOG.info("Unhandled message type: %s", self.msgtype)
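Messages travel between the controller and agents as JSON dictionaries; a round trip through encode()/decode() looks like this (editor's sketch):

    import json
    import software.messages as messages

    msg = messages.PatchMessage(messages.PATCHMSG_HELLO)
    msg.encode()
    wire = json.dumps(msg.message)   # '{"msgtype": 1, "msgversion": 1}'

    received = messages.PatchMessage()
    received.decode(json.loads(wire))
    print(received.msgtype_str())    # 'hello'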
324
software/software/ostree_utils.py
Normal file
@ -0,0 +1,324 @@
"""
Copyright (c) 2023 Wind River Systems, Inc.

SPDX-License-Identifier: Apache-2.0

"""
import logging
import sh
import subprocess

from software import constants
from software.exceptions import OSTreeCommandFail

LOG = logging.getLogger('main_logger')


def get_ostree_latest_commit(ostree_ref, repo_path):
    """
    Query ostree using ostree log <ref> --repo=<path>

    :param ostree_ref: the ostree ref.
        example: starlingx
    :param repo_path: the path to the ostree repo:
        example: /var/www/pages/feed/rel-22.06/ostree_repo
    :return: The most recent commit of the repo
    """

    # Sample command and output that is parsed to get the commit
    #
    # Command: ostree log starlingx --repo=/var/www/pages/feed/rel-22.02/ostree_repo
    #
    # Output:
    #
    # commit 478bc21c1702b9b667b5a75fac62a3ef9203cc1767cbe95e89dface6dc7f205e
    # ContentChecksum: 61fc5bb4398d73027595a4d839daeb404200d0899f6e7cdb24bb8fb6549912ba
    # Date: 2022-04-28 18:58:57 +0000
    #
    # Commit-id: starlingx-intel-x86-64-20220428185802
    #
    # commit ad7057a94a1d06e38eaedee2ce3fe56826ae817497469bce5d5ac05bc506aaa7
    # ContentChecksum: dc42a42427a4f9e4de1210327c12b12ea3ad6a5d232497a903cc6478ca381e8b
    # Date: 2022-04-28 18:05:43 +0000
    #
    # Commit-id: starlingx-intel-x86-64-20220428180512

    cmd = "ostree log %s --repo=%s" % (ostree_ref, repo_path)
    try:
        output = subprocess.run(cmd, shell=True, check=True, capture_output=True)
    except subprocess.CalledProcessError as e:
        info_msg = "OSTree log Error: return code: %s , Output: %s" \
                   % (e.returncode, e.stderr.decode("utf-8"))
        LOG.info(info_msg)
        msg = "Failed to fetch ostree log for %s." % repo_path
        raise OSTreeCommandFail(msg)
    # Store the output of the above command in a string
    output_string = output.stdout.decode('utf-8')

    # Parse the string to get the latest commit for the ostree
    split_output_string = output_string.split()
    latest_commit = split_output_string[1]
    return latest_commit


def get_feed_latest_commit(patch_sw_version):
    """
    Query ostree feed using ostree log <ref> --repo=<path>

    :param patch_sw_version: software version for the feed
        example: 22.06
    :return: The latest commit for the feed repo
    """
    repo_path = "%s/rel-%s/ostree_repo" % (constants.FEED_OSTREE_BASE_DIR,
                                           patch_sw_version)
    return get_ostree_latest_commit(constants.OSTREE_REF, repo_path)


def get_sysroot_latest_commit():
    """
    Query ostree sysroot to determine the currently active commit
    :return: The latest commit for sysroot repo
    """
    return get_ostree_latest_commit(constants.OSTREE_REF, constants.SYSROOT_OSTREE)


def get_latest_deployment_commit():
    """
    Get the active deployment commit ID
    :return: The commit ID associated with the active commit
    """

    # Sample command and output that is parsed to get the active commit
    # associated with the deployment
    #
    # Command: ostree admin status
    #
    # Output:
    #
    # debian 0658a62854647b89caf5c0e9ed6ff62a6c98363ada13701d0395991569248d7e.0 (pending)
    #   origin refspec: starlingx
    # * debian a5d8f8ca9bbafa85161083e9ca2259ff21e5392b7595a67f3bc7e7ab8cb583d9.0
    #   Unlocked: hotfix
    #   origin refspec: starlingx

    cmd = "ostree admin status"

    try:
        output = subprocess.run(cmd, shell=True, check=True, capture_output=True)
    except subprocess.CalledProcessError as e:
        msg = "Failed to fetch ostree admin status."
        info_msg = "OSTree Admin Status Error: return code: %s , Output: %s" \
                   % (e.returncode, e.stderr.decode("utf-8"))
        LOG.info(info_msg)
        raise OSTreeCommandFail(msg)

    # Store the output of the above command in a string
    output_string = output.stdout.decode('utf-8')

    # Parse the string to get the active commit on this deployment
    # Trim everything before * as * represents the active deployment commit
    trimmed_output_string = output_string[output_string.index("*"):]
    split_output_string = trimmed_output_string.split()
    active_deployment_commit = split_output_string[2]
    return active_deployment_commit


def update_repo_summary_file(repo_path):
    """
    Updates the summary file for the specified ostree repo
    :param repo_path: the path to the ostree repo:
        example: /var/www/pages/feed/rel-22.06/ostree_repo
    """
    cmd = "ostree summary --update --repo=%s" % repo_path

    try:
        subprocess.run(cmd, shell=True, check=True, capture_output=True)
    except subprocess.CalledProcessError as e:
        msg = "Failed to update summary file for ostree repo %s." % (repo_path)
        info_msg = "OSTree Summary Update Error: return code: %s , Output: %s" \
                   % (e.returncode, e.stderr.decode("utf-8"))
        LOG.info(info_msg)
        raise OSTreeCommandFail(msg)


def reset_ostree_repo_head(commit, repo_path):
    """
    Resets the ostree repo HEAD to the commit that is specified
    :param commit: an existing commit on the ostree repo which we need the HEAD to point to
        example: 478bc21c1702b9b667b5a75fac62a3ef9203cc1767cbe95e89dface6dc7f205e
    :param repo_path: the path to the ostree repo:
        example: /var/www/pages/feed/rel-22.06/ostree_repo
    """
    cmd = "ostree reset %s %s --repo=%s" % (constants.OSTREE_REF, commit, repo_path)
    try:
        subprocess.run(cmd, shell=True, check=True, capture_output=True)
    except subprocess.CalledProcessError as e:
        msg = "Failed to reset head of ostree repo: %s to commit: %s" % (repo_path, commit)
        info_msg = "OSTree Reset Error: return code: %s , Output: %s" \
                   % (e.returncode, e.stderr.decode("utf-8"))
        LOG.info(info_msg)
        raise OSTreeCommandFail(msg)


def pull_ostree_from_remote():
    """
    Pull from remote ostree to sysroot ostree
    """

    cmd = "ostree pull %s --depth=-1" % constants.OSTREE_REMOTE

    try:
        subprocess.run(cmd, shell=True, check=True, capture_output=True)
    except subprocess.CalledProcessError as e:
        msg = "Failed to pull from %s remote into sysroot ostree" % constants.OSTREE_REMOTE
        info_msg = "OSTree Pull Error: return code: %s , Output: %s" \
                   % (e.returncode, e.stderr.decode("utf-8"))
        LOG.info(info_msg)
        raise OSTreeCommandFail(msg)


def delete_ostree_repo_commit(commit, repo_path):
    """
    Delete the specified commit from the ostree repo
    :param commit: an existing commit on the ostree repo which we need to delete
        example: 478bc21c1702b9b667b5a75fac62a3ef9203cc1767cbe95e89dface6dc7f205e
    :param repo_path: the path to the ostree repo:
        example: /var/www/pages/feed/rel-22.06/ostree_repo
    """

    cmd = "ostree prune --delete-commit %s --repo=%s" % (commit, repo_path)
    try:
        subprocess.run(cmd, shell=True, check=True, capture_output=True)
    except subprocess.CalledProcessError as e:
        msg = "Failed to delete commit %s from ostree repo %s" % (commit, repo_path)
        info_msg = "OSTree Delete Commit Error: return code: %s , Output: %s" \
                   % (e.returncode, e.stderr.decode("utf-8"))
        LOG.info(info_msg)
        raise OSTreeCommandFail(msg)


def create_deployment():
    """
    Create a new deployment while retaining the previous ones
    """

    cmd = "ostree admin deploy %s --no-prune --retain" % constants.OSTREE_REF
    try:
        subprocess.run(cmd, shell=True, check=True, capture_output=True)
    except subprocess.CalledProcessError as e:
        msg = "Failed to create an ostree deployment for sysroot ref %s." % constants.OSTREE_REF
        info_msg = "OSTree Deployment Error: return code: %s , Output: %s" \
                   % (e.returncode, e.stderr.decode("utf-8"))
        LOG.info(info_msg)
        raise OSTreeCommandFail(msg)


def fetch_pending_deployment():
    """
    Fetch the deployment ID of the pending deployment
    :return: The deployment ID of the pending deployment
    """

    cmd = "ostree admin status | grep pending |awk '{printf $2}'"

    try:
        output = subprocess.run(cmd, shell=True, check=True, capture_output=True)
    except subprocess.CalledProcessError as e:
        msg = "Failed to fetch ostree admin status."
        info_msg = "OSTree Admin Status Error: return code: %s , Output: %s" \
                   % (e.returncode, e.stderr.decode("utf-8"))
        LOG.info(info_msg)
        raise OSTreeCommandFail(msg)

    # Store the output of the above command in a string
    pending_deployment = output.stdout.decode('utf-8')

    return pending_deployment


def mount_new_deployment(deployment_dir):
    """
    Unmount /usr and /etc from the file system and remount them to
    <deployment_dir>/usr and <deployment_dir>/etc respectively
    :param deployment_dir: a path on the filesystem which points to the pending
        deployment
        example: /ostree/deploy/debian/deploy/<deployment_id>
    """
    try:
        new_usr_mount_dir = "%s/usr" % (deployment_dir)
        new_etc_mount_dir = "%s/etc" % (deployment_dir)
        sh.mount("--bind", "-o", "ro,noatime", new_usr_mount_dir, "/usr")
        sh.mount("--bind", "-o", "rw,noatime", new_etc_mount_dir, "/etc")
    except sh.ErrorReturnCode as e:
        msg = "Failed to re-mount /usr and /etc."
        info_msg = "OSTree Deployment Mount Error: Output: %s" \
                   % (e.stderr.decode("utf-8"))
        LOG.info(info_msg)
        raise OSTreeCommandFail(msg)
    finally:
        try:
            sh.mount("/usr/local/kubernetes/current/stage1")
            sh.mount("/usr/local/kubernetes/current/stage2")
        except sh.ErrorReturnCode:
            msg = "Failed to mount kubernetes. Please manually run these commands:\n" \
                  "sudo mount /usr/local/kubernetes/current/stage1\n" \
                  "sudo mount /usr/local/kubernetes/current/stage2\n"
            LOG.info(msg)


def delete_older_deployments():
    """
    Delete all older deployments after a reboot to save space
    """
    # Sample command and output that is parsed to get the list of
    # deployment IDs
    #
    # Command: ostree admin status | grep debian
    #
    # Output:
    #
    # * debian 3334dc80691a38c0ba6c519ec4b4b449f8420e98ac4d8bded3436ade56bb229d.2
    # debian 3334dc80691a38c0ba6c519ec4b4b449f8420e98ac4d8bded3436ade56bb229d.1 (rollback)
    # debian 3334dc80691a38c0ba6c519ec4b4b449f8420e98ac4d8bded3436ade56bb229d.0

    LOG.info("Inside delete_older_deployments of ostree_utils")
    cmd = "ostree admin status | grep debian"

    try:
        output = subprocess.run(cmd, shell=True, check=True, capture_output=True)
    except subprocess.CalledProcessError as e:
        msg = "Failed to fetch ostree admin status."
        info_msg = "OSTree Admin Status Error: return code: %s , Output: %s" \
                   % (e.returncode, e.stderr.decode("utf-8"))
        LOG.info(info_msg)
        raise OSTreeCommandFail(msg)

    # Store the output of the above command in a string
    output_string = output.stdout.decode('utf-8')

    # Parse the string to get the list of deployment IDs
    split_output_string = output_string.split()
    deployment_id_list = []
    for index, deployment_id in enumerate(split_output_string):
        if deployment_id == "debian":
            deployment_id_list.append(split_output_string[index + 1])

    # After a reboot, the deployment ID at the 0th index of the list
    # is always the active deployment and the deployment ID at the
    # 1st index of the list is always the fallback deployment.
    # We want to delete all deployments except the two mentioned above.
    # This means we will undeploy all deployments starting from the
    # 2nd index of deployment_id_list

    for index in reversed(range(2, len(deployment_id_list))):
        try:
            cmd = "ostree admin undeploy %s" % index
            output = subprocess.run(cmd, shell=True, check=True, capture_output=True)
            info_log = "Deleted ostree deployment %s" % deployment_id_list[index]
            LOG.info(info_log)
        except subprocess.CalledProcessError as e:
            msg = "Failed to undeploy ostree deployment %s." % deployment_id_list[index]
            info_msg = "OSTree Undeploy Error: return code: %s , Output: %s" \
                       % (e.returncode, e.stderr.decode("utf-8"))
            LOG.info(info_msg)
            raise OSTreeCommandFail(msg)
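Putting the helpers together: the agent decides whether a host is out of date by comparing the feed and sysroot commits, then pulls and deploys. The sketch below is an editor's illustration of that composition (the version string is hypothetical; the real logic lives in software_agent.PatchAgent further below):

    from software import ostree_utils

    feed_commit = ostree_utils.get_feed_latest_commit("22.06")
    sysroot_commit = ostree_utils.get_sysroot_latest_commit()
    if feed_commit != sysroot_commit:
        # host needs the update: pull, deploy, then mount the new deployment
        ostree_utils.pull_ostree_from_remote()
        ostree_utils.create_deployment()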
85
software/software/release_signing.py
Normal file
@ -0,0 +1,85 @@
"""
Copyright (c) 2023 Wind River Systems, Inc.

SPDX-License-Identifier: Apache-2.0

"""

import os
from Cryptodome.Signature import PKCS1_PSS
from Cryptodome.Hash import SHA256
from software import release_verify

# To save memory, read and hash 1M of files at a time
default_blocksize = 1 * 1024 * 1024

# When we sign patches, look for private keys in the following paths
#
# The (currently hardcoded) path on the signing server will be replaced
# by the capability to specify filename from calling function.
private_key_files = {release_verify.cert_type_formal_str: '/signing/keys/formal-private-key.pem',
                     release_verify.cert_type_dev_str: os.path.expandvars('$MY_REPO/build-tools/signing/dev-private-key.pem')
                     }


def sign_files(filenames, signature_file, private_key=None, cert_type=None):
    """
    Utility function for signing data in files.
    :param filenames: A list of files containing the data to be signed
    :param signature_file: The name of the file to which the signature will be
                           stored
    :param private_key: If specified, sign with this private key. Otherwise,
                        the files in private_key_files will be searched for
                        and used, if found.
    :param cert_type: If specified, and private_key is not specified, sign
                      with a key of the specified type. e.g. 'dev' or 'formal'
    """

    # Hash the data across all files
    blocksize = default_blocksize
    data_hash = SHA256.new()
    for filename in filenames:
        with open(filename, 'rb') as infile:
            data = infile.read(blocksize)
            while len(data) > 0:
                data_hash.update(data)
                data = infile.read(blocksize)

    # Find a private key to use, if not already provided
    need_resign_with_formal = False
    if private_key is None:
        if cert_type is not None:
            # A specific key is asked for
            assert (cert_type in list(private_key_files)), "cert_type=%s is not a known cert type" % cert_type
            dict_key = cert_type
            filename = private_key_files[dict_key]
            # print 'cert_type given: Checking to see if ' + filename + ' exists\n'
            if not os.path.exists(filename) and dict_key == release_verify.cert_type_formal_str:
                # The formal key is asked for, but is not locally available,
                # substitute the dev key, and we will try to resign with the formal later.
                dict_key = release_verify.cert_type_dev_str
                filename = private_key_files[dict_key]
                need_resign_with_formal = True
            if os.path.exists(filename):
                # print 'Getting private key from ' + filename + '\n'
                private_key = release_verify.read_RSA_key(open(filename, 'rb').read())
        else:
            # Search for available keys
            for dict_key in private_key_files.keys():
                filename = private_key_files[dict_key]
                # print 'Search for available keys: Checking to see if ' + filename + ' exists\n'
                if os.path.exists(filename):
                    # print 'Getting private key from ' + filename + '\n'
                    private_key = release_verify.read_RSA_key(open(filename, 'rb').read())

    assert (private_key is not None), "Could not find signing key"

    # Encrypt the hash (sign the data) with the key we find
    signer = PKCS1_PSS.new(private_key)
    signature = signer.sign(data_hash)

    # Save it
    with open(signature_file, 'wb') as outfile:
        outfile.write(signature)

    return need_resign_with_formal
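sign_files returns True when it had to fall back to the dev key for a formal request, signalling the caller to re-sign later on the signing server. A usage sketch (editor's illustration; the file names are hypothetical):

    from software import release_signing

    need_formal = release_signing.sign_files(
        ["software.patch"], "software.patch.sig", cert_type="formal")
    if need_formal:
        print("signed with dev key; re-sign on the signing server")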
191
software/software/release_verify.py
Normal file
@ -0,0 +1,191 @@
"""
Copyright (c) 2023 Wind River Systems, Inc.

SPDX-License-Identifier: Apache-2.0

"""

import os
import logging

from Cryptodome.Signature import PKCS1_v1_5
from Cryptodome.Signature import PKCS1_PSS
from Cryptodome.Hash import SHA256
from Cryptodome.PublicKey import RSA
from Cryptodome.Util.asn1 import DerSequence
from binascii import a2b_base64

from software.certificates import dev_certificate
from software.certificates import formal_certificate

# To save memory, read and hash 1M of files at a time
default_blocksize = 1 * 1024 * 1024

dev_certificate_marker = '/etc/pki/wrs/dev_certificate_enable.bin'
DEV_CERT_CONTENTS = b'Titanium patching'
LOG = logging.getLogger('main_logger')

cert_type_dev_str = 'dev'
cert_type_formal_str = 'formal'
cert_type_dev = [cert_type_dev_str]
cert_type_formal = [cert_type_formal_str]
cert_type_all = [cert_type_dev_str, cert_type_formal_str]


def verify_hash(data_hash, signature_bytes, certificate_list):
    """
    Checks that a hash's signature can be validated against an approved
    certificate
    :param data_hash: A hash of the data to be validated
    :param signature_bytes: A pre-generated signature (typically, the hash
                            encrypted with a private key)
    :param certificate_list: A list of approved certificates or public keys
                             which the signature is validated against
    :return: True if the signature was validated against a certificate
    """
    verified = False
    for cert in certificate_list:
        if verified:
            break
        pub_key = read_RSA_key(cert)
        pub_key.exportKey()

        # PSS is the recommended signature scheme, but some tools (like OpenSSL)
        # use the older v1_5 scheme. We try to validate against both.
        #
        # We use PSS for patch validation, but use v1_5 for ISO validation
        # since we want to generate detached sigs that a customer can validate
        # with OpenSSL
        verifier = PKCS1_PSS.new(pub_key)
        try:
            verified = verifier.verify(data_hash, signature_bytes)  # pylint: disable=not-callable
        except ValueError:
            verified = False

        if not verified:
            verifier = PKCS1_v1_5.new(pub_key)
            try:
                verified = verifier.verify(data_hash, signature_bytes)  # pylint: disable=not-callable
            except ValueError:
                verified = False

    return verified


def get_public_certificates_by_type(cert_type=None):
    """
    Builds a list of accepted certificates which can be used to validate
    further things. This list may contain multiple certificates depending on
    the configuration of the system and the value of cert_type.

    :param cert_type: A list of strings, certificate types to include in list
                      'formal' - include formal certificate if available
                      'dev' - include developer certificate if available
    :return: A list of certificates in PEM format
    """

    if cert_type is None:
        cert_type = cert_type_all

    cert_list = []

    if cert_type_formal_str in cert_type:
        cert_list.append(formal_certificate)

    if cert_type_dev_str in cert_type:
        cert_list.append(dev_certificate)

    return cert_list


def get_public_certificates():
    """
    Builds a list of accepted certificates which can be used to validate
    further things. This list may contain multiple certificates depending on
    the configuration of the system (for instance, should we include the
    developer certificate in the list).
    :return: A list of certificates in PEM format
    """
    cert_list = [formal_certificate]

    # We enable the dev certificate based on the presence of a file. This file
    # contains a hash of an arbitrary string ('Titanium patching') which has been
    # encrypted with our formal private key. If the file is present (and valid)
    # then we add the developer key to the approved certificates list
    if os.path.exists(dev_certificate_marker):
        with open(dev_certificate_marker, 'rb') as infile:
            signature = infile.read()
        data_hash = SHA256.new(DEV_CERT_CONTENTS)
        if verify_hash(data_hash, signature, cert_list):
            cert_list.append(dev_certificate)
        else:
            msg = "Invalid data found in " + dev_certificate_marker
            LOG.error(msg)

    return cert_list


def read_RSA_key(key_data):
    """
    Utility function for reading an RSA key half from encoded data
    :param key_data: PEM data containing raw key or X.509 certificate
    :return: An RSA key object
    """
    try:
        # Handle data that is just a raw key
        key = RSA.importKey(key_data)
    except ValueError:
        # The RSA.importKey function cannot read X.509 certificates directly
        # (depending on the version of the Crypto library). Instead, we
        # may need to extract the key from the certificate before building
        # the key object
        #
        # We need to strip the BEGIN and END lines from PEM first
        x509lines = key_data.replace(' ', '').split()
        x509text = ''.join(x509lines[1:-1])
        x509data = DerSequence()
        x509data.decode(a2b_base64(x509text))

        # X.509 contains a few parts. The first part (index 0) is the
        # certificate itself, (TBS or "to be signed" cert) and the 7th field
        # of that cert is subjectPublicKeyInfo, which can be imported.
        # RFC3280
        tbsCert = DerSequence()
        tbsCert.decode(x509data[0])

        # Initialize RSA key from the subjectPublicKeyInfo field
        key = RSA.importKey(tbsCert[6])
    return key


def verify_files(filenames, signature_file, cert_type=None):
    """
    Verify data files against a detached signature.
    :param filenames: A list of files containing the data which was signed
    :param signature_file: The name of the file containing the signature
    :param cert_type: Only use the specified certificate type to verify (dev/formal)
    :return: True if the signature was verified, False otherwise
    """

    # Hash the data across all files
    blocksize = default_blocksize
    data_hash = SHA256.new()
    for filename in filenames:
        with open(filename, 'rb') as infile:
            data = infile.read(blocksize)
            while len(data) > 0:
                data_hash.update(data)
                data = infile.read(blocksize)

    # Get the signature
    with open(signature_file, 'rb') as sig_file:
        signature_bytes = sig_file.read()

    # Verify the signature
    if cert_type is None:
        certificate_list = get_public_certificates()
    else:
        certificate_list = get_public_certificates_by_type(cert_type=cert_type)
    return verify_hash(data_hash, signature_bytes, certificate_list)
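Together with release_signing above, this gives a detached-signature round trip (editor's sketch; paths are hypothetical, and it assumes the dev private key on the build system pairs with the embedded dev certificate):

    from software import release_signing
    from software import release_verify

    release_signing.sign_files(["payload.bin"], "payload.sig", cert_type="dev")
    ok = release_verify.verify_files(["payload.bin"], "payload.sig",
                                     cert_type=release_verify.cert_type_dev)
    print("signature valid:", ok)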
744
software/software/software_agent.py
Normal file
@ -0,0 +1,744 @@
|
||||
"""
|
||||
Copyright (c) 2023 Wind River Systems, Inc.
|
||||
|
||||
SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
"""
|
||||
import json
|
||||
import os
|
||||
import random
|
||||
import requests
|
||||
import select
|
||||
import shutil
|
||||
import socket
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
|
||||
from software import ostree_utils
|
||||
from software.software_functions import configure_logging
|
||||
from software.software_functions import LOG
|
||||
import software.software_config as cfg
|
||||
from software.base import PatchService
|
||||
from software.exceptions import OSTreeCommandFail
|
||||
import software.utils as utils
|
||||
import software.messages as messages
|
||||
import software.constants as constants
|
||||
|
||||
from tsconfig.tsconfig import http_port
|
||||
from tsconfig.tsconfig import install_uuid
|
||||
from tsconfig.tsconfig import subfunctions
|
||||
from tsconfig.tsconfig import SW_VERSION
|
||||
|
||||
pidfile_path = "/var/run/software_agent.pid"
|
||||
agent_running_after_reboot_flag = \
|
||||
"/var/run/software_agent_running_after_reboot"
|
||||
node_is_patched_file = "/var/run/node_is_patched"
|
||||
node_is_patched_rr_file = "/var/run/node_is_patched_rr"
|
||||
patch_installing_file = "/var/run/patch_installing"
|
||||
patch_failed_file = "/var/run/patch_install_failed"
|
||||
node_is_locked_file = "/var/run/.node_locked"
|
||||
ostree_pull_completed_deployment_pending_file = \
|
||||
"/var/run/ostree_pull_completed_deployment_pending"
|
||||
mount_pending_file = "/var/run/mount_pending"
|
||||
insvc_patch_scripts = "/run/software/software-scripts"
|
||||
insvc_patch_flags = "/run/software/software-flags"
|
||||
insvc_patch_restart_agent = "/run/software/.restart.software-agent"
|
||||
|
||||
run_insvc_patch_scripts_cmd = "/usr/sbin/run-software-scripts"
|
||||
|
||||
pa = None
|
||||
|
||||
http_port_real = http_port
|
||||
|
||||
|
||||
def setflag(fname):
|
||||
try:
|
||||
with open(fname, "w") as f:
|
||||
f.write("%d\n" % os.getpid())
|
||||
except Exception:
|
||||
LOG.exception("Failed to update %s flag", fname)
|
||||
|
||||
|
||||
def clearflag(fname):
|
||||
if os.path.exists(fname):
|
||||
try:
|
||||
os.remove(fname)
|
||||
except Exception:
|
||||
LOG.exception("Failed to clear %s flag", fname)
|
||||
|
||||
|
||||
def pull_restart_scripts_from_controller():
|
||||
# If the rsync fails, it raises an exception to
|
||||
# the caller "handle_install()" and fails the
|
||||
# host-install request for this host
|
||||
output = subprocess.check_output(["rsync",
|
||||
"-acv",
|
||||
"--delete",
|
||||
"--exclude", "tmp",
|
||||
"rsync://controller/repo/patch-scripts/",
|
||||
"%s/" % insvc_patch_scripts],
|
||||
stderr=subprocess.STDOUT)
|
||||
LOG.info("Synced restart scripts from controller: %s", output)
|
||||
|
||||
|
||||
def check_install_uuid():
|
||||
controller_install_uuid_url = "http://controller:%s/feed/rel-%s/install_uuid" % (http_port_real, SW_VERSION)
|
||||
try:
|
||||
req = requests.get(controller_install_uuid_url)
|
||||
if req.status_code != 200:
|
||||
# If we're on controller-1, controller-0 may not have the install_uuid
|
||||
# matching this release, if we're in an upgrade. If the file doesn't exist,
|
||||
# bypass this check
|
||||
if socket.gethostname() == "controller-1":
|
||||
return True
|
||||
|
||||
LOG.error("Failed to get install_uuid from controller")
|
||||
return False
|
||||
except requests.ConnectionError:
|
||||
LOG.error("Failed to connect to controller")
|
||||
return False
|
||||
|
||||
controller_install_uuid = str(req.text).rstrip()
|
||||
|
||||
if install_uuid != controller_install_uuid:
|
||||
LOG.error("Local install_uuid=%s doesn't match controller=%s", install_uuid, controller_install_uuid)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
class PatchMessageSendLatestFeedCommit(messages.PatchMessage):
|
||||
def __init__(self):
|
||||
messages.PatchMessage.__init__(self, messages.PATCHMSG_SEND_LATEST_FEED_COMMIT)
|
||||
|
||||
def decode(self, data):
|
||||
global pa
|
||||
messages.PatchMessage.decode(self, data)
|
||||
if 'latest_feed_commit' in data:
|
||||
pa.latest_feed_commit = data['latest_feed_commit']
|
||||
|
||||
def encode(self):
|
||||
messages.PatchMessage.encode(self)
|
||||
|
||||
def handle(self, sock, addr):
|
||||
global pa
|
||||
# Check if the node is patch current
|
||||
pa.query()
|
||||
|
||||
|
||||
class PatchMessageHelloAgent(messages.PatchMessage):
|
||||
def __init__(self):
|
||||
messages.PatchMessage.__init__(self, messages.PATCHMSG_HELLO_AGENT)
|
||||
self.patch_op_counter = 0
|
||||
|
||||
def decode(self, data):
|
||||
messages.PatchMessage.decode(self, data)
|
||||
if 'patch_op_counter' in data:
|
||||
self.patch_op_counter = data['patch_op_counter']
|
||||
|
||||
def encode(self):
|
||||
messages.PatchMessage.encode(self)
|
||||
|
||||
def handle(self, sock, addr):
|
||||
# Send response
|
||||
|
||||
#
|
||||
# If a user tries to do a host-install on an unlocked node,
|
||||
# without bypassing the lock check (either via in-service
|
||||
# patch or --force option), the agent will set its state
|
||||
# to Install-Rejected in order to report back the rejection.
|
||||
# However, since this should just be a transient state,
|
||||
# we don't want the client reporting the Install-Rejected
|
||||
# state indefinitely, so reset it to Idle after a minute or so.
|
||||
#
|
||||
if pa.state == constants.PATCH_AGENT_STATE_INSTALL_REJECTED:
|
||||
if os.path.exists(node_is_locked_file):
|
||||
# Node has been locked since rejected attempt. Reset the state
|
||||
pa.state = constants.PATCH_AGENT_STATE_IDLE
|
||||
elif (time.time() - pa.rejection_timestamp) > 60:
|
||||
# Rejected state for more than a minute. Reset it.
|
||||
pa.state = constants.PATCH_AGENT_STATE_IDLE
|
||||
|
||||
if self.patch_op_counter > 0:
|
||||
pa.handle_patch_op_counter(self.patch_op_counter)
|
||||
|
||||
resp = PatchMessageHelloAgentAck()
|
||||
resp.send(sock)
|
||||
|
||||
def send(self, sock): # pylint: disable=unused-argument
|
||||
LOG.error("Should not get here")
|
||||
|
||||
|
||||
class PatchMessageHelloAgentAck(messages.PatchMessage):
|
||||
def __init__(self):
|
||||
messages.PatchMessage.__init__(self, messages.PATCHMSG_HELLO_AGENT_ACK)
|
||||
|
||||
def encode(self):
|
||||
global pa
|
||||
messages.PatchMessage.encode(self)
|
||||
self.message['query_id'] = pa.query_id
|
||||
self.message['out_of_date'] = pa.changes
|
||||
self.message['hostname'] = socket.gethostname()
|
||||
self.message['requires_reboot'] = pa.node_is_patched
|
||||
self.message['patch_failed'] = pa.patch_failed
|
||||
self.message['sw_version'] = SW_VERSION
|
||||
self.message['state'] = pa.state
|
||||
|
||||
def handle(self, sock, addr):
|
||||
LOG.error("Should not get here")
|
||||
|
||||
def send(self, sock):
|
||||
global pa
|
||||
self.encode()
|
||||
message = json.dumps(self.message)
|
||||
sock.sendto(str.encode(message), (pa.controller_address, cfg.controller_port))
|
||||
|
||||
|
||||
class PatchMessageQueryDetailed(messages.PatchMessage):
|
||||
def __init__(self):
|
||||
messages.PatchMessage.__init__(self, messages.PATCHMSG_QUERY_DETAILED)
|
||||
|
||||
def decode(self, data):
|
||||
messages.PatchMessage.decode(self, data)
|
||||
|
||||
def encode(self):
|
||||
# Nothing to add to the HELLO_AGENT, so just call the super class
|
||||
messages.PatchMessage.encode(self)
|
||||
|
||||
def handle(self, sock, addr):
|
||||
# Send response
|
||||
LOG.info("Handling detailed query")
|
||||
resp = PatchMessageQueryDetailedResp()
|
||||
resp.send(sock)
|
||||
|
||||
def send(self, sock): # pylint: disable=unused-argument
|
||||
LOG.error("Should not get here")
|
||||
|
||||
|
||||
class PatchMessageQueryDetailedResp(messages.PatchMessage):
|
||||
def __init__(self):
|
||||
messages.PatchMessage.__init__(self, messages.PATCHMSG_QUERY_DETAILED_RESP)
|
||||
|
||||
def encode(self):
|
||||
global pa
|
||||
messages.PatchMessage.encode(self)
|
||||
self.message['latest_sysroot_commit'] = pa.latest_sysroot_commit
|
||||
self.message['nodetype'] = cfg.nodetype
|
||||
self.message['sw_version'] = SW_VERSION
|
||||
self.message['subfunctions'] = subfunctions
|
||||
self.message['state'] = pa.state
|
||||
|
||||
def handle(self, sock, addr):
|
||||
LOG.error("Should not get here")
|
||||
|
||||
def send(self, sock):
|
||||
self.encode()
|
||||
message = json.dumps(self.message)
|
||||
sock.sendall(str.encode(message))
|
||||
|
||||
|
||||
class PatchMessageAgentInstallReq(messages.PatchMessage):
|
||||
def __init__(self):
|
||||
messages.PatchMessage.__init__(self, messages.PATCHMSG_AGENT_INSTALL_REQ)
|
||||
self.force = False
|
||||
|
||||
def decode(self, data):
|
||||
messages.PatchMessage.decode(self, data)
|
||||
if 'force' in data:
|
||||
self.force = data['force']
|
||||
|
||||
def encode(self):
|
||||
# Nothing to add to the HELLO_AGENT, so just call the super class
|
||||
messages.PatchMessage.encode(self)
|
||||
|
||||
def handle(self, sock, addr):
|
||||
LOG.info("Handling host install request, force=%s", self.force)
|
||||
global pa
|
||||
resp = PatchMessageAgentInstallResp()
|
||||
|
||||
if not self.force:
|
||||
setflag(node_is_patched_rr_file)
|
||||
|
||||
if not os.path.exists(node_is_locked_file):
|
||||
if self.force:
|
||||
LOG.info("Installing on unlocked node, with force option")
|
||||
else:
|
||||
LOG.info("Rejecting install request on unlocked node")
|
||||
pa.state = constants.PATCH_AGENT_STATE_INSTALL_REJECTED
|
||||
pa.rejection_timestamp = time.time()
|
||||
resp.status = False
|
||||
resp.reject_reason = 'Node must be locked.'
|
||||
resp.send(sock, addr)
|
||||
return
|
||||
resp.status = pa.handle_install()
|
||||
resp.send(sock, addr)
|
||||
|
||||
def send(self, sock): # pylint: disable=unused-argument
|
||||
LOG.error("Should not get here")
|
||||
|
||||
|
||||
class PatchMessageAgentInstallResp(messages.PatchMessage):
|
||||
def __init__(self):
|
||||
messages.PatchMessage.__init__(self, messages.PATCHMSG_AGENT_INSTALL_RESP)
|
||||
self.status = False
|
||||
self.reject_reason = None
|
||||
|
||||
def encode(self):
|
||||
global pa
|
||||
messages.PatchMessage.encode(self)
|
||||
self.message['status'] = self.status
|
||||
if self.reject_reason is not None:
|
||||
self.message['reject_reason'] = self.reject_reason
|
||||
|
||||
def handle(self, sock, addr):
|
||||
LOG.error("Should not get here")
|
||||
|
||||
def send(self, sock, addr):
|
||||
address = (addr[0], cfg.controller_port)
|
||||
self.encode()
|
||||
message = json.dumps(self.message)
|
||||
sock.sendto(str.encode(message), address)
|
||||
|
||||
# Send a hello ack to follow it
|
||||
resp = PatchMessageHelloAgentAck()
|
||||
resp.send(sock)
|
||||
|
||||
|
||||
class PatchAgent(PatchService):
    def __init__(self):
        PatchService.__init__(self)
        self.sock_out = None
        self.sock_in = None
        self.controller_address = None
        self.listener = None
        self.changes = False
        self.latest_feed_commit = None
        self.latest_sysroot_commit = None
        self.patch_op_counter = 0
        self.node_is_patched = os.path.exists(node_is_patched_file)
        self.node_is_patched_timestamp = 0
        self.query_id = 0
        self.state = constants.PATCH_AGENT_STATE_IDLE
        self.last_config_audit = 0
        self.rejection_timestamp = 0
        self.last_repo_revision = None

        # Check state flags
        if os.path.exists(patch_installing_file):
            # We restarted while installing. Change to failed
            setflag(patch_failed_file)
            os.remove(patch_installing_file)

        if os.path.exists(patch_failed_file):
            self.state = constants.PATCH_AGENT_STATE_INSTALL_FAILED

        self.patch_failed = os.path.exists(patch_failed_file)

    def update_config(self):
        cfg.read_config()

        if self.port != cfg.agent_port:
            self.port = cfg.agent_port

        # Loopback interface does not support multicast messaging, therefore
        # revert to using unicast messaging when configured against the
        # loopback device
        if cfg.get_mgmt_iface() == constants.LOOPBACK_INTERFACE_NAME:
            self.mcast_addr = None
            self.controller_address = cfg.get_mgmt_ip()
        else:
            self.mcast_addr = cfg.agent_mcast_group
            self.controller_address = cfg.controller_mcast_group

    def setup_tcp_socket(self):
        address_family = utils.get_management_family()
        self.listener = socket.socket(address_family, socket.SOCK_STREAM)
        self.listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.listener.bind(('', self.port))
        self.listener.listen(2)  # Allow two connections, for two controllers

    def query(self):
        """Check current patch state"""
        if not check_install_uuid():
            LOG.info("Failed install_uuid check. Skipping query")
            return False

        # Generate a unique query id
        self.query_id = random.random()

        # determine OSTREE state of the system and the patches
        self.changes = False

        active_sysroot_commit = ostree_utils.get_sysroot_latest_commit()
        self.latest_sysroot_commit = active_sysroot_commit
        self.last_repo_revision = active_sysroot_commit

        # latest_feed_commit is sent from patch controller;
        # if unprovisioned (no mgmt ip), attempt to query it
        if self.latest_feed_commit is None:
            if self.sock_out is None:
                try:
                    self.latest_feed_commit = ostree_utils.get_feed_latest_commit(SW_VERSION)
                except OSTreeCommandFail:
                    LOG.warning("Unable to query latest feed commit")
                    # latest_feed_commit will remain as None

        if self.latest_feed_commit:
            if active_sysroot_commit != self.latest_feed_commit:
                LOG.info("Active Sysroot Commit: %s does not match "
                         "active controller's Feed Repo Commit: %s",
                         active_sysroot_commit, self.latest_feed_commit)
                self.changes = True

        return True

    def handle_install(self,
                       verbose_to_stdout=False,
                       disallow_insvc_patch=False,
                       delete_older_deployments=False):
        #
        # The disallow_insvc_patch parameter is set when we're installing
        # the patch during init. At that time, we don't want to deal with
        # in-service patch scripts, so instead we'll treat any patch as
        # reboot-required when this parameter is set. Rather than running
        # any scripts, the RR flag will be set, which will result in the node
        # being rebooted immediately upon completion of the installation.
        #
        # The delete_older_deployments parameter is set when the system has
        # been rebooted.
        #

        LOG.info("Handling install")

        # Check the INSTALL_UUID first. If it doesn't match the active
        # controller, we don't want to install patches.
        if not check_install_uuid():
            LOG.error("Failed install_uuid check. Skipping install")

            self.patch_failed = True
            setflag(patch_failed_file)
            self.state = constants.PATCH_AGENT_STATE_INSTALL_FAILED

            # Send a hello to provide a state update
            if self.sock_out is not None:
                hello_ack = PatchMessageHelloAgentAck()
                hello_ack.send(self.sock_out)

            return False

        self.state = constants.PATCH_AGENT_STATE_INSTALLING
        setflag(patch_installing_file)

        if delete_older_deployments:
            ostree_utils.delete_older_deployments()

        try:
            # Create insvc patch directories
            if not os.path.exists(insvc_patch_scripts):
                os.makedirs(insvc_patch_scripts, 0o700)
            if not os.path.exists(insvc_patch_flags):
                os.makedirs(insvc_patch_flags, 0o700)
        except Exception:
            LOG.exception("Failed to create in-service patch directories")

        # Send a hello to provide a state update
        if self.sock_out is not None:
            hello_ack = PatchMessageHelloAgentAck()
            hello_ack.send(self.sock_out)

        # Build up the install set
        if verbose_to_stdout:
            print("Checking for software updates...")
        self.query()  # sets self.changes

        changed = False
        success = True

        if self.changes or \
                os.path.exists(ostree_pull_completed_deployment_pending_file) or \
                os.path.exists(mount_pending_file):
            try:
                # Pull changes from remote to the sysroot ostree.
                # The remote value is configured inside the
                # "/sysroot/ostree/repo/config" file.
                ostree_utils.pull_ostree_from_remote()
                setflag(ostree_pull_completed_deployment_pending_file)
            except OSTreeCommandFail:
                LOG.exception("Failed to pull changes and create deployment "
                              "during host-install.")
                success = False

            try:
                # Create a new deployment once the changes are pulled
                ostree_utils.create_deployment()

                changed = True
                clearflag(ostree_pull_completed_deployment_pending_file)

            except OSTreeCommandFail:
                LOG.exception("Failed to pull changes and create deployment "
                              "during host-install.")
                success = False

            if changed:
                # Update the node_is_patched flag
                setflag(node_is_patched_file)

                self.node_is_patched = True
                if verbose_to_stdout:
                    print("This node has been patched.")

                if os.path.exists(node_is_patched_rr_file):
                    LOG.info("Reboot is required. Skipping patch-scripts")
                elif disallow_insvc_patch:
                    LOG.info("Disallowing patch-scripts. Treating as reboot-required")
                    setflag(node_is_patched_rr_file)
                else:
                    LOG.info("Mounting the new deployment")
                    try:
                        pending_deployment = ostree_utils.fetch_pending_deployment()
                        deployment_dir = constants.OSTREE_BASE_DEPLOYMENT_DIR + pending_deployment
                        setflag(mount_pending_file)
                        ostree_utils.mount_new_deployment(deployment_dir)
                        clearflag(mount_pending_file)
                        LOG.info("Running in-service patch-scripts")
                        pull_restart_scripts_from_controller()
                        subprocess.check_output(run_insvc_patch_scripts_cmd, stderr=subprocess.STDOUT)

                        # Clear the node_is_patched flag, since we've handled it in-service
                        clearflag(node_is_patched_file)
                        self.node_is_patched = False
                    except subprocess.CalledProcessError as e:
                        LOG.exception("In-Service patch installation failed")
                        LOG.error("Command output: %s", e.output)
                        success = False

        # Clear the in-service patch dirs
        if os.path.exists(insvc_patch_scripts):
            shutil.rmtree(insvc_patch_scripts, ignore_errors=True)
        if os.path.exists(insvc_patch_flags):
            shutil.rmtree(insvc_patch_flags, ignore_errors=True)

        if success:
            self.patch_failed = False
            clearflag(patch_failed_file)
            self.state = constants.PATCH_AGENT_STATE_IDLE
        else:
            # Update the patch_failed flag
            self.patch_failed = True
            setflag(patch_failed_file)
            self.state = constants.PATCH_AGENT_STATE_INSTALL_FAILED

        clearflag(patch_installing_file)
        self.query()

        if self.changes:
            LOG.warning("Installing the patch did not change the patch current status")

        # Send a hello to provide a state update
        if self.sock_out is not None:
            hello_ack = PatchMessageHelloAgentAck()
            hello_ack.send(self.sock_out)

        # Indicate whether the method was successful:
        # success means no change was needed, or a change worked.
        return success

    def handle_patch_op_counter(self, counter):
        changed = False
        if os.path.exists(node_is_patched_file):
            # The node has been patched. Run a query if:
            # - node_is_patched didn't exist previously
            # - node_is_patched timestamp changed
            timestamp = os.path.getmtime(node_is_patched_file)
            if not self.node_is_patched:
                self.node_is_patched = True
                self.node_is_patched_timestamp = timestamp
                changed = True
            elif self.node_is_patched_timestamp != timestamp:
                self.node_is_patched_timestamp = timestamp
                changed = True
        elif self.node_is_patched:
            self.node_is_patched = False
            self.node_is_patched_timestamp = 0
            changed = True

        if self.patch_op_counter < counter:
            self.patch_op_counter = counter
            changed = True

        if changed:
            rc = self.query()
            if not rc:
                # Query failed. Reset the op counter
                self.patch_op_counter = 0

    def run(self):
        self.setup_socket()

        while self.sock_out is None:
            # Check every thirty seconds?
            # Once we've got a conf file, tied into packstack,
            # we'll get restarted when the file is updated,
            # and this should be unnecessary.
            time.sleep(30)
            self.setup_socket()

        self.setup_tcp_socket()

        # Ok, now we've got our socket.
        # Let's let the controllers know we're here
        hello_ack = PatchMessageHelloAgentAck()
        hello_ack.send(self.sock_out)

        first_hello = True

        connections = []

        timeout = time.time() + 30.0
        remaining = 30

        while True:
            inputs = [self.sock_in, self.listener] + connections
            outputs = []

            rlist, wlist, xlist = select.select(inputs, outputs, inputs, remaining)

            remaining = int(timeout - time.time())
            if remaining <= 0 or remaining > 30:
                timeout = time.time() + 30.0
                remaining = 30

            if (len(rlist) == 0 and
                    len(wlist) == 0 and
                    len(xlist) == 0):
                # Timeout hit
                self.audit_socket()
                continue

            for s in rlist:
                if s == self.listener:
                    conn, addr = s.accept()
                    connections.append(conn)
                    continue

                data = ''
                addr = None
                msg = None

                if s == self.sock_in:
                    # Receive from UDP
                    data, addr = s.recvfrom(1024)
                else:
                    # Receive from TCP
                    while True:
                        try:
                            packet = s.recv(1024)
                        except socket.error:
                            LOG.exception("Socket error on recv")
                            data = ''
                            break

                        if packet:
                            data += packet.decode()

                            if data == '':
                                break

                            try:
                                json.loads(data)
                                break
                            except ValueError:
                                # Message is incomplete
                                continue
                        else:
                            # End of TCP message received
                            break

                if data == '':
                    # Connection dropped
                    connections.remove(s)
                    s.close()
                    continue

                msgdata = json.loads(data)

                # For now, discard any messages that are not msgversion==1
                if 'msgversion' in msgdata and msgdata['msgversion'] != 1:
                    continue

                if 'msgtype' in msgdata:
                    if msgdata['msgtype'] == messages.PATCHMSG_HELLO_AGENT:
                        if first_hello:
                            self.query()
                            first_hello = False

                        msg = PatchMessageHelloAgent()
                    elif msgdata['msgtype'] == messages.PATCHMSG_QUERY_DETAILED:
                        msg = PatchMessageQueryDetailed()
                    elif msgdata['msgtype'] == messages.PATCHMSG_SEND_LATEST_FEED_COMMIT:
                        msg = PatchMessageSendLatestFeedCommit()
                    elif msgdata['msgtype'] == messages.PATCHMSG_AGENT_INSTALL_REQ:
                        msg = PatchMessageAgentInstallReq()

                if msg is None:
                    msg = messages.PatchMessage()

                msg.decode(msgdata)
                if s == self.sock_in:
                    msg.handle(self.sock_out, addr)
                else:
                    msg.handle(s, addr)

            for s in xlist:
                if s in connections:
                    connections.remove(s)
                    s.close()

            # Check for in-service patch restart flag
            if os.path.exists(insvc_patch_restart_agent):
                # Make sure it's safe to restart, ie. no reqs queued
                rlist, wlist, xlist = select.select(inputs, outputs, inputs, 0)

                if (len(rlist) == 0 and
                        len(wlist) == 0 and
                        len(xlist) == 0):
                    # Restart
                    LOG.info("In-service patch restart flag detected. Exiting.")
                    os.remove(insvc_patch_restart_agent)
                    exit(0)


def main():
    global pa

    configure_logging()

    cfg.read_config()

    pa = PatchAgent()
    pa.query()
    if os.path.exists(agent_running_after_reboot_flag):
        delete_older_deployments_flag = False
    else:
        setflag(agent_running_after_reboot_flag)
        delete_older_deployments_flag = True

    if len(sys.argv) <= 1:
        pa.run()
    elif sys.argv[1] == "--install":
        if not check_install_uuid():
            # In certain cases, the lighttpd server could still be running using
            # its default port 80, as opposed to the port configured in platform.conf
            global http_port_real
            LOG.info("Failed install_uuid check via http_port=%s. Trying with default port 80", http_port_real)
            http_port_real = 80

        pa.handle_install(verbose_to_stdout=True,
                          disallow_insvc_patch=True,
                          delete_older_deployments=delete_older_deployments_flag)
    elif sys.argv[1] == "--status":
        rc = 0
        if pa.changes:
            rc = 1
        exit(rc)
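For context, a minimal sketch of the datagram framing the dispatch loop above expects: one JSON object per UDP datagram carrying 'msgversion' (must be 1) and 'msgtype'. The msgtype value below is a placeholder for one of the messages.PATCHMSG_* constants (defined in the messages module, not shown here), and the controller hostname is an assumption; 5495 is the agent_port default from software.conf (see software_config.py below).

    import json
    import socket

    msg = {'msgversion': 1, 'msgtype': 'PATCHMSG_HELLO_AGENT'}  # placeholder msgtype value
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # The agent listens on agent_port (default 5495); hostname is assumed.
    sock.sendto(json.dumps(msg).encode(), ('controller', 5495))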

software/software/software_client.py: 1449 lines (new file); diff suppressed because it is too large.

software/software/software_config.py: 124 lines (new file)
@ -0,0 +1,124 @@
"""
Copyright (c) 2023 Wind River Systems, Inc.

SPDX-License-Identifier: Apache-2.0

"""
import configparser
import io
import logging
import os
import socket

import tsconfig.tsconfig as tsc

import software.utils as utils
import software.constants as constants

controller_mcast_group = None
agent_mcast_group = None
controller_port = 0
agent_port = 0
api_port = 0
mgmt_if = None
nodetype = None
platform_conf_mtime = 0
software_conf_mtime = 0
software_conf = '/etc/software/software.conf'


def read_config():
    global software_conf_mtime
    global software_conf

    if software_conf_mtime == os.stat(software_conf).st_mtime:
        # The file has not changed since it was last read
        return

    defaults = {
        'controller_mcast_group': "239.1.1.3",
        'agent_mcast_group': "239.1.1.4",
        'api_port': "5493",
        'controller_port': "5494",
        'agent_port': "5495",
    }

    global controller_mcast_group
    global agent_mcast_group
    global api_port
    global controller_port
    global agent_port

    config = configparser.ConfigParser(defaults)

    config.read(software_conf)
    software_conf_mtime = os.stat(software_conf).st_mtime

    controller_mcast_group = config.get('runtime',
                                        'controller_multicast')
    agent_mcast_group = config.get('runtime', 'agent_multicast')

    api_port = config.getint('runtime', 'api_port')
    controller_port = config.getint('runtime', 'controller_port')
    agent_port = config.getint('runtime', 'agent_port')

    # The platform.conf file has no section headers, which causes problems
    # for ConfigParser. So we'll fake it out.
    ini_str = '[platform_conf]\n' + open(tsc.PLATFORM_CONF_FILE, 'r').read()
    ini_fp = io.StringIO(ini_str)
    config.read_file(ini_fp)

    try:
        value = str(config.get('platform_conf', 'nodetype'))

        global nodetype
        nodetype = value
    except configparser.Error:
        logging.exception("Failed to read nodetype from config")


def get_mgmt_ip():
    # Check if initial config is complete
    if not os.path.exists('/etc/platform/.initial_config_complete'):
        return None
    mgmt_hostname = socket.gethostname()
    return utils.gethostbyname(mgmt_hostname)


# Because the software daemons are launched before manifests are
# applied, the content of some settings in platform.conf can change,
# such as the management interface. As such, we can't just directly
# use tsc.management_interface
#
def get_mgmt_iface():
    # Check if initial config is complete
    if not os.path.exists(constants.INITIAL_CONFIG_COMPLETE_FLAG):
        return None

    global mgmt_if
    global platform_conf_mtime

    if mgmt_if is not None and \
            platform_conf_mtime == os.stat(tsc.PLATFORM_CONF_FILE).st_mtime:
        # The platform.conf file hasn't been modified since we read it,
        # so return the cached value.
        return mgmt_if

    config = configparser.ConfigParser()

    # The platform.conf file has no section headers, which causes problems
    # for ConfigParser. So we'll fake it out.
    ini_str = '[platform_conf]\n' + open(tsc.PLATFORM_CONF_FILE, 'r').read()
    ini_fp = io.StringIO(ini_str)
    config.read_file(ini_fp)

    try:
        value = str(config.get('platform_conf', 'management_interface'))

        mgmt_if = value

        platform_conf_mtime = os.stat(tsc.PLATFORM_CONF_FILE).st_mtime
    except configparser.Error:
        logging.exception("Failed to read management_interface from config")
        return None
    return mgmt_if
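The [runtime] keys and defaults above imply a software.conf of the following shape. A minimal sketch, restating the default values, that read_config() would accept:

    import configparser

    # The [runtime] section read_config() expects; values restate the
    # defaults defined in software_config.py above.
    SAMPLE_SOFTWARE_CONF = """
    [runtime]
    controller_multicast = 239.1.1.3
    agent_multicast = 239.1.1.4
    api_port = 5493
    controller_port = 5494
    agent_port = 5495
    """

    config = configparser.ConfigParser()
    config.read_string(SAMPLE_SOFTWARE_CONF)
    assert config.getint('runtime', 'agent_port') == 5495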

software/software/software_controller.py: 2711 lines (new file); diff suppressed because it is too large.

software/software/software_functions.py: 987 lines (new file)
@ -0,0 +1,987 @@
"""
Copyright (c) 2023 Wind River Systems, Inc.

SPDX-License-Identifier: Apache-2.0

"""

import getopt
import glob
import hashlib
import logging
import os
import platform
import re
import shutil
import subprocess
import sys
import tarfile
import tempfile
from lxml import etree as ElementTree
from xml.dom import minidom

from software.release_verify import verify_files
from software.release_verify import cert_type_all
from software.release_signing import sign_files
from software.exceptions import MetadataFail
from software.exceptions import PatchFail
from software.exceptions import PatchValidationFailure
from software.exceptions import PatchMismatchFailure

import software.constants as constants

try:
    # The tsconfig module is only available at runtime
    from tsconfig.tsconfig import SW_VERSION
except Exception:
    SW_VERSION = "unknown"

# Constants
patch_dir = constants.PATCH_STORAGE_DIR
avail_dir = "%s/metadata/available" % patch_dir
applied_dir = "%s/metadata/applied" % patch_dir
committed_dir = "%s/metadata/committed" % patch_dir
semantics_dir = "%s/semantics" % patch_dir

# these next 4 variables may need to change to support ostree
repo_root_dir = "/var/www/pages/updates"
repo_dir = {SW_VERSION: "%s/rel-%s" % (repo_root_dir, SW_VERSION)}

root_package_dir = "%s/packages" % patch_dir
root_scripts_dir = "/opt/software/software-scripts"
package_dir = {SW_VERSION: "%s/%s" % (root_package_dir, SW_VERSION)}

logfile = "/var/log/software.log"
apilogfile = "/var/log/software-api.log"

LOG = logging.getLogger('main_logger')
auditLOG = logging.getLogger('audit_logger')
audit_log_msg_prefix = 'User: sysadmin/admin Action: '

detached_signature_file = "signature.v2"


def handle_exception(exc_type, exc_value, exc_traceback):
    """
    Exception handler to log any uncaught exceptions
    """
    LOG.error("Uncaught exception",
              exc_info=(exc_type, exc_value, exc_traceback))
    sys.__excepthook__(exc_type, exc_value, exc_traceback)


def configure_logging(logtofile=True, level=logging.INFO):
    if logtofile:
        my_exec = os.path.basename(sys.argv[0])

        log_format = '%(asctime)s: ' \
                     + my_exec + '[%(process)s]: ' \
                     + '%(filename)s(%(lineno)s): ' \
                     + '%(levelname)s: %(message)s'

        formatter = logging.Formatter(log_format, datefmt="%FT%T")

        LOG.setLevel(level)
        main_log_handler = logging.FileHandler(logfile)
        main_log_handler.setFormatter(formatter)
        LOG.addHandler(main_log_handler)

        try:
            os.chmod(logfile, 0o640)
        except Exception:
            pass

        auditLOG.setLevel(level)
        api_log_handler = logging.FileHandler(apilogfile)
        api_log_handler.setFormatter(formatter)
        auditLOG.addHandler(api_log_handler)
        try:
            os.chmod(apilogfile, 0o640)
        except Exception:
            pass

        # Log uncaught exceptions to file
        sys.excepthook = handle_exception
    else:
        logging.basicConfig(level=level)


def audit_log_info(msg=''):
    msg = audit_log_msg_prefix + msg
    auditLOG.info(msg)


def get_md5(path):
    """
    Utility function for generating the md5sum of a file
    :param path: Path to file
    """
    md5 = hashlib.md5()
    block_size = 8192
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(block_size), b''):
            md5.update(chunk)
    return int(md5.hexdigest(), 16)


def add_text_tag_to_xml(parent,
                        name,
                        text):
    """
    Utility function for adding a text tag to an XML object
    :param parent: Parent element
    :param name: Element name
    :param text: Text value
    :return: The created element
    """
    tag = ElementTree.SubElement(parent, name)
    tag.text = text
    return tag


def write_xml_file(top,
                   fname):
    # Generate the file, in a readable format if possible
    outfile = open(fname, 'w')
    rough_xml = ElementTree.tostring(top)
    if platform.python_version() == "2.7.2":
        # The 2.7.2 toprettyxml() function unnecessarily indents
        # childless tags, adding whitespace. In the case of the
        # yum comps.xml file, it makes the file unusable, so just
        # write the rough xml
        outfile.write(rough_xml)
    else:
        outfile.write(minidom.parseString(rough_xml).toprettyxml(indent="  "))


def get_release_from_patch(patchfile):
    rel = ""
    try:
        cmd = "tar xf %s -O metadata.tar | tar x -O" % patchfile
        metadata_str = subprocess.check_output(cmd, shell=True)
        root = ElementTree.fromstring(metadata_str)
        # Extract release version
        rel = root.findtext('sw_version')
    except subprocess.CalledProcessError as e:
        LOG.error("Failed to run tar command")
        LOG.error("Command output: %s", e.output)
        raise e
    except Exception as e:
        print("Failed to parse patch software version")
        raise e
    return rel


class BasePackageData(object):
    """
    Information about the base package data provided by the load
    """
    def __init__(self):
        self.pkgs = {}
        self.loaddirs()

    def loaddirs(self):
        # Load up available package info
        base_dir = constants.FEED_OSTREE_BASE_DIR
        if not os.path.exists(base_dir):
            # Return, since this could be running off-box
            return

        # Look for release dirs
        for reldir in glob.glob("%s/rel-*" % base_dir):
            pattern = re.compile("%s/rel-(.*)" % base_dir)
            m = pattern.match(reldir)
            sw_rel = m.group(1)

            if sw_rel in self.pkgs:
                # We've already parsed this dir once
                continue

            self.pkgs[sw_rel] = {}

        # Clean up deleted data; iterate over a copy of the keys,
        # since we may delete entries as we go
        for sw_rel in list(self.pkgs):
            if not os.path.exists("%s/rel-%s" % (base_dir, sw_rel)):
                del self.pkgs[sw_rel]

    def check_release(self, sw_rel):
        return (sw_rel in self.pkgs)

    def find_version(self, sw_rel, pkgname, arch):
        if sw_rel not in self.pkgs or \
                pkgname not in self.pkgs[sw_rel] or \
                arch not in self.pkgs[sw_rel][pkgname]:
            return None

        return self.pkgs[sw_rel][pkgname][arch]


class PatchData(object):
    """
    Aggregated patch data
    """
    def __init__(self):
        #
        # The metadata dict stores all metadata associated with a patch.
        # This dict is keyed on patch_id, with metadata for each patch stored
        # in a nested dict. (See parse_metadata method for more info)
        #
        self.metadata = {}

        #
        # The contents dict stores the lists of RPMs provided by each patch,
        # indexed by patch_id.
        #
        self.contents = {}

    def add_patch(self, new_patch):
        # We can just use "update" on these dicts because they are indexed by patch_id
        self.metadata.update(new_patch.metadata)
        self.contents.update(new_patch.contents)

    def update_patch(self, updated_patch):
        for patch_id in list(updated_patch.metadata):
            # Update all fields except repostate
            cur_repostate = self.metadata[patch_id]['repostate']
            self.metadata[patch_id].update(updated_patch.metadata[patch_id])
            self.metadata[patch_id]['repostate'] = cur_repostate

    def delete_patch(self, patch_id):
        del self.contents[patch_id]
        del self.metadata[patch_id]

    @staticmethod
    def modify_metadata_text(filename,
                             key,
                             value):
        """
        Open an xml file, find first element matching 'key' and replace the text with 'value'
        """
        new_filename = "%s.new" % filename
        tree = ElementTree.parse(filename)

        # Prevent a proliferation of carriage returns when we write this XML back out to file.
        for e in tree.getiterator():
            if e.text is not None:
                e.text = e.text.rstrip()
            if e.tail is not None:
                e.tail = e.tail.rstrip()

        root = tree.getroot()

        # Make the substitution
        e = root.find(key)
        if e is None:
            msg = "modify_metadata_text: failed to find tag '%s'" % key
            LOG.error(msg)
            raise PatchValidationFailure(msg)
        e.text = value

        # write the modified file
        outfile = open(new_filename, 'w')
        rough_xml = ElementTree.tostring(root)
        if platform.python_version() == "2.7.2":
            # The 2.7.2 toprettyxml() function unnecessarily indents
            # childless tags, adding whitespace. In the case of the
            # yum comps.xml file, it makes the file unusable, so just
            # write the rough xml
            outfile.write(rough_xml)
        else:
            outfile.write(minidom.parseString(rough_xml).toprettyxml(indent="  "))
        outfile.close()
        os.rename(new_filename, filename)

    def parse_metadata(self,
                       filename,
                       repostate=None):
        """
        Parse an individual patch metadata XML file
        :param filename: XML file
        :param repostate: Indicates Applied, Available, or Committed
        :return: Patch ID
        """
        tree = ElementTree.parse(filename)
        root = tree.getroot()

        #
        #    <patch>
        #        <id>PATCH_0001</id>
        #        <summary>Brief description</summary>
        #        <description>Longer description</description>
        #        <install_instructions/>
        #        <warnings/>
        #        <status>Dev</status>
        #        <unremovable/>
        #        <reboot_required/>
        #    </patch>
        #

        patch_id = root.findtext("id")
        if patch_id is None:
            LOG.error("Patch metadata contains no id tag")
            return None

        self.metadata[patch_id] = {}

        self.metadata[patch_id]["repostate"] = repostate

        # Patch state is unknown at this point
        self.metadata[patch_id]["patchstate"] = "n/a"

        self.metadata[patch_id]["sw_version"] = "unknown"

        for key in ["status",
                    "unremovable",
                    "sw_version",
                    "summary",
                    "description",
                    "install_instructions",
                    "restart_script",
                    "warnings",
                    "apply_active_release_only"]:
            value = root.findtext(key)
            if value is not None:
                self.metadata[patch_id][key] = value

        # Default reboot_required to Y
        rr_value = root.findtext("reboot_required")
        if rr_value is None or rr_value != "N":
            self.metadata[patch_id]["reboot_required"] = "Y"
        else:
            self.metadata[patch_id]["reboot_required"] = "N"

        patch_sw_version = self.metadata[patch_id]["sw_version"]
        global package_dir
        if patch_sw_version not in package_dir:
            package_dir[patch_sw_version] = "%s/%s" % (root_package_dir, patch_sw_version)
            repo_dir[patch_sw_version] = "%s/rel-%s" % (repo_root_dir, patch_sw_version)

        self.metadata[patch_id]["requires"] = []
        for req in root.findall("requires"):
            for req_patch in req.findall("req_patch_id"):
                self.metadata[patch_id]["requires"].append(req_patch.text)

        self.contents[patch_id] = {}

        for content in root.findall("contents/ostree"):
            self.contents[patch_id]["number_of_commits"] = content.findall("number_of_commits")[0].text
            self.contents[patch_id]["base"] = {}
            self.contents[patch_id]["base"]["commit"] = content.findall("base/commit")[0].text
            self.contents[patch_id]["base"]["checksum"] = content.findall("base/checksum")[0].text
            for i in range(int(self.contents[patch_id]["number_of_commits"])):
                self.contents[patch_id]["commit%s" % (i + 1)] = {}
                self.contents[patch_id]["commit%s" % (i + 1)]["commit"] = \
                    content.findall("commit%s/commit" % (i + 1))[0].text
                self.contents[patch_id]["commit%s" % (i + 1)]["checksum"] = \
                    content.findall("commit%s/checksum" % (i + 1))[0].text

        return patch_id

    def load_all_metadata(self,
                          loaddir,
                          repostate=None):
        """
        Parse all metadata files in the specified dir
        :return:
        """
        for fname in glob.glob("%s/*.xml" % loaddir):
            self.parse_metadata(fname, repostate)

    def load_all(self):
        # Reset the data
        self.__init__()
        self.load_all_metadata(applied_dir, repostate=constants.APPLIED)
        self.load_all_metadata(avail_dir, repostate=constants.AVAILABLE)
        self.load_all_metadata(committed_dir, repostate=constants.COMMITTED)

    def query_line(self,
                   patch_id,
                   index):
        if index is None:
            return None

        if index == "contents":
            return self.contents[patch_id]

        if index not in self.metadata[patch_id]:
            return None

        value = self.metadata[patch_id][index]
        return value


class PatchMetadata(object):
    """
    Creating metadata for a single patch
    """
    def __init__(self):
        self.id = None
        self.sw_version = None
        self.summary = None
        self.description = None
        self.install_instructions = None
        self.warnings = None
        self.status = None
        self.unremovable = None
        self.reboot_required = None
        self.apply_active_release_only = None
        self.requires = []
        self.contents = {}

    def add_rpm(self,
                fname):
        """
        Add an RPM to the patch
        :param fname: RPM filename
        :return:
        """
        rpmname = os.path.basename(fname)
        self.contents[rpmname] = True

    def gen_xml(self,
                fname="metadata.xml"):
        """
        Generate patch metadata XML file
        :param fname: Path to output file
        :return:
        """
        top = ElementTree.Element('patch')

        add_text_tag_to_xml(top, 'id',
                            self.id)
        add_text_tag_to_xml(top, 'sw_version',
                            self.sw_version)
        add_text_tag_to_xml(top, 'summary',
                            self.summary)
        add_text_tag_to_xml(top, 'description',
                            self.description)
        add_text_tag_to_xml(top, 'install_instructions',
                            self.install_instructions)
        add_text_tag_to_xml(top, 'warnings',
                            self.warnings)
        add_text_tag_to_xml(top, 'status',
                            self.status)
        add_text_tag_to_xml(top, 'unremovable',
                            self.unremovable)
        add_text_tag_to_xml(top, 'reboot_required',
                            self.reboot_required)
        add_text_tag_to_xml(top, 'apply_active_release_only',
                            self.apply_active_release_only)

        content = ElementTree.SubElement(top, 'contents')
        for rpmname in sorted(list(self.contents)):
            add_text_tag_to_xml(content, 'rpm', rpmname)

        req = ElementTree.SubElement(top, 'requires')
        for req_patch in sorted(self.requires):
            add_text_tag_to_xml(req, 'req_patch_id', req_patch)

        write_xml_file(top, fname)


class PatchFile(object):
    """
    Patch file
    """
    def __init__(self):
        self.meta = PatchMetadata()
        self.rpmlist = {}

    def add_rpm(self,
                fname):
        """
        Add an RPM to the patch
        :param fname: Path to RPM
        :return:
        """
        # Add the RPM to the metadata
        self.meta.add_rpm(fname)

        # Add the RPM to the patch
        self.rpmlist[os.path.abspath(fname)] = True

    def gen_patch(self, outdir):
        """
        Generate the patch file, named PATCHID.patch
        :param outdir: Output directory for the patch
        :return:
        """
        if not self.rpmlist:
            raise MetadataFail("Cannot generate empty patch")

        patchfile = "%s/%s.patch" % (outdir, self.meta.id)

        # Create a temporary working directory
        tmpdir = tempfile.mkdtemp(prefix="software_")

        # Save the current directory, so we can chdir back after
        orig_wd = os.getcwd()

        # Change to the tmpdir
        os.chdir(tmpdir)

        # Copy RPM files to tmpdir
        for rpmfile in list(self.rpmlist):
            shutil.copy(rpmfile, tmpdir)

        # add file signatures to RPMs
        try:
            subprocess.check_call(["sign-rpms", "-d", tmpdir])
        except subprocess.CalledProcessError as e:
print("Failed to to add file signatures to RPMs. Call to sign-rpms process returned non-zero exit status %i" % e.returncode)
            os.chdir(orig_wd)
            shutil.rmtree(tmpdir)
            raise SystemExit(e.returncode)

        # generate tar file
        tar = tarfile.open("software.tar", "w")
        for rpmfile in list(self.rpmlist):
            tar.add(os.path.basename(rpmfile))
        tar.close()

        # Generate the metadata xml file
        self.meta.gen_xml("metadata.xml")

        # assemble the patch
        PatchFile.write_patch(patchfile)

        # Change back to original working dir
        os.chdir(orig_wd)

        shutil.rmtree(tmpdir)

        print("Patch is %s" % patchfile)

    @staticmethod
    def write_patch(patchfile, cert_type=None):
        # Write the patch file. Assumes we are in a directory containing
        # metadata.tar and software.tar.

        # Generate the metadata tarfile
        tar = tarfile.open("metadata.tar", "w")
        tar.add("metadata.xml")
        tar.close()

        filelist = ["metadata.tar", "software.tar"]
        if os.path.exists("semantics.tar"):
            filelist.append("semantics.tar")

        # Generate the signature file
        sig = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
        for f in filelist:
            sig ^= get_md5(f)

        sigfile = open("signature", "w")
        sigfile.write("%x" % sig)
        sigfile.close()

        # Generate the detached signature
        #
        # Note: if cert_type requests a formal signature, but the signing key
        #       is not found, we'll instead sign with the 'dev' key and
        #       need_resign_with_formal is set to True.
        need_resign_with_formal = sign_files(
            filelist,
            detached_signature_file,
            cert_type=cert_type)

        # Create the patch
        tar = tarfile.open(patchfile, "w:gz")
        for f in filelist:
            tar.add(f)
        tar.add("signature")
        tar.add(detached_signature_file)
        tar.close()

        if need_resign_with_formal:
            try:
                # Try to ensure "sign_patch_formal.sh" will be in our PATH
                if 'MY_REPO' in os.environ:
                    os.environ['PATH'] += os.pathsep + os.environ['MY_REPO'] + "/build-tools"
                if 'MY_PATCH_REPO' in os.environ:
                    os.environ['PATH'] += os.pathsep + os.environ['MY_PATCH_REPO'] + "/build-tools"

                # Note: This can fail if the user is not authorized to sign with the formal key.
                subprocess.check_call(["sign_patch_formal.sh", patchfile])
            except subprocess.CalledProcessError as e:
                print("Failed to sign official patch. Call to sign_patch_formal.sh process returned non-zero exit status %i" % e.returncode)
                raise SystemExit(e.returncode)

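For reference, the data-integrity "signature" written above is simply the XOR of each tarball's 128-bit MD5 into an all-ones accumulator; read_patch() below inverts the same computation. A minimal standalone check, assuming an already-extracted patch directory, looks like this:

    import hashlib

    def file_md5_int(path):
        # 128-bit MD5 digest of a file, as an integer (mirrors get_md5 above)
        md5 = hashlib.md5()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(8192), b''):
                md5.update(chunk)
        return int(md5.hexdigest(), 16)

    expected = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
    for name in ("metadata.tar", "software.tar"):  # semantics.tar is XORed in too, when present
        expected ^= file_md5_int(name)

    with open("signature") as f:
        stored = int(f.read(), 16)

    print("integrity OK" if stored == expected else "integrity FAILED")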
    @staticmethod
    def read_patch(path, cert_type=None):
        # We want to enable signature checking by default
        # Note: cert_type=None is required if we are to enforce 'no dev patches on a formal load' rule.

        # Open the patch file and extract the contents to the current dir
        tar = tarfile.open(path, "r:gz")

        filelist = []
        for f in tar.getmembers():
            filelist.append(f.name)

        if detached_signature_file not in filelist:
            msg = "Patch not signed"
            LOG.warning(msg)

        for f in filelist:
            tar.extract(f)

        # Filelist used for signature validation and verification
        sig_filelist = ["metadata.tar", "software.tar"]
        if "semantics.tar" in filelist:
            sig_filelist.append("semantics.tar")

        # Verify the data integrity signature first
        sigfile = open("signature", "r")
        sig = int(sigfile.read(), 16)
        sigfile.close()

        expected_sig = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
        for f in sig_filelist:
            sig ^= get_md5(f)

        if sig != expected_sig:
            msg = "Patch failed verification"
            LOG.error(msg)
            raise PatchValidationFailure(msg)

        # Verify detached signature
        if os.path.exists(detached_signature_file):
            sig_valid = verify_files(
                sig_filelist,
                detached_signature_file,
                cert_type=cert_type)
            if sig_valid is True:
                msg = "Signature verified, patch has been signed"
                if cert_type is None:
                    LOG.info(msg)
            else:
                msg = "Signature check failed"
                if cert_type is None:
                    LOG.error(msg)
                raise PatchValidationFailure(msg)
        else:
            msg = "Patch has not been signed"
            if cert_type is None:
                LOG.error(msg)
            raise PatchValidationFailure(msg)

        tar = tarfile.open("metadata.tar")
        tar.extractall()

    @staticmethod
    def query_patch(patch, field=None):

        abs_patch = os.path.abspath(patch)

        # Create a temporary working directory
        tmpdir = tempfile.mkdtemp(prefix="patch_")

        # Save the current directory, so we can chdir back after
        orig_wd = os.getcwd()

        # Change to the tmpdir
        os.chdir(tmpdir)

        r = {}

        try:
            if field is None or field == "cert":
                # Need to determine the cert_type
                for cert_type_str in cert_type_all:
                    try:
                        PatchFile.read_patch(abs_patch, cert_type=[cert_type_str])
                    except PatchValidationFailure:
                        pass
                    else:
                        # Successfully opened the file for reading, and we have discovered the cert_type
                        r["cert"] = cert_type_str
                        break

                if "cert" not in r:
                    # If cert is unknown, then file is not yet open for reading.
                    # Try to open it for reading now, using all available keys.
                    # We can't omit cert_type, or pass None, because that will trigger the code
                    # path used by installed product, in which dev keys are not accepted unless
                    # a magic file exists.
                    PatchFile.read_patch(abs_patch, cert_type=cert_type_all)

            thispatch = PatchData()
            patch_id = thispatch.parse_metadata("metadata.xml")

            if field is None or field == "id":
                r["id"] = patch_id

            if field is None:
                for f in ["status", "sw_version", "unremovable", "summary",
                          "description", "install_instructions",
                          "warnings", "reboot_required", "apply_active_release_only"]:
                    r[f] = thispatch.query_line(patch_id, f)
            else:
                if field not in ['id', 'cert']:
                    r[field] = thispatch.query_line(patch_id, field)

        except PatchValidationFailure as e:
            msg = "Patch validation failed during extraction"
            LOG.exception(msg)
            raise e
        except PatchMismatchFailure as e:
            msg = "Patch Mismatch during extraction"
            LOG.exception(msg)
            raise e
        except tarfile.TarError:
            msg = "Failed during patch extraction"
            LOG.exception(msg)
            raise PatchValidationFailure(msg)
        finally:
            # Change back to original working dir
            os.chdir(orig_wd)
            shutil.rmtree(tmpdir)

        return r

    @staticmethod
    def modify_patch(patch,
                     key,
                     value):
        rc = False
        abs_patch = os.path.abspath(patch)
        new_abs_patch = "%s.new" % abs_patch

        # Create a temporary working directory
        tmpdir = tempfile.mkdtemp(prefix="patch_")

        # Save the current directory, so we can chdir back after
        orig_wd = os.getcwd()

        # Change to the tmpdir
        os.chdir(tmpdir)

        try:
            cert_type = None
            meta_data = PatchFile.query_patch(abs_patch)
            if 'cert' in meta_data:
                cert_type = meta_data['cert']
            PatchFile.read_patch(abs_patch, cert_type=cert_type)
            PatchData.modify_metadata_text("metadata.xml", key, value)
            PatchFile.write_patch(new_abs_patch, cert_type=cert_type)
            os.rename(new_abs_patch, abs_patch)
            rc = True

        except PatchValidationFailure as e:
            raise e
        except PatchMismatchFailure as e:
            raise e
        except tarfile.TarError:
            msg = "Failed during patch extraction"
            LOG.exception(msg)
            raise PatchValidationFailure(msg)
        except Exception as e:
            template = "An exception of type {0} occurred. Arguments:\n{1!r}"
            message = template.format(type(e).__name__, e.args)
            print(message)
        finally:
            # Change back to original working dir
            os.chdir(orig_wd)
            shutil.rmtree(tmpdir)

        return rc

    @staticmethod
    def extract_patch(patch,
                      metadata_dir=avail_dir,
                      metadata_only=False,
                      existing_content=None,
                      base_pkgdata=None):
        """
        Extract the metadata and patch contents
        :param patch: Patch file
        :param metadata_dir: Directory to store the metadata XML file
        :return:
        """
        thispatch = None

        abs_patch = os.path.abspath(patch)
        abs_metadata_dir = os.path.abspath(metadata_dir)
        # Create a temporary working directory
        tmpdir = tempfile.mkdtemp(prefix="patch_")

        # Save the current directory, so we can chdir back after
        orig_wd = os.getcwd()

        # Change to the tmpdir
        os.chdir(tmpdir)

        try:
            # Open the patch file and extract the contents to the tmpdir
            PatchFile.read_patch(abs_patch)

            thispatch = PatchData()
            patch_id = thispatch.parse_metadata("metadata.xml")

            if patch_id is None:
                print("Failed to import patch")
                # Change back to original working dir
                os.chdir(orig_wd)
                shutil.rmtree(tmpdir)
                return None

            if not metadata_only and base_pkgdata is not None:
                # Run version validation tests first
                patch_sw_version = thispatch.metadata[patch_id]["sw_version"]
                if not base_pkgdata.check_release(patch_sw_version):
                    msg = "Patch %s software release (%s) is not installed" % (patch_id, patch_sw_version)
                    LOG.exception(msg)
                    raise PatchValidationFailure(msg)

            if metadata_only:
                # This is a re-import. Ensure the content lines up
                if existing_content is None \
                        or existing_content != thispatch.contents[patch_id]:
                    msg = "Contents of re-imported patch do not match"
                    LOG.exception(msg)
                    raise PatchMismatchFailure(msg)

            patch_sw_version = thispatch.metadata[patch_id]["sw_version"]
            abs_ostree_tar_dir = package_dir[patch_sw_version]
            if not os.path.exists(abs_ostree_tar_dir):
                os.makedirs(abs_ostree_tar_dir)

            shutil.move("metadata.xml",
                        "%s/%s-metadata.xml" % (abs_metadata_dir, patch_id))
            shutil.move("software.tar",
                        "%s/%s-software.tar" % (abs_ostree_tar_dir, patch_id))

            # restart_script may not exist in metadata.
            if thispatch.metadata[patch_id].get("restart_script"):
                if not os.path.exists(root_scripts_dir):
                    os.makedirs(root_scripts_dir)
                restart_script_name = thispatch.metadata[patch_id]["restart_script"]
                shutil.move(restart_script_name,
                            "%s/%s" % (root_scripts_dir, restart_script_name))

        except PatchValidationFailure as e:
            raise e
        except PatchMismatchFailure as e:
            raise e
        except tarfile.TarError:
            msg = "Failed during patch extraction"
            LOG.exception(msg)
            raise PatchValidationFailure(msg)
        except KeyError:
            msg = "Failed during patch extraction"
            LOG.exception(msg)
            raise PatchValidationFailure(msg)
        except OSError:
            msg = "Failed during patch extraction"
            LOG.exception(msg)
            raise PatchFail(msg)
        except IOError:  # pylint: disable=duplicate-except
            msg = "Failed during patch extraction"
            LOG.exception(msg)
            raise PatchFail(msg)
        finally:
            # Change back to original working dir
            os.chdir(orig_wd)
            shutil.rmtree(tmpdir)

        return thispatch


def patch_build():
    configure_logging(logtofile=False)

    try:
        opts, remainder = getopt.getopt(sys.argv[1:],
                                        '',
                                        ['id=',
                                         'release=',
                                         'summary=',
                                         'status=',
                                         'unremovable',
                                         'reboot-required=',
                                         'desc=',
                                         'warn=',
                                         'inst=',
                                         'req=',
                                         'controller=',
                                         'controller-worker=',
                                         'controller-worker-lowlatency=',
                                         'worker=',
                                         'worker-lowlatency=',
                                         'storage=',
                                         'all-nodes=',
                                         'pre-apply=',
                                         'pre-remove=',
                                         'apply-active-release-only'])
    except getopt.GetoptError:
        print("Usage: %s [ <args> ] ... <rpm list>"
              % os.path.basename(sys.argv[0]))
        print("Options:")
        print("\t--id <id>               Patch ID")
        print("\t--release <version>     Platform release version")
        print("\t--status <status>       Patch Status Code (ie. O, R, V)")
        print("\t--unremovable           Marks patch as unremovable")
        print("\t--reboot-required <Y|N> Marks patch as reboot-required (default=Y)")
        print("\t--summary <summary>     Patch Summary")
        print("\t--desc <description>    Patch Description")
        print("\t--warn <warnings>       Patch Warnings")
        print("\t--inst <instructions>   Patch Install Instructions")
        print("\t--req <patch_id>        Required Patch")
        print("\t--controller <rpm>      New package for controller")
        print("\t--worker <rpm>          New package for worker node")
        print("\t--worker-lowlatency <rpm>   New package for worker-lowlatency node")
        print("\t--storage <rpm>         New package for storage node")
        print("\t--controller-worker <rpm>   New package for combined node")
        print("\t--controller-worker-lowlatency <rpm>   New package for lowlatency combined node")
        print("\t--all-nodes <rpm>       New package for all node types")
        print("\t--pre-apply <script>    Add pre-apply semantic check")
        print("\t--pre-remove <script>   Add pre-remove semantic check")
        print("\t--apply-active-release-only   Patch can only be applied if corresponding")
        print("\t                              release is active")

        exit(1)

    pf = PatchFile()

    # Default the release
    pf.meta.sw_version = os.environ['PLATFORM_RELEASE']

    for opt, arg in opts:
        if opt == "--id":
            pf.meta.id = arg
        elif opt == "--release":
            pf.meta.sw_version = arg
        elif opt == "--summary":
            pf.meta.summary = arg
        elif opt == "--status":
            pf.meta.status = arg
        elif opt == "--unremovable":
            pf.meta.unremovable = "Y"
        elif opt == "--reboot-required":
            if arg != "Y" and arg != "N":
                print("The --reboot-required option requires either Y or N as argument.")
                exit(1)
            pf.meta.reboot_required = arg
        elif opt == "--desc":
            pf.meta.description = arg
        elif opt == "--warn":
            pf.meta.warnings = arg
        elif opt == "--inst":
            pf.meta.install_instructions = arg
        elif opt == "--req":
            pf.meta.requires.append(arg)
        elif opt == "--apply-active-release-only":
            pf.meta.apply_active_release_only = "Y"

    if pf.meta.id is None:
        print("The --id argument is mandatory.")
        exit(1)

    for rpmfile in remainder:
        pf.add_rpm(rpmfile)

    pf.gen_patch(outdir=os.getcwd())
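As a usage illustration, a hedged sketch of driving PatchMetadata directly to produce a metadata.xml (the import path is assumed from the repo layout; the id, release, and RPM name are invented):

    from software.software_functions import PatchMetadata

    meta = PatchMetadata()
    meta.id = 'PATCH_0001'                      # invented patch ID
    meta.sw_version = '23.09'                   # invented release string
    meta.summary = 'Example summary'
    meta.description = 'Illustration only'
    meta.reboot_required = 'N'
    meta.add_rpm('example-1.0-1.x86_64.rpm')    # only the basename is recorded
    meta.gen_xml('metadata.xml')                # writes the XML produced by gen_xml()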

software/software/templates/query.xml: 95 lines (new file)
@ -0,0 +1,95 @@
% if not pd is UNDEFINED:
<pd>
% if len(pd) > 0:
% for patch_id in sorted(pd.keys()):
${patchelem(patch_id)}
% endfor
% endif
</pd>
% endif
% if not info is UNDEFINED or not warning is UNDEFINED or not error is UNDEFINED:
<info>
% if not info is UNDEFINED and len(info) > 0:
${info}
% endif
</info>
<warning>
% if not warning is UNDEFINED and len(warning) > 0:
${warning}
% endif
</warning>
<error>
% if not error is UNDEFINED and len(error) > 0:
${error}
% endif
</error>
% endif
<%def name="patchelem(patch_id)">\
<%p = pd[patch_id] %>\
<patch>
<patch_id>
${patch_id}
</patch_id>
<status>
% if p["status"] != "":
${p["status"]}
% endif
</status>
<sw_version>
% if p["sw_version"] != "":
${p["sw_version"]}
% endif
</sw_version>
<repostate>
% if p["repostate"] != "":
${p["repostate"]}
% endif
</repostate>
<patchstate>
% if p["patchstate"] != "":
${p["patchstate"]}
% endif
</patchstate>
<status>
% if p["status"] != "":
${p["status"]}
% endif
</status>
<unremovable>
% if p["unremovable"] != "":
${p["unremovable"]}
% endif
</unremovable>
<reboot_required>
% if p["reboot_required"] != "":
${p["reboot_required"]}
% endif
</reboot_required>
<summary>
% if p["summary"] != "":
${p["summary"]}
% endif
</summary>
<description>
% if p["description"] != "":
${p["description"]}
% endif
</description>
<install_instructions>
% if p["install_instructions"] != "":
${p["install_instructions"]}
% endif
</install_instructions>
<warnings>
% if p["warnings"] != "":
${p["warnings"]}
% endif
</warnings>
<requires>
% if "requires" in p and len(p["requires"]) > 0:
% for req in sorted(p["requires"]):
<patch>${req}</patch>
% endfor
% endif
</requires>
</patch></%def>
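These templates use Mako syntax (the % control lines and <%def> blocks above). A hedged sketch of rendering query.xml with one invented entry, assuming Mako is installed and the template path matches the repo layout:

    from mako.template import Template

    # Invented field values, matching the keys the template reads from pd
    pd = {"PATCH_0001": {"status": "DEV", "sw_version": "23.09",
                         "repostate": "Applied", "patchstate": "Applied",
                         "unremovable": "N", "reboot_required": "Y",
                         "summary": "Example", "description": "",
                         "install_instructions": "", "warnings": "",
                         "requires": []}}

    tmpl = Template(filename="software/software/templates/query.xml")
    print(tmpl.render(pd=pd, info="", warning="", error=""))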

software/software/templates/query_hosts.xml: 56 lines (new file)
@ -0,0 +1,56 @@
% if not data is UNDEFINED and len(data) > 0:
<data>
% for host in data:
${hostelem(host)}
% endfor
</data>
% endif
<%def name="hostelem(host)">\
<%h = host %>\
<host>
<hostname>
% if h["hostname"] != "":
${h["hostname"]}
% endif
</hostname>
<requires_reboot>
% if h["requires_reboot"] != "":
${h["requires_reboot"]}
% endif
</requires_reboot>
<nodetype>
% if h["nodetype"] != "":
${h["nodetype"]}
% endif
</nodetype>
<ip>
% if h["ip"] != "":
${h["ip"]}
% endif
</ip>
<latest_sysroot_commit>
% if h["latest_sysroot_commit"] != "":
${h["latest_sysroot_commit"]}
% endif
</latest_sysroot_commit>
<secs_since_ack>
% if h["secs_since_ack"] != "":
${h["secs_since_ack"]}
% endif
</secs_since_ack>
<patch_failed>
% if h["patch_failed"] != "":
${h["patch_failed"]}
% endif
</patch_failed>
<stale_details>
% if h["stale_details"] != "":
${h["stale_details"]}
% endif
</stale_details>
<patch_current>
% if h["patch_current"] != "":
${h["patch_current"]}
% endif
</patch_current>
</host></%def>

software/software/templates/show.xml: 92 lines (new file)
@ -0,0 +1,92 @@
<contents>
% if not contents is UNDEFINED and len(contents) > 0:
% for patch_id in sorted(contents.keys()):
<patch id=${patch_id}>
% for pkg in sorted(contents[patch_id]):
<pkg>${pkg}</pkg>
% endfor
</patch>
% endfor
% endif
</contents>
<error>
% if not error is UNDEFINED and len(error) > 0:
${error}
% endif
</error>
<metadata>
% if not metadata is UNDEFINED and len(metadata) > 0:
% for patch_id in sorted(metadata.keys()):
${showpatch(patch_id)}
% endfor
% endif
</metadata>
<%def name="showpatch(patch_id)">\
<% p = metadata[patch_id] %>\
<patch>
<patch_id>
${patch_id}
</patch_id>
<status>
% if p["status"] != "":
${p["status"]}
% endif
</status>
<unremovable>
% if p["unremovable"] != "":
${p["unremovable"]}
% endif
</unremovable>
<reboot_required>
% if p["reboot_required"] != "":
${p["reboot_required"]}
% endif
</reboot_required>
<sw_version>
% if p["sw_version"] != "":
${p["sw_version"]}
% endif
</sw_version>
<repostate>
% if p["repostate"] != "":
${p["repostate"]}
% endif
</repostate>
<patchstate>
% if p["patchstate"] != "":
${p["patchstate"]}
% endif
</patchstate>
<status>
% if p["status"] != "":
${p["status"]}
% endif
</status>
<summary>
% if p["summary"] != "":
${p["summary"]}
% endif
</summary>
<description>
% if p["description"] != "":
${p["description"]}
% endif
</description>
<install_instructions>
% if p["install_instructions"] != "":
${p["install_instructions"]}
% endif
</install_instructions>
<warnings>
% if p["warnings"] != "":
${p["warnings"]}
% endif
</warnings>
<requires>
% if "requires" in p and len(p["requires"]) > 0:
% for req in sorted(p["requires"]):
<patch>${req}</patch>
% endfor
% endif
</requires>
</patch></%def>

software/software/utils.py: 115 lines (new file)
@ -0,0 +1,115 @@
"""
Copyright (c) 2023 Wind River Systems, Inc.

SPDX-License-Identifier: Apache-2.0

"""
import logging
from netaddr import IPAddress
import os
import socket
from socket import if_nametoindex as if_nametoindex_func

import software.constants as constants


LOG = logging.getLogger('main_logger')


def if_nametoindex(name):
    try:
        return if_nametoindex_func(name)
    except Exception:
        return 0


def gethostbyname(hostname):
    """gethostbyname with IPv6 support """
    try:
        return socket.getaddrinfo(hostname, None)[0][4][0]
    except Exception:
        return None


def get_management_version():
    """Determine whether management is IPv4 or IPv6 """
    controller_ip_string = gethostbyname(constants.CONTROLLER_FLOATING_HOSTNAME)
    if controller_ip_string:
        controller_ip_address = IPAddress(controller_ip_string)
        return controller_ip_address.version
    else:
        return constants.ADDRESS_VERSION_IPV4


def get_management_family():
    ip_version = get_management_version()
    if ip_version == constants.ADDRESS_VERSION_IPV6:
        return socket.AF_INET6
    else:
        return socket.AF_INET


def get_versioned_address_all():
    ip_version = get_management_version()
    if ip_version == constants.ADDRESS_VERSION_IPV6:
        return "::"
    else:
        return "0.0.0.0"


def ip_to_url(ip_address_string):
    """Add brackets if an IPv6 address """
    try:
        ip_address = IPAddress(ip_address_string)
        if ip_address.version == constants.ADDRESS_VERSION_IPV6:
            return "[%s]" % ip_address_string
        else:
            return ip_address_string
    except Exception:
        return ip_address_string


def ip_to_versioned_localhost(ip_address_string):
    """Return the loopback address matching the IP version """
    ip_address = IPAddress(ip_address_string)
    if ip_address.version == constants.ADDRESS_VERSION_IPV6:
        return "::1"
    else:
        return "localhost"


def read_cached_file(filename, cache_info, reload_func=None):
    """Read from a file if it has been modified.

    :param cache_info: dictionary to hold opaque cache.
    :param reload_func: optional function to be called with data when
                        file is reloaded due to a modification.

    :returns: data from file

    """
    mtime = os.path.getmtime(filename)
    if not cache_info or mtime != cache_info.get('mtime'):
        LOG.debug("Reloading cached file %s", filename)
        with open(filename) as fap:
            cache_info['data'] = fap.read()
        cache_info['mtime'] = mtime
        if reload_func:
            reload_func(cache_info['data'])
    return cache_info['data']


def safe_rstrip(value, chars=None):
    """Removes trailing characters from a string if that does not make it empty

    :param value: A string value that will be stripped.
    :param chars: Characters to remove.
    :return: Stripped value.

    """
    if not isinstance(value, str):
        LOG.warning("Failed to remove trailing character. Returning original "
                    "object. Supplied object is not a string: %s", value)
        return value

    return value.rstrip(chars) or value
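A short usage sketch for the helpers above, not part of the commit. It assumes netaddr is installed and the software package is importable; the addresses and the policy path are hypothetical.

```python
# Minimal usage sketch under the assumptions stated above.
from software import utils

# IPv6 addresses gain brackets so they can be embedded in URLs;
# IPv4 addresses (and unparseable strings) pass through unchanged.
assert utils.ip_to_url("fd00::1") == "[fd00::1]"
assert utils.ip_to_url("192.168.204.2") == "192.168.204.2"

# Bind address and family follow the management network version:
# "0.0.0.0"/AF_INET when the controller hostname resolves to IPv4
# (or does not resolve at all), "::"/AF_INET6 when it is IPv6.
bind_ip = utils.get_versioned_address_all()
family = utils.get_management_family()

# read_cached_file re-reads only when the mtime changes; the cache
# dict is owned by the caller. The path here is illustrative.
cache = {}
data = utils.read_cached_file("/etc/software/policy.json", cache)
```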
software/tox.ini
@ -8,6 +8,7 @@
envlist = pep8,py39,pylint,bandit,cover
minversion = 2.3.2
skipsdist = True
stxdir = {toxinidir}/../..

[testenv]
allowlist_externals = find
@ -15,6 +16,8 @@ allowlist_externals = find
basepython = python3
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt
       -e{[tox]stxdir}/config/tsconfig/tsconfig

install_command = pip install \
    -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/starlingx/root/raw/branch/master/build-tools/requirements/debian/upper-constraints.txt} \
    {opts} {packages}
@ -58,9 +61,9 @@ commands =
# H904 Delay string interpolations at logging calls (off by default)
enable-extensions = H106 H203,H904
exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build,release-tag-*
ignore =
max-line-length = 80
show-source = True
ignore = E402,H306,H404,H405,W504,E501

[testenv:flake8]
commands = flake8 {posargs}