From be09ccc58463454cd8f42ec4b6da207801508ffd Mon Sep 17 00:00:00 2001
From: Al Bailey
Date: Mon, 28 Mar 2022 15:57:41 +0000
Subject: [PATCH] Fork cgcs-patch package as sw-patch for Debian

The original cgcs-patch is RPM-based, which would require a complete
rewrite to work on ostree/dpkg systems like Debian. The code has been
forked, since the older CentOS environment and Python 2.7 are
end-of-life. Forking the code allows all new development to proceed
without re-testing on CentOS.

The debian folder under cgcs-patch has been moved under sw-patch.
Renaming and refactoring will be done in later commits.

pylint is un-clamped in order to work on Python 3.9.
Some minor pylint suppressions have been added.

Test Plan:
Verify that this builds on Debian.
Verify that the ISO installs the new content on Debian without
breaking packages that import cgcs_patch.
Verify the patching service runs on Debian.

Co-Authored-By: Jessica Castelino
Story: 2009101
Task: 43076
Signed-off-by: Al Bailey
Change-Id: I3f1bca749404053bae63d4bcc9fb2477cf909fcd
---
 .zuul.yaml | 34 +- bindep.txt | 2 + debian_pkg_dirs | 2 +- patch-alarm/patch-alarm/test-requirements.txt | 5 +- patch-alarm/patch-alarm/tox.ini | 9 +- sw-patch/bin/make_patch | 15 + sw-patch/bin/modify_patch | 15 + sw-patch/bin/patch-functions | 52 + sw-patch/bin/patch-tmpdirs.conf | 2 + sw-patch/bin/patch_build | 16 + sw-patch/bin/patch_check_goenabled.sh | 27 + sw-patch/bin/patching.conf | 7 + sw-patch/bin/patching.logrotate | 15 + sw-patch/bin/pmon-sw-patch-agent.conf | 19 + .../bin/pmon-sw-patch-controller-daemon.conf | 19 + sw-patch/bin/policy.json | 5 + sw-patch/bin/query_patch | 15 + sw-patch/bin/rpm-audit | 183 + sw-patch/bin/run-patch-scripts | 60 + sw-patch/bin/setup_patch_repo | 182 + sw-patch/bin/sw-patch | 16 + sw-patch/bin/sw-patch-agent | 16 + sw-patch/bin/sw-patch-agent-init.sh | 94 + sw-patch/bin/sw-patch-agent-restart | 20 + sw-patch/bin/sw-patch-agent.service | 16 + sw-patch/bin/sw-patch-controller-daemon | 16 + .../bin/sw-patch-controller-daemon-init.sh | 78 + .../bin/sw-patch-controller-daemon-restart | 20 + .../bin/sw-patch-controller-daemon.service | 16 + sw-patch/bin/sw-patch-controller-init.sh | 106 + sw-patch/bin/sw-patch-controller.service | 14 + sw-patch/bin/sw-patch-init.sh | 178 + sw-patch/bin/sw-patch.completion | 148 + sw-patch/bin/sw-patch.service | 16 + sw-patch/bin/upgrade-start-pkg-extract | 137 + sw-patch/cgcs-patch/.coveragerc | 7 + sw-patch/cgcs-patch/.stestr.conf | 2 + sw-patch/cgcs-patch/LICENSE | 202 + sw-patch/cgcs-patch/cgcs_patch/__init__.py | 6 + .../cgcs-patch/cgcs_patch/api/__init__.py | 30 + sw-patch/cgcs-patch/cgcs_patch/api/app.py | 43 + sw-patch/cgcs-patch/cgcs_patch/api/config.py | 23 + .../cgcs_patch/api/controllers/__init__.py | 6 + .../cgcs_patch/api/controllers/root.py | 293 ++ sw-patch/cgcs-patch/cgcs_patch/app.py | 24 + .../cgcs-patch/cgcs_patch/authapi/__init__.py | 25 + sw-patch/cgcs-patch/cgcs_patch/authapi/acl.py | 30 + sw-patch/cgcs-patch/cgcs_patch/authapi/app.py | 77 + .../cgcs_patch/authapi/auth_token.py | 40 + .../cgcs-patch/cgcs_patch/authapi/config.py | 23 + .../cgcs-patch/cgcs_patch/authapi/hooks.py | 100 + .../cgcs-patch/cgcs_patch/authapi/policy.py | 117 + sw-patch/cgcs-patch/cgcs_patch/base.py | 170 + .../cgcs-patch/cgcs_patch/certificates.py | 51 + sw-patch/cgcs-patch/cgcs_patch/config.py | 138 + sw-patch/cgcs-patch/cgcs_patch/constants.py | 51 + sw-patch/cgcs-patch/cgcs_patch/exceptions.py | 57 + sw-patch/cgcs-patch/cgcs_patch/messages.py | 64 + sw-patch/cgcs-patch/cgcs_patch/patch_agent.py | 941 +++++ 
.../cgcs-patch/cgcs_patch/patch_client.py | 1513 ++++++++ .../cgcs-patch/cgcs_patch/patch_controller.py | 2713 +++++++++++++ .../cgcs-patch/cgcs_patch/patch_functions.py | 1440 +++++++ .../cgcs-patch/cgcs_patch/patch_signing.py | 90 + .../cgcs-patch/cgcs_patch/patch_verify.py | 191 + .../cgcs_patch/templates/query.html | 92 + .../cgcs-patch/cgcs_patch/templates/query.xml | 95 + .../cgcs_patch/templates/query_agents.html | 32 + .../cgcs_patch/templates/query_hosts.xml | 75 + .../cgcs-patch/cgcs_patch/templates/show.html | 83 + .../cgcs-patch/cgcs_patch/templates/show.xml | 92 + .../cgcs-patch/cgcs_patch/tests/__init__.py | 0 .../cgcs-patch/cgcs_patch/tests/md5test.txt | 3422 +++++++++++++++++ .../cgcs_patch/tests/test_basics.py | 25 + .../cgcs_patch/tests/test_patch_agent.py | 30 + .../cgcs_patch/tests/test_patch_controller.py | 22 + .../cgcs_patch/tests/test_patch_utils.py | 146 + sw-patch/cgcs-patch/cgcs_patch/utils.py | 83 + sw-patch/cgcs-patch/cgcs_patch_id/README.txt | 34 + .../cgcs_patch_id/patch_id_allocator.py | 50 + .../patch_id_allocator_client.py | 66 + .../patch_id_allocator_server.conf | 16 + .../patch_id_allocator_server.py | 45 + sw-patch/cgcs-patch/pylint.rc | 430 +++ sw-patch/cgcs-patch/requirements.txt | 10 + sw-patch/cgcs-patch/setup.cfg | 23 + sw-patch/cgcs-patch/setup.py | 23 + sw-patch/cgcs-patch/test-requirements.txt | 13 + sw-patch/cgcs-patch/tox.ini | 146 + .../debian/deb_folder/cgcs-patch-agent.dirs | 0 .../deb_folder/cgcs-patch-agent.install | 0 .../cgcs-patch-agent.lintian-overrides | 0 .../deb_folder/cgcs-patch-controller.dirs | 0 .../deb_folder/cgcs-patch-controller.install | 0 .../cgcs-patch-controller.lintian-overrides | 0 .../debian/deb_folder/cgcs-patch.dirs | 0 .../debian/deb_folder/cgcs-patch.install | 0 .../deb_folder/cgcs-patch.lintian-overrides | 0 .../debian/deb_folder/changelog | 0 .../debian/deb_folder/control | 0 .../debian/deb_folder/copyright | 0 .../deb_folder/python3-cgcs-patch.install | 0 .../debian/deb_folder/rules | 0 .../debian/deb_folder/source/format | 0 .../debian/deb_folder/source/options | 0 .../deb_folder/systemd/00-cgcs-patch.preset | 0 .../debian/meta_data.yaml | 0 tox.ini | 14 +- 107 files changed, 15094 insertions(+), 15 deletions(-) create mode 100755 sw-patch/bin/make_patch create mode 100755 sw-patch/bin/modify_patch create mode 100644 sw-patch/bin/patch-functions create mode 100644 sw-patch/bin/patch-tmpdirs.conf create mode 100755 sw-patch/bin/patch_build create mode 100644 sw-patch/bin/patch_check_goenabled.sh create mode 100644 sw-patch/bin/patching.conf create mode 100644 sw-patch/bin/patching.logrotate create mode 100644 sw-patch/bin/pmon-sw-patch-agent.conf create mode 100644 sw-patch/bin/pmon-sw-patch-controller-daemon.conf create mode 100644 sw-patch/bin/policy.json create mode 100755 sw-patch/bin/query_patch create mode 100755 sw-patch/bin/rpm-audit create mode 100644 sw-patch/bin/run-patch-scripts create mode 100755 sw-patch/bin/setup_patch_repo create mode 100755 sw-patch/bin/sw-patch create mode 100755 sw-patch/bin/sw-patch-agent create mode 100755 sw-patch/bin/sw-patch-agent-init.sh create mode 100644 sw-patch/bin/sw-patch-agent-restart create mode 100644 sw-patch/bin/sw-patch-agent.service create mode 100755 sw-patch/bin/sw-patch-controller-daemon create mode 100755 sw-patch/bin/sw-patch-controller-daemon-init.sh create mode 100644 sw-patch/bin/sw-patch-controller-daemon-restart create mode 100644 sw-patch/bin/sw-patch-controller-daemon.service create mode 100644 sw-patch/bin/sw-patch-controller-init.sh create 
mode 100644 sw-patch/bin/sw-patch-controller.service create mode 100644 sw-patch/bin/sw-patch-init.sh create mode 100644 sw-patch/bin/sw-patch.completion create mode 100644 sw-patch/bin/sw-patch.service create mode 100644 sw-patch/bin/upgrade-start-pkg-extract create mode 100644 sw-patch/cgcs-patch/.coveragerc create mode 100644 sw-patch/cgcs-patch/.stestr.conf create mode 100644 sw-patch/cgcs-patch/LICENSE create mode 100644 sw-patch/cgcs-patch/cgcs_patch/__init__.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch/api/__init__.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch/api/app.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch/api/config.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch/api/controllers/__init__.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch/api/controllers/root.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch/app.py create mode 100755 sw-patch/cgcs-patch/cgcs_patch/authapi/__init__.py create mode 100755 sw-patch/cgcs-patch/cgcs_patch/authapi/acl.py create mode 100755 sw-patch/cgcs-patch/cgcs_patch/authapi/app.py create mode 100755 sw-patch/cgcs-patch/cgcs_patch/authapi/auth_token.py create mode 100755 sw-patch/cgcs-patch/cgcs_patch/authapi/config.py create mode 100755 sw-patch/cgcs-patch/cgcs_patch/authapi/hooks.py create mode 100755 sw-patch/cgcs-patch/cgcs_patch/authapi/policy.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch/base.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch/certificates.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch/config.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch/constants.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch/exceptions.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch/messages.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch/patch_agent.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch/patch_client.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch/patch_controller.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch/patch_functions.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch/patch_signing.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch/patch_verify.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch/templates/query.html create mode 100644 sw-patch/cgcs-patch/cgcs_patch/templates/query.xml create mode 100644 sw-patch/cgcs-patch/cgcs_patch/templates/query_agents.html create mode 100644 sw-patch/cgcs-patch/cgcs_patch/templates/query_hosts.xml create mode 100644 sw-patch/cgcs-patch/cgcs_patch/templates/show.html create mode 100644 sw-patch/cgcs-patch/cgcs_patch/templates/show.xml create mode 100644 sw-patch/cgcs-patch/cgcs_patch/tests/__init__.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch/tests/md5test.txt create mode 100644 sw-patch/cgcs-patch/cgcs_patch/tests/test_basics.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch/tests/test_patch_agent.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch/tests/test_patch_controller.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch/tests/test_patch_utils.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch/utils.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch_id/README.txt create mode 100755 sw-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator.py create mode 100755 sw-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator_client.py create mode 100644 sw-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator_server.conf create mode 100755 sw-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator_server.py create mode 100644 sw-patch/cgcs-patch/pylint.rc create mode 100644 sw-patch/cgcs-patch/requirements.txt 
create mode 100644 sw-patch/cgcs-patch/setup.cfg create mode 100644 sw-patch/cgcs-patch/setup.py create mode 100644 sw-patch/cgcs-patch/test-requirements.txt create mode 100644 sw-patch/cgcs-patch/tox.ini rename {cgcs-patch => sw-patch}/debian/deb_folder/cgcs-patch-agent.dirs (100%) rename {cgcs-patch => sw-patch}/debian/deb_folder/cgcs-patch-agent.install (100%) rename {cgcs-patch => sw-patch}/debian/deb_folder/cgcs-patch-agent.lintian-overrides (100%) rename {cgcs-patch => sw-patch}/debian/deb_folder/cgcs-patch-controller.dirs (100%) rename {cgcs-patch => sw-patch}/debian/deb_folder/cgcs-patch-controller.install (100%) rename {cgcs-patch => sw-patch}/debian/deb_folder/cgcs-patch-controller.lintian-overrides (100%) rename {cgcs-patch => sw-patch}/debian/deb_folder/cgcs-patch.dirs (100%) rename {cgcs-patch => sw-patch}/debian/deb_folder/cgcs-patch.install (100%) rename {cgcs-patch => sw-patch}/debian/deb_folder/cgcs-patch.lintian-overrides (100%) rename {cgcs-patch => sw-patch}/debian/deb_folder/changelog (100%) rename {cgcs-patch => sw-patch}/debian/deb_folder/control (100%) rename {cgcs-patch => sw-patch}/debian/deb_folder/copyright (100%) rename {cgcs-patch => sw-patch}/debian/deb_folder/python3-cgcs-patch.install (100%) rename {cgcs-patch => sw-patch}/debian/deb_folder/rules (100%) rename {cgcs-patch => sw-patch}/debian/deb_folder/source/format (100%) rename {cgcs-patch => sw-patch}/debian/deb_folder/source/options (100%) rename {cgcs-patch => sw-patch}/debian/deb_folder/systemd/00-cgcs-patch.preset (100%) rename {cgcs-patch => sw-patch}/debian/meta_data.yaml (100%) diff --git a/.zuul.yaml b/.zuul.yaml index c44460df..bcd4149a 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -15,6 +15,8 @@ - patch-alarm-tox-pylint - patch-alarm-tox-py27 - patch-alarm-tox-py39 + - sw-patch-tox-pylint + - sw-patch-tox-py39 gate: jobs: - openstack-tox-linters @@ -26,6 +28,8 @@ - patch-alarm-tox-pylint - patch-alarm-tox-py27 - patch-alarm-tox-py39 + - sw-patch-tox-pylint + - sw-patch-tox-py39 post: jobs: - stx-update-upload-git-mirror @@ -79,7 +83,6 @@ required-projects: - starlingx/config - starlingx/fault - - starlingx/root files: - cgcs-patch/cgcs-patch/* vars: @@ -87,6 +90,34 @@ python_version: 3.9 tox_extra_args: -c cgcs-patch/cgcs-patch/tox.ini +- job: + name: sw-patch-tox-py39 + parent: tox-py39 + nodeset: debian-bullseye + required-projects: + - starlingx/config + - starlingx/fault + files: + - sw-patch/cgcs-patch/* + vars: + tox_envlist: py39 + python_version: 3.9 + tox_extra_args: -c sw-patch/cgcs-patch/tox.ini + +- job: + name: sw-patch-tox-pylint + parent: tox + nodeset: debian-bullseye + required-projects: + - starlingx/config + - starlingx/fault + files: + - sw-patch/cgcs-patch/* + vars: + tox_envlist: pylint + python_version: 3.9 + tox_extra_args: -c sw-patch/cgcs-patch/tox.ini + - job: name: patch-alarm-tox-pylint @@ -121,7 +152,6 @@ required-projects: - starlingx/config - starlingx/fault - - starlingx/root files: - patch-alarm/patch-alarm/* vars: diff --git a/bindep.txt b/bindep.txt index 6a94fba9..41ed8109 100644 --- a/bindep.txt +++ b/bindep.txt @@ -1,5 +1,7 @@ # This is a cross-platform list tracking distribution packages needed for install and tests; # see https://docs.openstack.org/infra/bindep/ for additional information. 
+# Do not install python2 rpms in a python3 only environment such as debian-bullseye +python-rpm [platform:dpkg !platform:debian-bullseye] python3-rpm [platform:dpkg] rpm-python [platform:rpm] diff --git a/debian_pkg_dirs b/debian_pkg_dirs index 3ec2cc02..dfe1b139 100644 --- a/debian_pkg_dirs +++ b/debian_pkg_dirs @@ -1,3 +1,3 @@ -cgcs-patch enable-dev-patch patch-alarm +sw-patch diff --git a/patch-alarm/patch-alarm/test-requirements.txt b/patch-alarm/patch-alarm/test-requirements.txt index 2d713866..061b905a 100644 --- a/patch-alarm/patch-alarm/test-requirements.txt +++ b/patch-alarm/patch-alarm/test-requirements.txt @@ -3,9 +3,12 @@ # process, which may cause wedges in the gate later. hacking>=1.1.0,<=2.0.0 # Apache-2.0 - +astroid <= 2.2.5 coverage!=4.4,>=4.0 # Apache-2.0 mock>=2.0.0 # BSD stestr>=1.0.0 # Apache-2.0 testtools>=2.2.0 # MIT pycryptodomex +isort<5;python_version>="3.0" +pylint<2.1.0;python_version<"3.0" # GPLv2 +pylint<2.4.0;python_version>="3.0" # GPLv2 diff --git a/patch-alarm/patch-alarm/tox.ini b/patch-alarm/patch-alarm/tox.ini index 481a7327..8b12a3a7 100644 --- a/patch-alarm/patch-alarm/tox.ini +++ b/patch-alarm/patch-alarm/tox.ini @@ -28,7 +28,7 @@ setenv = VIRTUAL_ENV={envdir} passenv = XDG_CACHE_HOME -sitepackages = False +sitepackages = True install_command = pip install \ -v -v -v \ -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/stable/stein/upper-constraints.txt} \ @@ -64,7 +64,6 @@ install_command = pip install \ -v -v -v \ -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/starlingx/root/raw/branch/master/build-tools/requirements/debian/upper-constraints.txt} \ {opts} {packages} - {opts} {packages} deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt @@ -110,17 +109,13 @@ basepython = python3 deps = {[testenv]deps} flake8-bugbear usedevelop = False -#skip_install = True commands = flake8 {posargs} . [testenv:pylint] +basepython = python3 deps = {[testenv]deps} pylint - -basepython = python2.7 -sitepackages = False - commands = pylint patch_alarm --rcfile=./pylint.rc [testenv:cover] diff --git a/sw-patch/bin/make_patch b/sw-patch/bin/make_patch new file mode 100755 index 00000000..95cf05b3 --- /dev/null +++ b/sw-patch/bin/make_patch @@ -0,0 +1,15 @@ +#!/usr/bin/env python +# +# Copyright (c) 2013-2014 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + + +import sys + +from cgcs_make_patch.make_patch_functions import make_patch + +if __name__ == "__main__": + sys.exit(make_patch()) diff --git a/sw-patch/bin/modify_patch b/sw-patch/bin/modify_patch new file mode 100755 index 00000000..470f0fc1 --- /dev/null +++ b/sw-patch/bin/modify_patch @@ -0,0 +1,15 @@ +#!/usr/bin/env python +# +# Copyright (c) 2013-2014 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + + +import sys + +from cgcs_make_patch.make_patch_functions import modify_patch + +if __name__ == "__main__": + sys.exit(modify_patch()) diff --git a/sw-patch/bin/patch-functions b/sw-patch/bin/patch-functions new file mode 100644 index 00000000..2da087c7 --- /dev/null +++ b/sw-patch/bin/patch-functions @@ -0,0 +1,52 @@ +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# This bash source file provides variables and functions that +# may be used by in-service patching scripts. +# + +# Source platform.conf, for nodetype and subfunctions +. 
/etc/platform/platform.conf + +declare PATCH_SCRIPTDIR=/run/patching/patch-scripts +declare PATCH_FLAGDIR=/run/patching/patch-flags +declare -i PATCH_STATUS_OK=0 +declare -i PATCH_STATUS_FAILED=1 + +declare logfile=/var/log/patching.log +declare NAME=$(basename $0) + +function loginfo() +{ + echo "`date "+%FT%T.%3N"`: $NAME: $*" >> $logfile +} + +function is_controller() +{ + [[ $nodetype == "controller" ]] +} + +function is_worker() +{ + [[ $nodetype == "worker" ]] +} + +function is_storage() +{ + [[ $nodetype == "storage" ]] +} + +function is_cpe() +{ + [[ $nodetype == "controller" && $subfunction =~ worker ]] +} + +function is_locked() +{ + test -f /var/run/.node_locked +} + diff --git a/sw-patch/bin/patch-tmpdirs.conf b/sw-patch/bin/patch-tmpdirs.conf new file mode 100644 index 00000000..b30284f2 --- /dev/null +++ b/sw-patch/bin/patch-tmpdirs.conf @@ -0,0 +1,2 @@ +d /run/patching 0700 root root - + diff --git a/sw-patch/bin/patch_build b/sw-patch/bin/patch_build new file mode 100755 index 00000000..580890d2 --- /dev/null +++ b/sw-patch/bin/patch_build @@ -0,0 +1,16 @@ +#!/usr/bin/env python + +""" +Copyright (c) 2014 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +import sys + +from cgcs_patch.patch_functions import patch_build + +if __name__ == "__main__": + sys.exit(patch_build()) + diff --git a/sw-patch/bin/patch_check_goenabled.sh b/sw-patch/bin/patch_check_goenabled.sh new file mode 100644 index 00000000..022367cf --- /dev/null +++ b/sw-patch/bin/patch_check_goenabled.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# +# Copyright (c) 2014 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# Patching "goenabled" check. +# If a patch has been applied on this node, it is now out-of-date and should be rebooted. + +NAME=$(basename $0) +SYSTEM_CHANGED_FLAG=/var/run/node_is_patched + +logfile=/var/log/patching.log + +function LOG { + logger "$NAME: $*" + echo "`date "+%FT%T.%3N"`: $NAME: $*" >> $logfile +} + +if [ -f $SYSTEM_CHANGED_FLAG ]; then + LOG "Node has been patched. Failing goenabled check." 
+ exit 1 +fi + +exit 0 + diff --git a/sw-patch/bin/patching.conf b/sw-patch/bin/patching.conf new file mode 100644 index 00000000..b3c3adac --- /dev/null +++ b/sw-patch/bin/patching.conf @@ -0,0 +1,7 @@ +[runtime] +controller_multicast = 239.1.1.3 +agent_multicast = 239.1.1.4 +api_port = 5487 +controller_port = 5488 +agent_port = 5489 + diff --git a/sw-patch/bin/patching.logrotate b/sw-patch/bin/patching.logrotate new file mode 100644 index 00000000..2dbdeffa --- /dev/null +++ b/sw-patch/bin/patching.logrotate @@ -0,0 +1,15 @@ +/var/log/patching.log +/var/log/patching-api.log +/var/log/patching-insvc.log +{ + nodateext + size 10M + start 1 + rotate 10 + missingok + notifempty + compress + delaycompress + copytruncate +} + diff --git a/sw-patch/bin/pmon-sw-patch-agent.conf b/sw-patch/bin/pmon-sw-patch-agent.conf new file mode 100644 index 00000000..09872dab --- /dev/null +++ b/sw-patch/bin/pmon-sw-patch-agent.conf @@ -0,0 +1,19 @@ +[process] +process = sw-patch-agent +pidfile = /var/run/sw-patch-agent.pid +script = /etc/init.d/sw-patch-agent +style = lsb ; ocf or lsb +severity = major ; Process failure severity + ; critical : host is failed + ; major : host is degraded + ; minor : log is generated +restarts = 3 ; Number of back to back unsuccessful restarts before severity assertion +interval = 5 ; Number of seconds to wait between back-to-back unsuccessful restarts +debounce = 20 ; Number of seconds the process needs to run before declaring + ; it as running O.K. after a restart. + ; Time after which back-to-back restart count is cleared. +startuptime = 10 ; Seconds to wait after process start before starting the debounce monitor +mode = passive ; Monitoring mode: passive (default) or active + ; passive: process death monitoring (default: always) + ; active: heartbeat monitoring, i.e. request / response messaging + diff --git a/sw-patch/bin/pmon-sw-patch-controller-daemon.conf b/sw-patch/bin/pmon-sw-patch-controller-daemon.conf new file mode 100644 index 00000000..834cd201 --- /dev/null +++ b/sw-patch/bin/pmon-sw-patch-controller-daemon.conf @@ -0,0 +1,19 @@ +[process] +process = sw-patch-controller-daemon +pidfile = /var/run/sw-patch-controller-daemon.pid +script = /etc/init.d/sw-patch-controller-daemon +style = lsb ; ocf or lsb +severity = major ; Process failure severity + ; critical : host is failed + ; major : host is degraded + ; minor : log is generated +restarts = 3 ; Number of back to back unsuccessful restarts before severity assertion +interval = 5 ; Number of seconds to wait between back-to-back unsuccessful restarts +debounce = 20 ; Number of seconds the process needs to run before declaring + ; it as running O.K. after a restart. + ; Time after which back-to-back restart count is cleared. +startuptime = 10 ; Seconds to wait after process start before starting the debounce monitor +mode = passive ; Monitoring mode: passive (default) or active + ; passive: process death monitoring (default: always) + ; active: heartbeat monitoring, i.e. 
request / response messaging + diff --git a/sw-patch/bin/policy.json b/sw-patch/bin/policy.json new file mode 100644 index 00000000..94ac3a5b --- /dev/null +++ b/sw-patch/bin/policy.json @@ -0,0 +1,5 @@ +{ + "admin": "role:admin or role:administrator", + "admin_api": "is_admin:True", + "default": "rule:admin_api" +} diff --git a/sw-patch/bin/query_patch b/sw-patch/bin/query_patch new file mode 100755 index 00000000..f6637d90 --- /dev/null +++ b/sw-patch/bin/query_patch @@ -0,0 +1,15 @@ +#!/usr/bin/env python +# +# Copyright (c) 2013-2014 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + + +import sys + +from cgcs_make_patch.make_patch_functions import query_patch + +if __name__ == "__main__": + sys.exit(query_patch()) diff --git a/sw-patch/bin/rpm-audit b/sw-patch/bin/rpm-audit new file mode 100755 index 00000000..3ae2755c --- /dev/null +++ b/sw-patch/bin/rpm-audit @@ -0,0 +1,183 @@ +#!/bin/bash +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +if [[ $EUID -ne 0 ]]; then + echo "This utility must be run as root." >&2 + exit 1 +fi + +function show_usage() +{ + cat <>$SCRIPTLOG +############################################################ +`date "+%FT%T.%3N"`: Running $NUM_SCRIPTS in-service patch scripts: + +$SCRIPTS + +############################################################ +EOF + +declare -i FAILURES=0 +for cmd in $SCRIPTS +do + cat <>$SCRIPTLOG +############################################################ +`date "+%FT%T.%3N"`: Running $cmd + +EOF + + bash -x $cmd >>$SCRIPTLOG 2>&1 + rc=$? + if [ $rc -ne $PATCH_STATUS_OK ] + then + let -i FAILURES++ + fi + cat <>$SCRIPTLOG +`date "+%FT%T.%3N"`: Completed running $cmd (rc=$rc) +############################################################ + +EOF +done + +cat <>$SCRIPTLOG + +`date "+%FT%T.%3N"`: Completed running scripts with $FAILURES failures +############################################################ +EOF + +exit $FAILURES + diff --git a/sw-patch/bin/setup_patch_repo b/sw-patch/bin/setup_patch_repo new file mode 100755 index 00000000..4330354d --- /dev/null +++ b/sw-patch/bin/setup_patch_repo @@ -0,0 +1,182 @@ +#!/usr/bin/env python + +""" +Copyright (c) 2018-2020 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +import getopt +import os +import platform +import rpm +import shutil +import subprocess +import sys +import tempfile + +import cgcs_patch.patch_functions as pf +import cgcs_patch.patch_verify as pv +import cgcs_patch.constants as constants + +import logging +logging.getLogger('main_logger') +logging.basicConfig(level=logging.INFO) + +# Override the pv.dev_certificate_marker so we can verify signatures off-box +cgcs_patch_bindir = os.path.dirname(os.path.abspath(sys.argv[0])) +dev_cert_path = os.path.abspath(os.path.join(cgcs_patch_bindir, '../../enable-dev-patch/enable-dev-patch/dev_certificate_enable.bin')) + +pv.dev_certificate_marker = dev_cert_path + +def usage(): + print "Usage: %s -o ..." 
\ + % os.path.basename(sys.argv[0]) + exit(1) + + +def main(): + try: + opts, remainder = getopt.getopt(sys.argv[1:], + 'o:', + ['output=']) + except getopt.GetoptError: + usage() + + output = None + + for opt, arg in opts: + if opt == "--output" or opt == '-o': + output = arg + + if output is None: + usage() + + sw_version = os.environ['PLATFORM_RELEASE'] + + allpatches = pf.PatchData() + + output = os.path.abspath(output) + + pkgdir = os.path.join(output, 'Packages') + datadir = os.path.join(output, 'metadata') + committed_dir = os.path.join(datadir, 'committed') + + if os.path.exists(output): + # Check to see if the expected structure already exists, + # maybe we're appending a patch. + if not os.path.exists(committed_dir) or not os.path.exists(pkgdir): + print "Packages or metadata dir missing from existing %s. Aborting..." % output + exit(1) + + # Load the existing metadata + allpatches.load_all_metadata(committed_dir, constants.COMMITTED) + else: + os.mkdir(output, 0o755) + os.mkdir(datadir, 0o755) + os.mkdir(committed_dir, 0o755) + os.mkdir(pkgdir, 0o755) + + # Save the current directory, so we can chdir back after + orig_wd = os.getcwd() + + tmpdir = None + try: + for p in remainder: + fpath = os.path.abspath(p) + + # Create a temporary working directory + tmpdir = tempfile.mkdtemp(prefix="patchrepo_") + + # Change to the tmpdir + os.chdir(tmpdir) + + print "Parsing %s" % fpath + pf.PatchFile.read_patch(fpath) + + thispatch = pf.PatchData() + patch_id = thispatch.parse_metadata("metadata.xml", constants.COMMITTED) + + if patch_id in allpatches.metadata: + print "Skipping %s as it's already in the repo" % patch_id + # Change back to original working dir + os.chdir(orig_wd) + + shutil.rmtree(tmpdir) + tmpdir = None + + continue + + patch_sw_version = thispatch.query_line(patch_id, 'sw_version') + if patch_sw_version != sw_version: + raise Exception("%s is for release %s, not %s" % (patch_id, patch_sw_version, sw_version)) + + # Move the metadata to the "committed" dir, and the rpms to the Packages dir + shutil.move('metadata.xml', os.path.join(committed_dir, "%s-metadata.xml" % patch_id)) + for f in thispatch.query_line(patch_id, 'contents'): + shutil.move(f, pkgdir) + + allpatches.add_patch(patch_id, thispatch) + + # Change back to original working dir + os.chdir(orig_wd) + + shutil.rmtree(tmpdir) + tmpdir = None + except: + if tmpdir is not None: + # Change back to original working dir + os.chdir(orig_wd) + + shutil.rmtree(tmpdir) + tmpdir = None + raise + + allpatches.gen_release_groups_xml(sw_version, output) + + # Purge unneeded RPMs + keep = {} + for patch_id in allpatches.metadata.keys(): + for rpmname in allpatches.contents[patch_id]: + try: + pkgname, arch, pkgver = pf.parse_rpm_filename(rpmname) + except ValueError as e: + raise e + + if pkgname not in keep: + keep[pkgname] = { arch: pkgver } + continue + elif arch not in keep[pkgname]: + keep[pkgname][arch] = pkgver + continue + + # Compare versions + keep_pkgver = keep[pkgname][arch] + if pkgver > keep_pkgver: + # Find the rpmname + keep_rpmname = keep_pkgver.generate_rpm_filename(pkgname, arch) + + filename = os.path.join(pkgdir, keep_rpmname) + if os.path.exists(filename): + os.remove(filename) + + # Keep the new pkgver + keep[pkgname][arch] = pkgver + else: + filename = os.path.join(pkgdir, rpmname) + if os.path.exists(filename): + os.remove(filename) + + # Create the repo metadata + if os.path.exists('/usr/bin/createrepo_c'): + createrepo = '/usr/bin/createrepo_c' + else: + createrepo = 'createrepo' + + 
os.chdir(output) + subprocess.check_call([createrepo, '-g', 'comps.xml', '.']) + +if __name__ == "__main__": + sys.exit(main()) + diff --git a/sw-patch/bin/sw-patch b/sw-patch/bin/sw-patch new file mode 100755 index 00000000..de5839cc --- /dev/null +++ b/sw-patch/bin/sw-patch @@ -0,0 +1,16 @@ +#!/usr/bin/python + +""" +Copyright (c) 2014 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +import sys + +from cgcs_patch.patch_client import main + +if __name__ == "__main__": + main() + diff --git a/sw-patch/bin/sw-patch-agent b/sw-patch/bin/sw-patch-agent new file mode 100755 index 00000000..427ed176 --- /dev/null +++ b/sw-patch/bin/sw-patch-agent @@ -0,0 +1,16 @@ +#!/usr/bin/python + +""" +Copyright (c) 2014 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +import sys + +from cgcs_patch.patch_agent import main + +if __name__ == "__main__": + main() + diff --git a/sw-patch/bin/sw-patch-agent-init.sh b/sw-patch/bin/sw-patch-agent-init.sh new file mode 100755 index 00000000..d17ae901 --- /dev/null +++ b/sw-patch/bin/sw-patch-agent-init.sh @@ -0,0 +1,94 @@ +#!/bin/sh +# +# Copyright (c) 2014-2015 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# chkconfig: 345 26 30 + +### BEGIN INIT INFO +# Provides: sw-patch-agent +# Required-Start: $syslog +# Required-Stop: $syslog +# Default-Start: 2 3 5 +# Default-Stop: 0 1 6 +# Short-Description: sw-patch-agent +# Description: Provides the CGCS Patch Agent Daemon +### END INIT INFO + +DESC="sw-patch-agent" +DAEMON="/usr/sbin/sw-patch-agent" +PIDFILE="/var/run/sw-patch-agent.pid" +PATCH_INSTALLING_FILE="/var/run/patch_installing" + +start() +{ + if [ -e $PIDFILE ]; then + PIDDIR=/proc/$(cat $PIDFILE) + if [ -d ${PIDDIR} ]; then + echo "$DESC already running." + exit 1 + else + echo "Removing stale PID file $PIDFILE" + rm -f $PIDFILE + fi + fi + + echo -n "Starting $DESC..." + + start-stop-daemon --start --quiet --background \ + --pidfile ${PIDFILE} --make-pidfile --exec ${DAEMON} + + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." + fi +} + +stop() +{ + if [ -f $PATCH_INSTALLING_FILE ]; then + echo "Patches are installing. Waiting for install to complete." + while [ -f $PATCH_INSTALLING_FILE ]; do + # Verify the agent is still running + pid=$(cat $PATCH_INSTALLING_FILE) + cat /proc/$pid/cmdline 2>/dev/null | grep -q $DAEMON + if [ $? -ne 0 ]; then + echo "Patch agent not running." + break + fi + sleep 1 + done + echo "Continuing with shutdown." + fi + + echo -n "Stopping $DESC..." + start-stop-daemon --stop --quiet --pidfile $PIDFILE + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." + fi + rm -f $PIDFILE +} + +case "$1" in + start) + start + ;; + stop) + stop + ;; + restart|force-reload) + stop + start + ;; + *) + echo "Usage: $0 {start|stop|force-reload|restart}" + exit 1 + ;; +esac + +exit 0 diff --git a/sw-patch/bin/sw-patch-agent-restart b/sw-patch/bin/sw-patch-agent-restart new file mode 100644 index 00000000..45e86798 --- /dev/null +++ b/sw-patch/bin/sw-patch-agent-restart @@ -0,0 +1,20 @@ +#!/bin/bash +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +. /etc/patching/patch-functions + +# +# Triggering a restart of the patching daemons is done by +# creating a flag file and letting the daemon handle the restart. 
+# +loginfo "Requesting restart of patch-agent" + +restart_patch_agent_flag="/run/patching/.restart.patch-agent" +touch $restart_patch_agent_flag + +exit 0 + diff --git a/sw-patch/bin/sw-patch-agent.service b/sw-patch/bin/sw-patch-agent.service new file mode 100644 index 00000000..9ca3a253 --- /dev/null +++ b/sw-patch/bin/sw-patch-agent.service @@ -0,0 +1,16 @@ +[Unit] +Description=StarlingX Patching Agent +After=syslog.target network-online.target sw-patch.service +Before=pmon.service + +[Service] +Type=forking +User=root +ExecStart=/etc/init.d/sw-patch-agent start +ExecStop=/etc/init.d/sw-patch-agent stop +ExecReload=/etc/init.d/sw-patch-agent restart +PIDFile=/var/run/sw-patch-agent.pid + +[Install] +WantedBy=multi-user.target + diff --git a/sw-patch/bin/sw-patch-controller-daemon b/sw-patch/bin/sw-patch-controller-daemon new file mode 100755 index 00000000..5c0f0a8c --- /dev/null +++ b/sw-patch/bin/sw-patch-controller-daemon @@ -0,0 +1,16 @@ +#!/usr/bin/python + +""" +Copyright (c) 2014 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +import sys + +from cgcs_patch.patch_controller import main + +if __name__ == "__main__": + main() + diff --git a/sw-patch/bin/sw-patch-controller-daemon-init.sh b/sw-patch/bin/sw-patch-controller-daemon-init.sh new file mode 100755 index 00000000..e85d492d --- /dev/null +++ b/sw-patch/bin/sw-patch-controller-daemon-init.sh @@ -0,0 +1,78 @@ +#!/bin/sh +# +# Copyright (c) 2014-2015 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# chkconfig: 345 25 30 + +### BEGIN INIT INFO +# Provides: sw-patch-controller-daemon +# Required-Start: $syslog +# Required-Stop: $syslog +# Default-Start: 2 3 5 +# Default-Stop: 0 1 6 +# Short-Description: sw-patch-controller-daemon +# Description: Provides the CGCS Patch Controller Daemon +### END INIT INFO + +DESC="sw-patch-controller-daemon" +DAEMON="/usr/sbin/sw-patch-controller-daemon" +PIDFILE="/var/run/sw-patch-controller-daemon.pid" + +start() +{ + if [ -e $PIDFILE ]; then + PIDDIR=/proc/$(cat $PIDFILE) + if [ -d ${PIDDIR} ]; then + echo "$DESC already running." + exit 1 + else + echo "Removing stale PID file $PIDFILE" + rm -f $PIDFILE + fi + fi + + echo -n "Starting $DESC..." + + start-stop-daemon --start --quiet --background \ + --pidfile ${PIDFILE} --make-pidfile --exec ${DAEMON} + + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." + fi +} + +stop() +{ + echo -n "Stopping $DESC..." + start-stop-daemon --stop --quiet --pidfile $PIDFILE + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." + fi + rm -f $PIDFILE +} + +case "$1" in + start) + start + ;; + stop) + stop + ;; + restart|force-reload) + stop + start + ;; + *) + echo "Usage: $0 {start|stop|force-reload|restart}" + exit 1 + ;; +esac + +exit 0 diff --git a/sw-patch/bin/sw-patch-controller-daemon-restart b/sw-patch/bin/sw-patch-controller-daemon-restart new file mode 100644 index 00000000..129348f0 --- /dev/null +++ b/sw-patch/bin/sw-patch-controller-daemon-restart @@ -0,0 +1,20 @@ +#!/bin/bash +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +. /etc/patching/patch-functions + +# +# Triggering a restart of the patching daemons is done by +# creating a flag file and letting the daemon handle the restart. 
+# +loginfo "Requesting restart of patch-controller" + +restart_patch_controller_flag="/run/patching/.restart.patch-controller" +touch $restart_patch_controller_flag + +exit 0 + diff --git a/sw-patch/bin/sw-patch-controller-daemon.service b/sw-patch/bin/sw-patch-controller-daemon.service new file mode 100644 index 00000000..7b11291a --- /dev/null +++ b/sw-patch/bin/sw-patch-controller-daemon.service @@ -0,0 +1,16 @@ +[Unit] +Description=StarlingX Patching Controller Daemon +After=syslog.target network-online.target sw-patch.service sw-patch-controller.service +Before=pmon.service + +[Service] +Type=forking +User=root +ExecStart=/etc/init.d/sw-patch-controller-daemon start +ExecStop=/etc/init.d/sw-patch-controller-daemon stop +ExecReload=/etc/init.d/sw-patch-controller-daemon restart +PIDFile=/var/run/sw-patch-controller-daemon.pid + +[Install] +WantedBy=multi-user.target + diff --git a/sw-patch/bin/sw-patch-controller-init.sh b/sw-patch/bin/sw-patch-controller-init.sh new file mode 100644 index 00000000..c9190c9c --- /dev/null +++ b/sw-patch/bin/sw-patch-controller-init.sh @@ -0,0 +1,106 @@ +#!/bin/bash +# +# Copyright (c) 2014 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +# StarlingX Patching Controller setup +# chkconfig: 345 20 24 +# description: CGCS Patching Controller init script + +### BEGIN INIT INFO +# Provides: sw-patch-controller +# Required-Start: $syslog +# Required-Stop: $syslog +# Default-Start: 2 3 5 +# Default-Stop: 0 1 6 +# Short-Description: sw-patch-controller +# Description: Provides the StarlingX Patch Controller Daemon +### END INIT INFO + +. /usr/bin/tsconfig + +NAME=$(basename $0) + +REPO_ID=updates +REPO_ROOT=/var/www/pages/${REPO_ID} +REPO_DIR=${REPO_ROOT}/rel-${SW_VERSION} +GROUPS_FILE=$REPO_DIR/comps.xml +PATCHING_DIR=/opt/patching + +logfile=/var/log/patching.log + +function LOG { + logger "$NAME: $*" + echo "`date "+%FT%T.%3N"`: $NAME: $*" >> $logfile +} + +function LOG_TO_FILE { + echo "`date "+%FT%T.%3N"`: $NAME: $*" >> $logfile +} + +function create_groups { + if [ -f $GROUPS_FILE ]; then + return 0 + fi + + cat >$GROUPS_FILE < + + +EOF +} + +function do_setup { + # Does the repo exist? + if [ ! -d $REPO_DIR ]; then + LOG "Creating repo" + mkdir -p $REPO_DIR + + # Setup the groups file + create_groups + + createrepo -g $GROUPS_FILE $REPO_DIR >> $logfile 2>&1 + fi + + if [ ! -d $PATCHING_DIR ]; then + LOG "Creating $PATCHING_DIR" + mkdir -p $PATCHING_DIR + fi + + # If we can ping the active controller, sync the repos + LOG_TO_FILE "ping -c 1 -w 1 controller" + ping -c 1 -w 1 controller >> $logfile 2>&1 || ping6 -c 1 -w 1 controller >> $logfile 2>&1 + if [ $? -ne 0 ]; then + LOG "Cannot ping controller. 
Nothing to do" + return 0 + fi + + # Sync the patching dir + LOG_TO_FILE "rsync -acv --delete rsync://controller/patching/ ${PATCHING_DIR}/" + rsync -acv --delete rsync://controller/patching/ ${PATCHING_DIR}/ >> $logfile 2>&1 + + # Sync the patching dir + LOG_TO_FILE "rsync -acv --delete rsync://controller/repo/ ${REPO_ROOT}/" + rsync -acv --delete rsync://controller/repo/ ${REPO_ROOT}/ >> $logfile 2>&1 +} + +case "$1" in + start) + do_setup + ;; + status) + ;; + stop) + # Nothing to do here + ;; + restart) + do_setup + ;; + *) + echo "Usage: $0 {status|start|stop|restart}" + exit 1 +esac + +exit 0 + diff --git a/sw-patch/bin/sw-patch-controller.service b/sw-patch/bin/sw-patch-controller.service new file mode 100644 index 00000000..d40535a7 --- /dev/null +++ b/sw-patch/bin/sw-patch-controller.service @@ -0,0 +1,14 @@ +[Unit] +Description=StarlingX Patching Controller +After=syslog.service network-online.target sw-patch.service +Before=sw-patch-agent.service sw-patch-controller-daemon.service + +[Service] +Type=oneshot +User=root +ExecStart=/etc/init.d/sw-patch-controller start +RemainAfterExit=yes + +[Install] +WantedBy=multi-user.target + diff --git a/sw-patch/bin/sw-patch-init.sh b/sw-patch/bin/sw-patch-init.sh new file mode 100644 index 00000000..0168f74f --- /dev/null +++ b/sw-patch/bin/sw-patch-init.sh @@ -0,0 +1,178 @@ +#!/bin/bash +# +# Copyright (c) 2014-2020 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +# StarlingX Patching +# chkconfig: 345 20 23 +# description: StarlingX Patching init script + +### BEGIN INIT INFO +# Provides: sw-patch +# Required-Start: $syslog +# Required-Stop: $syslog +# Default-Start: 2 3 5 +# Default-Stop: 0 1 6 +# Short-Description: sw-patch +# Description: Provides the StarlingX Patching +### END INIT INFO + +NAME=$(basename $0) + +. /usr/bin/tsconfig +. /etc/platform/platform.conf + +logfile=/var/log/patching.log +patch_failed_file=/var/run/patch_install_failed +patched_during_init=/etc/patching/.patched_during_init + +function LOG_TO_FILE { + echo "`date "+%FT%T.%3N"`: $NAME: $*" >> $logfile +} + +function check_for_rr_patch { + if [ -f /var/run/node_is_patched_rr ]; then + if [ ! -f ${patched_during_init} ]; then + echo + echo "Node has been patched and requires an immediate reboot." + echo + LOG_TO_FILE "Node has been patched, with reboot-required flag set. Rebooting" + touch ${patched_during_init} + /sbin/reboot + else + echo + echo "Node has been patched during init a second consecutive time. Skipping reboot due to possible error" + echo + LOG_TO_FILE "Node has been patched during init a second consecutive time. Skipping reboot due to possible error" + touch ${patch_failed_file} + rm -f ${patched_during_init} + exit 1 + fi + else + rm -f ${patched_during_init} + fi +} + +function check_install_uuid { + # Check whether our installed load matches the active controller + CONTROLLER_UUID=`curl -sf http://controller:${http_port}/feed/rel-${SW_VERSION}/install_uuid` + if [ $? -ne 0 ]; then + if [ "$HOSTNAME" = "controller-1" ]; then + # If we're on controller-1, controller-0 may not have the install_uuid + # matching this release, if we're in an upgrade. 
If the file doesn't exist, + # bypass this check + return 0 + fi + + LOG_TO_FILE "Unable to retrieve installation uuid from active controller" + echo "Unable to retrieve installation uuid from active controller" + return 1 + fi + + if [ "$INSTALL_UUID" != "$CONTROLLER_UUID" ]; then + LOG_TO_FILE "This node is running a different load than the active controller and must be reinstalled" + echo "This node is running a different load than the active controller and must be reinstalled" + return 1 + fi + + return 0 +} + +# Check for installation failure +if [ -f /etc/platform/installation_failed ] ; then + LOG_TO_FILE "/etc/platform/installation_failed flag is set. Aborting." + echo "$(basename $0): Detected installation failure. Aborting." + exit 1 +fi + +# Clean up the RPM DB +if [ ! -f /var/run/.rpmdb_cleaned ]; then + LOG_TO_FILE "Cleaning RPM DB" + rm -f /var/lib/rpm/__db* + touch /var/run/.rpmdb_cleaned +fi + +# For AIO-SX, abort if config is not yet applied and this is running in init +if [ "${system_mode}" = "simplex" -a ! -f ${INITIAL_CONTROLLER_CONFIG_COMPLETE} -a "$1" = "start" ]; then + LOG_TO_FILE "Config is not yet applied. Skipping init patching" + exit 0 +fi + +# If the management interface is bonded, it may take some time +# before communications can be properly setup. +# Allow up to $DELAY_SEC seconds to reach controller. +DELAY_SEC=120 +START=`date +%s` +FOUND=0 +while [ $(date +%s) -lt $(( ${START} + ${DELAY_SEC} )) ]; do + ping -c 1 controller > /dev/null 2>&1 || ping6 -c 1 controller > /dev/null 2>&1 + if [ $? -eq 0 ]; then + FOUND=1 + break + fi + sleep 1 +done + +if [ ${FOUND} -eq 0 ]; then + # 'controller' is not available, just exit + LOG_TO_FILE "Unable to contact active controller (controller). Boot will continue." + exit 1 +fi + +RC=0 +case "$1" in + start) + if [ "${system_mode}" = "simplex" ]; then + # On a simplex CPE, we need to launch the http server first, + # before we can do the patch installation + LOG_TO_FILE "***** Launching lighttpd *****" + /etc/init.d/lighttpd start + + LOG_TO_FILE "***** Starting patch operation *****" + /usr/sbin/sw-patch-agent --install 2>>$logfile + if [ -f ${patch_failed_file} ]; then + RC=1 + LOG_TO_FILE "***** Patch operation failed *****" + fi + LOG_TO_FILE "***** Finished patch operation *****" + + LOG_TO_FILE "***** Shutting down lighttpd *****" + /etc/init.d/lighttpd stop + else + check_install_uuid + if [ $? -ne 0 ]; then + # The INSTALL_UUID doesn't match the active controller, so exit + exit 1 + fi + + LOG_TO_FILE "***** Starting patch operation *****" + /usr/sbin/sw-patch-agent --install 2>>$logfile + if [ -f ${patch_failed_file} ]; then + RC=1 + LOG_TO_FILE "***** Patch operation failed *****" + fi + LOG_TO_FILE "***** Finished patch operation *****" + fi + + check_for_rr_patch + ;; + stop) + # Nothing to do here + ;; + restart) + LOG_TO_FILE "***** Starting patch operation *****" + /usr/sbin/sw-patch-agent --install 2>>$logfile + if [ -f ${patch_failed_file} ]; then + RC=1 + LOG_TO_FILE "***** Patch operation failed *****" + fi + LOG_TO_FILE "***** Finished patch operation *****" + ;; + *) + echo "Usage: $0 {start|stop|restart}" + exit 1 +esac + +exit $RC + diff --git a/sw-patch/bin/sw-patch.completion b/sw-patch/bin/sw-patch.completion new file mode 100644 index 00000000..4bca798d --- /dev/null +++ b/sw-patch/bin/sw-patch.completion @@ -0,0 +1,148 @@ +# +# Copyright (c) 2016 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# This file provides bash-completion functionality for the sw-patch CLI +# + +function _swpatch() +{ + COMPREPLY=() + local cur="${COMP_WORDS[COMP_CWORD]}" + local prev="${COMP_WORDS[COMP_CWORD-1]}" + local subcommand=${COMP_WORDS[1]} + + # + # The available sw-patch subcommands + # + local subcommands=" + apply + commit + delete + query + query-dependencies + query-hosts + remove + show + upload + upload-dir + what-requires + drop-host + is-applied + is-available + report-app-dependencies + query-app-dependencies + " + if [ -f /etc/platform/.initial_config_complete ]; then + # Post-config, so the host-install commands are accessible + subcommands="${subcommands} host-install host-install-async" + else + # Pre-config, so the install-local command is accessible + subcommands="${subcommands} install-local" + fi + + # Appends the '/' when completing dir names + set mark-directories on + + if [ $COMP_CWORD -gt 1 ]; then + # + # Complete the arguments to the subcommands. + # + case "$subcommand" in + apply|delete|show|what-requires|is-applied|is-available) + # Query the list of known patches + local patches=$(sw-patch completion patches 2>/dev/null) + COMPREPLY=( $(compgen -W "${patches}" -- ${cur}) ) + return 0 + ;; + remove) + # Query the list of known patches + local patches=$(sw-patch completion patches 2>/dev/null) + COMPREPLY=( $(compgen -W "--skipappcheck ${patches}" -- ${cur}) ) + return 0 + ;; + host-install|host-install-async|drop-host) + if [ "${prev}" = "${subcommand}" -o "${prev}" = "--force" ]; then + # Query the list of known hosts + local names=$(sw-patch completion hosts 2>/dev/null) + COMPREPLY=( $(compgen -W "${names}" -- ${cur}) ) + else + # Only one host can be specified, so no more completion + COMPREPLY=( $(compgen -- ${cur}) ) + fi + return 0 + ;; + upload) + # Allow dirs and files with .patch extension for completion + COMPREPLY=( $(compgen -f -o plusdirs -X '!*.patch' -- ${cur}) ) + return 0 + ;; + upload-dir) + # Allow dirs only for completion + COMPREPLY=( $(compgen -d -- ${cur}) ) + return 0 + ;; + query) + if [ "${prev}" = "--release" ]; then + # If --release has been specified, provide installed releases for completion + local releases=$(/bin/ls -d /var/www/pages/feed/rel-* 2>/dev/null | sed 's#/var/www/pages/feed/rel-##') + COMPREPLY=( $(compgen -W "${releases}" -- ${cur}) ) + else + # --release is only completion option for query + COMPREPLY=( $(compgen -W "--release" -- ${cur}) ) + fi + return 0 + ;; + query-hosts|install-local) + # These subcommands have no options/arguments + COMPREPLY=( $(compgen -- ${cur}) ) + return 0 + ;; + query-dependencies) + # Query the list of known patches + local patches=$(sw-patch completion patches 2>/dev/null) + COMPREPLY=( $(compgen -W "--recursive ${patches}" -- ${cur}) ) + return 0 + ;; + commit) + if [ "${prev}" = "--release" ]; then + # If --release has been specified, provide installed releases for completion + local releases=$(/bin/ls -d /var/www/pages/feed/rel-* 2>/dev/null | sed 's#/var/www/pages/feed/rel-##') + COMPREPLY=( $(compgen -W "${releases}" -- ${cur}) ) + else + # Query the list of known patches + local patches=$(sw-patch completion patches 2>/dev/null) + COMPREPLY=( $(compgen -W "--all --dry-run --release ${patches}" -- ${cur}) ) + fi + return 0 + ;; + report-app-dependencies) + if [ "${prev}" = "${subcommand}" ]; then + COMPREPLY=( $(compgen -W "--app" -- ${cur}) ) + elif [ "${prev}" = "--app" ]; then + COMPREPLY= + else + local patches=$(sw-patch completion 
patches 2>/dev/null) + COMPREPLY=( $(compgen -W "${patches}" -- ${cur}) ) + fi + return 0 + ;; + query-app-dependencies) + return 0 + ;; + *) + ;; + esac + fi + + # Provide subcommands for completion + COMPREPLY=($(compgen -W "${subcommands}" -- ${cur})) + return 0 +} + +# Bind the above function to the sw-patch CLI +complete -F _swpatch -o filenames sw-patch + diff --git a/sw-patch/bin/sw-patch.service b/sw-patch/bin/sw-patch.service new file mode 100644 index 00000000..0468153b --- /dev/null +++ b/sw-patch/bin/sw-patch.service @@ -0,0 +1,16 @@ +[Unit] +Description=StarlingX Patching +After=syslog.target network-online.target +Before=sw-patch-agent.service + +[Service] +Type=oneshot +User=root +ExecStart=/etc/init.d/sw-patch start +RemainAfterExit=yes +StandardOutput=syslog+console +StandardError=syslog+console + +[Install] +WantedBy=multi-user.target + diff --git a/sw-patch/bin/upgrade-start-pkg-extract b/sw-patch/bin/upgrade-start-pkg-extract new file mode 100644 index 00000000..fd94bd59 --- /dev/null +++ b/sw-patch/bin/upgrade-start-pkg-extract @@ -0,0 +1,137 @@ +#!/bin/bash +# +# Copyright (c) 2018-2021 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +function show_usage() +{ + cat >&2 < + +This tool will extract required packages to support upgrade-start + +Options: + -r : Release ID for target release. + +EOF + exit 1 +} + +. /etc/build.info +if [ -z "${SW_VERSION}" ]; then + logger -t $0 "Unable to identify running release ID" + exit 1 +fi + +declare TGT_RELEASE= + +while getopts "r:h" opt; do + case $opt in + r) + TGT_RELEASE=$OPTARG + ;; + h) + show_usage + ;; + *) + logger -t $0 "Unsupported option" + show_usage + ;; + esac +done + +if [ -z "${TGT_RELEASE}" ]; then + logger -t $0 "You must specify the target release." + exit 1 +fi + +if [ "${TGT_RELEASE}" = "${SW_VERSION}" ]; then + logger -t $0 "Target release cannot be running release." + exit 1 +fi + +declare TGT_BASE_REPO=/var/www/pages/feed/rel-${TGT_RELEASE} +declare TGT_PATCHES_REPO=/var/www/pages/updates/rel-${TGT_RELEASE} + +if [ ! -d ${TGT_BASE_REPO} ]; then + logger -t $0 "Target release ${TGT_RELEASE} is not installed" + exit 1 +fi + +declare TGT_PATCHES_REPO_OPT="" +if [ -d ${TGT_PATCHES_REPO} ]; then + TGT_PATCHES_REPO_OPT="--repofrompath updates,${TGT_PATCHES_REPO}" +fi + +declare WORKDIR= + +function cleanup() { + if [ -n "${WORKDIR}" -a -d "${WORKDIR}" ]; then + rm -rf ${WORKDIR} + fi +} + +trap cleanup EXIT + +function extract_pkg() { + local pkgname=$1 + + ORIG_PWD=$PWD + cd $WORKDIR + + # Find the RPM + local pkgfile=$(dnf repoquery --disablerepo=* --repofrompath base,${TGT_BASE_REPO} ${TGT_PATCHES_REPO_OPT} --latest-limit=1 --location -q ${pkgname}) + if [ -z "${pkgfile}" ]; then + logger -t $0 "Could not find ${pkgname}" + exit 1 + fi + + # Chop off the file: from the start of the file location + local rpmfile=${pkgfile/file://} + + rpm2cpio ${rpmfile} | cpio -idm + if [ $? -ne 0 ]; then + logger -t $0 "Failed to extract $pkgname files from ${pkgfile/file://}" + exit 1 + fi + + cd ${ORIG_PWD} +} + +# Extract files from pxe-network-installer +WORKDIR=$(mktemp -d --tmpdir=/scratch pkgextract_XXXX) +if [ -z "${WORKDIR}" -o ! 
-d "${WORKDIR}" ]; then + logger -t $0 "Failed to create workdir" + exit 1 +fi +# Clean dnf cache in case a previous load had different package versions +dnf clean expire-cache +extract_pkg pxe-network-installer +rsync -ac ${WORKDIR}/usr/ /usr/ && +rsync -ac ${WORKDIR}/var/pxeboot/rel-${TGT_RELEASE}/ /var/pxeboot/rel-${TGT_RELEASE}/ && +rsync -c ${WORKDIR}/var/pxeboot/pxelinux.cfg.files/*-${TGT_RELEASE} /var/pxeboot/pxelinux.cfg.files/ && +rsync -ac ${WORKDIR}/var/www/pages/feed/rel-${TGT_RELEASE}/ /var/www/pages/feed/rel-${TGT_RELEASE}/ +if [ $? -ne 0 ]; then + logger -t $0 "rsync command failed, extracting pxe-network-installer" + exit 1 +fi +rm -rf ${WORKDIR} + +# Extract files from platform-kickstarts +WORKDIR=$(mktemp -d --tmpdir=/scratch pkgextract_XXXX) +if [ -z "${WORKDIR}" -o ! -d "${WORKDIR}" ]; then + logger -t $0 "Failed to create workdir" + exit 1 +fi +extract_pkg platform-kickstarts +rsync -ac ${WORKDIR}/var/www/pages/feed/rel-${TGT_RELEASE}/ /var/www/pages/feed/rel-${TGT_RELEASE}/ +if [ $? -ne 0 ]; then + logger -t $0 "rsync command failed, extracting platform-kickstarts" + exit 1 +fi +rm -rf ${WORKDIR} + +exit 0 + diff --git a/sw-patch/cgcs-patch/.coveragerc b/sw-patch/cgcs-patch/.coveragerc new file mode 100644 index 00000000..aa09b6e4 --- /dev/null +++ b/sw-patch/cgcs-patch/.coveragerc @@ -0,0 +1,7 @@ +[run] +branch = True +source = cgcs_patch +omit = cgcs_patch/tests/* + +[report] +ignore_errors = True diff --git a/sw-patch/cgcs-patch/.stestr.conf b/sw-patch/cgcs-patch/.stestr.conf new file mode 100644 index 00000000..7591fa0a --- /dev/null +++ b/sw-patch/cgcs-patch/.stestr.conf @@ -0,0 +1,2 @@ +[DEFAULT] +test_path=cgcs_patch/tests diff --git a/sw-patch/cgcs-patch/LICENSE b/sw-patch/cgcs-patch/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/sw-patch/cgcs-patch/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/sw-patch/cgcs-patch/cgcs_patch/__init__.py b/sw-patch/cgcs-patch/cgcs_patch/__init__.py new file mode 100644 index 00000000..72836125 --- /dev/null +++ b/sw-patch/cgcs-patch/cgcs_patch/__init__.py @@ -0,0 +1,6 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" diff --git a/sw-patch/cgcs-patch/cgcs_patch/api/__init__.py b/sw-patch/cgcs-patch/cgcs_patch/api/__init__.py new file mode 100644 index 00000000..b24632a8 --- /dev/null +++ b/sw-patch/cgcs-patch/cgcs_patch/api/__init__.py @@ -0,0 +1,30 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. 
+ +SPDX-License-Identifier: Apache-2.0 + +""" + +from oslo_config import cfg + + +API_SERVICE_OPTS = [ + cfg.StrOpt('api_bind_ip', + default='127.0.0.1', + help='IP for the Patching controller API server to bind to', + ), + cfg.IntOpt('api_port', + default=5487, + help='The port for the Patching controller API server', + ), + cfg.IntOpt('api_limit_max', + default=1000, + help='the maximum number of items returned in a single ' + 'response from a collection resource'), +] + +CONF = cfg.CONF +opt_group = cfg.OptGroup(name='api', + title='Options for the Patching controller api service') +CONF.register_group(opt_group) +CONF.register_opts(API_SERVICE_OPTS) diff --git a/sw-patch/cgcs-patch/cgcs_patch/api/app.py b/sw-patch/cgcs-patch/cgcs_patch/api/app.py new file mode 100644 index 00000000..b831a86f --- /dev/null +++ b/sw-patch/cgcs-patch/cgcs_patch/api/app.py @@ -0,0 +1,43 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +import pecan + +from cgcs_patch.api import config + + +def get_pecan_config(): + # Set up the pecan configuration + filename = config.__file__.replace('.pyc', '.py') + return pecan.configuration.conf_from_file(filename) + + +def setup_app(pecan_config=None): + if not pecan_config: + pecan_config = get_pecan_config() + + pecan.configuration.set_config(dict(pecan_config), overwrite=True) + + app = pecan.make_app( + pecan_config.app.root, + static_root=pecan_config.app.static_root, + template_path=pecan_config.app.template_path, + debug=False, + force_canonical=getattr(pecan_config.app, 'force_canonical', True), + guess_content_type_from_ext=False, # Avoid mime-type lookup + ) + + return app + + +class VersionSelectorApplication(object): + def __init__(self): + pc = get_pecan_config() + self.v1 = setup_app(pecan_config=pc) + + def __call__(self, environ, start_response): + return self.v1(environ, start_response) diff --git a/sw-patch/cgcs-patch/cgcs_patch/api/config.py b/sw-patch/cgcs-patch/cgcs_patch/api/config.py new file mode 100644 index 00000000..91d449e7 --- /dev/null +++ b/sw-patch/cgcs-patch/cgcs_patch/api/config.py @@ -0,0 +1,23 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +# Server Specific Configurations +server = { + 'port': '5487', + 'host': '127.0.0.1' +} + +# Pecan Application Configurations +app = { + 'root': 'cgcs_patch.api.controllers.root.RootController', + 'modules': ['cgcs_patch.authapi'], + 'static_root': '%(confdir)s/public', + 'template_path': '%(confdir)s/../templates', + 'debug': False, + 'enable_acl': True, + 'acl_public_routes': [], +} diff --git a/sw-patch/cgcs-patch/cgcs_patch/api/controllers/__init__.py b/sw-patch/cgcs-patch/cgcs_patch/api/controllers/__init__.py new file mode 100644 index 00000000..72836125 --- /dev/null +++ b/sw-patch/cgcs-patch/cgcs_patch/api/controllers/__init__.py @@ -0,0 +1,6 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" diff --git a/sw-patch/cgcs-patch/cgcs_patch/api/controllers/root.py b/sw-patch/cgcs-patch/cgcs_patch/api/controllers/root.py new file mode 100644 index 00000000..ecce3472 --- /dev/null +++ b/sw-patch/cgcs-patch/cgcs_patch/api/controllers/root.py @@ -0,0 +1,293 @@ +""" +Copyright (c) 2014-2019 Wind River Systems, Inc. 
+ +SPDX-License-Identifier: Apache-2.0 + +""" + +import os +from pecan import expose +from pecan import request +import cgi +import glob + +from cgcs_patch.exceptions import PatchError +from cgcs_patch.patch_controller import pc + +from cgcs_patch.patch_functions import LOG + + +class PatchAPIController(object): + + @expose('json') + @expose('query.xml', content_type='application/xml') + def index(self): + return self.query() + + @expose('json') + @expose('query.xml', content_type='application/xml') + def query(self, **kwargs): + try: + pd = pc.patch_query_cached(**kwargs) + except PatchError as e: + return dict(error="Error: %s" % str(e)) + + return dict(pd=pd) + + @expose('json') + @expose('show.xml', content_type='application/xml') + def show(self, *args): + try: + result = pc.patch_query_specific_cached(list(args)) + except PatchError as e: + return dict(error="Error: %s" % str(e)) + + return result + + @expose('json') + @expose('query.xml', content_type='application/xml') + def apply(self, *args, **kwargs): + if pc.any_patch_host_installing(): + return dict(error="Rejected: One or more nodes are installing patches.") + + try: + result = pc.patch_apply_api(list(args), **kwargs) + except PatchError as e: + return dict(error="Error: %s" % str(e)) + + pc.patch_sync() + + return result + + @expose('json') + @expose('query.xml', content_type='application/xml') + def remove(self, *args, **kwargs): + if pc.any_patch_host_installing(): + return dict(error="Rejected: One or more nodes are installing patches.") + + try: + result = pc.patch_remove_api(list(args), **kwargs) + except PatchError as e: + return dict(error="Error: %s" % str(e)) + + pc.patch_sync() + + return result + + @expose('json') + @expose('query.xml', content_type='application/xml') + def delete(self, *args): + try: + result = pc.patch_delete_api(list(args)) + except PatchError as e: + return dict(error="Error: %s" % str(e)) + + pc.patch_sync() + + return result + + @expose('json') + @expose('query.xml', content_type='application/xml') + def upload(self): + assert isinstance(request.POST['file'], cgi.FieldStorage) + fileitem = request.POST['file'] + + if not fileitem.filename: + return dict(error="Error: No file uploaded") + + fn = '/scratch/' + os.path.basename(fileitem.filename) + + if hasattr(fileitem.file, 'fileno'): + # This technique cannot copy a very large file. It + # requires a lot of memory as all data from the + # source file is read into memory then written to + # the destination file one chunk + # open(fn, 'wb').write(fileitem.file.read()) + + # Copying file by chunks using OS system calls + # requires much less memory. 
A larger chunk + # size can be used to improve the copy speed; + # currently 64K chunk size is selected + dst = os.open(fn, os.O_WRONLY | os.O_CREAT) + src = fileitem.file.fileno() + size = 64 * 1024 + n = size + while n >= size: + s = os.read(src, size) + n = os.write(dst, s) + os.close(dst) + else: + open(fn, 'wb').write(fileitem.file.read()) + + try: + result = pc.patch_import_api([fn]) + except PatchError as e: + os.remove(fn) + return dict(error=str(e)) + + os.remove(fn) + + pc.patch_sync() + + return result + + @expose('json') + def upload_dir(self, **kwargs): + files = [] + for path in kwargs.values(): + LOG.info("upload-dir: Retrieving patches from %s", path) + for f in glob.glob(path + '/*.patch'): + if os.path.isfile(f): + files.append(f) + + if len(files) == 0: + return dict(error="No patches found") + + try: + result = pc.patch_import_api(sorted(files)) + except PatchError as e: + return dict(error=str(e)) + + pc.patch_sync() + + return result + + @expose('json') + def init_release(self, *args): + if len(list(args)) == 0: + return dict(error="Release must be specified") + + try: + result = pc.patch_init_release_api(list(args)[0]) + except PatchError as e: + return dict(error=str(e)) + + pc.patch_sync() + + return result + + @expose('json') + def del_release(self, *args): + if len(list(args)) == 0: + return dict(error="Release must be specified") + + try: + result = pc.patch_del_release_api(list(args)[0]) + except PatchError as e: + return dict(error=str(e)) + + pc.patch_sync() + + return result + + @expose('json') + @expose('query_hosts.xml', content_type='application/xml') + def query_hosts(self, *args): # pylint: disable=unused-argument + return dict(data=pc.query_host_cache()) + + @expose('json') + @expose('query.xml', content_type='application/xml') + def what_requires(self, *args): + try: + result = pc.patch_query_what_requires(list(args)) + except PatchError as e: + return dict(error="Error: %s" % str(e)) + + return result + + @expose('json') + @expose('query.xml', content_type='application/xml') + def host_install(self, *args): # pylint: disable=unused-argument + return dict(error="Deprecated: Use host_install_async") + + @expose('json') + @expose('query.xml', content_type='application/xml') + def host_install_async(self, *args): + if len(list(args)) == 0: + return dict(error="Host must be specified for install") + force = False + if len(list(args)) > 1 and 'force' in list(args)[1:]: + force = True + + try: + result = pc.patch_host_install(list(args)[0], force, async_req=True) + except PatchError as e: + return dict(error="Error: %s" % str(e)) + + return result + + @expose('json') + @expose('query.xml', content_type='application/xml') + def drop_host(self, *args): + if len(list(args)) == 0: + return dict(error="Host must be specified") + + try: + result = pc.drop_host(list(args)[0]) + except PatchError as e: + return dict(error="Error: %s" % str(e)) + + return result + + @expose('json') + def query_dependencies(self, *args, **kwargs): + try: + result = pc.patch_query_dependencies(list(args), **kwargs) + except PatchError as e: + return dict(error=str(e)) + + return result + + @expose('json') + def commit(self, *args): + try: + result = pc.patch_commit(list(args)) + except PatchError as e: + return dict(error=str(e)) + + pc.patch_sync() + + return result + + @expose('json') + def commit_dry_run(self, *args): + try: + result = pc.patch_commit(list(args), dry_run=True) + except PatchError as e: + return dict(error=str(e)) + + return result + + @expose('json') + def 
is_applied(self, *args): + return pc.is_applied(list(args)) + + @expose('json') + def is_available(self, *args): + return pc.is_available(list(args)) + + @expose('json') + def report_app_dependencies(self, *args, **kwargs): + try: + result = pc.report_app_dependencies(list(args), **kwargs) + except PatchError as e: + return dict(status=500, error=str(e)) + + pc.patch_sync() + + return result + + @expose('json') + def query_app_dependencies(self): + return pc.query_app_dependencies() + + +class RootController(object): + + @expose() + @expose('json') + def index(self): + return "Titanium Cloud Patching API, Available versions: /v1" + + patch = PatchAPIController() + v1 = PatchAPIController() diff --git a/sw-patch/cgcs-patch/cgcs_patch/app.py b/sw-patch/cgcs-patch/cgcs_patch/app.py new file mode 100644 index 00000000..885ad934 --- /dev/null +++ b/sw-patch/cgcs-patch/cgcs_patch/app.py @@ -0,0 +1,24 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +from pecan import make_app + + +def setup_app(config): + + return make_app( + config.app.root, + static_root=config.app.static_root, + template_path=config.app.template_path, + logging=getattr(config, 'logging', {}), + debug=getattr(config.app, 'debug', False), + force_canonical=getattr(config.app, 'force_canonical', True), + guess_content_type_from_ext=getattr( + config.app, + 'guess_content_type_from_ext', + True), + ) diff --git a/sw-patch/cgcs-patch/cgcs_patch/authapi/__init__.py b/sw-patch/cgcs-patch/cgcs_patch/authapi/__init__.py new file mode 100755 index 00000000..cc898390 --- /dev/null +++ b/sw-patch/cgcs-patch/cgcs_patch/authapi/__init__.py @@ -0,0 +1,25 @@ +# Copyright (c) 2013-2017 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +from oslo_config import cfg + +API_SERVICE_OPTS = [ + cfg.StrOpt('auth_api_bind_ip', + default=None, + help='IP for the authenticated Patching API server to bind to'), + cfg.IntOpt('auth_api_port', + default=5491, + help='The port for the authenticated Patching API server'), + cfg.IntOpt('api_limit_max', + default=1000, + help='the maximum number of items returned in a single ' + 'response from a collection resource') +] + +CONF = cfg.CONF +opt_group = cfg.OptGroup(name='api', + title='Options for the patch-api service') +CONF.register_group(opt_group) +CONF.register_opts(API_SERVICE_OPTS) diff --git a/sw-patch/cgcs-patch/cgcs_patch/authapi/acl.py b/sw-patch/cgcs-patch/cgcs_patch/authapi/acl.py new file mode 100755 index 00000000..ff2b2356 --- /dev/null +++ b/sw-patch/cgcs-patch/cgcs_patch/authapi/acl.py @@ -0,0 +1,30 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +from cgcs_patch.authapi import auth_token + +OPT_GROUP_NAME = 'keystone_authtoken' + + +"""Access Control Lists (ACL's) control access the API server.""" + + +def install(app, conf, public_routes): + """Install ACL check on application. + + :param app: A WSGI application. + :param conf: Settings. Must include OPT_GROUP_NAME section. + :param public_routes: The list of the routes which will be allowed + access without authentication. + :return: The same WSGI application with ACL installed. 
+ + """ + + keystone_config = dict(conf.items(OPT_GROUP_NAME)) + return auth_token.AuthTokenMiddleware(app, + conf=keystone_config, + public_api_routes=public_routes) diff --git a/sw-patch/cgcs-patch/cgcs_patch/authapi/app.py b/sw-patch/cgcs-patch/cgcs_patch/authapi/app.py new file mode 100755 index 00000000..4eed6d20 --- /dev/null +++ b/sw-patch/cgcs-patch/cgcs_patch/authapi/app.py @@ -0,0 +1,77 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +from oslo_config import cfg +import pecan + +from cgcs_patch.authapi import acl +from cgcs_patch.authapi import config +from cgcs_patch.authapi import hooks +from cgcs_patch.authapi import policy + +from six.moves import configparser + +auth_opts = [ + cfg.StrOpt('auth_strategy', + default='keystone', + help='Method to use for auth: noauth or keystone.'), +] + +CONF = cfg.CONF +CONF.register_opts(auth_opts) + + +def get_pecan_config(): + # Set up the pecan configuration + filename = config.__file__.replace('.pyc', '.py') + return pecan.configuration.conf_from_file(filename) + + +def setup_app(pecan_config=None, extra_hooks=None): + config_parser = configparser.RawConfigParser() + config_parser.read('/etc/patching/patching.conf') + + policy.init() + + app_hooks = [hooks.ConfigHook(), + hooks.ContextHook(pecan_config.app.acl_public_routes), + ] + if extra_hooks: + app_hooks.extend(extra_hooks) + + if not pecan_config: + pecan_config = get_pecan_config() + + if pecan_config.app.enable_acl: + app_hooks.append(hooks.AdminAuthHook()) + + pecan.configuration.set_config(dict(pecan_config), overwrite=True) + + app = pecan.make_app( + pecan_config.app.root, + static_root=pecan_config.app.static_root, + template_path=pecan_config.app.template_path, + debug=False, + force_canonical=getattr(pecan_config.app, 'force_canonical', True), + hooks=app_hooks, + guess_content_type_from_ext=False, # Avoid mime-type lookup + ) + + if pecan_config.app.enable_acl: + return acl.install(app, config_parser, pecan_config.app.acl_public_routes) + + return app + + +class VersionSelectorApplication(object): + def __init__(self): + pc = get_pecan_config() + pc.app.enable_acl = (CONF.auth_strategy == 'keystone') + self.v1 = setup_app(pecan_config=pc) + + def __call__(self, environ, start_response): + return self.v1(environ, start_response) diff --git a/sw-patch/cgcs-patch/cgcs_patch/authapi/auth_token.py b/sw-patch/cgcs-patch/cgcs_patch/authapi/auth_token.py new file mode 100755 index 00000000..8b375fc4 --- /dev/null +++ b/sw-patch/cgcs-patch/cgcs_patch/authapi/auth_token.py @@ -0,0 +1,40 @@ +# -*- encoding: utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from keystonemiddleware import auth_token +from sysinv.common import utils + + +class AuthTokenMiddleware(auth_token.AuthProtocol): + """A wrapper on Keystone auth_token middleware. + + Does not perform verification of authentication tokens + for public routes in the API. 
+ + """ + def __init__(self, app, conf, public_api_routes=None): + if public_api_routes is None: + public_api_routes = [] + + self.public_api_routes = set(public_api_routes) + + super(AuthTokenMiddleware, self).__init__(app, conf) + + def __call__(self, env, start_response): + path = utils.safe_rstrip(env.get('PATH_INFO'), '/') + + if path in self.public_api_routes: + return self.app(env, start_response) # pylint: disable=no-member + + return super(AuthTokenMiddleware, self).__call__(env, start_response) # pylint: disable=too-many-function-args diff --git a/sw-patch/cgcs-patch/cgcs_patch/authapi/config.py b/sw-patch/cgcs-patch/cgcs_patch/authapi/config.py new file mode 100755 index 00000000..796fb8dc --- /dev/null +++ b/sw-patch/cgcs-patch/cgcs_patch/authapi/config.py @@ -0,0 +1,23 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +# Server Specific Configurations +server = { + 'port': '5491', + 'host': '0.0.0.0' +} + +# Pecan Application Configurations +app = { + 'root': 'cgcs_patch.api.controllers.root.RootController', + 'modules': ['cgcs_patch.api'], + 'static_root': '%(confdir)s/public', + 'template_path': '%(confdir)s/../templates', + 'debug': False, + 'enable_acl': True, + 'acl_public_routes': [], +} diff --git a/sw-patch/cgcs-patch/cgcs_patch/authapi/hooks.py b/sw-patch/cgcs-patch/cgcs_patch/authapi/hooks.py new file mode 100755 index 00000000..c4d2353e --- /dev/null +++ b/sw-patch/cgcs-patch/cgcs_patch/authapi/hooks.py @@ -0,0 +1,100 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2012 New Dream Network, LLC (DreamHost) +# +# Author: Doug Hellmann +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Copyright (c) 2013-2017 Wind River Systems, Inc. +# + + +from oslo_config import cfg +from pecan import hooks + +from sysinv.common import context +from sysinv.common import utils +from sysinv.openstack.common import policy +from webob import exc + + +class ConfigHook(hooks.PecanHook): + """Attach the config object to the request so controllers can get to it.""" + + def before(self, state): + state.request.cfg = cfg.CONF + + +class ContextHook(hooks.PecanHook): + """Configures a request context and attaches it to the request. + + The following HTTP request headers are used: + + X-User-Id or X-User: + Used for context.user_id. + + X-Tenant-Id or X-Tenant: + Used for context.tenant. + + X-Auth-Token: + Used for context.auth_token. + + X-Roles: + Used for setting context.is_admin flag to either True or False. + The flag is set to True, if X-Roles contains either an administrator + or admin substring. Otherwise it is set to False. 
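
The header mapping described above can be tried in isolation; the snippet below is a simplified stand-in (a plain dict replaces the request headers, and a role-name check substitutes for the policy engine's 'admin' rule):

    # Simplified stand-in for ContextHook.before(): derive the request context
    # fields from the X-* headers. Admin status comes from the X-Roles list
    # here, whereas the real hook asks the sysinv policy engine.
    def build_context(headers):
        user_id = headers.get('X-User', headers.get('X-User-Id'))
        tenant = headers.get('X-Tenant', headers.get('X-Tenant-Id'))
        roles = [r.strip() for r in headers.get('X-Roles', '').split(',')]
        return {
            'user': user_id,
            'tenant': tenant,
            'auth_token': headers.get('X-Auth-Token'),
            'is_admin': 'admin' in roles or 'administrator' in roles,
        }

    print(build_context({'X-User': 'op1', 'X-Roles': 'admin,member'}))
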
+ + """ + def __init__(self, public_api_routes): + self.public_api_routes = public_api_routes + super(ContextHook, self).__init__() + + def before(self, state): + user_id = state.request.headers.get('X-User-Id') + user_id = state.request.headers.get('X-User', user_id) + tenant = state.request.headers.get('X-Tenant-Id') + tenant = state.request.headers.get('X-Tenant', tenant) + domain_id = state.request.headers.get('X-User-Domain-Id') + domain_name = state.request.headers.get('X-User-Domain-Name') + auth_token = state.request.headers.get('X-Auth-Token', None) + creds = {'roles': state.request.headers.get('X-Roles', '').split(',')} + + is_admin = policy.check('admin', state.request.headers, creds) + + path = utils.safe_rstrip(state.request.path, '/') + is_public_api = path in self.public_api_routes + + state.request.context = context.RequestContext( + auth_token=auth_token, + user=user_id, + tenant=tenant, + domain_id=domain_id, + domain_name=domain_name, + is_admin=is_admin, + is_public_api=is_public_api) + + +class AdminAuthHook(hooks.PecanHook): + """Verify that the user has admin rights. + + Checks whether the request context is an admin context and + rejects the request otherwise. + + """ + def before(self, state): + ctx = state.request.context + is_admin_api = policy.check('admin_api', {}, ctx.to_dict()) + + if not is_admin_api and not ctx.is_public_api: + raise exc.HTTPForbidden() diff --git a/sw-patch/cgcs-patch/cgcs_patch/authapi/policy.py b/sw-patch/cgcs-patch/cgcs_patch/authapi/policy.py new file mode 100755 index 00000000..285c6a16 --- /dev/null +++ b/sw-patch/cgcs-patch/cgcs_patch/authapi/policy.py @@ -0,0 +1,117 @@ +# +# Copyright (c) 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Copyright (c) 2014-2017 Wind River Systems, Inc. +# + +"""Policy Engine For Patching.""" + +import os.path + +from sysinv.common import exception +from sysinv.common import utils +from sysinv.openstack.common import policy + + +_POLICY_PATH = None +_POLICY_CACHE = {} + + +def reset(): + global _POLICY_PATH + global _POLICY_CACHE + _POLICY_PATH = None + _POLICY_CACHE = {} + policy.reset() + + +def init(): + global _POLICY_PATH + global _POLICY_CACHE + if not _POLICY_PATH: + _POLICY_PATH = '/etc/patching/policy.json' + if not os.path.exists(_POLICY_PATH): + raise exception.ConfigNotFound(message='/etc/patching/policy.json') + utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE, + reload_func=_set_rules) + + +def _set_rules(data): + default_rule = "rule:admin_api" + policy.set_rules(policy.Rules.load_json(data, default_rule)) + + +def enforce(context, action, target, do_raise=True): + """Verifies that the action is valid on the target in this context. + + :param context: sysinv context + :param action: string representing the action to be checked + this should be colon separated for clarity. + i.e. 
``compute:create_instance``, + ``compute:attach_volume``, + ``volume:attach_volume`` + :param target: dictionary representing the object of the action + for object creation this should be a dictionary representing the + location of the object e.g. ``{'project_id': context.project_id}`` + :param do_raise: if True (the default), raises PolicyNotAuthorized; + if False, returns False + + :raises sysinv.exception.PolicyNotAuthorized: if verification fails + and do_raise is True. + + :return: returns a non-False value (not necessarily "True") if + authorized, and the exact value False if not authorized and + do_raise is False. + """ + init() + + credentials = context.to_dict() + + # Add the exception arguments if asked to do a raise + extra = {} + if do_raise: + extra.update(exc=exception.PolicyNotAuthorized, action=action) + + return policy.check(action, target, credentials, **extra) + + +def check_is_admin(context): + """Whether or not role contains 'admin' role according to policy setting. + + """ + init() + + credentials = context.to_dict() + target = credentials + + return policy.check('context_is_admin', target, credentials) + + +@policy.register('context_is_admin') +class IsAdminCheck(policy.Check): + """An explicit check for is_admin.""" + + def __init__(self, kind, match): + """Initialize the check.""" + + self.expected = (match.lower() == 'true') + + super(IsAdminCheck, self).__init__(kind, str(self.expected)) + + def __call__(self, target, creds): + """Determine whether is_admin matches the requested value.""" + + return creds['is_admin'] == self.expected diff --git a/sw-patch/cgcs-patch/cgcs_patch/base.py b/sw-patch/cgcs-patch/cgcs_patch/base.py new file mode 100644 index 00000000..8c743c09 --- /dev/null +++ b/sw-patch/cgcs-patch/cgcs_patch/base.py @@ -0,0 +1,170 @@ +""" +Copyright (c) 2017-2021 Wind River Systems, Inc. 
+ +SPDX-License-Identifier: Apache-2.0 + +""" + +import socket +import struct +import subprocess +import time + +import cgcs_patch.utils as utils +import cgcs_patch.config as cfg +import cgcs_patch.constants as constants +from cgcs_patch.patch_functions import LOG + + +class PatchService(object): + def __init__(self): + self.sock_out = None + self.sock_in = None + self.service_type = None + self.port = None + self.mcast_addr = None + self.socket_lock = None + + def update_config(self): + # Implemented in subclass + pass + + def socket_lock_acquire(self): + pass + + def socket_lock_release(self): + pass + + def setup_socket_ipv4(self): + mgmt_ip = cfg.get_mgmt_ip() + if mgmt_ip is None: + # Don't setup socket unless we have a mgmt ip + return None + + self.update_config() + + interface_addr = socket.inet_pton(socket.AF_INET, mgmt_ip) + + # Close sockets, if necessary + for s in [self.sock_out, self.sock_in]: + if s is not None: + s.close() + + self.sock_out = socket.socket(socket.AF_INET, + socket.SOCK_DGRAM) + self.sock_in = socket.socket(socket.AF_INET, + socket.SOCK_DGRAM) + + self.sock_out.setblocking(0) + self.sock_in.setblocking(0) + + self.sock_out.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + self.sock_in.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + + self.sock_in.bind(('', self.port)) + + if self.mcast_addr: + # These options are for outgoing multicast messages + self.sock_out.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, interface_addr) + self.sock_out.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 1) + # Since only the controllers are sending to this address, + # we want the loopback so the local agent can receive it + self.sock_out.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 1) + + # Register the multicast group + group = socket.inet_pton(socket.AF_INET, self.mcast_addr) + mreq = struct.pack('=4s4s', group, interface_addr) + + self.sock_in.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq) + + return self.sock_in + + def setup_socket_ipv6(self): + mgmt_ip = cfg.get_mgmt_ip() + if mgmt_ip is None: + # Don't setup socket unless we have a mgmt ip + return None + + self.update_config() + + # Close sockets, if necessary + for s in [self.sock_out, self.sock_in]: + if s is not None: + s.close() + + self.sock_out = socket.socket(socket.AF_INET6, + socket.SOCK_DGRAM) + self.sock_in = socket.socket(socket.AF_INET6, + socket.SOCK_DGRAM) + + self.sock_out.setblocking(0) + self.sock_in.setblocking(0) + + self.sock_out.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + self.sock_in.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + + self.sock_out.bind((mgmt_ip, 0)) + self.sock_in.bind(('', self.port)) + + if self.mcast_addr: + # These options are for outgoing multicast messages + mgmt_ifindex = utils.if_nametoindex(cfg.get_mgmt_iface()) + self.sock_out.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_IF, mgmt_ifindex) + self.sock_out.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, 1) + # Since only the controllers are sending to this address, + # we want the loopback so the local agent can receive it + self.sock_out.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_LOOP, 1) + + # Register the multicast group + if_index_packed = struct.pack('I', mgmt_ifindex) + group = socket.inet_pton(socket.AF_INET6, self.mcast_addr) + if_index_packed + self.sock_in.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, group) + + return self.sock_in + + def setup_socket(self): + self.socket_lock_acquire() + + try: + 
sock_in = None + if utils.get_management_version() == constants.ADDRESS_VERSION_IPV6: + sock_in = self.setup_socket_ipv6() + else: + sock_in = self.setup_socket_ipv4() + self.socket_lock_release() + return sock_in + except Exception: + LOG.exception("Failed to setup socket") + + # Close sockets, if necessary + for s in [self.sock_out, self.sock_in]: + if s is not None: + s.close() + + self.socket_lock_release() + + return None + + def audit_socket(self): + if not self.mcast_addr: + # Multicast address not configured, therefore nothing to do + return + + # Ensure multicast address is still allocated + cmd = "ip maddr show %s | awk 'BEGIN {ORS=\"\"}; {if ($2 == \"%s\") print $2}'" % \ + (cfg.get_mgmt_iface(), self.mcast_addr) + try: + result = subprocess.check_output(cmd, shell=True) + + if result == self.mcast_addr: + return + except subprocess.CalledProcessError as e: + LOG.error("Command output: %s", e.output) + return + + # Close the socket and set it up again + LOG.info("Detected missing multicast addr (%s). Reconfiguring", self.mcast_addr) + while self.setup_socket() is None: + LOG.info("Unable to setup sockets. Waiting to retry") + time.sleep(5) + LOG.info("Multicast address reconfigured") diff --git a/sw-patch/cgcs-patch/cgcs_patch/certificates.py b/sw-patch/cgcs-patch/cgcs_patch/certificates.py new file mode 100644 index 00000000..19f3a06c --- /dev/null +++ b/sw-patch/cgcs-patch/cgcs_patch/certificates.py @@ -0,0 +1,51 @@ +""" +Copyright (c) 2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +dev_certificate = b"""-----BEGIN CERTIFICATE----- + MIIDejCCAmKgAwIBAgICEAQwDQYJKoZIhvcNAQELBQAwQjELMAkGA1UEBhMCQ0Ex + EDAOBgNVBAgMB09udGFyaW8xITAfBgNVBAoMGFdpbmQgUml2ZXIgU3lzdGVtcywg + SW5jLjAeFw0xNzA4MTgxNDM3MjlaFw0yNzA4MTYxNDM3MjlaMEExCzAJBgNVBAYT + AkNBMRAwDgYDVQQIDAdPbnRhcmlvMSAwHgYDVQQKDBdXaW5kIFJpdmVyIFN5c3Rl + bXMsIEluYzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALcs0/Te6x69 + lxQOxudrF+uSC5F9r5bKUnZNWUKHyXKlN4SzZgWGs+fb/DqXIm7piuoQ6GH7GEQd + BEN1j/bwp30LZlv0Ur+8jhCvEdqsIP3vUXfv7pv0bomVs0Q8ZRI/FYZhjxYlyFKr + gZFV9WPP8S9SwfClHjaYRUudvwvjHHnnnkZ9blVFbXU0Xe83A8fWd0HNqAU1TlmK + 4CeSi4FI4aRKiXJnOvgv2UoJMI57rBIVKYRUH8uuFpPofOwjOM/Rd6r3Ir+4/CX6 + +/NALOBIEN6M05ZzoiyiH8NHELknQBqzNs0cXObJWpaSinAOcBnPCc7DNRwgQzjR + SdcE9FG1+LcCAwEAAaN7MHkwCQYDVR0TBAIwADAsBglghkgBhvhCAQ0EHxYdT3Bl + blNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFDRbal2KxU0hQyv4 + MVnWrW96+aWoMB8GA1UdIwQYMBaAFJaLO1x8+jti7V6pLGbUyqpy0M36MA0GCSqG + SIb3DQEBCwUAA4IBAQBmcPFZzEoPtuMPCFvJ/0cmngp8yvCGxWz3JEDkdGYSCVGs + TG5e9DeltaHOk6yLvZSRY1so30GQnyB9q8v4DwEGVslKg8u9w/WEU81wl6Q2FZ5s + XRP6TASQ0Lbg9e4b3bnTITJJ8jT/zF29NaohgC2fg0UwVuldZLfa7FihJB4//OC1 + UdNEcmdqTVRqN2oco1n3ZUWKXvG2AvGsoiqu+lsWX1MXacoFvJexSACLrUvOoXMW + i38Ofp7XMCAm3rM0cXv7Uc9WCrgnTWbEvDgjGfRAmcM9moWGoWX6E46Xkojpkfle + Ss6CHAMK42aZ/+MWQlZEzNK49PtomGMjn5SuoK8u + -----END CERTIFICATE-----""" + +formal_certificate = b"""-----BEGIN CERTIFICATE----- + MIIDezCCAmOgAwIBAgICEAMwDQYJKoZIhvcNAQELBQAwQjELMAkGA1UEBhMCQ0Ex + EDAOBgNVBAgMB09udGFyaW8xITAfBgNVBAoMGFdpbmQgUml2ZXIgU3lzdGVtcywg + SW5jLjAeFw0xNzA4MTgxNDM1MTJaFw0yNzA4MTYxNDM1MTJaMEIxCzAJBgNVBAYT + AkNBMRAwDgYDVQQIDAdPbnRhcmlvMSEwHwYDVQQKDBhXaW5kIFJpdmVyIFN5c3Rl + bXMsIEluYy4wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC+0fS8ybg8 + M37lW+lcR9LmQAR2zUJdbnl2L0fj3W/7W+PMm3mJWeQDTf19wf+qHHrgEkjxGp10 + BSXWZYdPyCdOjAay/Ew1s/waFeAQZpf4vv/9D1Y/4sVkqct9ibo5NVgvVsjqKVnX + IVhyzHlhBSUqYhZlS/SOx8JcLQWSUMJoP2XR4Tv28xIXi0Fuyp8QBwUmSwmvfPy4 + 0yxzfON/b8kHld5aTY353KLXh/5YWsn1zRlOYfS1OuJk4LGjm6HvmZtxPNUZk4vI + 
NA24rH4FKkuxyM3x8aPi3LE4G6GSrJDuNi28xzOj864rlFoyLODy/mov1YMR/g4k + d3mG6UbRckPxAgMBAAGjezB5MAkGA1UdEwQCMAAwLAYJYIZIAYb4QgENBB8WHU9w + ZW5TU0wgR2VuZXJhdGVkIENlcnRpZmljYXRlMB0GA1UdDgQWBBTjyMN/AX07rEmB + 6sz6pnyt/m+eSzAfBgNVHSMEGDAWgBSWiztcfPo7Yu1eqSxm1MqqctDN+jANBgkq + hkiG9w0BAQsFAAOCAQEASpyCu/adGTvNjyy/tV+sL/kaVEKLA7q36HUrzQkTjMPX + y8L8PVZoeWprkz7cvYTyHmVTPLBvFkGEFVn8LWi9fTTp/UrHnxw6fvb+V78mOypi + 4A1aU9+dh3L6arpd4jZ4hDiLhEClesGCYVTVBdsrh3zSOc51nT4hosyBVpRd/VgQ + jhGJBBMEXASZceady4ajK5jnR3wF8oW/he4NYF97qh8WWKVsIYbwgLS0rT58q7qq + vpjPxMOahUdACkyPyt/XJICTlkanVD7KgG3oLWpc+3FWPHGr+F7mspPLZqUcEFDV + bGF+oDJ7p/tqHsNvPlRDVGqh0QdiAkKeS/SJC9jmAw== + -----END CERTIFICATE----- + """ diff --git a/sw-patch/cgcs-patch/cgcs_patch/config.py b/sw-patch/cgcs-patch/cgcs_patch/config.py new file mode 100644 index 00000000..2e9f9842 --- /dev/null +++ b/sw-patch/cgcs-patch/cgcs_patch/config.py @@ -0,0 +1,138 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +import os +import six +from six.moves import configparser +import io +import logging +import socket +import cgcs_patch.utils as utils +import cgcs_patch.constants as constants +import tsconfig.tsconfig as tsc + +controller_mcast_group = None +agent_mcast_group = None +controller_port = 0 +agent_port = 0 +api_port = 0 +mgmt_if = None +nodetype = None +platform_conf_mtime = 0 +patching_conf_mtime = 0 +patching_conf = '/etc/patching/patching.conf' + + +def read_config(): + global patching_conf_mtime + global patching_conf + + if patching_conf_mtime == os.stat(patching_conf).st_mtime: + # The file has not changed since it was last read + return + + defaults = { + 'controller_mcast_group': "239.1.1.3", + 'agent_mcast_group': "239.1.1.4", + 'api_port': "5487", + 'controller_port': "5488", + 'agent_port': "5489", + } + + global controller_mcast_group + global agent_mcast_group + global api_port + global controller_port + global agent_port + + # In python3 configparser uses strict mode by default. It doesn't + # agree duplicate keys, and will throw an error + # In python2 the strict argument is missing + # TODO(dsafta): the logic branching here can be removed once + # https://bugs.launchpad.net/starlingx/+bug/1931529 is fixed, allowing + # python3 parser to work in strict mode. + + if six.PY2: + config = configparser.SafeConfigParser(defaults) + elif six.PY3: + config = configparser.SafeConfigParser(defaults, strict=False) + + config.read(patching_conf) + patching_conf_mtime = os.stat(patching_conf).st_mtime + + controller_mcast_group = config.get('runtime', + 'controller_multicast') + agent_mcast_group = config.get('runtime', 'agent_multicast') + + api_port = config.getint('runtime', 'api_port') + controller_port = config.getint('runtime', 'controller_port') + agent_port = config.getint('runtime', 'agent_port') + + # The platform.conf file has no section headers, which causes problems + # for ConfigParser. So we'll fake it out. 
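
The section-header workaround described in the comment above can be demonstrated on its own; a minimal sketch using only the Python 3 standard library (the key/value content is made up for illustration):

    import configparser
    import io

    # platform.conf-style content: bare key=value pairs, no [section] header
    raw = u"nodetype=controller\nmanagement_interface=ens0\n"

    # ConfigParser insists on a section, so prepend a fake one before parsing
    config = configparser.ConfigParser()
    config.read_file(io.StringIO(u'[platform_conf]\n' + raw))

    print(config.get('platform_conf', 'nodetype'))              # controller
    print(config.get('platform_conf', 'management_interface'))  # ens0
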
+ ini_str = u'[platform_conf]\n' + open(tsc.PLATFORM_CONF_FILE, 'r').read() + ini_fp = io.StringIO(ini_str) + config.readfp(ini_fp) + + try: + value = str(config.get('platform_conf', 'nodetype')) + + global nodetype + nodetype = value + except configparser.Error: + logging.exception("Failed to read nodetype from config") + return False + + +def get_mgmt_ip(): + # Check if initial config is complete + if not os.path.exists('/etc/platform/.initial_config_complete'): + return None + mgmt_hostname = socket.gethostname() + return utils.gethostbyname(mgmt_hostname) + + +# Because the patching daemons are launched before manifests are +# applied, the content of some settings in platform.conf can change, +# such as the management interface. As such, we can't just directly +# use tsc.management_interface +# +def get_mgmt_iface(): + # Check if initial config is complete + if not os.path.exists(constants.INITIAL_CONFIG_COMPLETE_FLAG): + return None + + global mgmt_if + global platform_conf_mtime + + if mgmt_if is not None and \ + platform_conf_mtime == os.stat(tsc.PLATFORM_CONF_FILE).st_mtime: + # The platform.conf file hasn't been modified since we read it, + # so return the cached value. + return mgmt_if + + if six.PY2: + config = configparser.SafeConfigParser() + elif six.PY3: + config = configparser.SafeConfigParser(strict=False) + + # The platform.conf file has no section headers, which causes problems + # for ConfigParser. So we'll fake it out. + ini_str = u'[platform_conf]\n' + open(tsc.PLATFORM_CONF_FILE, 'r').read() + ini_fp = io.StringIO(ini_str) + config.readfp(ini_fp) + + try: + value = str(config.get('platform_conf', 'management_interface')) + + mgmt_if = value + + platform_conf_mtime = os.stat(tsc.PLATFORM_CONF_FILE).st_mtime + except configparser.Error: + logging.exception("Failed to read management_interface from config") + return None + return mgmt_if diff --git a/sw-patch/cgcs-patch/cgcs_patch/constants.py b/sw-patch/cgcs-patch/cgcs_patch/constants.py new file mode 100644 index 00000000..4f1654a5 --- /dev/null +++ b/sw-patch/cgcs-patch/cgcs_patch/constants.py @@ -0,0 +1,51 @@ +""" +Copyright (c) 2015-2021 Wind River Systems, Inc. 
+ +SPDX-License-Identifier: Apache-2.0 + +""" + +import os +try: + # The tsconfig module is only available at runtime + import tsconfig.tsconfig as tsc + + INITIAL_CONFIG_COMPLETE_FLAG = os.path.join( + tsc.PLATFORM_CONF_PATH, ".initial_config_complete") +except Exception: + pass + +PATCH_AGENT_STATE_IDLE = "idle" +PATCH_AGENT_STATE_INSTALLING = "installing" +PATCH_AGENT_STATE_INSTALL_FAILED = "install-failed" +PATCH_AGENT_STATE_INSTALL_REJECTED = "install-rejected" + +PATCH_STORAGE_DIR = "/opt/patching" + +ADDRESS_VERSION_IPV4 = 4 +ADDRESS_VERSION_IPV6 = 6 +CONTROLLER_FLOATING_HOSTNAME = "controller" + +AVAILABLE = 'Available' +APPLIED = 'Applied' +PARTIAL_APPLY = 'Partial-Apply' +PARTIAL_REMOVE = 'Partial-Remove' +COMMITTED = 'Committed' +UNKNOWN = 'n/a' + +STATUS_OBSOLETE = 'OBS' +STATUS_RELEASED = 'REL' +STATUS_DEVELOPEMENT = 'DEV' + +CLI_OPT_ALL = '--all' +CLI_OPT_DRY_RUN = '--dry-run' +CLI_OPT_RECURSIVE = '--recursive' +CLI_OPT_RELEASE = '--release' + +ENABLE_DEV_CERTIFICATE_PATCH_IDENTIFIER = 'ENABLE_DEV_CERTIFICATE' + +LOOPBACK_INTERFACE_NAME = "lo" + +SEMANTIC_PREAPPLY = 'pre-apply' +SEMANTIC_PREREMOVE = 'pre-remove' +SEMANTIC_ACTIONS = [SEMANTIC_PREAPPLY, SEMANTIC_PREREMOVE] diff --git a/sw-patch/cgcs-patch/cgcs_patch/exceptions.py b/sw-patch/cgcs-patch/cgcs_patch/exceptions.py new file mode 100644 index 00000000..6c27c253 --- /dev/null +++ b/sw-patch/cgcs-patch/cgcs_patch/exceptions.py @@ -0,0 +1,57 @@ +""" +Copyright (c) 2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + + +class PatchError(Exception): + """Base class for patching exceptions.""" + + def __init__(self, message=None): + super(PatchError, self).__init__(message) + self.message = message + + def __str__(self): + return self.message or "" + + +class MetadataFail(PatchError): + """Metadata error.""" + pass + + +class RpmFail(PatchError): + """RPM error.""" + pass + + +class SemanticFail(PatchError): + """Semantic check error.""" + pass + + +class RepoFail(PatchError): + """Repo error.""" + pass + + +class PatchFail(PatchError): + """General patching error.""" + pass + + +class PatchValidationFailure(PatchError): + """Patch validation error.""" + pass + + +class PatchMismatchFailure(PatchError): + """Patch validation error.""" + pass + + +class PatchInvalidRequest(PatchError): + """Invalid API request.""" + pass diff --git a/sw-patch/cgcs-patch/cgcs_patch/messages.py b/sw-patch/cgcs-patch/cgcs_patch/messages.py new file mode 100644 index 00000000..86ff99f9 --- /dev/null +++ b/sw-patch/cgcs-patch/cgcs_patch/messages.py @@ -0,0 +1,64 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. 
+ +SPDX-License-Identifier: Apache-2.0 + +""" + +from cgcs_patch.patch_functions import LOG + +PATCHMSG_UNKNOWN = 0 +PATCHMSG_HELLO = 1 +PATCHMSG_HELLO_ACK = 2 +PATCHMSG_SYNC_REQ = 3 +PATCHMSG_SYNC_COMPLETE = 4 +PATCHMSG_HELLO_AGENT = 5 +PATCHMSG_HELLO_AGENT_ACK = 6 +PATCHMSG_QUERY_DETAILED = 7 +PATCHMSG_QUERY_DETAILED_RESP = 8 +PATCHMSG_AGENT_INSTALL_REQ = 9 +PATCHMSG_AGENT_INSTALL_RESP = 10 +PATCHMSG_DROP_HOST_REQ = 11 + +PATCHMSG_STR = { + PATCHMSG_UNKNOWN: "unknown", + PATCHMSG_HELLO: "hello", + PATCHMSG_HELLO_ACK: "hello-ack", + PATCHMSG_SYNC_REQ: "sync-req", + PATCHMSG_SYNC_COMPLETE: "sync-complete", + PATCHMSG_HELLO_AGENT: "hello-agent", + PATCHMSG_HELLO_AGENT_ACK: "hello-agent-ack", + PATCHMSG_QUERY_DETAILED: "query-detailed", + PATCHMSG_QUERY_DETAILED_RESP: "query-detailed-resp", + PATCHMSG_AGENT_INSTALL_REQ: "agent-install-req", + PATCHMSG_AGENT_INSTALL_RESP: "agent-install-resp", + PATCHMSG_DROP_HOST_REQ: "drop-host-req", +} + + +class PatchMessage(object): + def __init__(self, msgtype=PATCHMSG_UNKNOWN): + self.msgtype = msgtype + self.msgversion = 1 + self.message = {} + + def decode(self, data): + if 'msgtype' in data: + self.msgtype = data['msgtype'] + if 'msgversion' in data: + self.msgversion = data['msgversion'] + + def encode(self): + self.message['msgtype'] = self.msgtype + self.message['msgversion'] = self.msgversion + + def data(self): + return {'msgtype': self.msgtype} + + def msgtype_str(self): + if self.msgtype in PATCHMSG_STR: + return PATCHMSG_STR[self.msgtype] + return "invalid-type" + + def handle(self, sock, addr): # pylint: disable=unused-argument + LOG.info("Unhandled message type: %s", self.msgtype) diff --git a/sw-patch/cgcs-patch/cgcs_patch/patch_agent.py b/sw-patch/cgcs-patch/cgcs_patch/patch_agent.py new file mode 100644 index 00000000..ee10f403 --- /dev/null +++ b/sw-patch/cgcs-patch/cgcs_patch/patch_agent.py @@ -0,0 +1,941 @@ +""" +Copyright (c) 2014-2019 Wind River Systems, Inc. 
+ +SPDX-License-Identifier: Apache-2.0 + +""" + +import dnf +import dnf.callback +import dnf.comps +import dnf.exceptions +import dnf.rpm +import dnf.sack +import dnf.transaction +import json +import libdnf.transaction +import os +import random +import requests +import select +import shutil +import socket +import subprocess +import sys +import time + +from cgcs_patch.patch_functions import configure_logging +from cgcs_patch.patch_functions import LOG +import cgcs_patch.config as cfg +from cgcs_patch.base import PatchService +import cgcs_patch.utils as utils +import cgcs_patch.messages as messages +import cgcs_patch.constants as constants + +from tsconfig.tsconfig import http_port +from tsconfig.tsconfig import install_uuid +from tsconfig.tsconfig import subfunctions +from tsconfig.tsconfig import SW_VERSION + +pidfile_path = "/var/run/patch_agent.pid" +node_is_patched_file = "/var/run/node_is_patched" +node_is_patched_rr_file = "/var/run/node_is_patched_rr" +patch_installing_file = "/var/run/patch_installing" +patch_failed_file = "/var/run/patch_install_failed" +node_is_locked_file = "/var/run/.node_locked" + +insvc_patch_scripts = "/run/patching/patch-scripts" +insvc_patch_flags = "/run/patching/patch-flags" +insvc_patch_restart_agent = "/run/patching/.restart.patch-agent" + +run_insvc_patch_scripts_cmd = "/usr/sbin/run-patch-scripts" + +pa = None + +http_port_real = http_port + +# DNF commands +dnf_cmd = ['/bin/dnf'] +dnf_quiet = dnf_cmd + ['--quiet'] +dnf_makecache = dnf_quiet + ['makecache', + '--disablerepo="*"', + '--enablerepo', 'platform-base', + '--enablerepo', 'platform-updates'] + + +def setflag(fname): + try: + with open(fname, "w") as f: + f.write("%d\n" % os.getpid()) + except Exception: + LOG.exception("Failed to update %s flag", fname) + + +def clearflag(fname): + if os.path.exists(fname): + try: + os.remove(fname) + except Exception: + LOG.exception("Failed to clear %s flag", fname) + + +def check_install_uuid(): + controller_install_uuid_url = "http://controller:%s/feed/rel-%s/install_uuid" % (http_port_real, SW_VERSION) + try: + req = requests.get(controller_install_uuid_url) + if req.status_code != 200: + # If we're on controller-1, controller-0 may not have the install_uuid + # matching this release, if we're in an upgrade. If the file doesn't exist, + # bypass this check + if socket.gethostname() == "controller-1": + return True + + LOG.error("Failed to get install_uuid from controller") + return False + except requests.ConnectionError: + LOG.error("Failed to connect to controller") + return False + + controller_install_uuid = str(req.text).rstrip() + + if install_uuid != controller_install_uuid: + LOG.error("Local install_uuid=%s doesn't match controller=%s", install_uuid, controller_install_uuid) + return False + + return True + + +class PatchMessageHelloAgent(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_HELLO_AGENT) + self.patch_op_counter = 0 + + def decode(self, data): + messages.PatchMessage.decode(self, data) + if 'patch_op_counter' in data: + self.patch_op_counter = data['patch_op_counter'] + + def encode(self): + messages.PatchMessage.encode(self) + + def handle(self, sock, addr): + # Send response + + # + # If a user tries to do a host-install on an unlocked node, + # without bypassing the lock check (either via in-service + # patch or --force option), the agent will set its state + # to Install-Rejected in order to report back the rejection. 
+ # However, since this should just be a transient state, + # we don't want the client reporting the Install-Rejected + # state indefinitely, so reset it to Idle after a minute or so. + # + if pa.state == constants.PATCH_AGENT_STATE_INSTALL_REJECTED: + if os.path.exists(node_is_locked_file): + # Node has been locked since rejected attempt. Reset the state + pa.state = constants.PATCH_AGENT_STATE_IDLE + elif (time.time() - pa.rejection_timestamp) > 60: + # Rejected state for more than a minute. Reset it. + pa.state = constants.PATCH_AGENT_STATE_IDLE + + if self.patch_op_counter > 0: + pa.handle_patch_op_counter(self.patch_op_counter) + + resp = PatchMessageHelloAgentAck() + resp.send(sock) + + def send(self, sock): # pylint: disable=unused-argument + LOG.error("Should not get here") + + +class PatchMessageHelloAgentAck(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_HELLO_AGENT_ACK) + + def encode(self): + global pa + messages.PatchMessage.encode(self) + self.message['query_id'] = pa.query_id + self.message['out_of_date'] = pa.changes + self.message['hostname'] = socket.gethostname() + self.message['requires_reboot'] = pa.node_is_patched + self.message['patch_failed'] = pa.patch_failed + self.message['sw_version'] = SW_VERSION + self.message['state'] = pa.state + + def handle(self, sock, addr): + LOG.error("Should not get here") + + def send(self, sock): + global pa + self.encode() + message = json.dumps(self.message) + sock.sendto(str.encode(message), (pa.controller_address, cfg.controller_port)) + + +class PatchMessageQueryDetailed(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_QUERY_DETAILED) + + def decode(self, data): + messages.PatchMessage.decode(self, data) + + def encode(self): + # Nothing to add to the HELLO_AGENT, so just call the super class + messages.PatchMessage.encode(self) + + def handle(self, sock, addr): + # Send response + LOG.info("Handling detailed query") + resp = PatchMessageQueryDetailedResp() + resp.send(sock) + + def send(self, sock): # pylint: disable=unused-argument + LOG.error("Should not get here") + + +class PatchMessageQueryDetailedResp(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_QUERY_DETAILED_RESP) + + def encode(self): + global pa + messages.PatchMessage.encode(self) + self.message['installed'] = pa.installed + self.message['to_remove'] = pa.to_remove + self.message['missing_pkgs'] = pa.missing_pkgs + self.message['duplicated_pkgs'] = pa.duplicated_pkgs + self.message['nodetype'] = cfg.nodetype + self.message['sw_version'] = SW_VERSION + self.message['subfunctions'] = subfunctions + self.message['state'] = pa.state + + def handle(self, sock, addr): + LOG.error("Should not get here") + + def send(self, sock): + self.encode() + message = json.dumps(self.message) + sock.sendall(str.encode(message)) + + +class PatchMessageAgentInstallReq(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_AGENT_INSTALL_REQ) + self.force = False + + def decode(self, data): + messages.PatchMessage.decode(self, data) + if 'force' in data: + self.force = data['force'] + + def encode(self): + # Nothing to add to the HELLO_AGENT, so just call the super class + messages.PatchMessage.encode(self) + + def handle(self, sock, addr): + LOG.info("Handling host install request, force=%s", self.force) + global pa + resp = PatchMessageAgentInstallResp() + + if not 
os.path.exists(node_is_locked_file): + if self.force: + LOG.info("Installing on unlocked node, with force option") + else: + LOG.info("Rejecting install request on unlocked node") + pa.state = constants.PATCH_AGENT_STATE_INSTALL_REJECTED + pa.rejection_timestamp = time.time() + resp.status = False + resp.reject_reason = 'Node must be locked.' + resp.send(sock, addr) + return + + resp.status = pa.handle_install() + resp.send(sock, addr) + + def send(self, sock): # pylint: disable=unused-argument + LOG.error("Should not get here") + + +class PatchMessageAgentInstallResp(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_AGENT_INSTALL_RESP) + self.status = False + self.reject_reason = None + + def encode(self): + global pa + messages.PatchMessage.encode(self) + self.message['status'] = self.status + if self.reject_reason is not None: + self.message['reject_reason'] = self.reject_reason + + def handle(self, sock, addr): + LOG.error("Should not get here") + + def send(self, sock, addr): + address = (addr[0], cfg.controller_port) + self.encode() + message = json.dumps(self.message) + sock.sendto(str.encode(message), address) + + # Send a hello ack to follow it + resp = PatchMessageHelloAgentAck() + resp.send(sock) + + +class PatchAgentDnfTransLogCB(dnf.callback.TransactionProgress): + def __init__(self): + dnf.callback.TransactionProgress.__init__(self) + + self.log_prefix = 'dnf trans' + + def progress(self, package, action, ti_done, ti_total, ts_done, ts_total): + if action in dnf.transaction.ACTIONS: + action_str = dnf.transaction.ACTIONS[action] + elif action == dnf.transaction.TRANS_POST: + action_str = 'Post transaction' + else: + action_str = 'unknown(%d)' % action + + if ti_done is not None: + # To reduce the volume of logs, only log 0% and 100% + if ti_done == 0 or ti_done == ti_total: + LOG.info('%s PROGRESS %s: %s %0.1f%% [%s/%s]', + self.log_prefix, action_str, package, + (ti_done * 100 // ti_total), + ts_done, ts_total) + else: + LOG.info('%s PROGRESS %s: %s [%s/%s]', + self.log_prefix, action_str, package, ts_done, ts_total) + + def filelog(self, package, action): + if action in dnf.transaction.FILE_ACTIONS: + msg = '%s: %s' % (dnf.transaction.FILE_ACTIONS[action], package) + else: + msg = '%s: %s' % (package, action) + LOG.info('%s FILELOG %s', self.log_prefix, msg) + + def scriptout(self, msgs): + if msgs: + LOG.info("%s SCRIPTOUT :\n%s", self.log_prefix, msgs) + + def error(self, message): + LOG.error("%s ERROR: %s", self.log_prefix, message) + + +class PatchAgent(PatchService): + def __init__(self): + PatchService.__init__(self) + self.sock_out = None + self.sock_in = None + self.controller_address = None + self.listener = None + self.changes = False + self.installed = {} + self.installed_dnf = [] + self.to_install = {} + self.to_install_dnf = [] + self.to_downgrade_dnf = [] + self.to_remove = [] + self.to_remove_dnf = [] + self.missing_pkgs = [] + self.missing_pkgs_dnf = [] + self.duplicated_pkgs = {} + self.patch_op_counter = 0 + self.node_is_patched = os.path.exists(node_is_patched_file) + self.node_is_patched_timestamp = 0 + self.query_id = 0 + self.state = constants.PATCH_AGENT_STATE_IDLE + self.last_config_audit = 0 + self.rejection_timestamp = 0 + self.dnfb = None + self.last_repo_revision = None + + # Check state flags + if os.path.exists(patch_installing_file): + # We restarted while installing. 
Change to failed + setflag(patch_failed_file) + os.remove(patch_installing_file) + + if os.path.exists(patch_failed_file): + self.state = constants.PATCH_AGENT_STATE_INSTALL_FAILED + + self.patch_failed = os.path.exists(patch_failed_file) + + def update_config(self): + cfg.read_config() + + if self.port != cfg.agent_port: + self.port = cfg.agent_port + + # Loopback interface does not support multicast messaging, therefore + # revert to using unicast messaging when configured against the + # loopback device + if cfg.get_mgmt_iface() == constants.LOOPBACK_INTERFACE_NAME: + self.mcast_addr = None + self.controller_address = cfg.get_mgmt_ip() + else: + self.mcast_addr = cfg.agent_mcast_group + self.controller_address = cfg.controller_mcast_group + + def setup_tcp_socket(self): + address_family = utils.get_management_family() + self.listener = socket.socket(address_family, socket.SOCK_STREAM) + self.listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + self.listener.bind(('', self.port)) + self.listener.listen(2) # Allow two connections, for two controllers + + @staticmethod + def pkgobj_to_version_str(pkg): + # Transform pkgobj version to format used by patch-controller + if pkg.epoch != 0: + output = "%s:%s-%s@%s" % (pkg.epoch, pkg.version, pkg.release, pkg.arch) + else: + output = "%s-%s@%s" % (pkg.version, pkg.release, pkg.arch) + + return output + + @staticmethod + def pkgobjs_to_list(pkgobjs): + # Transform pkgobj list to format used by patch-controller + output = {} + for pkg in pkgobjs: + output[pkg.name] = PatchAgent.pkgobj_to_version_str(pkg) + + return output + + def dnf_reset_client(self): + if self.dnfb is not None: + self.dnfb.close() + self.dnfb = None + + self.dnfb = dnf.Base() + self.dnfb.conf.substitutions['infra'] = 'stock' + + # Reset default installonlypkgs list + self.dnfb.conf.installonlypkgs = [] + + self.dnfb.read_all_repos() + + # Ensure only platform repos are enabled for transaction + for repo in self.dnfb.repos.all(): + if repo.id == 'platform-base' or repo.id == 'platform-updates': + repo.enable() + else: + repo.disable() + + # Read repo info + self.dnfb.fill_sack() + + def query(self, check_revision=False): + """ Check current patch state """ + if not check_install_uuid(): + LOG.info("Failed install_uuid check. Skipping query") + return False + + if self.dnfb is not None: + self.dnfb.close() + self.dnfb = None + + # TODO(dpenney): Use python APIs for makecache + try: + subprocess.check_output(dnf_makecache, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + LOG.error("Failed to run dnf makecache") + LOG.error("Command output: %s", e.output) + # Set a state to "unknown"? + return False + + self.dnf_reset_client() + current_repo_revision = self.dnfb.repos['platform-updates']._repo.getRevision() # pylint: disable=protected-access + + if check_revision and self.last_repo_revision is not None: + # We're expecting the revision to be updated. + # If it's not, we ended up getting a cached repomd query. + if current_repo_revision == self.last_repo_revision: + LOG.info("makecache returned same revision as previous (%s). Retry after one second", + current_repo_revision) + time.sleep(1) + try: + subprocess.check_output(dnf_makecache, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + LOG.error("Failed to run dnf makecache") + LOG.error("Command output: %s", e.output) + # Set a state to "unknown"? 
+ return False + + self.dnf_reset_client() + current_repo_revision = self.dnfb.repos['platform-updates']._repo.getRevision() # pylint: disable=protected-access + if current_repo_revision != self.last_repo_revision: + LOG.info("Stale repo revision id corrected with retry. New id: %s", + current_repo_revision) + + self.last_repo_revision = current_repo_revision + + # Generate a unique query id + self.query_id = random.random() + + self.changes = False + self.installed_dnf = [] + self.installed = {} + self.to_install_dnf = [] + self.to_downgrade_dnf = [] + self.to_remove = [] + self.to_remove_dnf = [] + self.missing_pkgs = [] + self.missing_pkgs_dnf = [] + + # Get the repo data + pkgs_installed = dnf.sack._rpmdb_sack(self.dnfb).query().installed() # pylint: disable=protected-access + avail = self.dnfb.sack.query().available().latest() + + # Check for packages with multiple installed versions + self.duplicated_pkgs = {} + for pkg in pkgs_installed: + pkglist = pkgs_installed.filter(name=pkg.name, arch=pkg.arch) + if len(pkglist) > 1: + if pkg.name not in self.duplicated_pkgs: + self.duplicated_pkgs[pkg.name] = {} + if pkg.arch not in self.duplicated_pkgs[pkg.name]: + self.duplicated_pkgs[pkg.name][pkg.arch] = list(map(PatchAgent.pkgobj_to_version_str, pkglist)) + LOG.warn("Duplicate packages installed: %s %s", + pkg.name, ", ".join(self.duplicated_pkgs[pkg.name][pkg.arch])) + + # There are three possible actions: + # 1. If installed pkg is not in a repo, remove it. + # 2. If installed pkg version does not match newest repo version, update it. + # 3. If a package in the grouplist is not installed, install it. + + for pkg in pkgs_installed: + highest = avail.filter(name=pkg.name, arch=pkg.arch) + if highest: + highest_pkg = highest[0] + + if pkg.evr_eq(highest_pkg): + continue + + if pkg.evr_gt(highest_pkg): + self.to_downgrade_dnf.append(highest_pkg) + else: + self.to_install_dnf.append(highest_pkg) + else: + self.to_remove_dnf.append(pkg) + self.to_remove.append(pkg.name) + + self.installed_dnf.append(pkg) + self.changes = True + + # Look for new packages + self.dnfb.read_comps() + grp_id = 'updates-%s' % '-'.join(subfunctions) + pkggrp = None + for grp in self.dnfb.comps.groups_iter(): + if grp.id == grp_id: + pkggrp = grp + break + + if pkggrp is None: + LOG.error("Could not find software group: %s", grp_id) + + for pkg in pkggrp.packages_iter(): + try: + res = pkgs_installed.filter(name=pkg.name) + if len(res) == 0: + found_pkg = avail.filter(name=pkg.name) + self.missing_pkgs_dnf.append(found_pkg[0]) + self.missing_pkgs.append(found_pkg[0].name) + self.changes = True + except dnf.exceptions.PackageNotFoundError: + self.missing_pkgs_dnf.append(pkg) + self.missing_pkgs.append(pkg.name) + self.changes = True + + self.installed = self.pkgobjs_to_list(self.installed_dnf) + self.to_install = self.pkgobjs_to_list(self.to_install_dnf + self.to_downgrade_dnf) + + LOG.info("Patch state query returns %s", self.changes) + LOG.info("Installed: %s", self.installed) + LOG.info("To install: %s", self.to_install) + LOG.info("To remove: %s", self.to_remove) + LOG.info("Missing: %s", self.missing_pkgs) + if len(self.duplicated_pkgs) > 0: + LOG.info("Duplicated: %s", self.duplicated_pkgs) + + return True + + def resolve_dnf_transaction(self, undo_failure=True): + LOG.info("Starting to process transaction: undo_failure=%s", undo_failure) + self.dnfb.resolve() + self.dnfb.download_packages(self.dnfb.transaction.install_set) + + tid = self.dnfb.do_transaction(display=PatchAgentDnfTransLogCB()) + + transaction_rc 
= True + for t in self.dnfb.transaction: + if t.state != libdnf.transaction.TransactionItemState_DONE: + transaction_rc = False + break + + self.dnf_reset_client() + + if not transaction_rc: + if undo_failure: + LOG.error("Failure occurred... Undoing last transaction (%s)", tid) + old = self.dnfb.history.old((tid,))[0] + mobj = dnf.db.history.MergedTransactionWrapper(old) + + self.dnfb._history_undo_operations(mobj, old.tid, True) # pylint: disable=protected-access + + if not self.resolve_dnf_transaction(undo_failure=False): + LOG.error("Failed to undo transaction") + + LOG.info("Transaction complete: undo_failure=%s, success=%s", undo_failure, transaction_rc) + return transaction_rc + + def handle_install(self, verbose_to_stdout=False, disallow_insvc_patch=False): + # + # The disallow_insvc_patch parameter is set when we're installing + # the patch during init. At that time, we don't want to deal with + # in-service patch scripts, so instead we'll treat any patch as + # a reboot-required when this parameter is set. Rather than running + # any scripts, the RR flag will be set, which will result in the node + # being rebooted immediately upon completion of the installation. + # + + LOG.info("Handling install") + + # Check the INSTALL_UUID first. If it doesn't match the active + # controller, we don't want to install patches. + if not check_install_uuid(): + LOG.error("Failed install_uuid check. Skipping install") + + self.patch_failed = True + setflag(patch_failed_file) + self.state = constants.PATCH_AGENT_STATE_INSTALL_FAILED + + # Send a hello to provide a state update + if self.sock_out is not None: + hello_ack = PatchMessageHelloAgentAck() + hello_ack.send(self.sock_out) + + return False + + self.state = constants.PATCH_AGENT_STATE_INSTALLING + setflag(patch_installing_file) + + try: + # Create insvc patch directories + if os.path.exists(insvc_patch_scripts): + shutil.rmtree(insvc_patch_scripts, ignore_errors=True) + if os.path.exists(insvc_patch_flags): + shutil.rmtree(insvc_patch_flags, ignore_errors=True) + os.mkdir(insvc_patch_scripts, 0o700) + os.mkdir(insvc_patch_flags, 0o700) + except Exception: + LOG.exception("Failed to create in-service patch directories") + + # Send a hello to provide a state update + if self.sock_out is not None: + hello_ack = PatchMessageHelloAgentAck() + hello_ack.send(self.sock_out) + + # Build up the install set + if verbose_to_stdout: + print("Checking for software updates...") + self.query() + + changed = False + rc = True + + if len(self.duplicated_pkgs) > 0: + LOG.error("Duplicate installed packages found. 
Manual recovery is required.") + rc = False + else: + if len(self.to_install_dnf) > 0 or len(self.to_downgrade_dnf) > 0: + LOG.info("Adding pkgs to installation set: %s", self.to_install) + for pkg in self.to_install_dnf: + self.dnfb.package_install(pkg) + + for pkg in self.to_downgrade_dnf: + self.dnfb.package_downgrade(pkg) + + changed = True + + if len(self.missing_pkgs_dnf) > 0: + LOG.info("Adding missing pkgs to installation set: %s", self.missing_pkgs) + for pkg in self.missing_pkgs_dnf: + self.dnfb.package_install(pkg) + changed = True + + if len(self.to_remove_dnf) > 0: + LOG.info("Adding pkgs to be removed: %s", self.to_remove) + for pkg in self.to_remove_dnf: + self.dnfb.package_remove(pkg) + changed = True + + if changed: + # Run the transaction set + transaction_rc = False + try: + transaction_rc = self.resolve_dnf_transaction() + except dnf.exceptions.DepsolveError: + LOG.exception("Failures resolving dependencies in transaction") + except dnf.exceptions.DownloadError: + LOG.exception("Failures downloading in transaction") + except dnf.exceptions.Error: + LOG.exception("Failure resolving transaction") + + if not transaction_rc: + LOG.error("Failures occurred during transaction") + rc = False + if verbose_to_stdout: + print("WARNING: Software update failed.") + + else: + if verbose_to_stdout: + print("Nothing to install.") + LOG.info("Nothing to install") + + if changed and rc: + # Update the node_is_patched flag + setflag(node_is_patched_file) + + self.node_is_patched = True + if verbose_to_stdout: + print("This node has been patched.") + + if os.path.exists(node_is_patched_rr_file): + LOG.info("Reboot is required. Skipping patch-scripts") + elif disallow_insvc_patch: + LOG.info("Disallowing patch-scripts. Treating as reboot-required") + setflag(node_is_patched_rr_file) + else: + LOG.info("Running in-service patch-scripts") + + try: + subprocess.check_output(run_insvc_patch_scripts_cmd, stderr=subprocess.STDOUT) + + # Clear the node_is_patched flag, since we've handled it in-service + clearflag(node_is_patched_file) + self.node_is_patched = False + except subprocess.CalledProcessError as e: + LOG.exception("In-Service patch scripts failed") + LOG.error("Command output: %s", e.output) + # Fail the patching operation + rc = False + + # Clear the in-service patch dirs + if os.path.exists(insvc_patch_scripts): + shutil.rmtree(insvc_patch_scripts, ignore_errors=True) + if os.path.exists(insvc_patch_flags): + shutil.rmtree(insvc_patch_flags, ignore_errors=True) + + if rc: + self.patch_failed = False + clearflag(patch_failed_file) + self.state = constants.PATCH_AGENT_STATE_IDLE + else: + # Update the patch_failed flag + self.patch_failed = True + setflag(patch_failed_file) + self.state = constants.PATCH_AGENT_STATE_INSTALL_FAILED + + clearflag(patch_installing_file) + self.query() + + # Send a hello to provide a state update + if self.sock_out is not None: + hello_ack = PatchMessageHelloAgentAck() + hello_ack.send(self.sock_out) + + return rc + + def handle_patch_op_counter(self, counter): + changed = False + if os.path.exists(node_is_patched_file): + # The node has been patched. 
Run a query if: + # - node_is_patched didn't exist previously + # - node_is_patched timestamp changed + timestamp = os.path.getmtime(node_is_patched_file) + if not self.node_is_patched: + self.node_is_patched = True + self.node_is_patched_timestamp = timestamp + changed = True + elif self.node_is_patched_timestamp != timestamp: + self.node_is_patched_timestamp = timestamp + changed = True + elif self.node_is_patched: + self.node_is_patched = False + self.node_is_patched_timestamp = 0 + changed = True + + if self.patch_op_counter < counter: + self.patch_op_counter = counter + changed = True + + if changed: + rc = self.query(check_revision=True) + if not rc: + # Query failed. Reset the op counter + self.patch_op_counter = 0 + + def run(self): + self.setup_socket() + + while self.sock_out is None: + # Check every thirty seconds? + # Once we've got a conf file, tied into packstack, + # we'll get restarted when the file is updated, + # and this should be unnecessary. + time.sleep(30) + self.setup_socket() + + self.setup_tcp_socket() + + # Ok, now we've got our socket. + # Let's let the controllers know we're here + hello_ack = PatchMessageHelloAgentAck() + hello_ack.send(self.sock_out) + + first_hello = True + + connections = [] + + timeout = time.time() + 30.0 + remaining = 30 + + while True: + inputs = [self.sock_in, self.listener] + connections + outputs = [] + + rlist, wlist, xlist = select.select(inputs, outputs, inputs, remaining) + + remaining = int(timeout - time.time()) + if remaining <= 0 or remaining > 30: + timeout = time.time() + 30.0 + remaining = 30 + + if (len(rlist) == 0 and + len(wlist) == 0 and + len(xlist) == 0): + # Timeout hit + self.audit_socket() + continue + + for s in rlist: + if s == self.listener: + conn, addr = s.accept() + connections.append(conn) + continue + + data = '' + addr = None + msg = None + + if s == self.sock_in: + # Receive from UDP + data, addr = s.recvfrom(1024) + else: + # Receive from TCP + while True: + try: + packet = s.recv(1024) + except socket.error: + LOG.exception("Socket error on recv") + data = '' + break + + if packet: + data += packet.decode() + + if data == '': + break + + try: + json.loads(data) + break + except ValueError: + # Message is incomplete + continue + else: + # End of TCP message received + break + + if data == '': + # Connection dropped + connections.remove(s) + s.close() + continue + + msgdata = json.loads(data) + + # For now, discard any messages that are not msgversion==1 + if 'msgversion' in msgdata and msgdata['msgversion'] != 1: + continue + + if 'msgtype' in msgdata: + if msgdata['msgtype'] == messages.PATCHMSG_HELLO_AGENT: + if first_hello: + self.query() + first_hello = False + + msg = PatchMessageHelloAgent() + elif msgdata['msgtype'] == messages.PATCHMSG_QUERY_DETAILED: + msg = PatchMessageQueryDetailed() + elif msgdata['msgtype'] == messages.PATCHMSG_AGENT_INSTALL_REQ: + msg = PatchMessageAgentInstallReq() + + if msg is None: + msg = messages.PatchMessage() + + msg.decode(msgdata) + if s == self.sock_in: + msg.handle(self.sock_out, addr) + else: + msg.handle(s, addr) + + for s in xlist: + if s in connections: + connections.remove(s) + s.close() + + # Check for in-service patch restart flag + if os.path.exists(insvc_patch_restart_agent): + # Make sure it's safe to restart, ie. no reqs queued + rlist, wlist, xlist = select.select(inputs, outputs, inputs, 0) + if (len(rlist) == 0 and + len(wlist) == 0 and + len(xlist) == 0): + # Restart + LOG.info("In-service patch restart flag detected. 
Exiting.") + os.remove(insvc_patch_restart_agent) + exit(0) + + +def main(): + global pa + + configure_logging(dnf_log=True) + + cfg.read_config() + + pa = PatchAgent() + pa.query() + + if len(sys.argv) <= 1: + pa.run() + elif sys.argv[1] == "--install": + if not check_install_uuid(): + # In certain cases, the lighttpd server could still be running using + # its default port 80, as opposed to the port configured in platform.conf + global http_port_real + LOG.info("Failed install_uuid check via http_port=%s. Trying with default port 80", http_port_real) + http_port_real = 80 + + pa.handle_install(verbose_to_stdout=True, disallow_insvc_patch=True) + elif sys.argv[1] == "--status": + rc = 0 + if pa.changes: + rc = 1 + exit(rc) diff --git a/sw-patch/cgcs-patch/cgcs_patch/patch_client.py b/sw-patch/cgcs-patch/cgcs_patch/patch_client.py new file mode 100644 index 00000000..135ba3a8 --- /dev/null +++ b/sw-patch/cgcs-patch/cgcs_patch/patch_client.py @@ -0,0 +1,1513 @@ +""" +Copyright (c) 2014-2022 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" +from __future__ import print_function +from six.moves import input +import requests +import json +import os +import sys +import shutil +import re +import time +import signal + +import subprocess +import textwrap + +# noinspection PyUnresolvedReferences +from requests_toolbelt import MultipartEncoder + +import cgcs_patch.constants as constants +import cgcs_patch.utils as utils + +from tsconfig.tsconfig import SW_VERSION as RUNNING_SW_VERSION +from tsconfig.tsconfig import INITIAL_CONTROLLER_CONFIG_COMPLETE + +api_addr = "127.0.0.1:5487" +auth_token = None + +TERM_WIDTH = 72 +VIRTUAL_REGION = 'SystemController' +IPV6_FAMILY = 6 + + +help_upload = "Upload one or more patches to the patching system." +help_upload_dir = "Upload patches from one or more directories to the patching system." +help_apply = "Apply one or more patches. This adds the specified patches " + \ + "to the repository, making the update(s) available to the " + \ + "hosts in the system. Use --all to apply all available patches." +help_remove = "Remove one or more patches. This removes the specified " + \ + "patches from the repository." +help_delete = "Delete one or more patches from the patching system." +help_query = "Query system patches. Optionally, specify 'query applied' " + \ + "to query only those patches that are applied, or 'query available' " + \ + "to query those that are not." +help_show = "Show details for specified patches." +help_what_requires = "List patches that require the specified patches." +help_query_hosts = "Query patch states for hosts in the system." +help_host_install = "Trigger patch install/remove on specified host. " + \ + "To force install on unlocked node, use the --force option." +help_host_install_async = "Trigger patch install/remove on specified host. " + \ + "To force install on unlocked node, use the --force option." + \ + " Note: This command returns immediately upon dispatching installation request." +help_patch_args = "Patches are specified as a space-separated list of patch IDs." +help_install_local = "Trigger patch install/remove on the local host. " + \ + "This command can only be used for patch installation prior to initial " + \ + "configuration." +help_drop_host = "Drop specified host from table." +help_query_dependencies = "List dependencies for specified patch. Use " + \ + constants.CLI_OPT_RECURSIVE + " for recursive query." +help_is_applied = "Query Applied state for list of patches. 
" + \ + "Returns True if all are Applied, False otherwise." +help_is_available = "Query Available state for list of patches. " + \ + "Returns True if all are Available, False otherwise." +help_report_app_dependencies = "Report application patch dependencies, " + \ + "specifying application name with --app option, plus a list of patches. " + \ + "Reported dependencies can be dropped by specifying app with no patch list." +help_query_app_dependencies = "Display set of reported application patch " + \ + "dependencies." +help_commit = "Commit patches to free disk space. WARNING: This action " + \ + "is irreversible!" +help_region_name = "Send the request to a specified region" + + +def set_term_width(): + global TERM_WIDTH + + try: + with open(os.devnull, 'w') as NULL: + output = subprocess.check_output(["tput", "cols"], stderr=NULL) + width = int(output) + if width > 60: + TERM_WIDTH = width - 4 + except Exception: + pass + + +def print_help(): + print("usage: sw-patch [--debug]") + print(" ...") + print("") + print("Subcomands:") + print("") + print(textwrap.fill(" {0:<15} ".format("upload:") + help_upload, + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + print("") + print(textwrap.fill(" {0:<15} ".format("upload-dir:") + help_upload_dir, + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + print("") + print(textwrap.fill(" {0:<15} ".format("apply:") + help_apply, + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + print(textwrap.fill(help_patch_args, + width=TERM_WIDTH, initial_indent=' ' * 20, subsequent_indent=' ' * 20)) + print("") + print(textwrap.fill(" {0:<15} ".format("remove:") + help_remove, + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + print(textwrap.fill(help_patch_args, + width=TERM_WIDTH, initial_indent=' ' * 20, subsequent_indent=' ' * 20)) + print("") + print(textwrap.fill(" {0:<15} ".format("delete:") + help_delete, + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + print(textwrap.fill(help_patch_args, + width=TERM_WIDTH, initial_indent=' ' * 20, subsequent_indent=' ' * 20)) + print("") + print(textwrap.fill(" {0:<15} ".format("query:") + help_query, + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + print("") + print(textwrap.fill(" {0:<15} ".format("show:") + help_show, + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + print("") + print(textwrap.fill(" {0:<15} ".format("what-requires:") + help_what_requires, + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + print("") + print(textwrap.fill(" {0:<15} ".format("query-hosts:") + help_query_hosts, + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + print("") + print(textwrap.fill(" {0:<15} ".format("host-install:") + help_host_install, + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + print("") + print(textwrap.fill(" {0:<15} ".format("host-install-async:") + help_host_install_async, + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + print("") + print(textwrap.fill(" {0:<15} ".format("install-local:") + help_install_local, + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + print("") + print(textwrap.fill(" {0:<15} ".format("drop-host:") + help_drop_host, + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + print("") + print(textwrap.fill(" {0:<15} ".format("query-dependencies:") + help_query_dependencies, + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + print("") + print(textwrap.fill(" {0:<15} ".format("is-applied:") + help_is_applied, + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + print("") + print(textwrap.fill(" {0:<15} ".format("is-available:") + help_is_available, + width=TERM_WIDTH, subsequent_indent=' ' * 20)) 
+ print("") + print(textwrap.fill(" {0:<15} ".format("report-app-dependencies:") + help_report_app_dependencies, + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + print("") + print(textwrap.fill(" {0:<15} ".format("query-app-dependencies:") + help_query_app_dependencies, + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + print("") + print(textwrap.fill(" {0:<15} ".format("commit:") + help_commit, + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + print("") + print(textwrap.fill(" {0:<15} ".format("--os-region-name:") + help_region_name, + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + print("") + + exit(1) + + +def check_rc(req): + rc = 0 + if req.status_code == 200: + data = json.loads(req.text) + if 'error' in data and data["error"] != "": + rc = 1 + else: + rc = 1 + + return rc + + +def print_result_debug(req): + if req.status_code == 200: + data = json.loads(req.text) + if 'pd' in data: + print(json.dumps(data['pd'], + sort_keys=True, + indent=4, + separators=(',', ': '))) + elif 'data' in data: + print(json.dumps(data['data'], + sort_keys=True, + indent=4, + separators=(',', ': '))) + else: + print(json.dumps(data, + sort_keys=True, + indent=4, + separators=(',', ': '))) + elif req.status_code == 500: + print("An internal error has occurred. Please check /var/log/patching.log for details") + else: + m = re.search("(Error message:.*)", req.text, re.MULTILINE) + print(m.group(0)) + + +def print_patch_op_result(req): + if req.status_code == 200: + data = json.loads(req.text) + + if 'pd' in data: + pd = data['pd'] + + # Calculate column widths + hdr_id = "Patch ID" + hdr_rr = "RR" + hdr_rel = "Release" + hdr_repo = "Repo State" + hdr_state = "Patch State" + + width_id = len(hdr_id) + width_rr = len(hdr_rr) + width_rel = len(hdr_rel) + width_repo = len(hdr_repo) + width_state = len(hdr_state) + + show_repo = False + + for patch_id in list(pd): + if len(patch_id) > width_id: + width_id = len(patch_id) + if len(pd[patch_id]["sw_version"]) > width_rel: + width_rel = len(pd[patch_id]["sw_version"]) + if len(pd[patch_id]["repostate"]) > width_repo: + width_repo = len(pd[patch_id]["repostate"]) + if len(pd[patch_id]["patchstate"]) > width_state: + width_state = len(pd[patch_id]["patchstate"]) + if pd[patch_id]["patchstate"] == "n/a": + show_repo = True + + if show_repo: + print("{0:^{width_id}} {1:^{width_rr}} {2:^{width_rel}} {3:^{width_repo}} {4:^{width_state}}".format( + hdr_id, hdr_rr, hdr_rel, hdr_repo, hdr_state, + width_id=width_id, width_rr=width_rr, + width_rel=width_rel, width_repo=width_repo, width_state=width_state)) + + print("{0} {1} {2} {3} {4}".format( + '=' * width_id, '=' * width_rr, '=' * width_rel, '=' * width_repo, '=' * width_state)) + + for patch_id in sorted(list(pd)): + if "reboot_required" in pd[patch_id]: + rr = pd[patch_id]["reboot_required"] + else: + rr = "Y" + + print("{0:<{width_id}} {1:^{width_rr}} {2:^{width_rel}} {3:^{width_repo}} {4:^{width_state}}".format( + patch_id, + rr, + pd[patch_id]["sw_version"], + pd[patch_id]["repostate"], + pd[patch_id]["patchstate"], + width_id=width_id, width_rr=width_rr, + width_rel=width_rel, width_repo=width_repo, width_state=width_state)) + else: + print("{0:^{width_id}} {1:^{width_rr}} {2:^{width_rel}} {3:^{width_state}}".format( + hdr_id, hdr_rr, hdr_rel, hdr_state, + width_id=width_id, width_rr=width_rr, width_rel=width_rel, width_state=width_state)) + + print("{0} {1} {2} {3}".format( + '=' * width_id, '=' * width_rr, '=' * width_rel, '=' * width_state)) + + for patch_id in sorted(list(pd)): + if "reboot_required" in 
pd[patch_id]: + rr = pd[patch_id]["reboot_required"] + else: + rr = "Y" + + print("{0:<{width_id}} {1:^{width_rr}} {2:^{width_rel}} {3:^{width_state}}".format( + patch_id, + rr, + pd[patch_id]["sw_version"], + pd[patch_id]["patchstate"], + width_id=width_id, width_rr=width_rr, width_rel=width_rel, width_state=width_state)) + + print("") + + if 'info' in data and data["info"] != "": + print(data["info"]) + + if 'warning' in data and data["warning"] != "": + print("Warning:") + print(data["warning"]) + + if 'error' in data and data["error"] != "": + print("Error:") + print(data["error"]) + + elif req.status_code == 500: + print("An internal error has occurred. Please check /var/log/patching.log for details") + + +def print_patch_show_result(req): + if req.status_code == 200: + data = json.loads(req.text) + + if 'metadata' in data: + pd = data['metadata'] + for patch_id in sorted(list(pd)): + print("%s:" % patch_id) + + if "sw_version" in pd[patch_id] and pd[patch_id]["sw_version"] != "": + print(textwrap.fill(" {0:<15} ".format("Release:") + pd[patch_id]["sw_version"], + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + + if "patchstate" in pd[patch_id] and pd[patch_id]["patchstate"] != "": + print(textwrap.fill(" {0:<15} ".format("Patch State:") + pd[patch_id]["patchstate"], + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + + if pd[patch_id]["patchstate"] == "n/a": + if "repostate" in pd[patch_id] and pd[patch_id]["repostate"] != "": + print(textwrap.fill(" {0:<15} ".format("Repo State:") + pd[patch_id]["repostate"], + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + + if "status" in pd[patch_id] and pd[patch_id]["status"] != "": + print(textwrap.fill(" {0:<15} ".format("Status:") + pd[patch_id]["status"], + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + + if "unremovable" in pd[patch_id] and pd[patch_id]["unremovable"] != "": + print(textwrap.fill(" {0:<15} ".format("Unremovable:") + pd[patch_id]["unremovable"], + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + + if "reboot_required" in pd[patch_id] and pd[patch_id]["reboot_required"] != "": + print(textwrap.fill(" {0:<15} ".format("RR:") + pd[patch_id]["reboot_required"], + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + + if "apply_active_release_only" in pd[patch_id] and pd[patch_id]["apply_active_release_only"] != "": + print(textwrap.fill(" {0:<15} ".format("Apply Active Release Only:") + pd[patch_id]["apply_active_release_only"], + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + + if "summary" in pd[patch_id] and pd[patch_id]["summary"] != "": + print(textwrap.fill(" {0:<15} ".format("Summary:") + pd[patch_id]["summary"], + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + + if "description" in pd[patch_id] and pd[patch_id]["description"] != "": + first_line = True + for line in pd[patch_id]["description"].split('\n'): + if first_line: + print(textwrap.fill(" {0:<15} ".format("Description:") + line, + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + first_line = False + else: + print(textwrap.fill(line, + width=TERM_WIDTH, subsequent_indent=' ' * 20, + initial_indent=' ' * 20)) + + if "install_instructions" in pd[patch_id] and pd[patch_id]["install_instructions"] != "": + print(" Install Instructions:") + for line in pd[patch_id]["install_instructions"].split('\n'): + print(textwrap.fill(line, + width=TERM_WIDTH, subsequent_indent=' ' * 20, + initial_indent=' ' * 20)) + + if "warnings" in pd[patch_id] and pd[patch_id]["warnings"] != "": + first_line = True + for line in pd[patch_id]["warnings"].split('\n'): + if first_line: + 
print(textwrap.fill(" {0:<15} ".format("Warnings:") + line, + width=TERM_WIDTH, subsequent_indent=' ' * 20)) + first_line = False + else: + print(textwrap.fill(line, + width=TERM_WIDTH, subsequent_indent=' ' * 20, + initial_indent=' ' * 20)) + + if "requires" in pd[patch_id] and len(pd[patch_id]["requires"]) > 0: + print(" Requires:") + for req_patch in sorted(pd[patch_id]["requires"]): + print(' ' * 20 + req_patch) + + if "contents" in data and patch_id in data["contents"]: + print(" Contents:") + for pkg in sorted(data["contents"][patch_id]): + print(' ' * 20 + pkg) + + print("\n") + + if 'info' in data and data["info"] != "": + print(data["info"]) + + if 'warning' in data and data["warning"] != "": + print("Warning:") + print(data["warning"]) + + if 'error' in data and data["error"] != "": + print("Error:") + print(data["error"]) + + elif req.status_code == 500: + print("An internal error has occurred. Please check /var/log/patching.log for details") + + +def patch_upload_req(debug, args): + rc = 0 + + if len(args) == 0: + print_help() + + # Ignore interrupts during this function + signal.signal(signal.SIGINT, signal.SIG_IGN) + + for patchfile in sorted(list(set(args))): + if os.path.isdir(patchfile): + print("Error: %s is a directory. Please use upload-dir" % patchfile) + continue + + if not os.path.isfile(patchfile): + print("Error: File does not exist: %s" % patchfile) + continue + + enc = MultipartEncoder(fields={'file': (patchfile, + open(patchfile, 'rb'), + )}) + url = "http://%s/patch/upload" % api_addr + headers = {'Content-Type': enc.content_type} + append_auth_token_if_required(headers) + req = requests.post(url, + data=enc, + headers=headers) + + if debug: + print_result_debug(req) + else: + print_patch_op_result(req) + + if check_rc(req) != 0: + rc = 1 + + return rc + + +def patch_apply_req(debug, args): + if len(args) == 0: + print_help() + + # Ignore interrupts during this function + signal.signal(signal.SIGINT, signal.SIG_IGN) + + extra_opts = [] + + if "--skip-semantic" in args: + idx = args.index("--skip-semantic") + + # Get rid of the --skip-semantic + args.pop(idx) + + # Append the extra opts + extra_opts.append("skip-semantic=yes") + + if len(extra_opts) == 0: + extra_opts_str = '' + else: + extra_opts_str = '?%s' % '&'.join(extra_opts) + + patches = "/".join(args) + url = "http://%s/patch/apply/%s%s" % (api_addr, patches, extra_opts_str) + + headers = {} + append_auth_token_if_required(headers) + req = requests.post(url, headers=headers) + + if debug: + print_result_debug(req) + else: + print_patch_op_result(req) + + return check_rc(req) + + +def patch_remove_req(debug, args): + if len(args) == 0: + print_help() + + # Ignore interrupts during this function + signal.signal(signal.SIGINT, signal.SIG_IGN) + + extra_opts = [] + + # The removeunremovable option is hidden and should not be added to help + # text or customer documentation. It is for emergency use only - under + # supervision of the design team. 
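For illustration, the hidden flag described in the comment above, like the other option flags parsed just below, is simply popped from the argument list and re-encoded as a query-string parameter on the REST call to the patching API. A minimal sketch of the resulting URL, assuming the module-level api_addr default shown earlier in this file and a purely hypothetical patch ID:

    api_addr = "127.0.0.1:5487"   # module default defined earlier in this file
    extra_opts = ["removeunremovable=yes", "skip-semantic=yes"]
    patches = "PATCH_0001"        # hypothetical patch ID
    url = "http://%s/patch/remove/%s?%s" % (api_addr, patches, "&".join(extra_opts))
    # url == "http://127.0.0.1:5487/patch/remove/PATCH_0001?removeunremovable=yes&skip-semantic=yes"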
+ if "--removeunremovable" in args: + idx = args.index("--removeunremovable") + + # Get rid of the --removeunremovable + args.pop(idx) + + # Append the extra opts + extra_opts.append('removeunremovable=yes') + + if "--skipappcheck" in args: + idx = args.index("--skipappcheck") + + # Get rid of the --skipappcheck + args.pop(idx) + + # Append the extra opts + extra_opts.append("skipappcheck=yes") + + if "--skip-semantic" in args: + idx = args.index("--skip-semantic") + + # Get rid of the --skip-semantic + args.pop(idx) + + # Append the extra opts + extra_opts.append("skip-semantic=yes") + + if len(extra_opts) == 0: + extra_opts_str = '' + else: + extra_opts_str = '?%s' % '&'.join(extra_opts) + + patches = "/".join(args) + url = "http://%s/patch/remove/%s%s" % (api_addr, patches, extra_opts_str) + + headers = {} + append_auth_token_if_required(headers) + req = requests.post(url, headers=headers) + + if debug: + print_result_debug(req) + else: + print_patch_op_result(req) + + return check_rc(req) + + +def patch_delete_req(debug, args): + if len(args) == 0: + print_help() + + # Ignore interrupts during this function + signal.signal(signal.SIGINT, signal.SIG_IGN) + + patches = "/".join(args) + + url = "http://%s/patch/delete/%s" % (api_addr, patches) + + headers = {} + append_auth_token_if_required(headers) + req = requests.post(url, headers=headers) + + if debug: + print_result_debug(req) + else: + print_patch_op_result(req) + + return check_rc(req) + + +def patch_commit_req(debug, args): + if len(args) == 0: + print_help() + + # Ignore interrupts during this function + signal.signal(signal.SIGINT, signal.SIG_IGN) + + dry_run = False + if constants.CLI_OPT_DRY_RUN in args: + dry_run = True + args.remove(constants.CLI_OPT_DRY_RUN) + + all_patches = False + if constants.CLI_OPT_ALL in args: + all_patches = True + args.remove(constants.CLI_OPT_ALL) + + # Default to running release + relopt = RUNNING_SW_VERSION + + release = False + if constants.CLI_OPT_RELEASE in args: + release = True + idx = args.index(constants.CLI_OPT_RELEASE) + # There must be at least one more arg + if len(args) < (idx + 1): + print_help() + + # Get rid of the --release + args.pop(idx) + # Pop off the release arg + relopt = args.pop(idx) + + headers = {} + append_auth_token_if_required(headers) + if release and not all_patches: + # Disallow + print("Use of --release option requires --all") + return 1 + elif all_patches: + # Get a list of all patches + extra_opts = "&release=%s" % relopt + url = "http://%s/patch/query?show=all%s" % (api_addr, extra_opts) + + req = requests.get(url, headers=headers) + + patch_list = [] + if req.status_code == 200: + data = json.loads(req.text) + + if 'pd' in data: + patch_list = sorted(list(data['pd'])) + elif req.status_code == 500: + print("Failed to get patch list. Aborting...") + return 1 + + if len(patch_list) == 0: + print("There are no %s patches to commit." 
% relopt) + return 0 + + print("The following patches will be committed:") + for patch_id in patch_list: + print(" %s" % patch_id) + print() + + patches = "/".join(patch_list) + else: + patches = "/".join(args) + + # First, get a list of dependencies and ask for confirmation + url = "http://%s/patch/query_dependencies/%s?recursive=yes" % (api_addr, patches) + + req = requests.get(url, headers=headers) + + if req.status_code == 200: + data = json.loads(req.text) + + if 'patches' in data: + print("The following patches will be committed:") + for patch_id in sorted(data['patches']): + print(" %s" % patch_id) + print() + else: + print("No patches found to commit") + return 1 + + elif req.status_code == 500: + print("An internal error has occurred. Please check /var/log/patching.log for details") + return 1 + + # Run dry-run + url = "http://%s/patch/commit_dry_run/%s" % (api_addr, patches) + + req = requests.post(url, headers=headers) + print_patch_op_result(req) + + if check_rc(req) != 0: + print("Aborting...") + return 1 + + if dry_run: + return 0 + + print() + commit_warning = "WARNING: Committing a patch is an irreversible operation. " + \ + "Committed patches cannot be removed." + print(textwrap.fill(commit_warning, width=TERM_WIDTH, subsequent_indent=' ' * 9)) + print() + + user_input = input("Would you like to continue? [y/N]: ") + if user_input.lower() != 'y': + print("Aborting...") + return 1 + + url = "http://%s/patch/commit/%s" % (api_addr, patches) + req = requests.post(url, headers=headers) + + if debug: + print_result_debug(req) + else: + print_patch_op_result(req) + + return check_rc(req) + + +def patch_query_req(debug, args): + state = "all" + extra_opts = "" + + if "--release" in args: + idx = args.index("--release") + # There must be at least one more arg + if len(args) < (idx + 1): + print_help() + + # Get rid of the --release + args.pop(idx) + # Pop off the release arg + relopt = args.pop(idx) + + # Format the query string + extra_opts = "&release=%s" % relopt + + if len(args) > 1: + # Support 1 additional arg at most, currently + print_help() + + if len(args) > 0: + state = args[0] + + url = "http://%s/patch/query?show=%s%s" % (api_addr, state, extra_opts) + + headers = {} + append_auth_token_if_required(headers) + req = requests.get(url, headers=headers) + + if debug: + print_result_debug(req) + else: + print_patch_op_result(req) + + return check_rc(req) + + +def print_query_hosts_result(req): + if req.status_code == 200: + data = json.loads(req.text) + if 'data' not in data: + print("Invalid data returned:") + print_result_debug(req) + return + + agents = data['data'] + + # Calculate column widths + hdr_hn = "Hostname" + hdr_ip = "IP Address" + hdr_pc = "Patch Current" + hdr_rr = "Reboot Required" + hdr_rel = "Release" + hdr_state = "State" + + width_hn = len(hdr_hn) + width_ip = len(hdr_ip) + width_pc = len(hdr_pc) + width_rr = len(hdr_rr) + width_rel = len(hdr_rel) + width_state = len(hdr_state) + + for agent in sorted(agents, key=lambda a: a["hostname"]): + if len(agent["hostname"]) > width_hn: + width_hn = len(agent["hostname"]) + if len(agent["ip"]) > width_ip: + width_ip = len(agent["ip"]) + if len(agent["sw_version"]) > width_rel: + width_rel = len(agent["sw_version"]) + if len(agent["state"]) > width_state: + width_state = len(agent["state"]) + + print("{0:^{width_hn}} {1:^{width_ip}} {2:^{width_pc}} {3:^{width_rr}} {4:^{width_rel}} {5:^{width_state}}".format( + hdr_hn, hdr_ip, hdr_pc, hdr_rr, hdr_rel, hdr_state, + width_hn=width_hn, width_ip=width_ip, 
width_pc=width_pc, width_rr=width_rr, width_rel=width_rel, width_state=width_state)) + + print("{0} {1} {2} {3} {4} {5}".format( + '=' * width_hn, '=' * width_ip, '=' * width_pc, '=' * width_rr, '=' * width_rel, '=' * width_state)) + + for agent in sorted(agents, key=lambda a: a["hostname"]): + patch_current_field = "Yes" if agent["patch_current"] else "No" + if agent.get("interim_state") is True: + patch_current_field = "Pending" + + if agent["patch_failed"]: + patch_current_field = "Failed" + + print("{0:<{width_hn}} {1:<{width_ip}} {2:^{width_pc}} {3:^{width_rr}} {4:^{width_rel}} {5:^{width_state}}".format( + agent["hostname"], + agent["ip"], + patch_current_field, + "Yes" if agent["requires_reboot"] else "No", + agent["sw_version"], + agent["state"], + width_hn=width_hn, width_ip=width_ip, width_pc=width_pc, width_rr=width_rr, width_rel=width_rel, width_state=width_state)) + + elif req.status_code == 500: + print("An internal error has occurred. Please check /var/log/patching.log for details") + + +def patch_query_hosts_req(debug, args): + if len(args) > 0: + # Support 0 arg at most, currently + print_help() + + url = "http://%s/patch/query_hosts" % api_addr + + req = requests.get(url) + + if debug: + print_result_debug(req) + else: + print_query_hosts_result(req) + + return check_rc(req) + + +def patch_show_req(debug, args): + if len(args) == 0: + print_help() + + patches = "/".join(args) + + url = "http://%s/patch/show/%s" % (api_addr, patches) + + headers = {} + append_auth_token_if_required(headers) + req = requests.post(url, headers=headers) + + if debug: + print_result_debug(req) + else: + print_patch_show_result(req) + + return check_rc(req) + + +def what_requires(debug, args): + if len(args) == 0: + print_help() + + patches = "/".join(args) + + url = "http://%s/patch/what_requires/%s" % (api_addr, patches) + + headers = {} + append_auth_token_if_required(headers) + req = requests.get(url, headers=headers) + + if debug: + print_result_debug(req) + else: + print_patch_op_result(req) + + return check_rc(req) + + +def query_dependencies(debug, args): + if len(args) == 0: + print_help() + + extra_opts = "" + if constants.CLI_OPT_RECURSIVE in args: + args.remove(constants.CLI_OPT_RECURSIVE) + extra_opts = "?recursive=yes" + + patches = "/".join(args) + + url = "http://%s/patch/query_dependencies/%s%s" % (api_addr, patches, extra_opts) + + headers = {} + append_auth_token_if_required(headers) + req = requests.get(url, headers=headers) + + if debug: + print_result_debug(req) + else: + if req.status_code == 200: + data = json.loads(req.text) + + if 'patches' in data: + for patch_id in sorted(data['patches']): + print(patch_id) + + elif req.status_code == 500: + print("An internal error has occurred. Please check /var/log/patching.log for details") + + return check_rc(req) + + +def wait_for_install_complete(agent_ip): + url = "http://%s/patch/query_hosts" % api_addr + rc = 0 + + max_retries = 4 + retriable_count = 0 + + while True: + # Sleep on the first pass as well, to allow time for the + # agent to respond + time.sleep(5) + + try: + req = requests.get(url) + except requests.exceptions.ConnectionError: + # The local patch-controller may have restarted. 
+ retriable_count += 1 + if retriable_count <= max_retries: + continue + else: + print("Lost communications with the patch controller") + rc = 1 + break + + if req.status_code == 200: + data = json.loads(req.text) + if 'data' not in data: + print("Invalid query-hosts data returned:") + print_result_debug(req) + rc = 1 + break + + state = None + agents = data['data'] + interim_state = None + + for agent in agents: + if agent['hostname'] == agent_ip \ + or agent['ip'] == agent_ip: + state = agent.get('state') + interim_state = agent.get('interim_state') + + if state is None: + # If the patching daemons have restarted, there's a + # window after the patch-controller restart that the + # hosts table will be empty. + retriable_count += 1 + if retriable_count <= max_retries: + continue + else: + print("%s agent has timed out." % agent_ip) + rc = 1 + break + + if state == constants.PATCH_AGENT_STATE_INSTALLING or \ + interim_state is True: + # Still installing + sys.stdout.write(".") + sys.stdout.flush() + elif state == constants.PATCH_AGENT_STATE_INSTALL_REJECTED: + print("\nInstallation rejected. Node must be locked") + rc = 1 + break + elif state == constants.PATCH_AGENT_STATE_INSTALL_FAILED: + print("\nInstallation failed. Please check logs for details.") + rc = 1 + break + elif state == constants.PATCH_AGENT_STATE_IDLE: + print("\nInstallation was successful.") + rc = 0 + break + else: + print("\nPatch agent is reporting unknown state: %s" % state) + rc = 1 + break + + elif req.status_code == 500: + print("An internal error has occurred. Please check /var/log/patching.log for details") + rc = 1 + break + else: + m = re.search("(Error message:.*)", req.text, re.MULTILINE) + print(m.group(0)) + rc = 1 + break + + return rc + + +def host_install(debug, args): # pylint: disable=unused-argument + force = False + rc = 0 + + if "--force" in args: + force = True + args.remove("--force") + + if len(args) != 1: + print_help() + + agent_ip = args[0] + + # Issue host_install_async request and poll for results + url = "http://%s/patch/host_install_async/%s" % (api_addr, agent_ip) + + if force: + url += "/force" + + req = requests.post(url) + + if req.status_code == 200: + data = json.loads(req.text) + if 'error' in data and data["error"] != "": + print("Error:") + print(data["error"]) + rc = 1 + else: + rc = wait_for_install_complete(agent_ip) + elif req.status_code == 500: + print("An internal error has occurred. 
Please check /var/log/patching.log for details") + rc = 1 + else: + m = re.search("(Error message:.*)", req.text, re.MULTILINE) + print(m.group(0)) + rc = 1 + + return rc + + +def host_install_async(debug, args): + force = False + + if "--force" in args: + force = True + args.remove("--force") + + if len(args) != 1: + print_help() + + agent_ip = args[0] + + url = "http://%s/patch/host_install_async/%s" % (api_addr, agent_ip) + + if force: + url += "/force" + + req = requests.post(url) + + if debug: + print_result_debug(req) + else: + print_patch_op_result(req) + + return check_rc(req) + + +def drop_host(debug, args): + if len(args) != 1: + print_help() + + host_ip = args[0] + + url = "http://%s/patch/drop_host/%s" % (api_addr, host_ip) + + req = requests.post(url) + + if debug: + print_result_debug(req) + else: + print_patch_op_result(req) + + return check_rc(req) + + +def patch_upload_dir_req(debug, args): + if len(args) == 0: + print_help() + + # Ignore interrupts during this function + signal.signal(signal.SIGINT, signal.SIG_IGN) + + dirlist = {} + i = 0 + for d in sorted(list(set(args))): + dirlist["dir%d" % i] = os.path.abspath(d) + i += 1 + + url = "http://%s/patch/upload_dir" % api_addr + + headers = {} + append_auth_token_if_required(headers) + req = requests.post(url, params=dirlist, headers=headers) + + if debug: + print_result_debug(req) + else: + print_patch_op_result(req) + + return check_rc(req) + + +def patch_install_local(debug, args): # pylint: disable=unused-argument + """ This function is used to trigger patch installation prior to configuration """ + # Check to see if initial configuration has completed + if os.path.isfile(INITIAL_CONTROLLER_CONFIG_COMPLETE): + # Disallow the install + print("Error: This function can only be used before initial system configuration.", file=sys.stderr) + return 1 + + update_hosts_file = False + + # Check to see if the controller hostname is already known. + if not utils.gethostbyname(constants.CONTROLLER_FLOATING_HOSTNAME): + update_hosts_file = True + + # Ignore interrupts during this function + signal.signal(signal.SIGINT, signal.SIG_IGN) + + # To allow patch installation to occur before configuration, we need + # to alias controller to localhost so that the dnf repos work. + # There is a HOSTALIASES feature that would be preferred here, but it + # unfortunately requires dnsmasq to be running, which it is not at this point. + + rc = 0 + + if update_hosts_file: + # Make a backup of /etc/hosts + shutil.copy2('/etc/hosts', '/etc/hosts.patchbak') + + # Update /etc/hosts + with open('/etc/hosts', 'a') as f: + f.write("127.0.0.1 controller\n") + + # Run the patch install + try: + # Use the restart option of the sw-patch init script, which will + # install patches but won't automatically reboot if the RR flag is set + subprocess.check_output(['/etc/init.d/sw-patch', 'restart']) + except subprocess.CalledProcessError: + print("Error: Failed to install patches. 
Please check /var/log/patching.log for details", file=sys.stderr) + rc = 1 + + if update_hosts_file: + # Restore /etc/hosts + os.rename('/etc/hosts.patchbak', '/etc/hosts') + + if rc == 0: + print("Patch installation is complete.") + print("Please reboot before continuing with configuration.") + + return rc + + +def patch_init_release(debug, args): + if len(args) != 1: + print_help() + + # Ignore interrupts during this function + signal.signal(signal.SIGINT, signal.SIG_IGN) + + release = args[0] + + url = "http://%s/patch/init_release/%s" % (api_addr, release) + + req = requests.post(url) + + if debug: + print_result_debug(req) + else: + print_patch_op_result(req) + + return check_rc(req) + + +def patch_del_release(debug, args): + if len(args) != 1: + print_help() + + # Ignore interrupts during this function + signal.signal(signal.SIGINT, signal.SIG_IGN) + + release = args[0] + + url = "http://%s/patch/del_release/%s" % (api_addr, release) + + req = requests.post(url) + + if debug: + print_result_debug(req) + else: + print_patch_op_result(req) + + return check_rc(req) + + +def patch_is_applied_req(args): + if len(args) == 0: + print_help() + + patches = "/".join(args) + url = "http://%s/patch/is_applied/%s" % (api_addr, patches) + + headers = {} + append_auth_token_if_required(headers) + req = requests.post(url, headers=headers) + + rc = 1 + + if req.status_code == 200: + result = json.loads(req.text) + print(result) + if result is True: + rc = 0 + elif req.status_code == 500: + print("An internal error has occurred. Please check /var/log/patching.log for details") + + return rc + + +def patch_is_available_req(args): + if len(args) == 0: + print_help() + + patches = "/".join(args) + url = "http://%s/patch/is_available/%s" % (api_addr, patches) + + headers = {} + append_auth_token_if_required(headers) + req = requests.post(url, headers=headers) + + rc = 1 + + if req.status_code == 200: + result = json.loads(req.text) + print(result) + if result is True: + rc = 0 + elif req.status_code == 500: + print("An internal error has occurred. 
Please check /var/log/patching.log for details") + + return rc + + +def patch_report_app_dependencies_req(debug, args): # pylint: disable=unused-argument + if len(args) < 2: + print_help() + + extra_opts = [] + + if "--app" in args: + idx = args.index("--app") + + # Get rid of the --app and get the app name + args.pop(idx) + app = args.pop(idx) + + # Append the extra opts + extra_opts.append("app=%s" % app) + else: + print("Application name must be specified with --app argument.") + return 1 + + extra_opts_str = '?%s' % '&'.join(extra_opts) + + patches = "/".join(args) + url = "http://%s/patch/report_app_dependencies/%s%s" % (api_addr, patches, extra_opts_str) + + headers = {} + append_auth_token_if_required(headers) + req = requests.post(url, headers=headers) + + if req.status_code == 200: + return 0 + else: + return 1 + + +def patch_query_app_dependencies_req(): + url = "http://%s/patch/query_app_dependencies" % api_addr + + headers = {} + append_auth_token_if_required(headers) + req = requests.post(url, headers=headers) + + if req.status_code == 200: + data = json.loads(req.text) + if len(data) == 0: + print("There are no application dependencies.") + else: + hdr_app = "Application" + hdr_list = "Required Patches" + width_app = len(hdr_app) + width_list = len(hdr_list) + + for app, patch_list in data.items(): + width_app = max(width_app, len(app)) + width_list = max(width_list, len(', '.join(patch_list))) + + print("{0:<{width_app}} {1:<{width_list}}".format( + hdr_app, hdr_list, + width_app=width_app, width_list=width_list)) + + print("{0} {1}".format( + '=' * width_app, '=' * width_list)) + + for app, patch_list in sorted(data.items()): + print("{0:<{width_app}} {1:<{width_list}}".format( + app, ', '.join(patch_list), + width_app=width_app, width_list=width_list)) + + return 0 + else: + print("An internal error has occurred. 
Please check /var/log/patching.log for details") + return 1 + + +def completion_opts(args): + if len(args) != 1: + return 1 + + if args[0] == "patches": + url = "http://%s/patch/query" % api_addr + req = requests.get(url) + # Just list patch IDs + if req.status_code == 200: + data = json.loads(req.text) + + if 'pd' in data: + print(" ".join(list(data['pd']))) + return 0 + + elif args[0] == "hosts": + url = "http://%s/patch/query_hosts" % api_addr + req = requests.get(url) + + # Just list hostnames + if req.status_code == 200: + data = json.loads(req.text) + + if 'data' in data: + for agent in data['data']: + print(agent["hostname"]) + return 0 + + return 1 + + +def check_env(env, var): + if env not in os.environ: + print("You must provide a %s via env[%s]" % (var, env)) + exit(-1) + + +def get_auth_token_and_endpoint(region_name): + from keystoneauth1 import identity + from keystoneauth1 import session + from keystoneauth1 import exceptions + + user_env_map = {'OS_USERNAME': 'username', + 'OS_PASSWORD': 'password', + 'OS_PROJECT_NAME': 'project_name', + 'OS_AUTH_URL': 'auth_url', + 'OS_USER_DOMAIN_NAME': 'user_domain_name', + 'OS_PROJECT_DOMAIN_NAME': 'project_domain_name'} + + for k, v in user_env_map.items(): + check_env(k, v) + + user = dict() + for k, v in user_env_map.items(): + user[v] = os.environ.get(k) + + auth = identity.V3Password(**user) + sess = session.Session(auth=auth) + try: + token = auth.get_token(sess) + endpoint = auth.get_endpoint(sess, service_type='patching', + interface='internal', + region_name=region_name) + except (exceptions.http.Unauthorized, exceptions.EndpointNotFound) as e: + print(str(e)) + exit(-1) + + return token, endpoint + + +def append_auth_token_if_required(headers): + global auth_token + if auth_token is not None: + headers['X-Auth-Token'] = auth_token + + +def format_url_address(address): + import netaddr + try: + ip_addr = netaddr.IPAddress(address) + if ip_addr.version == IPV6_FAMILY: + return "[%s]" % address + else: + return address + except netaddr.AddrFormatError: + return address + + +def check_for_os_region_name(): + region_option = "--os-region-name" + if region_option not in sys.argv: + return False + + for c, value in enumerate(sys.argv, 1): + if value == region_option: + if c == len(sys.argv): + print("Please specify a region name") + print_help() + + region = sys.argv[c] + global VIRTUAL_REGION + if region != VIRTUAL_REGION: + print("Unsupported region name: %s" % region) + exit(1) + + # check it is running on the active controller + # not able to use sm-query due to it requires sudo + try: + subprocess.check_output("pgrep -f dcorch-api-proxy", shell=True) + except subprocess.CalledProcessError: + print("Command must be run from the active controller.") + exit(1) + + # get a token and fetch the internal endpoint in SystemController + global auth_token + auth_token, endpoint = get_auth_token_and_endpoint(region) + if endpoint is not None: + global api_addr + try: + # python 2 + from urlparse import urlparse + except ImportError: + # python 3 + from urllib.parse import urlparse + url = urlparse(endpoint) + address = format_url_address(url.hostname) + api_addr = '{}:{}'.format(address, url.port) + + sys.argv.remove("--os-region-name") + sys.argv.remove(region) + return True + + +def main(): + set_term_width() + + if len(sys.argv) <= 1: + print_help() + + debug = False + if "--debug" in sys.argv: + debug = True + sys.argv.remove("--debug") + + dc_request = check_for_os_region_name() + + rc = 0 + + action = sys.argv[1] + + # Reject the 
commands that are not supported in the virtual region + if (dc_request and action in ["query-hosts", "host-install", + "host-install-async", + "install-local", "drop-host"]): + global VIRTUAL_REGION + print("\n%s command is not allowed in %s region" % (action, + VIRTUAL_REGION)) + exit(1) + + if auth_token is None and os.geteuid() != 0: + # Restrict non-root/sudo users to these commands + if action == "query": + rc = patch_query_req(debug, sys.argv[2:]) + elif action == "query-hosts": + rc = patch_query_hosts_req(debug, sys.argv[2:]) + elif action == "what-requires": + rc = what_requires(debug, sys.argv[2:]) + elif action == "completion": + rc = completion_opts(sys.argv[2:]) + elif action == "--help" or action == "-h": + print_help() + else: + print("Error: Command must be run as sudo or root", file=sys.stderr) + rc = 1 + else: + if action == "upload": + rc = patch_upload_req(debug, sys.argv[2:]) + elif action == "apply": + rc = patch_apply_req(debug, sys.argv[2:]) + elif action == "remove": + rc = patch_remove_req(debug, sys.argv[2:]) + elif action == "delete": + rc = patch_delete_req(debug, sys.argv[2:]) + elif action == "commit": + rc = patch_commit_req(debug, sys.argv[2:]) + elif action == "query": + rc = patch_query_req(debug, sys.argv[2:]) + elif action == "query-hosts": + rc = patch_query_hosts_req(debug, sys.argv[2:]) + elif action == "show": + rc = patch_show_req(debug, sys.argv[2:]) + elif action == "what-requires": + what_requires(debug, sys.argv[2:]) + elif action == "query-dependencies": + query_dependencies(debug, sys.argv[2:]) + elif action == "host-install": + rc = host_install(debug, sys.argv[2:]) + elif action == "host-install-async": + rc = host_install_async(debug, sys.argv[2:]) + elif action == "drop-host": + rc = drop_host(debug, sys.argv[2:]) + elif action == "upload-dir": + rc = patch_upload_dir_req(debug, sys.argv[2:]) + elif action == "install-local": + rc = patch_install_local(debug, sys.argv[2:]) + elif action == "init-release": + rc = patch_init_release(debug, sys.argv[2:]) + elif action == "del-release": + rc = patch_del_release(debug, sys.argv[2:]) + elif action == "is-applied": + rc = patch_is_applied_req(sys.argv[2:]) + elif action == "is-available": + rc = patch_is_available_req(sys.argv[2:]) + elif action == "report-app-dependencies": + rc = patch_report_app_dependencies_req(debug, sys.argv[2:]) + elif action == "query-app-dependencies": + rc = patch_query_app_dependencies_req() + elif action == "completion": + rc = completion_opts(sys.argv[2:]) + else: + print_help() + + exit(rc) diff --git a/sw-patch/cgcs-patch/cgcs_patch/patch_controller.py b/sw-patch/cgcs-patch/cgcs_patch/patch_controller.py new file mode 100644 index 00000000..e2153329 --- /dev/null +++ b/sw-patch/cgcs-patch/cgcs_patch/patch_controller.py @@ -0,0 +1,2713 @@ +""" +Copyright (c) 2014-2019 Wind River Systems, Inc. 
+ +SPDX-License-Identifier: Apache-2.0 + +""" +import shutil +import tempfile +import threading +import time +import socket +import json +import select +import subprocess +import six +from six.moves import configparser +import rpm +import os +import gc + +from cgcs_patch.patch_functions import parse_pkgver + +from wsgiref import simple_server +from cgcs_patch.api import app +from cgcs_patch.authapi import app as auth_app +from cgcs_patch.patch_functions import configure_logging +from cgcs_patch.patch_functions import BasePackageData +from cgcs_patch.patch_functions import avail_dir +from cgcs_patch.patch_functions import applied_dir +from cgcs_patch.patch_functions import committed_dir +from cgcs_patch.patch_functions import PatchFile +from cgcs_patch.patch_functions import parse_rpm_filename +from cgcs_patch.patch_functions import package_dir +from cgcs_patch.patch_functions import repo_dir +from cgcs_patch.patch_functions import semantics_dir +from cgcs_patch.patch_functions import SW_VERSION +from cgcs_patch.patch_functions import root_package_dir +from cgcs_patch.exceptions import MetadataFail +from cgcs_patch.exceptions import RpmFail +from cgcs_patch.exceptions import SemanticFail +from cgcs_patch.exceptions import PatchError +from cgcs_patch.exceptions import PatchFail +from cgcs_patch.exceptions import PatchInvalidRequest +from cgcs_patch.exceptions import PatchValidationFailure +from cgcs_patch.exceptions import PatchMismatchFailure +from cgcs_patch.patch_functions import LOG +from cgcs_patch.patch_functions import audit_log_info +from cgcs_patch.patch_functions import patch_dir +from cgcs_patch.patch_functions import repo_root_dir +from cgcs_patch.patch_functions import PatchData +from cgcs_patch.base import PatchService + +import cgcs_patch.config as cfg +import cgcs_patch.utils as utils +# noinspection PyUnresolvedReferences +from oslo_config import cfg as oslo_cfg + +import cgcs_patch.messages as messages +import cgcs_patch.constants as constants + +from tsconfig.tsconfig import INITIAL_CONFIG_COMPLETE_FLAG + +CONF = oslo_cfg.CONF + +pidfile_path = "/var/run/patch_controller.pid" + +pc = None +state_file = "%s/.controller.state" % constants.PATCH_STORAGE_DIR +app_dependency_basename = "app_dependencies.json" +app_dependency_filename = "%s/%s" % (constants.PATCH_STORAGE_DIR, app_dependency_basename) + +insvc_patch_restart_controller = "/run/patching/.restart.patch-controller" + +stale_hosts = [] +pending_queries = [] + +thread_death = None +keep_running = True + +# Limit socket blocking to 5 seconds to allow for thread to shutdown +api_socket_timeout = 5.0 + + +class ControllerNeighbour(object): + def __init__(self): + self.last_ack = 0 + self.synced = False + + def rx_ack(self): + self.last_ack = time.time() + + def get_age(self): + return int(time.time() - self.last_ack) + + def rx_synced(self): + self.synced = True + + def clear_synced(self): + self.synced = False + + def get_synced(self): + return self.synced + + +class AgentNeighbour(object): + def __init__(self, ip): + self.ip = ip + self.last_ack = 0 + self.last_query_id = 0 + self.out_of_date = False + self.hostname = "n/a" + self.requires_reboot = False + self.patch_failed = False + self.stale = False + self.pending_query = False + self.installed = {} + self.to_remove = [] + self.missing_pkgs = [] + self.duplicated_pkgs = {} + self.nodetype = None + self.sw_version = "unknown" + self.subfunctions = [] + self.state = None + + def rx_ack(self, + hostname, + out_of_date, + requires_reboot, + query_id, + patch_failed, + 
sw_version, + state): + self.last_ack = time.time() + self.hostname = hostname + self.patch_failed = patch_failed + self.sw_version = sw_version + self.state = state + + if out_of_date != self.out_of_date or requires_reboot != self.requires_reboot: + self.out_of_date = out_of_date + self.requires_reboot = requires_reboot + LOG.info("Agent %s (%s) reporting out_of_date=%s, requires_reboot=%s", + self.hostname, + self.ip, + self.out_of_date, + self.requires_reboot) + + if self.last_query_id != query_id: + self.last_query_id = query_id + self.stale = True + if self.ip not in stale_hosts and self.ip not in pending_queries: + stale_hosts.append(self.ip) + + def get_age(self): + return int(time.time() - self.last_ack) + + def handle_query_detailed_resp(self, + installed, + to_remove, + missing_pkgs, + duplicated_pkgs, + nodetype, + sw_version, + subfunctions, + state): + self.installed = installed + self.to_remove = to_remove + self.missing_pkgs = missing_pkgs + self.duplicated_pkgs = duplicated_pkgs + self.nodetype = nodetype + self.stale = False + self.pending_query = False + self.sw_version = sw_version + self.subfunctions = subfunctions + self.state = state + + if self.ip in pending_queries: + pending_queries.remove(self.ip) + + if self.ip in stale_hosts: + stale_hosts.remove(self.ip) + + def get_dict(self): + d = {"ip": self.ip, + "hostname": self.hostname, + "patch_current": not self.out_of_date, + "secs_since_ack": self.get_age(), + "patch_failed": self.patch_failed, + "stale_details": self.stale, + "installed": self.installed, + "to_remove": self.to_remove, + "missing_pkgs": self.missing_pkgs, + "duplicated_pkgs": self.duplicated_pkgs, + "nodetype": self.nodetype, + "subfunctions": self.subfunctions, + "sw_version": self.sw_version, + "state": self.state} + + global pc + if self.out_of_date and not pc.allow_insvc_patching: + d["requires_reboot"] = True + else: + d["requires_reboot"] = self.requires_reboot + + # Included for future enhancement, to allow per-node determination + # of in-service patching + d["allow_insvc_patching"] = pc.allow_insvc_patching + + return d + + +class PatchMessageHello(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_HELLO) + self.patch_op_counter = 0 + + def decode(self, data): + messages.PatchMessage.decode(self, data) + if 'patch_op_counter' in data: + self.patch_op_counter = data['patch_op_counter'] + + def encode(self): + global pc + messages.PatchMessage.encode(self) + self.message['patch_op_counter'] = pc.patch_op_counter + + def handle(self, sock, addr): + global pc + host = addr[0] + if host == cfg.get_mgmt_ip(): + # Ignore messages from self + return + + # Send response + if self.patch_op_counter > 0: + pc.handle_nbr_patch_op_counter(host, self.patch_op_counter) + + resp = PatchMessageHelloAck() + resp.send(sock) + + def send(self, sock): + global pc + self.encode() + message = json.dumps(self.message) + sock.sendto(str.encode(message), (pc.controller_address, cfg.controller_port)) + + +class PatchMessageHelloAck(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_HELLO_ACK) + + def encode(self): + # Nothing to add, so just call the super class + messages.PatchMessage.encode(self) + + def handle(self, sock, addr): + global pc + + pc.controller_neighbours_lock.acquire() + if not addr[0] in pc.controller_neighbours: + pc.controller_neighbours[addr[0]] = ControllerNeighbour() + + pc.controller_neighbours[addr[0]].rx_ack() + 
pc.controller_neighbours_lock.release() + + def send(self, sock): + global pc + self.encode() + message = json.dumps(self.message) + sock.sendto(str.encode(message), (pc.controller_address, cfg.controller_port)) + + +class PatchMessageSyncReq(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_SYNC_REQ) + + def encode(self): + # Nothing to add to the SYNC_REQ, so just call the super class + messages.PatchMessage.encode(self) + + def handle(self, sock, addr): + global pc + host = addr[0] + if host == cfg.get_mgmt_ip(): + # Ignore messages from self + return + + # We may need to do this in a separate thread, so that we continue to process hellos + LOG.info("Handling sync req") + + pc.sync_from_nbr(host) + + resp = PatchMessageSyncComplete() + resp.send(sock) + + def send(self, sock): + global pc + LOG.info("sending sync req") + self.encode() + message = json.dumps(self.message) + sock.sendto(str.encode(message), (pc.controller_address, cfg.controller_port)) + + +class PatchMessageSyncComplete(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_SYNC_COMPLETE) + + def encode(self): + # Nothing to add to the SYNC_COMPLETE, so just call the super class + messages.PatchMessage.encode(self) + + def handle(self, sock, addr): + global pc + LOG.info("Handling sync complete") + + pc.controller_neighbours_lock.acquire() + if not addr[0] in pc.controller_neighbours: + pc.controller_neighbours[addr[0]] = ControllerNeighbour() + + pc.controller_neighbours[addr[0]].rx_synced() + pc.controller_neighbours_lock.release() + + def send(self, sock): + global pc + LOG.info("sending sync complete") + self.encode() + message = json.dumps(self.message) + sock.sendto(str.encode(message), (pc.controller_address, cfg.controller_port)) + + +class PatchMessageHelloAgent(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_HELLO_AGENT) + + def encode(self): + global pc + messages.PatchMessage.encode(self) + self.message['patch_op_counter'] = pc.patch_op_counter + + def handle(self, sock, addr): + LOG.error("Should not get here") + + def send(self, sock): + global pc + self.encode() + message = json.dumps(self.message) + local_hostname = utils.ip_to_versioned_localhost(cfg.agent_mcast_group) + sock.sendto(str.encode(message), (pc.agent_address, cfg.agent_port)) + sock.sendto(str.encode(message), (local_hostname, cfg.agent_port)) + + +class PatchMessageHelloAgentAck(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_HELLO_AGENT_ACK) + self.query_id = 0 + self.agent_out_of_date = False + self.agent_hostname = "n/a" + self.agent_requires_reboot = False + self.agent_patch_failed = False + self.agent_sw_version = "unknown" + self.agent_state = "unknown" + + def decode(self, data): + messages.PatchMessage.decode(self, data) + if 'query_id' in data: + self.query_id = data['query_id'] + if 'out_of_date' in data: + self.agent_out_of_date = data['out_of_date'] + if 'hostname' in data: + self.agent_hostname = data['hostname'] + if 'requires_reboot' in data: + self.agent_requires_reboot = data['requires_reboot'] + if 'patch_failed' in data: + self.agent_patch_failed = data['patch_failed'] + if 'sw_version' in data: + self.agent_sw_version = data['sw_version'] + if 'state' in data: + self.agent_state = data['state'] + + def encode(self): + # Nothing to add, so just call the super class + messages.PatchMessage.encode(self) + + def 
handle(self, sock, addr): + global pc + + pc.hosts_lock.acquire() + if not addr[0] in pc.hosts: + pc.hosts[addr[0]] = AgentNeighbour(addr[0]) + + pc.hosts[addr[0]].rx_ack(self.agent_hostname, + self.agent_out_of_date, + self.agent_requires_reboot, + self.query_id, + self.agent_patch_failed, + self.agent_sw_version, + self.agent_state) + pc.hosts_lock.release() + + def send(self, sock): # pylint: disable=unused-argument + LOG.error("Should not get here") + + +class PatchMessageQueryDetailed(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_QUERY_DETAILED) + + def encode(self): + # Nothing to add to the message, so just call the super class + messages.PatchMessage.encode(self) + + def handle(self, sock, addr): + LOG.error("Should not get here") + + def send(self, sock): + self.encode() + message = json.dumps(self.message) + sock.sendall(str.encode(message)) + + +class PatchMessageQueryDetailedResp(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_QUERY_DETAILED_RESP) + self.agent_sw_version = "unknown" + self.installed = {} + self.to_install = {} + self.to_remove = [] + self.missing_pkgs = [] + self.duplicated_pkgs = {} + self.subfunctions = [] + self.nodetype = "unknown" + self.agent_sw_version = "unknown" + self.agent_state = "unknown" + + def decode(self, data): + messages.PatchMessage.decode(self, data) + if 'installed' in data: + self.installed = data['installed'] + if 'to_remove' in data: + self.to_remove = data['to_remove'] + if 'missing_pkgs' in data: + self.missing_pkgs = data['missing_pkgs'] + if 'duplicated_pkgs' in data: + self.duplicated_pkgs = data['duplicated_pkgs'] + if 'nodetype' in data: + self.nodetype = data['nodetype'] + if 'sw_version' in data: + self.agent_sw_version = data['sw_version'] + if 'subfunctions' in data: + self.subfunctions = data['subfunctions'] + if 'state' in data: + self.agent_state = data['state'] + + def encode(self): + LOG.error("Should not get here") + + def handle(self, sock, addr): + global pc + + ip = addr[0] + pc.hosts_lock.acquire() + if ip in pc.hosts: + pc.hosts[ip].handle_query_detailed_resp(self.installed, + self.to_remove, + self.missing_pkgs, + self.duplicated_pkgs, + self.nodetype, + self.agent_sw_version, + self.subfunctions, + self.agent_state) + for patch_id in list(pc.interim_state): + if ip in pc.interim_state[patch_id]: + pc.interim_state[patch_id].remove(ip) + if len(pc.interim_state[patch_id]) == 0: + del pc.interim_state[patch_id] + pc.hosts_lock.release() + pc.check_patch_states() + else: + pc.hosts_lock.release() + + def send(self, sock): # pylint: disable=unused-argument + LOG.error("Should not get here") + + +class PatchMessageAgentInstallReq(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_AGENT_INSTALL_REQ) + self.ip = None + self.force = False + + def encode(self): + global pc + messages.PatchMessage.encode(self) + self.message['force'] = self.force + + def handle(self, sock, addr): + LOG.error("Should not get here") + + def send(self, sock): + LOG.info("sending install request to node: %s", self.ip) + self.encode() + message = json.dumps(self.message) + sock.sendto(str.encode(message), (self.ip, cfg.agent_port)) + + +class PatchMessageAgentInstallResp(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_AGENT_INSTALL_RESP) + self.status = False + self.reject_reason = None + + def decode(self, data): + 
messages.PatchMessage.decode(self, data) + if 'status' in data: + self.status = data['status'] + if 'reject_reason' in data: + self.reject_reason = data['reject_reason'] + + def encode(self): + # Nothing to add, so just call the super class + messages.PatchMessage.encode(self) + + def handle(self, sock, addr): + LOG.info("Handling install resp from %s", addr[0]) + global pc + # LOG.info("Handling hello ack") + + pc.hosts_lock.acquire() + if not addr[0] in pc.hosts: + pc.hosts[addr[0]] = AgentNeighbour(addr[0]) + + pc.hosts[addr[0]].install_status = self.status + pc.hosts[addr[0]].install_pending = False + pc.hosts[addr[0]].install_reject_reason = self.reject_reason + pc.hosts_lock.release() + + def send(self, sock): # pylint: disable=unused-argument + LOG.error("Should not get here") + + +class PatchMessageDropHostReq(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_DROP_HOST_REQ) + self.ip = None + + def encode(self): + messages.PatchMessage.encode(self) + self.message['ip'] = self.ip + + def decode(self, data): + messages.PatchMessage.decode(self, data) + if 'ip' in data: + self.ip = data['ip'] + + def handle(self, sock, addr): + global pc + host = addr[0] + if host == cfg.get_mgmt_ip(): + # Ignore messages from self + return + + if self.ip is None: + LOG.error("Received PATCHMSG_DROP_HOST_REQ with no ip: %s", json.dumps(self.data)) + return + + pc.drop_host(self.ip, sync_nbr=False) + return + + def send(self, sock): + global pc + self.encode() + message = json.dumps(self.message) + sock.sendto(str.encode(message), (pc.controller_address, cfg.controller_port)) + + +class PatchController(PatchService): + def __init__(self): + PatchService.__init__(self) + + # Locks + self.socket_lock = threading.RLock() + self.controller_neighbours_lock = threading.RLock() + self.hosts_lock = threading.RLock() + self.patch_data_lock = threading.RLock() + + self.hosts = {} + self.controller_neighbours = {} + + # interim_state is used to track hosts that have not responded + # with fresh queries since a patch was applied or removed, on + # a per-patch basis. This allows the patch controller to move + # patches immediately into a "Partial" state until all nodes + # have responded. 
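# (Editorial sketch, not part of this change: the interim_state map described
#  above is assumed to look like
#      {"PATCH_0001": ["192.168.204.3", "192.168.204.4"]}
#  with hypothetical patch IDs and host IPs; a host drops out of every entry
#  once it answers a detailed query, roughly as follows.)
def clear_interim_host(interim_state, ip):
    # Remove this host from each patch still waiting on it; delete the patch
    # entry entirely once no hosts remain outstanding.
    for patch_id in list(interim_state):
        if ip in interim_state[patch_id]:
            interim_state[patch_id].remove(ip)
            if not interim_state[patch_id]:
                del interim_state[patch_id]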
+ # + self.interim_state = {} + + self.sock_out = None + self.sock_in = None + self.controller_address = None + self.agent_address = None + self.patch_op_counter = 1 + self.patch_data = PatchData() + self.patch_data.load_all() + self.check_patch_states() + self.base_pkgdata = BasePackageData() + + self.allow_insvc_patching = True + + if os.path.exists(app_dependency_filename): + try: + with open(app_dependency_filename, 'r') as f: + self.app_dependencies = json.loads(f.read()) + except Exception: + LOG.exception("Failed to read app dependencies: %s", app_dependency_filename) + else: + self.app_dependencies = {} + + if os.path.isfile(state_file): + self.read_state_file() + else: + self.write_state_file() + + def update_config(self): + cfg.read_config() + + if self.port != cfg.controller_port: + self.port = cfg.controller_port + + # Loopback interface does not support multicast messaging, therefore + # revert to using unicast messaging when configured against the + # loopback device + if cfg.get_mgmt_iface() == constants.LOOPBACK_INTERFACE_NAME: + mgmt_ip = cfg.get_mgmt_ip() + self.mcast_addr = None + self.controller_address = mgmt_ip + self.agent_address = mgmt_ip + else: + self.mcast_addr = cfg.controller_mcast_group + self.controller_address = cfg.controller_mcast_group + self.agent_address = cfg.agent_mcast_group + + def socket_lock_acquire(self): + self.socket_lock.acquire() + + def socket_lock_release(self): + try: + self.socket_lock.release() + except Exception: + pass + + def write_state_file(self): + if six.PY2: + config = configparser.ConfigParser() + elif six.PY3: + config = configparser.ConfigParser(strict=False) + + cfgfile = open(state_file, 'w') + + config.add_section('runtime') + config.set('runtime', 'patch_op_counter', str(self.patch_op_counter)) + config.write(cfgfile) + cfgfile.close() + + def read_state_file(self): + if six.PY2: + config = configparser.ConfigParser() + elif six.PY3: + config = configparser.ConfigParser(strict=False) + + config.read(state_file) + + try: + counter = config.getint('runtime', 'patch_op_counter') + self.patch_op_counter = counter + + LOG.info("patch_op_counter is: %d", self.patch_op_counter) + except configparser.Error: + LOG.exception("Failed to read state info") + + def handle_nbr_patch_op_counter(self, host, nbr_patch_op_counter): + if self.patch_op_counter >= nbr_patch_op_counter: + return + + self.sync_from_nbr(host) + + def sync_from_nbr(self, host): + # Sync the patching repo + host_url = utils.ip_to_url(host) + try: + output = subprocess.check_output(["rsync", + "-acv", + "--delete", + "--exclude", "tmp", + "rsync://%s/patching/" % host_url, + "%s/" % patch_dir], + stderr=subprocess.STDOUT) + LOG.info("Synced to mate patching via rsync: %s", output) + except subprocess.CalledProcessError as e: + LOG.error("Failed to rsync: %s", e.output) + return False + + try: + output = subprocess.check_output(["rsync", + "-acv", + "--delete", + "rsync://%s/repo/" % host_url, + "%s/" % repo_root_dir], + stderr=subprocess.STDOUT) + LOG.info("Synced to mate repo via rsync: %s", output) + except subprocess.CalledProcessError: + LOG.error("Failed to rsync: %s", output) + return False + + self.read_state_file() + + self.patch_data_lock.acquire() + self.hosts_lock.acquire() + self.interim_state = {} + self.patch_data.load_all() + self.check_patch_states() + self.hosts_lock.release() + + if os.path.exists(app_dependency_filename): + try: + with open(app_dependency_filename, 'r') as f: + self.app_dependencies = json.loads(f.read()) + except Exception: + 
LOG.exception("Failed to read app dependencies: %s", app_dependency_filename) + else: + self.app_dependencies = {} + + self.patch_data_lock.release() + + return True + + def inc_patch_op_counter(self): + self.patch_op_counter += 1 + self.write_state_file() + + def check_patch_states(self): + # If we have no hosts, we can't be sure of the current patch state + if len(self.hosts) == 0: + for patch_id in self.patch_data.metadata: + self.patch_data.metadata[patch_id]["patchstate"] = constants.UNKNOWN + return + + # Default to allowing in-service patching + self.allow_insvc_patching = True + + # Take the detailed query results from the hosts and merge with the patch data + + self.hosts_lock.acquire() + + # Initialize patch state data based on repo state and interim_state presence + for patch_id in self.patch_data.metadata: + if patch_id in self.interim_state: + if self.patch_data.metadata[patch_id]["repostate"] == constants.AVAILABLE: + self.patch_data.metadata[patch_id]["patchstate"] = constants.PARTIAL_REMOVE + elif self.patch_data.metadata[patch_id]["repostate"] == constants.APPLIED: + self.patch_data.metadata[patch_id]["patchstate"] = constants.PARTIAL_APPLY + else: + self.patch_data.metadata[patch_id]["patchstate"] = \ + self.patch_data.metadata[patch_id]["repostate"] + + for ip in list(self.hosts): + if not self.hosts[ip].out_of_date: + continue + + for pkg in list(self.hosts[ip].installed): + for patch_id in list(self.patch_data.content_versions): + if pkg not in self.patch_data.content_versions[patch_id]: + continue + + if patch_id not in self.patch_data.metadata: + LOG.error("Patch data missing for %s", patch_id) + continue + + # If the patch is on a different release than the host, skip it. + if self.patch_data.metadata[patch_id]["sw_version"] != self.hosts[ip].sw_version: + continue + + # Is the installed pkg higher or lower version? + # The rpm.labelCompare takes version broken into 3 components + installed_ver = self.hosts[ip].installed[pkg].split('@')[0] + if ":" in installed_ver: + # Ignore epoch + installed_ver = installed_ver.split(':')[1] + + patch_ver = self.patch_data.content_versions[patch_id][pkg] + if ":" in patch_ver: + # Ignore epoch + patch_ver = patch_ver.split(':')[1] + + rc = rpm.labelCompare(parse_pkgver(installed_ver), + parse_pkgver(patch_ver)) + + if self.patch_data.metadata[patch_id]["repostate"] == constants.AVAILABLE: + # The RPM is not expected to be installed. + # If the installed version is the same or higher, + # this patch is in a Partial-Remove state + if rc >= 0 or patch_id in self.interim_state: + self.patch_data.metadata[patch_id]["patchstate"] = constants.PARTIAL_REMOVE + if self.patch_data.metadata[patch_id].get("reboot_required") != "N": + self.allow_insvc_patching = False + continue + elif self.patch_data.metadata[patch_id]["repostate"] == constants.APPLIED: + # The RPM is expected to be installed. 
+ # If the installed version is the lower, + # this patch is in a Partial-Apply state + if rc == -1 or patch_id in self.interim_state: + self.patch_data.metadata[patch_id]["patchstate"] = constants.PARTIAL_APPLY + if self.patch_data.metadata[patch_id].get("reboot_required") != "N": + self.allow_insvc_patching = False + continue + + if self.hosts[ip].sw_version == "14.10": + # For Release 1 + personality = "personality-%s" % self.hosts[ip].nodetype + else: + personality = "personality-%s" % "-".join(self.hosts[ip].subfunctions) + + # Check the to_remove list + for pkg in self.hosts[ip].to_remove: + for patch_id in list(self.patch_data.content_versions): + if pkg not in self.patch_data.content_versions[patch_id]: + continue + + if patch_id not in self.patch_data.metadata: + LOG.error("Patch data missing for %s", patch_id) + continue + + if personality not in self.patch_data.metadata[patch_id]: + continue + + if pkg not in self.patch_data.metadata[patch_id][personality]: + continue + + if self.patch_data.metadata[patch_id]["repostate"] == constants.AVAILABLE: + # The RPM is not expected to be installed. + # This patch is in a Partial-Remove state + self.patch_data.metadata[patch_id]["patchstate"] = constants.PARTIAL_REMOVE + if self.patch_data.metadata[patch_id].get("reboot_required") != "N": + self.allow_insvc_patching = False + continue + + # Check the missing_pkgs list + for pkg in self.hosts[ip].missing_pkgs: + for patch_id in list(self.patch_data.content_versions): + if pkg not in self.patch_data.content_versions[patch_id]: + continue + + if patch_id not in self.patch_data.metadata: + LOG.error("Patch data missing for %s", patch_id) + continue + + if personality not in self.patch_data.metadata[patch_id]: + continue + + if pkg not in self.patch_data.metadata[patch_id][personality]: + continue + + if self.patch_data.metadata[patch_id]["repostate"] == constants.APPLIED: + # The RPM is expected to be installed. 
+ # This patch is in a Partial-Apply state + self.patch_data.metadata[patch_id]["patchstate"] = constants.PARTIAL_APPLY + if self.patch_data.metadata[patch_id].get("reboot_required") != "N": + self.allow_insvc_patching = False + continue + + self.hosts_lock.release() + + def get_store_filename(self, patch_sw_version, rpmname): + rpm_dir = package_dir[patch_sw_version] + rpmfile = "%s/%s" % (rpm_dir, rpmname) + return rpmfile + + def get_repo_filename(self, patch_sw_version, rpmname): + rpmfile = self.get_store_filename(patch_sw_version, rpmname) + if not os.path.isfile(rpmfile): + msg = "Could not find rpm: %s" % rpmfile + LOG.error(msg) + return None + + repo_filename = None + + try: + # Get the architecture from the RPM + pkgarch = subprocess.check_output(["rpm", + "-qp", + "--queryformat", + "%{ARCH}", + "--nosignature", + rpmfile]) + + repo_filename = "%s/Packages/%s/%s" % (repo_dir[patch_sw_version], pkgarch, rpmname) + except subprocess.CalledProcessError: + msg = "RPM query failed for %s" % rpmfile + LOG.exception(msg) + return None + + return repo_filename + + def run_semantic_check(self, action, patch_list): + if not os.path.exists(INITIAL_CONFIG_COMPLETE_FLAG): + # Skip semantic checks if initial configuration isn't complete + return + + # Pass the current patch state to the semantic check as a series of args + patch_state_args = [] + for patch_id in list(self.patch_data.metadata): + patch_state = '%s=%s' % (patch_id, self.patch_data.metadata[patch_id]["patchstate"]) + patch_state_args += ['-p', patch_state] + + # Run semantic checks, if any + for patch_id in patch_list: + semchk = os.path.join(semantics_dir, action, patch_id) + + if os.path.exists(semchk): + try: + LOG.info("Running semantic check: %s", semchk) + subprocess.check_output([semchk] + patch_state_args, + stderr=subprocess.STDOUT) + LOG.info("Semantic check %s passed", semchk) + except subprocess.CalledProcessError as e: + msg = "Semantic check failed for %s:\n%s" % (patch_id, e.output) + LOG.exception(msg) + raise PatchFail(msg) + + def patch_import_api(self, patches): + """ + Import patches + :return: + """ + msg_info = "" + msg_warning = "" + msg_error = "" + + # Refresh data, if needed + self.base_pkgdata.loaddirs() + + # Protect against duplications + patch_list = sorted(list(set(patches))) + + # First, make sure the specified files exist + for patch in patch_list: + if not os.path.isfile(patch): + raise PatchFail("File does not exist: %s" % patch) + + try: + if not os.path.exists(avail_dir): + os.makedirs(avail_dir) + if not os.path.exists(applied_dir): + os.makedirs(applied_dir) + if not os.path.exists(committed_dir): + os.makedirs(committed_dir) + except os.error: + msg = "Failed to create directories" + LOG.exception(msg) + raise PatchFail(msg) + + msg = "Importing patches: %s" % ",".join(patch_list) + LOG.info(msg) + audit_log_info(msg) + + for patch in patch_list: + msg = "Importing patch: %s" % patch + LOG.info(msg) + audit_log_info(msg) + + # Get the patch_id from the filename + # and check to see if it's already imported + (patch_id, ext) = os.path.splitext(os.path.basename(patch)) + if patch_id in self.patch_data.metadata: + if self.patch_data.metadata[patch_id]["repostate"] == constants.APPLIED: + mdir = applied_dir + elif self.patch_data.metadata[patch_id]["repostate"] == constants.COMMITTED: + msg = "%s is committed. 
Metadata not updated" % patch_id + LOG.info(msg) + msg_info += msg + "\n" + continue + else: + mdir = avail_dir + + try: + thispatch = PatchFile.extract_patch(patch, + metadata_dir=mdir, + metadata_only=True, + existing_content=self.patch_data.contents[patch_id], + allpatches=self.patch_data, + base_pkgdata=self.base_pkgdata) + self.patch_data.update_patch(thispatch) + msg = "%s is already imported. Updated metadata only" % patch_id + LOG.info(msg) + msg_info += msg + "\n" + except PatchMismatchFailure: + msg = "Contents of %s do not match re-imported patch" % patch_id + LOG.exception(msg) + msg_error += msg + "\n" + continue + except PatchValidationFailure as e: + msg = "Patch validation failed for %s" % patch_id + if str(e) is not None and str(e) != '': + msg += ":\n%s" % str(e) + LOG.exception(msg) + msg_error += msg + "\n" + continue + except PatchFail: + msg = "Failed to import patch %s" % patch_id + LOG.exception(msg) + msg_error += msg + "\n" + + continue + + if ext != ".patch": + msg = "File must end in .patch extension: %s" \ + % os.path.basename(patch) + LOG.exception(msg) + msg_error += msg + "\n" + continue + + try: + thispatch = PatchFile.extract_patch(patch, + metadata_dir=avail_dir, + allpatches=self.patch_data, + base_pkgdata=self.base_pkgdata) + + msg_info += "%s is now available\n" % patch_id + self.patch_data.add_patch(patch_id, thispatch) + + self.patch_data.metadata[patch_id]["repostate"] = constants.AVAILABLE + if len(self.hosts) > 0: + self.patch_data.metadata[patch_id]["patchstate"] = constants.AVAILABLE + else: + self.patch_data.metadata[patch_id]["patchstate"] = constants.UNKNOWN + except PatchValidationFailure as e: + msg = "Patch validation failed for %s" % patch_id + if str(e) is not None and str(e) != '': + msg += ":\n%s" % str(e) + LOG.exception(msg) + msg_error += msg + "\n" + continue + except PatchFail: + msg = "Failed to import patch %s" % patch_id + LOG.exception(msg) + msg_error += msg + "\n" + continue + + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + def patch_apply_api(self, patch_ids, **kwargs): + """ + Apply patches, moving patches from available to applied and updating repo + :return: + """ + msg_info = "" + msg_warning = "" + msg_error = "" + + # Protect against duplications + patch_list = sorted(list(set(patch_ids))) + + msg = "Applying patches: %s" % ",".join(patch_list) + LOG.info(msg) + audit_log_info(msg) + + if "--all" in patch_list: + # Set patch_ids to list of all available patches + # We're getting this list now, before we load the applied patches + patch_list = [] + for patch_id in sorted(list(self.patch_data.metadata)): + if self.patch_data.metadata[patch_id]["repostate"] == constants.AVAILABLE: + patch_list.append(patch_id) + + if len(patch_list) == 0: + msg_info += "There are no available patches to be applied.\n" + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + repo_changed = False + + # First, verify that all specified patches exist + id_verification = True + for patch_id in patch_list: + if patch_id not in self.patch_data.metadata: + msg = "Patch %s does not exist" % patch_id + LOG.error(msg) + msg_error += msg + "\n" + id_verification = False + + if not id_verification: + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + # Check for patches that can't be applied during an upgrade + upgrade_check = True + for patch_id in patch_list: + if self.patch_data.metadata[patch_id]["sw_version"] != SW_VERSION \ + and 
self.patch_data.metadata[patch_id].get("apply_active_release_only") == "Y": + msg = "%s cannot be applied in an upgrade" % patch_id + LOG.error(msg) + msg_error += msg + "\n" + upgrade_check = False + + if not upgrade_check: + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + # Next, check the patch dependencies + # required_patches will map the required patch to the patches that need it + required_patches = {} + for patch_id in patch_list: + for req_patch in self.patch_data.metadata[patch_id]["requires"]: + # Ignore patches in the op set + if req_patch in patch_list: + continue + + if req_patch not in required_patches: + required_patches[req_patch] = [] + + required_patches[req_patch].append(patch_id) + + # Now verify the state of the required patches + req_verification = True + for req_patch, iter_patch_list in required_patches.items(): + if req_patch not in self.patch_data.metadata \ + or self.patch_data.metadata[req_patch]["repostate"] == constants.AVAILABLE: + msg = "%s is required by: %s" % (req_patch, ", ".join(sorted(iter_patch_list))) + msg_error += msg + "\n" + LOG.info(msg) + req_verification = False + + if not req_verification: + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + if kwargs.get("skip-semantic") != "yes": + self.run_semantic_check(constants.SEMANTIC_PREAPPLY, patch_list) + + # Start applying the patches + for patch_id in patch_list: + msg = "Applying patch: %s" % patch_id + LOG.info(msg) + audit_log_info(msg) + + if self.patch_data.metadata[patch_id]["repostate"] == constants.APPLIED \ + or self.patch_data.metadata[patch_id]["repostate"] == constants.COMMITTED: + msg = "%s is already in the repo" % patch_id + LOG.info(msg) + msg_info += msg + "\n" + continue + + # To allow for easy cleanup, we're going to first iterate + # through the rpm list to determine where to copy the file. + # As a second step, we'll go through the list and copy each file. + # If there are problems querying any RPMs, none will be copied. + rpmlist = {} + for rpmname in self.patch_data.contents[patch_id]: + patch_sw_version = self.patch_data.metadata[patch_id]["sw_version"] + + rpmfile = self.get_store_filename(patch_sw_version, rpmname) + if not os.path.isfile(rpmfile): + msg = "Could not find rpm: %s" % rpmfile + LOG.error(msg) + raise RpmFail(msg) + + repo_filename = self.get_repo_filename(patch_sw_version, rpmname) + if repo_filename is None: + msg = "Failed to determine repo path for %s" % rpmfile + LOG.exception(msg) + raise RpmFail(msg) + + repo_pkg_dir = os.path.dirname(repo_filename) + if not os.path.exists(repo_pkg_dir): + os.makedirs(repo_pkg_dir) + rpmlist[rpmfile] = repo_filename + + # Copy the RPMs. If a failure occurs, clean up copied files. 
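# (Editorial sketch, not part of this change: the copy step that follows is a
#  two-phase copy-or-roll-back; factored out it looks roughly like this, with
#  rpmlist mapping source RPM paths to their repo destinations.)
import os
import shutil

def copy_with_rollback(rpmlist):
    copied = []
    try:
        for src, dst in rpmlist.items():
            shutil.copy(src, dst)
            copied.append(dst)
    except IOError:
        # Undo any partial progress so the repo is left untouched.
        for dst in copied:
            os.remove(dst)
        raise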
+ copied = [] + for rpmfile in rpmlist: + LOG.info("Copy %s to %s", rpmfile, rpmlist[rpmfile]) + try: + shutil.copy(rpmfile, rpmlist[rpmfile]) + copied.append(rpmlist[rpmfile]) + except IOError: + msg = "Failed to copy %s" % rpmfile + LOG.exception(msg) + # Clean up files + for filename in copied: + LOG.info("Cleaning up %s", filename) + os.remove(filename) + + raise RpmFail(msg) + + try: + # Move the metadata to the applied dir + shutil.move("%s/%s-metadata.xml" % (avail_dir, patch_id), + "%s/%s-metadata.xml" % (applied_dir, patch_id)) + + msg_info += "%s is now in the repo\n" % patch_id + except shutil.Error: + msg = "Failed to move the metadata for %s" % patch_id + LOG.exception(msg) + raise MetadataFail(msg) + + self.patch_data.metadata[patch_id]["repostate"] = constants.APPLIED + if len(self.hosts) > 0: + self.patch_data.metadata[patch_id]["patchstate"] = constants.PARTIAL_APPLY + else: + self.patch_data.metadata[patch_id]["patchstate"] = constants.UNKNOWN + + self.hosts_lock.acquire() + self.interim_state[patch_id] = list(self.hosts) + self.hosts_lock.release() + + repo_changed = True + + if repo_changed: + # Update the repo + self.patch_data.gen_groups_xml() + for ver, rdir in repo_dir.items(): + try: + output = subprocess.check_output(["createrepo", + "--update", + "-g", + "comps.xml", + rdir], + stderr=subprocess.STDOUT) + LOG.info("Repo[%s] updated:\n%s", ver, output) + except subprocess.CalledProcessError: + msg = "Failed to update the repo for %s" % ver + LOG.exception(msg) + raise PatchFail(msg) + else: + LOG.info("Repository is unchanged") + + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + def patch_remove_api(self, patch_ids, **kwargs): + """ + Remove patches, moving patches from applied to available and updating repo + :return: + """ + msg_info = "" + msg_warning = "" + msg_error = "" + remove_unremovable = False + + repo_changed = False + + # Protect against duplications + patch_list = sorted(list(set(patch_ids))) + + msg = "Removing patches: %s" % ",".join(patch_list) + LOG.info(msg) + audit_log_info(msg) + + if kwargs.get("removeunremovable") == "yes": + remove_unremovable = True + + # First, verify that all specified patches exist + id_verification = True + for patch_id in patch_list: + if patch_id not in self.patch_data.metadata: + msg = "Patch %s does not exist" % patch_id + LOG.error(msg) + msg_error += msg + "\n" + id_verification = False + + if not id_verification: + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + # See if any of the patches are marked as unremovable + unremovable_verification = True + for patch_id in patch_list: + if self.patch_data.metadata[patch_id].get("unremovable") == "Y": + if remove_unremovable: + msg = "Unremovable patch %s being removed" % patch_id + LOG.warning(msg) + msg_warning += msg + "\n" + else: + msg = "Patch %s is not removable" % patch_id + LOG.error(msg) + msg_error += msg + "\n" + unremovable_verification = False + elif self.patch_data.metadata[patch_id]['repostate'] == constants.COMMITTED: + msg = "Patch %s is committed and cannot be removed" % patch_id + LOG.error(msg) + msg_error += msg + "\n" + unremovable_verification = False + + if not unremovable_verification: + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + # Next, see if any of the patches are required by applied patches + # required_patches will map the required patch to the patches that need it + required_patches = {} + for patch_iter in list(self.patch_data.metadata): + # Ignore patches in the op set 
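# (Editorial illustration with hypothetical patch IDs, not part of this
#  change: the scan below builds a reverse map from each patch being removed
#  to the patches that still require it; simplified here, the real scan also
#  skips patches whose repostate is still Available.)
def build_required_by(metadata, removing):
    required = {}
    for patch_iter, data in metadata.items():
        if patch_iter in removing:
            continue
        for req in data["requires"]:
            if req in removing:
                required.setdefault(req, []).append(patch_iter)
    return required

# build_required_by({"PATCH_0002": {"requires": ["PATCH_0001"]}}, ["PATCH_0001"])
# returns {"PATCH_0001": ["PATCH_0002"]}, so removing PATCH_0001 is rejected.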
+ if patch_iter in patch_list: + continue + + # Only check applied patches + if self.patch_data.metadata[patch_iter]["repostate"] == constants.AVAILABLE: + continue + + for req_patch in self.patch_data.metadata[patch_iter]["requires"]: + if req_patch not in patch_list: + continue + + if req_patch not in required_patches: + required_patches[req_patch] = [] + + required_patches[req_patch].append(patch_iter) + + if len(required_patches) > 0: + for req_patch, iter_patch_list in required_patches.items(): + msg = "%s is required by: %s" % (req_patch, ", ".join(sorted(iter_patch_list))) + msg_error += msg + "\n" + LOG.info(msg) + + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + if kwargs.get("skipappcheck") != "yes": + # Check application dependencies before removing + required_patches = {} + for patch_id in patch_list: + for appname, iter_patch_list in self.app_dependencies.items(): + if patch_id in iter_patch_list: + if patch_id not in required_patches: + required_patches[patch_id] = [] + required_patches[patch_id].append(appname) + + if len(required_patches) > 0: + for req_patch, app_list in required_patches.items(): + msg = "%s is required by application(s): %s" % (req_patch, ", ".join(sorted(app_list))) + msg_error += msg + "\n" + LOG.info(msg) + + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + if kwargs.get("skip-semantic") != "yes": + self.run_semantic_check(constants.SEMANTIC_PREREMOVE, patch_list) + + for patch_id in patch_list: + msg = "Removing patch: %s" % patch_id + LOG.info(msg) + audit_log_info(msg) + + if self.patch_data.metadata[patch_id]["repostate"] == constants.AVAILABLE: + msg = "%s is not in the repo" % patch_id + LOG.info(msg) + msg_info += msg + "\n" + continue + + repo_changed = True + + for rpmname in self.patch_data.contents[patch_id]: + patch_sw_version = self.patch_data.metadata[patch_id]["sw_version"] + rpmfile = self.get_store_filename(patch_sw_version, rpmname) + if not os.path.isfile(rpmfile): + msg = "Could not find rpm: %s" % rpmfile + LOG.error(msg) + raise RpmFail(msg) + + repo_filename = self.get_repo_filename(patch_sw_version, rpmname) + if repo_filename is None: + msg = "Failed to determine repo path for %s" % rpmfile + LOG.exception(msg) + raise RpmFail(msg) + + try: + os.remove(repo_filename) + except OSError: + msg = "Failed to remove RPM" + LOG.exception(msg) + raise RpmFail(msg) + + try: + # Move the metadata to the available dir + shutil.move("%s/%s-metadata.xml" % (applied_dir, patch_id), + "%s/%s-metadata.xml" % (avail_dir, patch_id)) + msg_info += "%s has been removed from the repo\n" % patch_id + except shutil.Error: + msg = "Failed to move the metadata for %s" % patch_id + LOG.exception(msg) + raise MetadataFail(msg) + + self.patch_data.metadata[patch_id]["repostate"] = constants.AVAILABLE + if len(self.hosts) > 0: + self.patch_data.metadata[patch_id]["patchstate"] = constants.PARTIAL_REMOVE + else: + self.patch_data.metadata[patch_id]["patchstate"] = constants.UNKNOWN + + self.hosts_lock.acquire() + self.interim_state[patch_id] = list(self.hosts) + self.hosts_lock.release() + + if repo_changed: + # Update the repo + self.patch_data.gen_groups_xml() + for ver, rdir in repo_dir.items(): + try: + output = subprocess.check_output(["createrepo", + "--update", + "-g", + "comps.xml", + rdir], + stderr=subprocess.STDOUT) + LOG.info("Repo[%s] updated:\n%s", ver, output) + except subprocess.CalledProcessError: + msg = "Failed to update the repo for %s" % ver + LOG.exception(msg) + raise PatchFail(msg) + else: + 
LOG.info("Repository is unchanged") + + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + def patch_delete_api(self, patch_ids): + """ + Delete patches + :return: + """ + msg_info = "" + msg_warning = "" + msg_error = "" + + # Protect against duplications + patch_list = sorted(list(set(patch_ids))) + + msg = "Deleting patches: %s" % ",".join(patch_list) + LOG.info(msg) + audit_log_info(msg) + + # Verify patches exist and are in proper state first + id_verification = True + for patch_id in patch_list: + if patch_id not in self.patch_data.metadata: + msg = "Patch %s does not exist" % patch_id + LOG.error(msg) + msg_error += msg + "\n" + id_verification = False + continue + + # Get the aggregated patch state, if possible + patchstate = constants.UNKNOWN + if patch_id in self.patch_data.metadata: + patchstate = self.patch_data.metadata[patch_id]["patchstate"] + + if self.patch_data.metadata[patch_id]["repostate"] != constants.AVAILABLE or \ + (patchstate != constants.AVAILABLE and patchstate != constants.UNKNOWN): + msg = "Patch %s not in Available state" % patch_id + LOG.error(msg) + msg_error += msg + "\n" + id_verification = False + continue + + if not id_verification: + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + # Handle operation + for patch_id in patch_list: + for rpmname in self.patch_data.contents[patch_id]: + patch_sw_version = self.patch_data.metadata[patch_id]["sw_version"] + rpmfile = self.get_store_filename(patch_sw_version, rpmname) + if not os.path.isfile(rpmfile): + # We're deleting the patch anyway, so the missing file + # doesn't really matter + continue + + try: + os.remove(rpmfile) + except OSError: + msg = "Failed to remove RPM %s" % rpmfile + LOG.exception(msg) + raise RpmFail(msg) + + for action in constants.SEMANTIC_ACTIONS: + action_file = os.path.join(semantics_dir, action, patch_id) + if not os.path.isfile(action_file): + continue + + try: + os.remove(action_file) + except OSError: + msg = "Failed to remove semantic %s" % action_file + LOG.exception(msg) + raise SemanticFail(msg) + + try: + # Delete the metadata + os.remove("%s/%s-metadata.xml" % (avail_dir, patch_id)) + except OSError: + msg = "Failed to remove metadata for %s" % patch_id + LOG.exception(msg) + raise MetadataFail(msg) + + self.patch_data.delete_patch(patch_id) + msg = "%s has been deleted" % patch_id + LOG.info(msg) + msg_info += msg + "\n" + + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + def patch_init_release_api(self, release): + """ + Create an empty repo for a new release + :return: + """ + msg_info = "" + msg_warning = "" + msg_error = "" + + msg = "Initializing repo for: %s" % release + LOG.info(msg) + audit_log_info(msg) + + if release == SW_VERSION: + msg = "Rejected: Requested release %s is running release" % release + msg_error += msg + "\n" + LOG.info(msg) + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + # Refresh data + self.base_pkgdata.loaddirs() + + self.patch_data.load_all_metadata(avail_dir, repostate=constants.AVAILABLE) + self.patch_data.load_all_metadata(applied_dir, repostate=constants.APPLIED) + self.patch_data.load_all_metadata(committed_dir, repostate=constants.COMMITTED) + + repo_dir[release] = "%s/rel-%s" % (repo_root_dir, release) + + # Verify the release doesn't already exist + if os.path.exists(repo_dir[release]): + msg = "Patch repository for %s already exists" % release + msg_info += msg + "\n" + LOG.info(msg) + return dict(info=msg_info, warning=msg_warning, error=msg_error) + 
+ # Generate the groups xml + self.patch_data.gen_release_groups_xml(release) + + # Create the repo + try: + output = subprocess.check_output(["createrepo", + "--update", + "-g", + "comps.xml", + repo_dir[release]], + stderr=subprocess.STDOUT) + LOG.info("Repo[%s] updated:\n%s", release, output) + except subprocess.CalledProcessError: + msg = "Failed to update the repo for %s" % release + LOG.exception(msg) + + # Wipe out what was created + shutil.rmtree(repo_dir[release]) + del repo_dir[release] + + raise PatchFail(msg) + + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + def patch_del_release_api(self, release): + """ + Delete the repo and patches for second release + :return: + """ + msg_info = "" + msg_warning = "" + msg_error = "" + + msg = "Deleting repo and patches for: %s" % release + LOG.info(msg) + audit_log_info(msg) + + if release == SW_VERSION: + msg = "Rejected: Requested release %s is running release" % release + msg_error += msg + "\n" + LOG.info(msg) + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + # Delete patch XML files + for patch_id in list(self.patch_data.metadata): + if self.patch_data.metadata[patch_id]["sw_version"] != release: + continue + + if self.patch_data.metadata[patch_id]["repostate"] == constants.APPLIED: + mdir = applied_dir + elif self.patch_data.metadata[patch_id]["repostate"] == constants.COMMITTED: + mdir = committed_dir + else: + mdir = avail_dir + + for action in constants.SEMANTIC_ACTIONS: + action_file = os.path.join(semantics_dir, action, patch_id) + if not os.path.isfile(action_file): + continue + + try: + os.remove(action_file) + except OSError: + msg = "Failed to remove semantic %s" % action_file + LOG.exception(msg) + raise SemanticFail(msg) + + try: + # Delete the metadata + os.remove("%s/%s-metadata.xml" % (mdir, patch_id)) + except OSError: + msg = "Failed to remove metadata for %s" % patch_id + LOG.exception(msg) + + # Refresh patch data + self.patch_data = PatchData() + self.patch_data.load_all_metadata(avail_dir, repostate=constants.AVAILABLE) + self.patch_data.load_all_metadata(applied_dir, repostate=constants.APPLIED) + self.patch_data.load_all_metadata(committed_dir, repostate=constants.COMMITTED) + + raise MetadataFail(msg) + + # Delete the packages dir + package_dir[release] = "%s/%s" % (root_package_dir, release) + if os.path.exists(package_dir[release]): + try: + shutil.rmtree(package_dir[release]) + except shutil.Error: + msg = "Failed to delete package dir for %s" % release + LOG.exception(msg) + + del package_dir[release] + + # Verify the release exists + repo_dir[release] = "%s/rel-%s" % (repo_root_dir, release) + if not os.path.exists(repo_dir[release]): + # Nothing to do + msg = "Patch repository for %s does not exist" % release + msg_info += msg + "\n" + LOG.info(msg) + del repo_dir[release] + + # Refresh patch data + self.patch_data = PatchData() + self.patch_data.load_all_metadata(avail_dir, repostate=constants.AVAILABLE) + self.patch_data.load_all_metadata(applied_dir, repostate=constants.APPLIED) + self.patch_data.load_all_metadata(committed_dir, repostate=constants.COMMITTED) + + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + # Delete the repo + try: + shutil.rmtree(repo_dir[release]) + except shutil.Error: + msg = "Failed to delete repo for %s" % release + LOG.exception(msg) + + del repo_dir[release] + + if self.base_pkgdata is not None and release in self.base_pkgdata.pkgs: + del self.base_pkgdata.pkgs[release] + + # Refresh patch data + 
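# (Editorial sketch, not part of this change: each "refresh patch data" step
#  repeated in this method is the same full metadata reload from the three
#  state directories; reload_patch_data is a name introduced here only to
#  show that pattern in one place.)
import cgcs_patch.constants as constants
from cgcs_patch.patch_functions import (PatchData, avail_dir, applied_dir,
                                        committed_dir)

def reload_patch_data():
    pd = PatchData()
    pd.load_all_metadata(avail_dir, repostate=constants.AVAILABLE)
    pd.load_all_metadata(applied_dir, repostate=constants.APPLIED)
    pd.load_all_metadata(committed_dir, repostate=constants.COMMITTED)
    return pd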
self.patch_data = PatchData() + self.patch_data.load_all_metadata(avail_dir, repostate=constants.AVAILABLE) + self.patch_data.load_all_metadata(applied_dir, repostate=constants.APPLIED) + self.patch_data.load_all_metadata(committed_dir, repostate=constants.COMMITTED) + + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + def patch_query_what_requires(self, patch_ids): + """ + Query the known patches to see which have dependencies on the specified patches + :return: + """ + msg_info = "" + msg_warning = "" + msg_error = "" + + msg = "Querying what requires patches: %s" % ",".join(patch_ids) + LOG.info(msg) + audit_log_info(msg) + + # First, verify that all specified patches exist + id_verification = True + for patch_id in patch_ids: + if patch_id not in self.patch_data.metadata: + msg = "Patch %s does not exist" % patch_id + LOG.error(msg) + msg_error += msg + "\n" + id_verification = False + + if not id_verification: + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + required_patches = {} + for patch_iter in list(self.patch_data.metadata): + for req_patch in self.patch_data.metadata[patch_iter]["requires"]: + if req_patch not in patch_ids: + continue + + if req_patch not in required_patches: + required_patches[req_patch] = [] + + required_patches[req_patch].append(patch_iter) + + for patch_id in patch_ids: + if patch_id in required_patches: + iter_patch_list = required_patches[patch_id] + msg_info += "%s is required by: %s\n" % (patch_id, ", ".join(sorted(iter_patch_list))) + else: + msg_info += "%s is not required by any patches.\n" % patch_id + + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + def patch_sync(self): + # Increment the patch_op_counter here + self.inc_patch_op_counter() + + self.patch_data_lock.acquire() + # self.patch_data.load_all() + self.check_patch_states() + self.patch_data_lock.release() + + if self.sock_out is None: + return True + + # Send the sync requests + + self.controller_neighbours_lock.acquire() + for n in self.controller_neighbours: + self.controller_neighbours[n].clear_synced() + self.controller_neighbours_lock.release() + + msg = PatchMessageSyncReq() + self.socket_lock.acquire() + msg.send(self.sock_out) + self.socket_lock.release() + + # Now we wait, up to two mins... 
TODO: Wait on a condition + my_ip = cfg.get_mgmt_ip() + sync_rc = False + max_time = time.time() + 120 + while time.time() < max_time: + all_done = True + self.controller_neighbours_lock.acquire() + for n in self.controller_neighbours: + if n != my_ip and not self.controller_neighbours[n].get_synced(): + all_done = False + self.controller_neighbours_lock.release() + + if all_done: + LOG.info("Sync complete") + sync_rc = True + break + + time.sleep(0.5) + + # Send hellos to the hosts now, to get queries performed + hello_agent = PatchMessageHelloAgent() + self.socket_lock.acquire() + hello_agent.send(self.sock_out) + self.socket_lock.release() + + if not sync_rc: + LOG.info("Timed out waiting for sync completion") + return sync_rc + + def patch_query_cached(self, **kwargs): + query_state = None + if "show" in kwargs: + if kwargs["show"] == "available": + query_state = constants.AVAILABLE + elif kwargs["show"] == "applied": + query_state = constants.APPLIED + elif kwargs["show"] == "committed": + query_state = constants.COMMITTED + + query_release = None + if "release" in kwargs: + query_release = kwargs["release"] + + results = {} + self.patch_data_lock.acquire() + if query_state is None and query_release is None: + # Return everything + results = self.patch_data.metadata + else: + # Filter results + for patch_id, data in self.patch_data.metadata.items(): + if query_state is not None and data["repostate"] != query_state: + continue + if query_release is not None and data["sw_version"] != query_release: + continue + results[patch_id] = data + self.patch_data_lock.release() + + return results + + def patch_query_specific_cached(self, patch_ids): + audit_log_info("Patch show") + + results = {"metadata": {}, + "contents": {}, + "error": ""} + + self.patch_data_lock.acquire() + + for patch_id in patch_ids: + if patch_id not in list(self.patch_data.metadata): + results["error"] += "%s is unrecognized\n" % patch_id + + for patch_id, data in self.patch_data.metadata.items(): + if patch_id in patch_ids: + results["metadata"][patch_id] = data + for patch_id, data in self.patch_data.contents.items(): + if patch_id in patch_ids: + results["contents"][patch_id] = data + + self.patch_data_lock.release() + + return results + + def get_dependencies(self, patch_ids, recursive): + dependencies = set() + patch_added = False + + self.patch_data_lock.acquire() + + # Add patches to workset + for patch_id in sorted(patch_ids): + dependencies.add(patch_id) + patch_added = True + + while patch_added: + patch_added = False + for patch_id in sorted(dependencies): + for req in self.patch_data.metadata[patch_id]["requires"]: + if req not in dependencies: + dependencies.add(req) + patch_added = recursive + + self.patch_data_lock.release() + + return sorted(dependencies) + + def patch_query_dependencies(self, patch_ids, **kwargs): + msg = "Patch query-dependencies %s" % patch_ids + LOG.info(msg) + audit_log_info(msg) + + failure = False + + results = {"patches": [], + "error": ""} + + recursive = False + if kwargs.get("recursive") == "yes": + recursive = True + + self.patch_data_lock.acquire() + + # Verify patch IDs + for patch_id in sorted(patch_ids): + if patch_id not in list(self.patch_data.metadata): + errormsg = "%s is unrecognized\n" % patch_id + LOG.info("patch_query_dependencies: %s", errormsg) + results["error"] += errormsg + failure = True + self.patch_data_lock.release() + + if failure: + LOG.info("patch_query_dependencies failed") + return results + + results["patches"] = self.get_dependencies(patch_ids, 
recursive) + + return results + + def patch_commit(self, patch_ids, dry_run=False): + msg = "Patch commit %s" % patch_ids + LOG.info(msg) + audit_log_info(msg) + + try: + if not os.path.exists(committed_dir): + os.makedirs(committed_dir) + except os.error: + msg = "Failed to create %s" % committed_dir + LOG.exception(msg) + raise PatchFail(msg) + + failure = False + recursive = True + + keep = {} + cleanup = {} + cleanup_files = set() + + results = {"info": "", + "error": ""} + + # Ensure there are only REL patches + non_rel_list = [] + self.patch_data_lock.acquire() + for patch_id in self.patch_data.metadata: + if self.patch_data.metadata[patch_id]['status'] != constants.STATUS_RELEASED: + non_rel_list.append(patch_id) + self.patch_data_lock.release() + + if len(non_rel_list) > 0: + errormsg = "A commit cannot be performed with non-REL status patches in the system:\n" + for patch_id in non_rel_list: + errormsg += " %s\n" % patch_id + LOG.info("patch_commit rejected: %s", errormsg) + results["error"] += errormsg + return results + + # Verify patch IDs + self.patch_data_lock.acquire() + for patch_id in sorted(patch_ids): + if patch_id not in list(self.patch_data.metadata): + errormsg = "%s is unrecognized\n" % patch_id + LOG.info("patch_commit: %s", errormsg) + results["error"] += errormsg + failure = True + self.patch_data_lock.release() + + if failure: + LOG.info("patch_commit: Failed patch ID check") + return results + + commit_list = self.get_dependencies(patch_ids, recursive) + + # Check patch states + avail_list = [] + self.patch_data_lock.acquire() + for patch_id in commit_list: + if self.patch_data.metadata[patch_id]['patchstate'] != constants.APPLIED \ + and self.patch_data.metadata[patch_id]['patchstate'] != constants.COMMITTED: + avail_list.append(patch_id) + self.patch_data_lock.release() + + if len(avail_list) > 0: + errormsg = "The following patches are not applied and cannot be committed:\n" + for patch_id in avail_list: + errormsg += " %s\n" % patch_id + LOG.info("patch_commit rejected: %s", errormsg) + results["error"] += errormsg + return results + + # Get list of packages + self.patch_data_lock.acquire() + for patch_id in commit_list: + patch_sw_version = self.patch_data.metadata[patch_id]["sw_version"] + + if patch_sw_version not in keep: + keep[patch_sw_version] = {} + if patch_sw_version not in cleanup: + cleanup[patch_sw_version] = {} + + for rpmname in self.patch_data.contents[patch_id]: + try: + pkgname, arch, pkgver = parse_rpm_filename(rpmname) + except ValueError as e: + self.patch_data_lock.release() + raise e + + if pkgname not in keep[patch_sw_version]: + keep[patch_sw_version][pkgname] = {arch: pkgver} + continue + elif arch not in keep[patch_sw_version][pkgname]: + keep[patch_sw_version][pkgname][arch] = pkgver + continue + + # Compare versions + keep_pkgver = keep[patch_sw_version][pkgname][arch] + if pkgver > keep_pkgver: + if pkgname not in cleanup[patch_sw_version]: + cleanup[patch_sw_version][pkgname] = {arch: [keep_pkgver]} + elif arch not in cleanup[patch_sw_version][pkgname]: + cleanup[patch_sw_version][pkgname][arch] = [keep_pkgver] + else: + cleanup[patch_sw_version][pkgname][arch].append(keep_pkgver) + + # Find the rpmname + keep_rpmname = keep_pkgver.generate_rpm_filename(pkgname, arch) + + store_filename = self.get_store_filename(patch_sw_version, keep_rpmname) + if store_filename is not None and os.path.exists(store_filename): + cleanup_files.add(store_filename) + + repo_filename = self.get_repo_filename(patch_sw_version, keep_rpmname) + if 
repo_filename is not None and os.path.exists(repo_filename): + cleanup_files.add(repo_filename) + + # Keep the new pkgver + keep[patch_sw_version][pkgname][arch] = pkgver + else: + # Put this pkg in the cleanup list + if pkgname not in cleanup[patch_sw_version]: + cleanup[patch_sw_version][pkgname] = {arch: [pkgver]} + elif arch not in cleanup[patch_sw_version][pkgname]: + cleanup[patch_sw_version][pkgname][arch] = [pkgver] + else: + cleanup[patch_sw_version][pkgname][arch].append(pkgver) + + store_filename = self.get_store_filename(patch_sw_version, rpmname) + if store_filename is not None and os.path.exists(store_filename): + cleanup_files.add(store_filename) + + repo_filename = self.get_repo_filename(patch_sw_version, rpmname) + if repo_filename is not None and os.path.exists(repo_filename): + cleanup_files.add(repo_filename) + + self.patch_data_lock.release() + + # Calculate disk space + disk_space = 0 + for rpmfile in cleanup_files: + statinfo = os.stat(rpmfile) + disk_space += statinfo.st_size + + if dry_run: + results["info"] = "This commit operation would free %0.2f MiB" % (disk_space / (1024.0 * 1024.0)) + return results + + # Do the commit + + # Move the metadata to the committed dir + for patch_id in commit_list: + metadata_fname = "%s-metadata.xml" % patch_id + applied_fname = os.path.join(applied_dir, metadata_fname) + committed_fname = os.path.join(committed_dir, metadata_fname) + if os.path.exists(applied_fname): + try: + shutil.move(applied_fname, committed_fname) + except shutil.Error: + msg = "Failed to move the metadata for %s" % patch_id + LOG.exception(msg) + raise MetadataFail(msg) + + # Delete the files + for rpmfile in cleanup_files: + try: + os.remove(rpmfile) + except OSError: + msg = "Failed to remove: %s" % rpmfile + LOG.exception(msg) + raise MetadataFail(msg) + + # Update the repo + self.patch_data.gen_groups_xml() + for ver, rdir in repo_dir.items(): + try: + output = subprocess.check_output(["createrepo", + "--update", + "-g", + "comps.xml", + rdir], + stderr=subprocess.STDOUT) + LOG.info("Repo[%s] updated:\n%s", ver, output) + except subprocess.CalledProcessError: + msg = "Failed to update the repo for %s" % ver + LOG.exception(msg) + raise PatchFail(msg) + + self.patch_data.load_all() + + results["info"] = "The patches have been committed." + return results + + def query_host_cache(self): + output = [] + + self.hosts_lock.acquire() + for nbr in list(self.hosts): + host = self.hosts[nbr].get_dict() + host["interim_state"] = False + for patch_id in list(pc.interim_state): + if nbr in pc.interim_state[patch_id]: + host["interim_state"] = True + + output.append(host) + + self.hosts_lock.release() + + return output + + def any_patch_host_installing(self): + rc = False + + self.hosts_lock.acquire() + for host in self.hosts.values(): + if host.state == constants.PATCH_AGENT_STATE_INSTALLING: + rc = True + break + + self.hosts_lock.release() + + return rc + + def patch_host_install(self, host_ip, force, async_req=False): + msg_info = "" + msg_warning = "" + msg_error = "" + + ip = host_ip + + self.hosts_lock.acquire() + # If not in hosts table, maybe a hostname was used instead + if host_ip not in self.hosts: + try: + ip = utils.gethostbyname(host_ip) + if ip not in self.hosts: + # Translated successfully, but IP isn't in the table. 
+ # Raise an exception to drop out to the failure handling + raise PatchError("Host IP (%s) not in table" % ip) + except Exception: + self.hosts_lock.release() + msg = "Unknown host specified: %s" % host_ip + msg_error += msg + "\n" + LOG.error("Error in host-install: %s", msg) + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + msg = "Running host-install for %s (%s), force=%s, async_req=%s" % (host_ip, ip, force, async_req) + LOG.info(msg) + audit_log_info(msg) + + if self.allow_insvc_patching: + LOG.info("Allowing in-service patching") + force = True + + self.hosts[ip].install_pending = True + self.hosts[ip].install_status = False + self.hosts[ip].install_reject_reason = None + self.hosts_lock.release() + + installreq = PatchMessageAgentInstallReq() + installreq.ip = ip + installreq.force = force + installreq.encode() + self.socket_lock.acquire() + installreq.send(self.sock_out) + self.socket_lock.release() + + if async_req: + # async_req install requested, so return now + msg = "Patch installation request sent to %s." % self.hosts[ip].hostname + msg_info += msg + "\n" + LOG.info("host-install async_req: %s", msg) + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + # Now we wait, up to ten mins... TODO: Wait on a condition + resp_rx = False + max_time = time.time() + 600 + while time.time() < max_time: + self.hosts_lock.acquire() + if ip not in self.hosts: + # The host aged out while we were waiting + self.hosts_lock.release() + msg = "Agent expired while waiting: %s" % ip + msg_error += msg + "\n" + LOG.error("Error in host-install: %s", msg) + break + + if not self.hosts[ip].install_pending: + # We got a response + resp_rx = True + if self.hosts[ip].install_status: + msg = "Patch installation was successful on %s." % self.hosts[ip].hostname + msg_info += msg + "\n" + LOG.info("host-install: %s", msg) + elif self.hosts[ip].install_reject_reason: + msg = "Patch installation rejected by %s. %s" % ( + self.hosts[ip].hostname, + self.hosts[ip].install_reject_reason) + msg_error += msg + "\n" + LOG.error("Error in host-install: %s", msg) + else: + msg = "Patch installation failed on %s." % self.hosts[ip].hostname + msg_error += msg + "\n" + LOG.error("Error in host-install: %s", msg) + + self.hosts_lock.release() + break + + self.hosts_lock.release() + + time.sleep(0.5) + + if not resp_rx: + msg = "Timeout occurred while waiting response from %s." % ip + msg_error += msg + "\n" + LOG.error("Error in host-install: %s", msg) + + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + def drop_host(self, host_ip, sync_nbr=True): + msg_info = "" + msg_warning = "" + msg_error = "" + + ip = host_ip + + self.hosts_lock.acquire() + # If not in hosts table, maybe a hostname was used instead + if host_ip not in self.hosts: + try: + # Because the host may be getting dropped due to deletion, + # we may be unable to do a hostname lookup. Instead, we'll + # iterate through the table here. + for host in list(self.hosts): + if host_ip == self.hosts[host].hostname: + ip = host + break + + if ip not in self.hosts: + # Translated successfully, but IP isn't in the table. 
+ # Raise an exception to drop out to the failure handling + raise PatchError("Host IP (%s) not in table" % ip) + except Exception: + self.hosts_lock.release() + msg = "Unknown host specified: %s" % host_ip + msg_error += msg + "\n" + LOG.error("Error in drop-host: %s", msg) + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + msg = "Running drop-host for %s (%s)" % (host_ip, ip) + LOG.info(msg) + audit_log_info(msg) + + del self.hosts[ip] + for patch_id in list(self.interim_state): + if ip in self.interim_state[patch_id]: + self.interim_state[patch_id].remove(ip) + + self.hosts_lock.release() + + if sync_nbr: + sync_msg = PatchMessageDropHostReq() + sync_msg.ip = ip + self.socket_lock.acquire() + sync_msg.send(self.sock_out) + self.socket_lock.release() + + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + def is_applied(self, patch_ids): + all_applied = True + + self.patch_data_lock.acquire() + + for patch_id in patch_ids: + if patch_id not in self.patch_data.metadata: + all_applied = False + break + + if self.patch_data.metadata[patch_id]["patchstate"] != constants.APPLIED: + all_applied = False + break + + self.patch_data_lock.release() + + return all_applied + + def is_available(self, patch_ids): + all_available = True + + self.patch_data_lock.acquire() + + for patch_id in patch_ids: + if patch_id not in self.patch_data.metadata: + all_available = False + break + + if self.patch_data.metadata[patch_id]["patchstate"] != \ + constants.AVAILABLE: + all_available = False + break + + self.patch_data_lock.release() + + return all_available + + def report_app_dependencies(self, patch_ids, **kwargs): + """ + Handle report of application dependencies + """ + if "app" not in kwargs: + raise PatchInvalidRequest + + appname = kwargs.get("app") + + LOG.info("Handling app dependencies report: app=%s, patch_ids=%s", + appname, ','.join(patch_ids)) + + self.patch_data_lock.acquire() + + if len(patch_ids) == 0: + if appname in self.app_dependencies: + del self.app_dependencies[appname] + else: + self.app_dependencies[appname] = patch_ids + + try: + tmpfile, tmpfname = tempfile.mkstemp( + prefix=app_dependency_basename, + dir=constants.PATCH_STORAGE_DIR) + + os.write(tmpfile, json.dumps(self.app_dependencies)) + os.close(tmpfile) + + os.rename(tmpfname, app_dependency_filename) + except Exception: + LOG.exception("Failed in report_app_dependencies") + raise PatchFail("Internal failure") + finally: + self.patch_data_lock.release() + + return True + + def query_app_dependencies(self): + """ + Query application dependencies + """ + self.patch_data_lock.acquire() + + data = self.app_dependencies + + self.patch_data_lock.release() + + return dict(data) + + +# The wsgiref.simple_server module has an error handler that catches +# and prints any exceptions that occur during the API handling to stderr. +# This means the patching sys.excepthook handler that logs uncaught +# exceptions is never called, and those exceptions are lost. +# +# To get around this, we're subclassing the simple_server.ServerHandler +# in order to replace the handle_error method with a custom one that +# logs the exception instead, and will set a global flag to shutdown +# the server and reset. 
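+# For context: that flag is the module-level keep_running boolean, which the
+# API server loops re-check between handle_request() calls, and thread_death
+# is the threading.Event that gets set when one of the controller threads
+# stops or hits a fatal error, letting main() shut the remaining threads down
+# and exit.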
+# +class MyServerHandler(simple_server.ServerHandler): + def handle_error(self): + LOG.exception('An uncaught exception has occurred:') + if not self.headers_sent: + self.result = self.error_output(self.environ, self.start_response) + self.finish_response() + global keep_running + keep_running = False + + +def get_handler_cls(): + cls = simple_server.WSGIRequestHandler + + # old-style class doesn't support super + class MyHandler(cls, object): + def address_string(self): + # In the future, we could provide a config option to allow reverse DNS lookup + return self.client_address[0] + + # Overload the handle function to use our own MyServerHandler + def handle(self): + """Handle a single HTTP request""" + + self.raw_requestline = self.rfile.readline() + if not self.parse_request(): # An error code has been sent, just exit + return + + handler = MyServerHandler( + self.rfile, self.wfile, self.get_stderr(), self.get_environ() + ) + handler.request_handler = self # pylint: disable=attribute-defined-outside-init + handler.run(self.server.get_app()) + + return MyHandler + + +class PatchControllerApiThread(threading.Thread): + def __init__(self): + threading.Thread.__init__(self) + self.wsgi = None + + def run(self): + host = "127.0.0.1" + port = cfg.api_port + + try: + # In order to support IPv6, server_class.address_family must be + # set to the correct address family. Because the unauthenticated + # API always uses IPv4 for the loopback address, the address_family + # variable cannot be set directly in the WSGIServer class, so a + # local subclass needs to be created for the call to make_server, + # where the correct address_family can be specified. + class server_class(simple_server.WSGIServer): + pass + + server_class.address_family = socket.AF_INET + self.wsgi = simple_server.make_server( + host, port, + app.VersionSelectorApplication(), + server_class=server_class, + handler_class=get_handler_cls()) + + self.wsgi.socket.settimeout(api_socket_timeout) + global keep_running + while keep_running: + self.wsgi.handle_request() + + # Call garbage collect after wsgi request is handled, + # to ensure any open file handles are closed in the case + # of an upload. + gc.collect() + except Exception: + # Log all exceptions + LOG.exception("Error occurred during request processing") + + global thread_death + thread_death.set() + + def kill(self): + # Must run from other thread + if self.wsgi is not None: + self.wsgi.shutdown() + + +class PatchControllerAuthApiThread(threading.Thread): + def __init__(self): + threading.Thread.__init__(self) + # LOG.info ("Initializing Authenticated API thread") + self.wsgi = None + + def run(self): + host = CONF.auth_api_bind_ip + port = CONF.auth_api_port + if host is None: + host = utils.get_versioned_address_all() + try: + # Can only launch authenticated server post-config + while not os.path.exists('/etc/platform/.initial_config_complete'): + time.sleep(5) + + # In order to support IPv6, server_class.address_family must be + # set to the correct address family. Because the unauthenticated + # API always uses IPv4 for the loopback address, the address_family + # variable cannot be set directly in the WSGIServer class, so a + # local subclass needs to be created for the call to make_server, + # where the correct address_family can be specified. 
+ class server_class(simple_server.WSGIServer): + pass + + server_class.address_family = utils.get_management_family() + self.wsgi = simple_server.make_server( + host, port, + auth_app.VersionSelectorApplication(), + server_class=server_class, + handler_class=get_handler_cls()) + + # self.wsgi.serve_forever() + self.wsgi.socket.settimeout(api_socket_timeout) + + global keep_running + while keep_running: + self.wsgi.handle_request() + + # Call garbage collect after wsgi request is handled, + # to ensure any open file handles are closed in the case + # of an upload. + gc.collect() + except Exception: + # Log all exceptions + LOG.exception("Authorized API failure: Error occurred during request processing") + + def kill(self): + # Must run from other thread + if self.wsgi is not None: + self.wsgi.shutdown() + + +class PatchControllerMainThread(threading.Thread): + def __init__(self): + threading.Thread.__init__(self) + # LOG.info ("Initializing Main thread") + + def run(self): + global pc + global thread_death + + # LOG.info ("In Main thread") + + try: + sock_in = pc.setup_socket() + + while sock_in is None: + # Check every thirty seconds? + # Once we've got a conf file, tied into packstack, + # we'll get restarted when the file is updated, + # and this should be unnecessary. + time.sleep(30) + sock_in = pc.setup_socket() + + # Ok, now we've got our socket. Let's start with a hello! + pc.socket_lock.acquire() + + hello = PatchMessageHello() + hello.send(pc.sock_out) + + hello_agent = PatchMessageHelloAgent() + hello_agent.send(pc.sock_out) + + pc.socket_lock.release() + + # Send hello every thirty seconds + hello_timeout = time.time() + 30.0 + remaining = 30 + + agent_query_conns = [] + + while True: + # Check to see if any other thread has died + if thread_death.is_set(): + LOG.info("Detected thread death. Terminating") + return + + # Check for in-service patch restart flag + if os.path.exists(insvc_patch_restart_controller): + LOG.info("In-service patch restart flag detected. 
Exiting.") + global keep_running + keep_running = False + os.remove(insvc_patch_restart_controller) + return + + inputs = [pc.sock_in] + agent_query_conns + outputs = [] + + # LOG.info("Running select, remaining=%d", remaining) + rlist, wlist, xlist = select.select(inputs, outputs, inputs, remaining) + + if (len(rlist) == 0 and + len(wlist) == 0 and + len(xlist) == 0): + # Timeout hit + pc.audit_socket() + + # LOG.info("Checking sockets") + for s in rlist: + data = '' + addr = None + msg = None + + if s == pc.sock_in: + # Receive from UDP + pc.socket_lock.acquire() + data, addr = s.recvfrom(1024) + pc.socket_lock.release() + else: + # Receive from TCP + while True: + try: + packet = s.recv(1024) + except socket.error: + LOG.exception("Socket error on recv") + data = '' + break + + if packet: + data += packet.decode() + + if data == '': + break + try: + json.loads(data) + break + except ValueError: + # Message is incomplete + continue + else: + LOG.info('End of TCP message received') + break + + if data == '': + # Connection dropped + agent_query_conns.remove(s) + s.close() + continue + + # Get the TCP endpoint address + addr = s.getpeername() + + msgdata = json.loads(data) + + # For now, discard any messages that are not msgversion==1 + if 'msgversion' in msgdata and msgdata['msgversion'] != 1: + continue + + if 'msgtype' in msgdata: + if msgdata['msgtype'] == messages.PATCHMSG_HELLO: + msg = PatchMessageHello() + elif msgdata['msgtype'] == messages.PATCHMSG_HELLO_ACK: + msg = PatchMessageHelloAck() + elif msgdata['msgtype'] == messages.PATCHMSG_SYNC_REQ: + msg = PatchMessageSyncReq() + elif msgdata['msgtype'] == messages.PATCHMSG_SYNC_COMPLETE: + msg = PatchMessageSyncComplete() + elif msgdata['msgtype'] == messages.PATCHMSG_HELLO_AGENT_ACK: + msg = PatchMessageHelloAgentAck() + elif msgdata['msgtype'] == messages.PATCHMSG_QUERY_DETAILED_RESP: + msg = PatchMessageQueryDetailedResp() + elif msgdata['msgtype'] == messages.PATCHMSG_AGENT_INSTALL_RESP: + msg = PatchMessageAgentInstallResp() + elif msgdata['msgtype'] == messages.PATCHMSG_DROP_HOST_REQ: + msg = PatchMessageDropHostReq() + + if msg is None: + msg = messages.PatchMessage() + + msg.decode(msgdata) + if s == pc.sock_in: + msg.handle(pc.sock_out, addr) + else: + msg.handle(s, addr) + + # We can drop the connection after a query response + if msg.msgtype == messages.PATCHMSG_QUERY_DETAILED_RESP and s != pc.sock_in: + agent_query_conns.remove(s) + s.shutdown(socket.SHUT_RDWR) + s.close() + + while len(stale_hosts) > 0 and len(agent_query_conns) <= 5: + ip = stale_hosts.pop() + try: + agent_sock = socket.create_connection((ip, cfg.agent_port)) + query = PatchMessageQueryDetailed() + query.send(agent_sock) + agent_query_conns.append(agent_sock) + except Exception: + # Put it back on the list + stale_hosts.append(ip) + + remaining = int(hello_timeout - time.time()) + if remaining <= 0 or remaining > 30: + hello_timeout = time.time() + 30.0 + remaining = 30 + + pc.socket_lock.acquire() + + hello = PatchMessageHello() + hello.send(pc.sock_out) + + hello_agent = PatchMessageHelloAgent() + hello_agent.send(pc.sock_out) + + pc.socket_lock.release() + + # Age out neighbours + pc.controller_neighbours_lock.acquire() + nbrs = list(pc.controller_neighbours) + for n in nbrs: + # Age out controllers after 2 minutes + if pc.controller_neighbours[n].get_age() >= 120: + LOG.info("Aging out controller %s from table", n) + del pc.controller_neighbours[n] + pc.controller_neighbours_lock.release() + + pc.hosts_lock.acquire() + nbrs = list(pc.hosts) + for 
n in nbrs: + # Age out hosts after 1 hour + if pc.hosts[n].get_age() >= 3600: + LOG.info("Aging out host %s from table", n) + del pc.hosts[n] + for patch_id in list(pc.interim_state): + if n in pc.interim_state[patch_id]: + pc.interim_state[patch_id].remove(n) + + pc.hosts_lock.release() + except Exception: + # Log all exceptions + LOG.exception("Error occurred during request processing") + thread_death.set() + + +def main(): + configure_logging() + + cfg.read_config() + + # daemon.pidlockfile.write_pid_to_pidfile(pidfile_path) + + global thread_death + thread_death = threading.Event() + + # Set the TMPDIR environment variable to /scratch so that any modules + # that create directories with tempfile will not use /tmp + os.environ['TMPDIR'] = '/scratch' + + global pc + pc = PatchController() + + LOG.info("launching") + api_thread = PatchControllerApiThread() + auth_api_thread = PatchControllerAuthApiThread() + main_thread = PatchControllerMainThread() + + api_thread.start() + auth_api_thread.start() + main_thread.start() + + thread_death.wait() + global keep_running + keep_running = False + + api_thread.join() + auth_api_thread.join() + main_thread.join() diff --git a/sw-patch/cgcs-patch/cgcs_patch/patch_functions.py b/sw-patch/cgcs-patch/cgcs_patch/patch_functions.py new file mode 100644 index 00000000..add102e8 --- /dev/null +++ b/sw-patch/cgcs-patch/cgcs_patch/patch_functions.py @@ -0,0 +1,1440 @@ +""" +Copyright (c) 2014-2019 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +import getopt +import glob +import hashlib +import logging +import os +import platform +import re +import shutil +import subprocess +import sys +import tarfile +import tempfile +from lxml import etree as ElementTree +from xml.dom import minidom + +from cgcs_patch.patch_verify import verify_files +from cgcs_patch.patch_verify import cert_type_all +from cgcs_patch.patch_signing import sign_files +from cgcs_patch.exceptions import MetadataFail +from cgcs_patch.exceptions import PatchFail +from cgcs_patch.exceptions import PatchValidationFailure +from cgcs_patch.exceptions import PatchMismatchFailure + +import cgcs_patch.constants as constants +import rpm + +try: + # The tsconfig module is only available at runtime + from tsconfig.tsconfig import SW_VERSION +except Exception: + SW_VERSION = "unknown" + +# Constants +patch_dir = constants.PATCH_STORAGE_DIR +avail_dir = "%s/metadata/available" % patch_dir +applied_dir = "%s/metadata/applied" % patch_dir +committed_dir = "%s/metadata/committed" % patch_dir +semantics_dir = "%s/semantics" % patch_dir + +repo_root_dir = "/var/www/pages/updates" +repo_dir = {SW_VERSION: "%s/rel-%s" % (repo_root_dir, SW_VERSION)} + +root_package_dir = "%s/packages" % patch_dir +package_dir = {SW_VERSION: "%s/%s" % (root_package_dir, SW_VERSION)} + +logfile = "/var/log/patching.log" +apilogfile = "/var/log/patching-api.log" + +LOG = logging.getLogger('main_logger') +auditLOG = logging.getLogger('audit_logger') +audit_log_msg_prefix = 'User: sysadmin/admin Action: ' + +detached_signature_file = "signature.v2" + + +def handle_exception(exc_type, exc_value, exc_traceback): + """ + Exception handler to log any uncaught exceptions + """ + LOG.error("Uncaught exception", + exc_info=(exc_type, exc_value, exc_traceback)) + sys.__excepthook__(exc_type, exc_value, exc_traceback) + + +def configure_logging(logtofile=True, level=logging.INFO, dnf_log=False): + if logtofile: + my_exec = os.path.basename(sys.argv[0]) + + log_format = '%(asctime)s: ' \ + + my_exec + '[%(process)s]: ' \ 
+ + '%(filename)s(%(lineno)s): ' \ + + '%(levelname)s: %(message)s' + + formatter = logging.Formatter(log_format, datefmt="%FT%T") + + LOG.setLevel(level) + main_log_handler = logging.FileHandler(logfile) + main_log_handler.setFormatter(formatter) + LOG.addHandler(main_log_handler) + + if dnf_log: + dnf_logger = logging.getLogger('dnf') + dnf_logger.addHandler(main_log_handler) + + try: + os.chmod(logfile, 0o640) + except Exception: + pass + + auditLOG.setLevel(level) + api_log_handler = logging.FileHandler(apilogfile) + api_log_handler.setFormatter(formatter) + auditLOG.addHandler(api_log_handler) + try: + os.chmod(apilogfile, 0o640) + except Exception: + pass + + # Log uncaught exceptions to file + sys.excepthook = handle_exception + else: + logging.basicConfig(level=level) + + +def audit_log_info(msg=''): + msg = audit_log_msg_prefix + msg + auditLOG.info(msg) + + +def get_md5(path): + """ + Utility function for generating the md5sum of a file + :param path: Path to file + """ + md5 = hashlib.md5() + block_size = 8192 + with open(path, 'rb') as f: + for chunk in iter(lambda: f.read(block_size), b''): + md5.update(chunk) + return int(md5.hexdigest(), 16) + + +def add_text_tag_to_xml(parent, + name, + text): + """ + Utility function for adding a text tag to an XML object + :param parent: Parent element + :param name: Element name + :param text: Text value + :return:The created element + """ + tag = ElementTree.SubElement(parent, name) + tag.text = text + return tag + + +def write_xml_file(top, + fname): + # Generate the file, in a readable format if possible + outfile = open(fname, 'w') + rough_xml = ElementTree.tostring(top) + if platform.python_version() == "2.7.2": + # The 2.7.2 toprettyxml() function unnecessarily indents + # childless tags, adding whitespace. In the case of the + # yum comps.xml file, it makes the file unusable, so just + # write the rough xml + outfile.write(rough_xml) + else: + outfile.write(minidom.parseString(rough_xml).toprettyxml(indent=" ")) + + +def parse_rpm_filename(filename): + + # Drop the extension + basename = os.path.splitext(os.path.basename(filename))[0] + + # RPM name format is: + # [:]--. 
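+ # For illustration, assuming the conventional [<epoch>:]<name>-<version>-<release>.<arch>
+ # layout, a hypothetical filename such as "3:foo-1.2.3-r5.x86_64.rpm" would be
+ # parsed into pkgname "foo", arch "x86_64" and PackageVersion("3", "1.2.3", "r5"),
+ # which supports rpm-style ordering via rpm.labelCompare() (see PackageVersion below).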
+ # + pattern = re.compile(r'((([^:]):)?)(.*)-([^-]+)-(.*)\.([^\.]*)$') + + m = pattern.match(basename) + + if m is None: + raise ValueError("Filename does not match expected RPM format: %s" % basename) + + epoch = m.group(3) + pkgname = m.group(4) + version = m.group(5) + release = m.group(6) + arch = m.group(7) + + return (pkgname, arch, PackageVersion(epoch, version, release)) + + +def parse_pkgver(pkgver): + # Version format is: + # [:]- + # + pattern = re.compile(r'((([^:]):)?)([^-]+)((-(.*))?)$') + + m = pattern.match(pkgver) + + if m is None: + raise ValueError("Package version does not match expected format: %s" % pkgver) + + epoch = m.group(3) + version = m.group(4) + release = m.group(7) + + return (epoch, version, release) + + +def get_release_from_patch(patchfile): + rel = "" + try: + cmd = "tar xf %s -O metadata.tar | tar x -O" % patchfile + metadata_str = subprocess.check_output(cmd, shell=True) + root = ElementTree.fromstring(metadata_str) + # Extract release version + rel = root.findtext('sw_version') + except subprocess.CalledProcessError as e: + LOG.error("Failed to run tar command") + LOG.error("Command output: %s", e.output) + raise e + except Exception as e: + print("Failed to parse patch software version") + raise e + return rel + + +class PackageVersion(object): + """ + The PackageVersion class provides a structure for RPM version information, + along with suport for comparison operators. + """ + def __init__(self, epoch, version, release): + self.epoch = epoch + self.version = version + self.release = release + + def __le__(self, other): + """ + This function is called by comparison operators to compare + two versions. The rpm.labelCompare() function takes two versions, + specified in a list structure, and returns -1, 0, or 1. + """ + out = rpm.labelCompare((self.epoch, self.version, self.release), + (other.epoch, other.version, other.release)) + if out == 1: + return False + return True + + def __eq__(self, other): + out = rpm.labelCompare((self.epoch, self.version, self.release), + (other.epoch, other.version, other.release)) + if out == 0: + return True + return False + + def __ne__(self, other): + out = rpm.labelCompare((self.epoch, self.version, self.release), + (other.epoch, other.version, other.release)) + if out == 0: + return False + return True + + def __gt__(self, other): + out = rpm.labelCompare((self.epoch, self.version, self.release), + (other.epoch, other.version, other.release)) + if out == 1: + return True + return False + + def __lt__(self, other): + out = rpm.labelCompare((self.epoch, self.version, self.release), + (other.epoch, other.version, other.release)) + if out == -1: + return True + return False + + def __ge__(self, other): + out = rpm.labelCompare((self.epoch, self.version, self.release), + (other.epoch, other.version, other.release)) + if out == -1: + return False + return True + + def __str__(self): + """ + This function is called by str() and print to compute the + informal string representation of a PackageVersion object. 
+ """ + prefix = "" + if self.epoch is not None and self.epoch != '': + # Prefix the version with epoch, if specified + prefix = "%s:" % self.epoch + + return "%s%s-%s" % (prefix, self.version, self.release) + + def __hash__(self): + return hash(self.__str__()) + + def generate_rpm_filename(self, pkgname, arch): + prefix = "" + if self.epoch is not None and self.epoch != '': + # Prefix the version with epoch, if specified + prefix = "%s:" % self.epoch + + return "%s%s-%s-%s.%s.rpm" % (prefix, pkgname, self.version, self.release, arch) + + +class BasePackageData(object): + """ + Information about the base package data provided by the load + """ + def __init__(self): + self.pkgs = {} + self.loaddirs() + + def loaddirs(self): + # Load up available package info + base_dir = "/var/www/pages/feed" + if not os.path.exists(base_dir): + # Return, since this could be running off-box + return + + # Look for release dirs + for reldir in glob.glob("%s/rel-*" % base_dir): + pattern = re.compile("%s/rel-(.*)" % base_dir) + m = pattern.match(reldir) + sw_rel = m.group(1) + + if sw_rel in self.pkgs: + # We've already parsed this dir once + continue + + self.pkgs[sw_rel] = {} + for _root, _dirs, files in os.walk("%s/Packages" % reldir): # pylint: disable=unused-variable + for name in files: + if name.endswith(".rpm"): + try: + pkgname, arch, pkgver = parse_rpm_filename(name) + except ValueError as e: + raise e + + if pkgname not in self.pkgs[sw_rel]: + self.pkgs[sw_rel][pkgname] = {} + self.pkgs[sw_rel][pkgname][arch] = pkgver + + # Clean up deleted data + for sw_rel in self.pkgs: + if not os.path.exists("%s/rel-%s" % (base_dir, sw_rel)): + del self.pkgs[sw_rel] + + def check_release(self, sw_rel): + return (sw_rel in self.pkgs) + + def find_version(self, sw_rel, pkgname, arch): + if sw_rel not in self.pkgs or \ + pkgname not in self.pkgs[sw_rel] or \ + arch not in self.pkgs[sw_rel][pkgname]: + return None + + return self.pkgs[sw_rel][pkgname][arch] + + +class PatchData(object): + """ + Aggregated patch data + """ + def __init__(self): + # + # The groups dict provides information about targetted (new) packages, + # identifying the software group in which to include the package. + # This allows the patch agent to identify new packages to install + # (or erase) as appropriate. + # This dict is nested as follows: + # [ patch_sw_version ] - Release associated with the patch + # [ group/ptype ] - Group (personality) in which the pkg belongs + # [ patch_id ] + # [ package ] + # + self.groups = {} + + # + # The metadata dict stores all metadata associated with a patch. + # This dict is keyed on patch_id, with metadata for each patch stored + # in a nested dict. (See parse_metadata method for more info) + # + self.metadata = {} + + # + # The contents dict stores the lists of RPMs provided by each patch, + # indexed by patch_id. + # + self.contents = {} + + # + # The content_versions dict provides a simple list of packages and their + # versions for each patch, used by the patch controller in determining + # patch states. + # content_versions[patch_id][pkgname] = "%s-%s" % (pkgver.version, pkgver.release) + # + self.content_versions = {} + + # + # The package_versions dict provides a mapping of packages to the patch_id, + # including the package arch. + # [ patch_sw_version ] + # [ pkgname ] + # [ arch ] + # [ pkgver ] + # -> patch_id + self.package_versions = {} + + # + # The semantics dict stores the lists of semantic actions provided by each patch, + # indexed by patch_id. 
+ # + self.semantics = {} + + def add_patch(self, patch_id, new_patch): + # We can just use "update" on these dicts because they are indexed by patch_id + self.metadata.update(new_patch.metadata) + self.contents.update(new_patch.contents) + self.content_versions.update(new_patch.content_versions) + self.semantics.update(new_patch.semantics) + + # Need to recursively update package_version and keys dicts + for patch_sw_version in list(new_patch.package_versions): + if patch_sw_version not in self.package_versions: + self.package_versions[patch_sw_version] = {} + for pkgname in list(new_patch.package_versions[patch_sw_version]): + if pkgname not in self.package_versions[patch_sw_version]: + self.package_versions[patch_sw_version][pkgname] = {} + for arch in list(new_patch.package_versions[patch_sw_version][pkgname]): + if arch not in self.package_versions[patch_sw_version][pkgname]: + self.package_versions[patch_sw_version][pkgname][arch] = {} + for pkgver in list(new_patch.package_versions[patch_sw_version][pkgname][arch]): + self.package_versions[patch_sw_version][pkgname][arch][pkgver] = patch_id + + for patch_sw_version in list(new_patch.groups): + if patch_sw_version not in self.groups: + self.groups[patch_sw_version] = {} + for ptype in list(new_patch.groups[patch_sw_version]): + if ptype not in self.groups[patch_sw_version]: + self.groups[patch_sw_version][ptype] = {} + for patch_id in list(new_patch.groups[patch_sw_version][ptype]): + if patch_id not in self.groups[patch_sw_version][ptype]: + self.groups[patch_sw_version][ptype][patch_id] = {} + self.groups[patch_sw_version][ptype][patch_id].update( + new_patch.groups[patch_sw_version][ptype][patch_id]) + + def update_patch(self, updated_patch): + for patch_id in list(updated_patch.metadata): + # Update all fields except repostate + cur_repostate = self.metadata[patch_id]['repostate'] + self.metadata[patch_id].update(updated_patch.metadata[patch_id]) + self.metadata[patch_id]['repostate'] = cur_repostate + + def delete_patch(self, patch_id): + for patch_sw_version in list(self.package_versions): + for pkgname in list(self.package_versions[patch_sw_version]): + for arch in list(self.package_versions[patch_sw_version][pkgname]): + for pkgver in list(self.package_versions[patch_sw_version][pkgname][arch]): + if self.package_versions[patch_sw_version][pkgname][arch][pkgver] == patch_id: + del self.package_versions[patch_sw_version][pkgname][arch][pkgver] + if len(self.package_versions[patch_sw_version][pkgname][arch]) == 0: + del self.package_versions[patch_sw_version][pkgname][arch] + if len(self.package_versions[patch_sw_version][pkgname]) == 0: + del self.package_versions[patch_sw_version][pkgname] + if len(self.package_versions[patch_sw_version]) == 0: + del self.package_versions[patch_sw_version] + + for patch_sw_version in list(self.groups): + for ptype in list(self.groups[patch_sw_version]): + if patch_id in self.groups[patch_sw_version][ptype]: + del self.groups[patch_sw_version][ptype][patch_id] + + del self.content_versions[patch_id] + del self.contents[patch_id] + del self.semantics[patch_id] + del self.metadata[patch_id] + + @staticmethod + def modify_metadata_text(filename, + key, + value): + """ + Open an xml file, find first element matching 'key' and replace the text with 'value' + """ + new_filename = "%s.new" % filename + tree = ElementTree.parse(filename) + + # Prevent a proliferation of carriage returns when we write this XML back out to file. 
+ for e in tree.getiterator(): + if e.text is not None: + e.text = e.text.rstrip() + if e.tail is not None: + e.tail = e.tail.rstrip() + + root = tree.getroot() + + # Make the substitution + e = root.find(key) + if e is None: + msg = "modify_metadata_text: failed to find tag '%s'" % key + LOG.error(msg) + raise PatchValidationFailure(msg) + e.text = value + + # write the modified file + outfile = open(new_filename, 'w') + rough_xml = ElementTree.tostring(root, 'utf-8') + if platform.python_version() == "2.7.2": + # The 2.7.2 toprettyxml() function unnecessarily indents + # childless tags, adding whitespace. In the case of the + # yum comps.xml file, it makes the file unusable, so just + # write the rough xml + outfile.write(rough_xml) + else: + outfile.write(minidom.parseString(rough_xml).toprettyxml(indent=" ")) + outfile.close() + os.rename(new_filename, filename) + + def parse_metadata(self, + filename, + repostate=None): + """ + Parse an individual patch metadata XML file + :param filename: XML file + :param repostate: Indicates Applied, Available, or Committed + :return: Patch ID + """ + tree = ElementTree.parse(filename) + root = tree.getroot() + + # + # + # PATCH_0001 + # Brief description + # Longer description + # + # + # Dev + # + # + # + # pkgA + # pkgB + # + # + # pkgB + # + # + # + + patch_id = root.findtext("id") + if patch_id is None: + LOG.error("Patch metadata contains no id tag") + return None + + self.metadata[patch_id] = {} + + self.metadata[patch_id]["repostate"] = repostate + + # Patch state is unknown at this point + self.metadata[patch_id]["patchstate"] = "n/a" + + self.metadata[patch_id]["sw_version"] = "unknown" + + for key in ["status", + "unremovable", + "sw_version", + "summary", + "description", + "install_instructions", + "warnings", + "apply_active_release_only"]: + value = root.findtext(key) + if value is not None: + self.metadata[patch_id][key] = value + + # Default reboot_required to Y + rr_value = root.findtext("reboot_required") + if rr_value is None or rr_value != "N": + self.metadata[patch_id]["reboot_required"] = "Y" + else: + self.metadata[patch_id]["reboot_required"] = "N" + + patch_sw_version = self.metadata[patch_id]["sw_version"] + global package_dir + if patch_sw_version not in package_dir: + package_dir[patch_sw_version] = "%s/%s" % (root_package_dir, patch_sw_version) + repo_dir[patch_sw_version] = "%s/rel-%s" % (repo_root_dir, patch_sw_version) + + # Specifying personality for given packages is optional, + # intended to allow a patch to include a new package. + # For each configured personality type, create a software group. 
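+ # A personality element is assumed to look roughly like
+ #   <personality type="controller">
+ #     <package>pkgA</package>
+ #   </personality>
+ # which places pkgA in the "controller" software group for this patch.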
+ for personality in root.findall("personality"): + ptype = personality.attrib["type"] + tag = "personality-%s" % ptype + self.metadata[patch_id][tag] = list() + for pkg in personality.findall("package"): + self.metadata[patch_id][tag].append(pkg.text) + if patch_sw_version not in self.groups: + self.groups[patch_sw_version] = {} + if ptype not in self.groups[patch_sw_version]: + self.groups[patch_sw_version][ptype] = {} + if patch_id not in self.groups[patch_sw_version][ptype]: + self.groups[patch_sw_version][ptype][patch_id] = {} + self.groups[patch_sw_version][ptype][patch_id][pkg.text] = True + + self.metadata[patch_id]["requires"] = [] + for req in root.findall("requires"): + for req_patch in req.findall("req_patch_id"): + self.metadata[patch_id]["requires"].append(req_patch.text) + + self.contents[patch_id] = list() + self.content_versions[patch_id] = {} + + for content in root.findall("contents"): + for rpmname in content.findall("rpm"): + try: + pkgname, arch, pkgver = parse_rpm_filename(rpmname.text) + except ValueError as e: + LOG.exception(e) + return None + + self.contents[patch_id].append(rpmname.text) + self.content_versions[patch_id][pkgname] = "%s-%s" % (pkgver.version, pkgver.release) + + if patch_sw_version not in self.package_versions: + self.package_versions[patch_sw_version] = {} + if pkgname not in self.package_versions[patch_sw_version]: + self.package_versions[patch_sw_version][pkgname] = {} + if arch not in self.package_versions[patch_sw_version][pkgname]: + self.package_versions[patch_sw_version][pkgname][arch] = {} + + self.package_versions[patch_sw_version][pkgname][arch][pkgver] = patch_id + + self.semantics[patch_id] = list() + for semantics in root.findall("semantics"): + for action in semantics.findall("action"): + self.semantics[patch_id].append(action.text) + + return patch_id + + def find_patch_with_pkgver(self, sw_ver, pkgname, arch, pkgver): + if sw_ver not in self.package_versions or \ + pkgname not in self.package_versions[sw_ver] or \ + arch not in self.package_versions[sw_ver][pkgname] or \ + pkgver not in self.package_versions[sw_ver][pkgname][arch]: + return None + + return self.package_versions[sw_ver][pkgname][arch][pkgver] + + def load_all_metadata(self, + loaddir, + repostate=None): + """ + Parse all metadata files in the specified dir + :return: + """ + for fname in glob.glob("%s/*.xml" % loaddir): + self.parse_metadata(fname, repostate) + + def load_all(self): + # Reset the data + self.__init__() + self.load_all_metadata(applied_dir, repostate=constants.APPLIED) + self.load_all_metadata(avail_dir, repostate=constants.AVAILABLE) + self.load_all_metadata(committed_dir, repostate=constants.COMMITTED) + + def gen_release_groups_xml(self, sw_version, output_dir=None): + """ + Generate the groups configuration file for the patching repo + """ + if output_dir is None: + output_dir = repo_dir[sw_version] + + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + fname = "%s/comps.xml" % output_dir + top = ElementTree.Element('comps') + if sw_version in self.groups: + for groupname in sorted(list(self.groups[sw_version])): + if self.groups[sw_version][groupname]: + group = ElementTree.SubElement(top, 'group') + + add_text_tag_to_xml(group, 'id', + "updates-%s" % groupname) + add_text_tag_to_xml(group, 'default', + "false") + add_text_tag_to_xml(group, 'uservisible', + "true") + add_text_tag_to_xml(group, 'display_order', + "1024") + add_text_tag_to_xml(group, 'name', + "updates-%s" % groupname) + add_text_tag_to_xml(group, 'description', + 
"Patches for %s" % groupname) + + package_element = ElementTree.SubElement(group, + 'packagelist') + + for patch_id in sorted(self.groups[sw_version][groupname]): + if self.metadata[patch_id]["repostate"] == constants.APPLIED \ + or self.metadata[patch_id]["repostate"] == constants.COMMITTED: + for pkg in sorted(self.groups[sw_version][groupname][patch_id]): + tag = ElementTree.SubElement(package_element, + 'packagereq', + type="mandatory") + tag.text = pkg + + write_xml_file(top, fname) + + def gen_groups_xml(self): + for ver in repo_dir: + self.gen_release_groups_xml(ver) + + def query_line(self, + patch_id, + index): + if index is None: + return None + + if index == "contents": + return self.contents[patch_id] + + if index not in self.metadata[patch_id]: + return None + + value = self.metadata[patch_id][index] + return value + + +class PatchMetadata(object): + """ + Creating metadata for a single patch + """ + def __init__(self): + self.id = None + self.sw_version = None + self.summary = None + self.description = None + self.install_instructions = None + self.warnings = None + self.status = None + self.unremovable = None + self.reboot_required = None + self.apply_active_release_only = None + self.requires = [] + self.groups = {} + self.contents = {} + self.semantics = [] + + def add_package(self, + groupname, + pkg): + """ + Add a package to a particular group + :param groupname: Yum software group, eg. "controller" + :param pkg: Name of the package + :return: + """ + if groupname not in self.groups: + self.groups[groupname] = {} + + self.groups[groupname][pkg] = True + + def add_rpm(self, + fname): + """ + Add an RPM to the patch + :param fname: RPM filename + :return: + """ + rpmname = os.path.basename(fname) + self.contents[rpmname] = True + + def add_semantic(self, + action): + """ + Add a semantic check to the patch + :param action: semantic action + :return: + """ + self.semantics.append(action) + + def gen_xml(self, + fname="metadata.xml"): + """ + Generate patch metadata XML file + :param fname: Path to output file + :return: + """ + top = ElementTree.Element('patch') + + add_text_tag_to_xml(top, 'id', + self.id) + add_text_tag_to_xml(top, 'sw_version', + self.sw_version) + add_text_tag_to_xml(top, 'summary', + self.summary) + add_text_tag_to_xml(top, 'description', + self.description) + add_text_tag_to_xml(top, 'install_instructions', + self.install_instructions) + add_text_tag_to_xml(top, 'warnings', + self.warnings) + add_text_tag_to_xml(top, 'status', + self.status) + add_text_tag_to_xml(top, 'unremovable', + self.unremovable) + add_text_tag_to_xml(top, 'reboot_required', + self.reboot_required) + add_text_tag_to_xml(top, 'apply_active_release_only', + self.apply_active_release_only) + + for groupname in sorted(list(self.groups)): + if self.groups[groupname]: + group = ElementTree.SubElement(top, + 'personality', + type=groupname) + + for pkg in sorted(self.groups[groupname]): + add_text_tag_to_xml(group, 'package', pkg) + + content = ElementTree.SubElement(top, 'contents') + for rpmname in sorted(list(self.contents)): + add_text_tag_to_xml(content, 'rpm', rpmname) + + req = ElementTree.SubElement(top, 'requires') + for req_patch in sorted(self.requires): + add_text_tag_to_xml(req, 'req_patch_id', req_patch) + + semantics = ElementTree.SubElement(top, 'semantics') + for action in sorted(self.semantics): + add_text_tag_to_xml(semantics, 'action', action) + + write_xml_file(top, fname) + + +class PatchFile(object): + """ + Patch file + """ + def __init__(self): + self.meta = 
PatchMetadata() + self.rpmlist = {} + self.semantics = {} + + def add_rpm(self, + fname, + personality=None): + """ + Add an RPM to the patch + :param fname: Path to RPM + :param personality: Optional: Node type to which + the package belongs. Can be a + string or a list of strings. + :return: + """ + # Add the RPM to the metadata + self.meta.add_rpm(fname) + + # Add the RPM to the patch + self.rpmlist[os.path.abspath(fname)] = True + + if personality is not None: + # Get the package name from the RPM itself, + # and add it to the appropriate group(s) + pkgname = subprocess.check_output(["rpm", + "-qp", + "--queryformat", + "%{NAME}", + "--nosignature", + fname]) + if isinstance(personality, list): + for p in personality: + self.meta.add_package(p, pkgname) + elif isinstance(personality, str): + self.meta.add_package(personality, pkgname) + + def add_semantic(self, + action, + fname): + """ + Add a semantic check to the patch + :param action: Semantic check type + :param fname: Path to semantic check + :return: + """ + # Add the semantic to the metadata + self.meta.add_semantic(action) + + self.semantics[action] = os.path.abspath(fname) + + def gen_patch(self, outdir): + """ + Generate the patch file, named PATCHID.patch + :param outdir: Output directory for the patch + :return: + """ + if self.meta.sw_version is None or self.meta.sw_version == '': + raise MetadataFail("The release version must be specified in the sw_version field") + + if not self.rpmlist: + raise MetadataFail("Cannot generate empty patch") + + patchfile = "%s/%s.patch" % (outdir, self.meta.id) + + # Create a temporary working directory + tmpdir = tempfile.mkdtemp(prefix="patch_") + + # Save the current directory, so we can chdir back after + orig_wd = os.getcwd() + + # Change to the tmpdir + os.chdir(tmpdir) + + # Copy RPM files to tmpdir + for rpmfile in list(self.rpmlist): + shutil.copy(rpmfile, tmpdir) + + # add file signatures to RPMs + try: + subprocess.check_call(["sign-rpms", "-d", tmpdir]) + except subprocess.CalledProcessError as e: + print("Failed to to add file signatures to RPMs. Call to sign-rpms process returned non-zero exit status %i" % e.returncode) + os.chdir(orig_wd) + shutil.rmtree(tmpdir) + raise SystemExit(e.returncode) + + # generate tar file + tar = tarfile.open("software.tar", "w") + for rpmfile in list(self.rpmlist): + tar.add(os.path.basename(rpmfile)) + tar.close() + + # Copy semantics to tmpdir, if any + if len(self.semantics) > 0: + tar = tarfile.open("semantics.tar", "w") + for action in list(self.semantics): + os.mkdir(action, 0o755) + sname = os.path.join(action, self.meta.id) + shutil.copy(self.semantics[action], sname) + tar.add(sname) + tar.close() + + # Generate the metadata xml file + self.meta.gen_xml("metadata.xml") + + # assemble the patch + PatchFile.write_patch(patchfile) + + # Change back to original working dir + os.chdir(orig_wd) + + shutil.rmtree(tmpdir) + + print("Patch is %s" % patchfile) + + @staticmethod + def write_patch(patchfile, cert_type=None): + # Write the patch file. Assumes we are in a directory containing metadata.tar, and software.tar. 
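+ # The resulting patch file is a gzipped tar holding metadata.tar,
+ # software.tar, optionally semantics.tar, the "signature" checksum file
+ # and the detached "signature.v2" signature produced by sign_files().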
+ + # Generate the metadata tarfile + tar = tarfile.open("metadata.tar", "w") + tar.add("metadata.xml") + tar.close() + + filelist = ["metadata.tar", "software.tar"] + if os.path.exists("semantics.tar"): + filelist.append("semantics.tar") + + # Generate the signature file + sig = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + for f in filelist: + sig ^= get_md5(f) + + sigfile = open("signature", "w") + sigfile.write("%x" % sig) + sigfile.close() + + # Generate the detached signature + # + # Note: if cert_type requests a formal signature, but the signing key + # is not found, we'll instead sign with the 'dev' key and + # need_resign_with_formal is set to True. + need_resign_with_formal = sign_files( + filelist, + detached_signature_file, + cert_type=cert_type) + + # Create the patch + tar = tarfile.open(patchfile, "w:gz") + for f in filelist: + tar.add(f) + tar.add("signature") + tar.add(detached_signature_file) + tar.close() + + if need_resign_with_formal: + try: + # Try to ensure "sign_patch_formal.sh" will be in our PATH + if 'MY_REPO' in os.environ: + os.environ['PATH'] += os.pathsep + os.environ['MY_REPO'] + "/build-tools" + if 'MY_PATCH_REPO' in os.environ: + os.environ['PATH'] += os.pathsep + os.environ['MY_PATCH_REPO'] + "/build-tools" + + # Note: This can fail if the user is not authorized to sign with the formal key. + subprocess.check_call(["sign_patch_formal.sh", patchfile]) + except subprocess.CalledProcessError as e: + print("Failed to sign official patch. Call to sign_patch_formal.sh process returned non-zero exit status %i" % e.returncode) + raise SystemExit(e.returncode) + + @staticmethod + def read_patch(path, metadata_only=False, cert_type=None): + # We want to enable signature checking by default + # Note: cert_type=None is required if we are to enforce 'no dev patches on a formal load' rule. 
+ + # Open the patch file and extract the contents to the current dir + tar = tarfile.open(path, "r:gz") + + filelist = ["metadata.tar", "software.tar"] + if "semantics.tar" in [f.name for f in tar.getmembers()]: + filelist.append("semantics.tar") + + for f in filelist: + tar.extract(f) + + tar.extract("signature") + try: + tar.extract(detached_signature_file) + except KeyError: + msg = "Patch has not been signed" + LOG.warning(msg) + + # Verify the data integrity signature first + sigfile = open("signature", "r") + sig = int(sigfile.read(), 16) + sigfile.close() + + expected_sig = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + for f in filelist: + sig ^= get_md5(f) + + if sig != expected_sig: + msg = "Patch failed verification" + LOG.error(msg) + raise PatchValidationFailure(msg) + + # Verify detached signature + if os.path.exists(detached_signature_file): + sig_valid = verify_files( + filelist, + detached_signature_file, + cert_type=cert_type) + if sig_valid is True: + msg = "Signature verified, patch has been signed" + if cert_type is None: + LOG.info(msg) + else: + msg = "Signature check failed" + if cert_type is None: + LOG.error(msg) + raise PatchValidationFailure(msg) + else: + msg = "Patch has not been signed" + if cert_type is None: + LOG.error(msg) + raise PatchValidationFailure(msg) + + tar = tarfile.open("metadata.tar") + tar.extractall() + + if not metadata_only: + tar = tarfile.open("software.tar") + tar.extractall() + + if os.path.exists("semantics.tar"): + tar = tarfile.open("semantics.tar") + tar.extractall() + + @staticmethod + def query_patch(patch, field=None): + + abs_patch = os.path.abspath(patch) + + # Create a temporary working directory + tmpdir = tempfile.mkdtemp(prefix="patch_") + + # Save the current directory, so we can chdir back after + orig_wd = os.getcwd() + + # Change to the tmpdir + os.chdir(tmpdir) + + r = {} + + try: + if field is None or field == "cert": + # Need to determine the cert_type + for cert_type_str in cert_type_all: + try: + PatchFile.read_patch(abs_patch, metadata_only=True, cert_type=[cert_type_str]) + except PatchValidationFailure: + pass + else: + # Successfully opened the file for reading, and we have discovered the cert_type + r["cert"] = cert_type_str + break + + if "cert" not in r: + # If cert is unknown, then file is not yet open for reading. + # Try to open it for reading now, using all available keys. + # We can't omit cert_type, or pass None, because that will trigger the code + # path used by installed product, in which dev keys are not accepted unless + # a magic file exists. 
+ PatchFile.read_patch(abs_patch, metadata_only=True, cert_type=cert_type_all) + + thispatch = PatchData() + patch_id = thispatch.parse_metadata("metadata.xml") + + if field is None or field == "id": + r["id"] = patch_id + + if field is None: + for f in ["status", "sw_version", "unremovable", "summary", + "description", "install_instructions", + "warnings", "reboot_required", "apply_active_release_only"]: + r[f] = thispatch.query_line(patch_id, f) + else: + if field not in ['id', 'cert']: + r[field] = thispatch.query_line(patch_id, field) + + except PatchValidationFailure as e: + msg = "Patch validation failed during extraction" + LOG.exception(msg) + raise e + except PatchMismatchFailure as e: + msg = "Patch Mismatch during extraction" + LOG.exception(msg) + raise e + except tarfile.TarError: + msg = "Failed during patch extraction" + LOG.exception(msg) + raise PatchValidationFailure(msg) + finally: + # Change back to original working dir + os.chdir(orig_wd) + shutil.rmtree(tmpdir) + + return r + + @staticmethod + def modify_patch(patch, + key, + value): + rc = False + abs_patch = os.path.abspath(patch) + new_abs_patch = "%s.new" % abs_patch + + # Create a temporary working directory + tmpdir = tempfile.mkdtemp(prefix="patch_") + + # Save the current directory, so we can chdir back after + orig_wd = os.getcwd() + + # Change to the tmpdir + os.chdir(tmpdir) + + try: + cert_type = None + meta_data = PatchFile.query_patch(abs_patch) + if 'cert' in meta_data: + cert_type = meta_data['cert'] + PatchFile.read_patch(abs_patch, metadata_only=True, cert_type=cert_type) + PatchData.modify_metadata_text("metadata.xml", key, value) + PatchFile.write_patch(new_abs_patch, cert_type=cert_type) + os.rename(new_abs_patch, abs_patch) + rc = True + + except PatchValidationFailure as e: + raise e + except PatchMismatchFailure as e: + raise e + except tarfile.TarError: + msg = "Failed during patch extraction" + LOG.exception(msg) + raise PatchValidationFailure(msg) + except Exception as e: + template = "An exception of type {0} occurred. 
Arguments:\n{1!r}" + message = template.format(type(e).__name__, e.args) + print(message) + finally: + # Change back to original working dir + os.chdir(orig_wd) + shutil.rmtree(tmpdir) + + return rc + + @staticmethod + def extract_patch(patch, + metadata_dir=avail_dir, + metadata_only=False, + existing_content=None, + allpatches=None, + base_pkgdata=None): + """ + Extract the metadata and patch contents + :param patch: Patch file + :param metadata_dir: Directory to store the metadata XML file + :return: + """ + thispatch = None + + abs_patch = os.path.abspath(patch) + abs_metadata_dir = os.path.abspath(metadata_dir) + + # Create a temporary working directory + tmpdir = tempfile.mkdtemp(prefix="patch_") + + # Save the current directory, so we can chdir back after + orig_wd = os.getcwd() + + # Change to the tmpdir + os.chdir(tmpdir) + + try: + # Open the patch file and extract the contents to the tmpdir + PatchFile.read_patch(abs_patch, metadata_only) + + thispatch = PatchData() + patch_id = thispatch.parse_metadata("metadata.xml") + + if patch_id is None: + print("Failed to import patch") + # Change back to original working dir + os.chdir(orig_wd) + shutil.rmtree(tmpdir) + return None + + if not metadata_only and base_pkgdata is not None: + # Run version validation tests first + patch_sw_version = thispatch.metadata[patch_id]["sw_version"] + if not base_pkgdata.check_release(patch_sw_version): + msg = "Patch %s software release (%s) is not installed" % (patch_id, patch_sw_version) + LOG.exception(msg) + raise PatchValidationFailure(msg) + + for rpmname in thispatch.contents[patch_id]: + pkgname, arch, pkgver = parse_rpm_filename(rpmname) + base_pkgver = base_pkgdata.find_version(patch_sw_version, pkgname, arch) + if base_pkgver is not None: + # Compare the patch RPM's version against the base + if pkgver <= base_pkgver: + msg = "RPM %s in patch %s must be higher version than original (%s)" % \ + (rpmname, patch_id, base_pkgver) + LOG.exception(msg) + raise PatchValidationFailure(msg) + + if allpatches is not None: + # Compare the patch RPM's version against other patches + other = allpatches.find_patch_with_pkgver(patch_sw_version, pkgname, arch, pkgver) + if other is not None: + msg = "Patch %s contains rpm %s, which is already provided by patch %s" % \ + (patch_id, rpmname, other) + LOG.exception(msg) + raise PatchValidationFailure(msg) + + if metadata_only: + # This is a re-import. 
Ensure the content lines up + if existing_content is None \ + or len(existing_content) != len(thispatch.contents[patch_id]): + msg = "Contents of re-imported patch do not match" + LOG.exception(msg) + raise PatchMismatchFailure(msg) + for rpmname in existing_content: + if rpmname not in thispatch.contents[patch_id]: + msg = "Contents of re-imported patch do not match" + LOG.exception(msg) + raise PatchMismatchFailure(msg) + + shutil.move("metadata.xml", + "%s/%s-metadata.xml" % (abs_metadata_dir, patch_id)) + + if not metadata_only: + for rpmname in thispatch.contents[patch_id]: + patch_sw_version = thispatch.metadata[patch_id]["sw_version"] + rpm_dir = package_dir[patch_sw_version] + if not os.path.exists(rpm_dir): + os.makedirs(rpm_dir) + shutil.move(rpmname, "%s/" % rpm_dir) + + for action in constants.SEMANTIC_ACTIONS: + action_file = os.path.join(action, patch_id) + if not os.path.exists(action_file): + continue + + action_dir = os.path.join(semantics_dir, action) + if not os.path.exists(action_dir): + os.makedirs(action_dir) + + os.chmod(action_file, 0o544) + shutil.move(action_file, action_dir) + + except PatchValidationFailure as e: + raise e + except PatchMismatchFailure as e: + raise e + except tarfile.TarError: + msg = "Failed during patch extraction" + LOG.exception(msg) + raise PatchValidationFailure(msg) + except KeyError: + msg = "Failed during patch extraction" + LOG.exception(msg) + raise PatchValidationFailure(msg) + except OSError: + msg = "Failed during patch extraction" + LOG.exception(msg) + raise PatchFail(msg) + except IOError: # pylint: disable=duplicate-except + msg = "Failed during patch extraction" + LOG.exception(msg) + raise PatchFail(msg) + finally: + # Change back to original working dir + os.chdir(orig_wd) + shutil.rmtree(tmpdir) + + return thispatch + + +def patch_build(): + configure_logging(logtofile=False) + + try: + opts, remainder = getopt.getopt(sys.argv[1:], + '', + ['id=', + 'release=', + 'summary=', + 'status=', + 'unremovable', + 'reboot-required=', + 'desc=', + 'warn=', + 'inst=', + 'req=', + 'controller=', + 'controller-worker=', + 'controller-worker-lowlatency=', + 'worker=', + 'worker-lowlatency=', + 'storage=', + 'all-nodes=', + 'pre-apply=', + 'pre-remove=', + 'apply-active-release-only']) + except getopt.GetoptError: + print("Usage: %s [ ] ... " + % os.path.basename(sys.argv[0])) + print("Options:") + print("\t--id Patch ID") + print("\t--release Platform release version") + print("\t--status Patch Status Code (ie. O, R, V)") + print("\t--unremovable Marks patch as unremovable") + print("\t--reboot-required Marks patch as reboot-required (default=Y)") + print("\t--summary Patch Summary") + print("\t--desc Patch Description") + print("\t--warn Patch Warnings") + print("\t--inst Patch Install Instructions") + print("\t--req Required Patch") + print("\t--controller New package for controller") + print("\t--worker New package for worker node") + print("\t--worker-lowlatency New package for worker-lowlatency node") + print("\t--storage New package for storage node") + print("\t--controller-worker New package for combined node") + print("\t--controller-worker-lowlatency New package for lowlatency combined node") + print("\t--all-nodes New package for all node types") + print("\t--pre-apply