From 527e098821c306b02e2ef3ac1a0f71bb57db6b25 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 30 May 2018 16:15:53 -0700 Subject: [PATCH] StarlingX open source release updates Signed-off-by: Dean Troyer --- CONTRIBUTORS.wrs | 7 + LICENSE | 202 ++ README.rst | 5 + cgcs-patch/LICENSE | 202 ++ cgcs-patch/PKG-INFO | 13 + cgcs-patch/bin/make_patch | 15 + cgcs-patch/bin/modify_patch | 15 + cgcs-patch/bin/patch-functions | 52 + cgcs-patch/bin/patch-tmpdirs.conf | 2 + cgcs-patch/bin/patch_build | 16 + cgcs-patch/bin/patch_check_goenabled.sh | 29 + cgcs-patch/bin/patching.conf | 7 + cgcs-patch/bin/patching.logrotate | 15 + cgcs-patch/bin/pmon-sw-patch-agent.conf | 19 + .../bin/pmon-sw-patch-controller-daemon.conf | 19 + cgcs-patch/bin/policy.json | 5 + cgcs-patch/bin/query_patch | 15 + cgcs-patch/bin/rpm-audit | 175 ++ cgcs-patch/bin/run-patch-scripts | 60 + cgcs-patch/bin/setup_patch_repo | 182 ++ cgcs-patch/bin/sw-patch | 16 + cgcs-patch/bin/sw-patch-agent | 16 + cgcs-patch/bin/sw-patch-agent-init.sh | 97 + cgcs-patch/bin/sw-patch-agent-restart | 20 + cgcs-patch/bin/sw-patch-agent.service | 16 + cgcs-patch/bin/sw-patch-controller-daemon | 16 + .../bin/sw-patch-controller-daemon-init.sh | 78 + .../bin/sw-patch-controller-daemon-restart | 20 + .../bin/sw-patch-controller-daemon.service | 16 + cgcs-patch/bin/sw-patch-controller-init.sh | 104 + cgcs-patch/bin/sw-patch-controller.service | 14 + cgcs-patch/bin/sw-patch-init.sh | 147 + cgcs-patch/bin/sw-patch.completion | 124 + cgcs-patch/bin/sw-patch.service | 16 + cgcs-patch/bin/upgrade-start-pkg-extract | 135 + cgcs-patch/centos/build_srpm | 101 + cgcs-patch/centos/build_srpm.data | 1 + cgcs-patch/centos/cgcs-patch.spec | 180 ++ cgcs-patch/cgcs-patch/LICENSE | 202 ++ .../cgcs-patch/cgcs_make_patch/__init__.py | 0 .../cgcs_make_patch/make_patch_functions.py | 1988 +++++++++++++ .../cgcs_make_patch/make_patching_branch | 226 ++ .../cgcs_make_patch/make_patching_tag | 98 + .../cgcs_make_patch/make_patching_workspace | 181 ++ cgcs-patch/cgcs-patch/cgcs_patch/__init__.py | 7 + .../cgcs-patch/cgcs_patch/api/__init__.py | 30 + cgcs-patch/cgcs-patch/cgcs_patch/api/app.py | 45 + .../cgcs-patch/cgcs_patch/api/config.py | 23 + .../cgcs_patch/api/controllers/__init__.py | 7 + .../cgcs_patch/api/controllers/root.py | 266 ++ cgcs-patch/cgcs-patch/cgcs_patch/app.py | 27 + .../cgcs-patch/cgcs_patch/authapi/__init__.py | 25 + .../cgcs-patch/cgcs_patch/authapi/acl.py | 28 + .../cgcs-patch/cgcs_patch/authapi/app.py | 77 + .../cgcs_patch/authapi/auth_token.py | 38 + .../cgcs-patch/cgcs_patch/authapi/config.py | 23 + .../cgcs-patch/cgcs_patch/authapi/hooks.py | 100 + .../cgcs-patch/cgcs_patch/authapi/policy.py | 117 + cgcs-patch/cgcs-patch/cgcs_patch/base.py | 166 ++ .../cgcs-patch/cgcs_patch/certificates.py | 51 + cgcs-patch/cgcs-patch/cgcs_patch/config.py | 126 + cgcs-patch/cgcs-patch/cgcs_patch/constants.py | 43 + .../cgcs-patch/cgcs_patch/exceptions.py | 45 + cgcs-patch/cgcs-patch/cgcs_patch/messages.py | 64 + .../cgcs-patch/cgcs_patch/patch_agent.py | 1060 +++++++ .../cgcs-patch/cgcs_patch/patch_client.py | 1308 +++++++++ .../cgcs-patch/cgcs_patch/patch_controller.py | 2468 +++++++++++++++++ .../cgcs-patch/cgcs_patch/patch_functions.py | 1205 ++++++++ .../cgcs-patch/cgcs_patch/patch_signing.py | 66 + .../cgcs-patch/cgcs_patch/patch_verify.py | 147 + .../cgcs_patch/templates/query.html | 92 + .../cgcs-patch/cgcs_patch/templates/query.xml | 95 + .../cgcs_patch/templates/query_agents.html | 32 + .../cgcs_patch/templates/query_hosts.xml | 75 + 
.../cgcs-patch/cgcs_patch/templates/show.html | 83 + .../cgcs-patch/cgcs_patch/templates/show.xml | 92 + cgcs-patch/cgcs-patch/cgcs_patch/utils.py | 74 + .../cgcs-patch/cgcs_patch_id/README.txt | 34 + .../cgcs_patch_id/patch_id_allocator.py | 49 + .../patch_id_allocator_client.py | 57 + .../patch_id_allocator_server.conf | 16 + .../patch_id_allocator_server.py | 43 + cgcs-patch/cgcs-patch/setup.py | 20 + cgcs-patch/restart-info.html | 712 +++++ mwa-chilon.map | 10 + patch-alarm/LICENSE | 202 ++ patch-alarm/PKG-INFO | 13 + patch-alarm/centos/build_srpm | 95 + patch-alarm/centos/build_srpm.data | 1 + patch-alarm/centos/patch-alarm.spec | 55 + patch-alarm/patch-alarm/LICENSE | 202 ++ .../patch-alarm/patch_alarm/__init__.py | 6 + .../patch_alarm/patch_alarm_manager.py | 223 ++ patch-alarm/patch-alarm/setup.py | 19 + patch-alarm/scripts/bin/patch-alarm-manager | 18 + .../scripts/init.d/patch-alarm-manager | 98 + patch-boot-args/LICENSE | 202 ++ .../EXAMPLE_0001/centos/EXAMPLE_0001.spec | 27 + .../EXAMPLE_0001/centos/build_srpm.data | 2 + .../EXAMPLE_0001/scripts/example-restart | 128 + .../EXAMPLE_0002/centos/EXAMPLE_0002.spec | 27 + .../EXAMPLE_0002/centos/build_srpm.data | 2 + .../scripts/example-cgcs-patch-restart | 48 + .../EXAMPLE_0003/centos/EXAMPLE_0003.spec | 27 + .../EXAMPLE_0003/centos/build_srpm.data | 2 + .../scripts/example-process-restart | 57 + .../EXAMPLE_AODH/centos/EXAMPLE_AODH.spec | 27 + .../EXAMPLE_AODH/centos/build_srpm.data | 2 + .../EXAMPLE_AODH/scripts/aodh-restart-example | 48 + .../EXAMPLE_HEAT/centos/EXAMPLE_HEAT.spec | 27 + .../EXAMPLE_HEAT/centos/build_srpm.data | 2 + .../EXAMPLE_HEAT/scripts/heat-restart-example | 48 + .../EXAMPLE_MTCE/centos/EXAMPLE_MTCE.spec | 27 + .../EXAMPLE_MTCE/centos/build_srpm.data | 2 + .../EXAMPLE_MTCE/scripts/mtce-restart-example | 46 + .../centos/EXAMPLE_NEUTRON.spec | 27 + .../EXAMPLE_NEUTRON/centos/build_srpm.data | 2 + .../scripts/neutron-restart-example | 40 + .../EXAMPLE_NOVA/centos/EXAMPLE_NOVA.spec | 27 + .../EXAMPLE_NOVA/centos/build_srpm.data | 2 + .../EXAMPLE_NOVA/scripts/nova-restart-example | 39 + .../EXAMPLE_RR/centos/EXAMPLE_RR.spec | 21 + .../EXAMPLE_RR/centos/build_srpm.data | 2 + .../EXAMPLE_SYSINV/centos/EXAMPLE_SYSINV.spec | 26 + .../EXAMPLE_SYSINV/centos/build_srpm.data | 3 + .../scripts/sysinv-restart-example | 52 + .../EXAMPLE_VIM/centos/EXAMPLE_VIM.spec | 26 + .../EXAMPLE_VIM/centos/build_srpm.data | 2 + .../EXAMPLE_VIM/scripts/vim-restart-example | 48 + .../SUITE_B_KERNEL/centos/SUITE_B_KERNEL.spec | 21 + .../SUITE_B_KERNEL/centos/build_srpm.data | 2 + .../centos/SUITE_B_PATCH_A.spec | 27 + .../SUITE_B_PATCH_A/centos/build_srpm.data | 2 + .../SUITE_B_PATCH_A/scripts/restart-script | 56 + .../centos/SUITE_B_PATCH_B.spec | 27 + .../SUITE_B_PATCH_B/centos/build_srpm.data | 2 + .../SUITE_B_PATCH_B/scripts/restart-script | 56 + .../centos/SUITE_B_PATCH_C.spec | 27 + .../SUITE_B_PATCH_C/centos/build_srpm.data | 2 + .../SUITE_B_PATCH_C/scripts/restart-script | 55 + .../centos/SUITE_B_PATCH_D.spec | 27 + .../SUITE_B_PATCH_D/centos/build_srpm.data | 2 + .../SUITE_B_PATCH_D/scripts/restart-script | 62 + .../centos/SUITE_B_PATCH_E.spec | 27 + .../SUITE_B_PATCH_E/centos/build_srpm.data | 2 + .../SUITE_B_PATCH_E/scripts/restart-script | 46 + .../centos/SUITE_B_PATCH_F.spec | 27 + .../SUITE_B_PATCH_F/centos/build_srpm.data | 2 + .../SUITE_B_PATCH_F/scripts/restart-script | 86 + patch-scripts/test-patches/A/centos/A.spec | 21 + .../test-patches/A/centos/build_srpm.data | 2 + patch-scripts/test-patches/B/centos/B.spec | 21 + 
.../test-patches/B/centos/build_srpm.data | 2 + patch-scripts/test-patches/C/centos/C.spec | 21 + .../test-patches/C/centos/build_srpm.data | 2 + .../INSVC_ALLNODES/centos/INSVC_ALLNODES.spec | 27 + .../INSVC_ALLNODES/centos/build_srpm.data | 2 + .../INSVC_ALLNODES/scripts/allnodes-restart | 36 + .../INSVC_COMPUTE/centos/INSVC_COMPUTE.spec | 27 + .../INSVC_COMPUTE/centos/build_srpm.data | 2 + .../INSVC_COMPUTE/scripts/compute-restart | 34 + .../centos/INSVC_CONTROLLER.spec | 26 + .../INSVC_CONTROLLER/centos/build_srpm.data | 2 + .../scripts/controller-restart | 48 + .../INSVC_NOVA/centos/INSVC_NOVA.spec | 27 + .../INSVC_NOVA/centos/build_srpm.data | 2 + .../INSVC_NOVA/scripts/nova-restart | 39 + .../centos/INSVC_RESTART_FAILURE.spec | 27 + .../centos/build_srpm.data | 2 + .../scripts/restart-failure | 22 + .../INSVC_STORAGE/centos/INSVC_STORAGE.spec | 27 + .../INSVC_STORAGE/centos/build_srpm.data | 2 + .../INSVC_STORAGE/scripts/storage-restart | 24 + .../test-patches/LARGE/centos/LARGE.spec | 21 + .../test-patches/LARGE/centos/build_srpm.data | 2 + .../RR_ALLNODES/centos/RR_ALLNODES.spec | 21 + .../RR_ALLNODES/centos/build_srpm.data | 2 + .../RR_COMPUTE/centos/RR_COMPUTE.spec | 21 + .../RR_COMPUTE/centos/build_srpm.data | 2 + .../RR_CONTROLLER/centos/RR_CONTROLLER.spec | 21 + .../RR_CONTROLLER/centos/build_srpm.data | 2 + .../test-patches/RR_NOVA/centos/RR_NOVA.spec | 21 + .../RR_NOVA/centos/build_srpm.data | 2 + .../RR_STORAGE/centos/RR_STORAGE.spec | 21 + .../RR_STORAGE/centos/build_srpm.data | 2 + requests-toolbelt/PKG-INFO | 12 + requests-toolbelt/centos/build_srpm.data | 7 + .../centos/requests-toolbelt.spec | 43 + smart-helper/LICENSE | 202 ++ smart-helper/files/etc.rpm.platform | 11 + smart-helper/files/etc.rpm.sysinfo.Dirnames | 1 + tsconfig/.gitignore | 6 + tsconfig/LICENSE | 202 ++ tsconfig/PKG-INFO | 13 + tsconfig/centos/build_srpm.data | 3 + tsconfig/centos/tsconfig.spec | 48 + tsconfig/scripts/tsconfig | 48 + tsconfig/tsconfig/LICENSE | 202 ++ tsconfig/tsconfig/setup.py | 19 + tsconfig/tsconfig/tsconfig/__init__.py | 6 + tsconfig/tsconfig/tsconfig/tsconfig.py | 208 ++ 201 files changed, 18085 insertions(+) create mode 100644 CONTRIBUTORS.wrs create mode 100644 LICENSE create mode 100644 README.rst create mode 100644 cgcs-patch/LICENSE create mode 100644 cgcs-patch/PKG-INFO create mode 100755 cgcs-patch/bin/make_patch create mode 100755 cgcs-patch/bin/modify_patch create mode 100644 cgcs-patch/bin/patch-functions create mode 100644 cgcs-patch/bin/patch-tmpdirs.conf create mode 100755 cgcs-patch/bin/patch_build create mode 100644 cgcs-patch/bin/patch_check_goenabled.sh create mode 100644 cgcs-patch/bin/patching.conf create mode 100644 cgcs-patch/bin/patching.logrotate create mode 100644 cgcs-patch/bin/pmon-sw-patch-agent.conf create mode 100644 cgcs-patch/bin/pmon-sw-patch-controller-daemon.conf create mode 100644 cgcs-patch/bin/policy.json create mode 100755 cgcs-patch/bin/query_patch create mode 100755 cgcs-patch/bin/rpm-audit create mode 100644 cgcs-patch/bin/run-patch-scripts create mode 100755 cgcs-patch/bin/setup_patch_repo create mode 100755 cgcs-patch/bin/sw-patch create mode 100755 cgcs-patch/bin/sw-patch-agent create mode 100755 cgcs-patch/bin/sw-patch-agent-init.sh create mode 100644 cgcs-patch/bin/sw-patch-agent-restart create mode 100644 cgcs-patch/bin/sw-patch-agent.service create mode 100755 cgcs-patch/bin/sw-patch-controller-daemon create mode 100755 cgcs-patch/bin/sw-patch-controller-daemon-init.sh create mode 100644 
cgcs-patch/bin/sw-patch-controller-daemon-restart create mode 100644 cgcs-patch/bin/sw-patch-controller-daemon.service create mode 100644 cgcs-patch/bin/sw-patch-controller-init.sh create mode 100644 cgcs-patch/bin/sw-patch-controller.service create mode 100644 cgcs-patch/bin/sw-patch-init.sh create mode 100644 cgcs-patch/bin/sw-patch.completion create mode 100644 cgcs-patch/bin/sw-patch.service create mode 100644 cgcs-patch/bin/upgrade-start-pkg-extract create mode 100755 cgcs-patch/centos/build_srpm create mode 100644 cgcs-patch/centos/build_srpm.data create mode 100644 cgcs-patch/centos/cgcs-patch.spec create mode 100644 cgcs-patch/cgcs-patch/LICENSE create mode 100644 cgcs-patch/cgcs-patch/cgcs_make_patch/__init__.py create mode 100644 cgcs-patch/cgcs-patch/cgcs_make_patch/make_patch_functions.py create mode 100755 cgcs-patch/cgcs-patch/cgcs_make_patch/make_patching_branch create mode 100755 cgcs-patch/cgcs-patch/cgcs_make_patch/make_patching_tag create mode 100755 cgcs-patch/cgcs-patch/cgcs_make_patch/make_patching_workspace create mode 100644 cgcs-patch/cgcs-patch/cgcs_patch/__init__.py create mode 100644 cgcs-patch/cgcs-patch/cgcs_patch/api/__init__.py create mode 100644 cgcs-patch/cgcs-patch/cgcs_patch/api/app.py create mode 100644 cgcs-patch/cgcs-patch/cgcs_patch/api/config.py create mode 100644 cgcs-patch/cgcs-patch/cgcs_patch/api/controllers/__init__.py create mode 100644 cgcs-patch/cgcs-patch/cgcs_patch/api/controllers/root.py create mode 100644 cgcs-patch/cgcs-patch/cgcs_patch/app.py create mode 100755 cgcs-patch/cgcs-patch/cgcs_patch/authapi/__init__.py create mode 100755 cgcs-patch/cgcs-patch/cgcs_patch/authapi/acl.py create mode 100755 cgcs-patch/cgcs-patch/cgcs_patch/authapi/app.py create mode 100755 cgcs-patch/cgcs-patch/cgcs_patch/authapi/auth_token.py create mode 100755 cgcs-patch/cgcs-patch/cgcs_patch/authapi/config.py create mode 100755 cgcs-patch/cgcs-patch/cgcs_patch/authapi/hooks.py create mode 100755 cgcs-patch/cgcs-patch/cgcs_patch/authapi/policy.py create mode 100644 cgcs-patch/cgcs-patch/cgcs_patch/base.py create mode 100644 cgcs-patch/cgcs-patch/cgcs_patch/certificates.py create mode 100644 cgcs-patch/cgcs-patch/cgcs_patch/config.py create mode 100644 cgcs-patch/cgcs-patch/cgcs_patch/constants.py create mode 100644 cgcs-patch/cgcs-patch/cgcs_patch/exceptions.py create mode 100644 cgcs-patch/cgcs-patch/cgcs_patch/messages.py create mode 100644 cgcs-patch/cgcs-patch/cgcs_patch/patch_agent.py create mode 100644 cgcs-patch/cgcs-patch/cgcs_patch/patch_client.py create mode 100644 cgcs-patch/cgcs-patch/cgcs_patch/patch_controller.py create mode 100644 cgcs-patch/cgcs-patch/cgcs_patch/patch_functions.py create mode 100644 cgcs-patch/cgcs-patch/cgcs_patch/patch_signing.py create mode 100644 cgcs-patch/cgcs-patch/cgcs_patch/patch_verify.py create mode 100644 cgcs-patch/cgcs-patch/cgcs_patch/templates/query.html create mode 100755 cgcs-patch/cgcs-patch/cgcs_patch/templates/query.xml create mode 100644 cgcs-patch/cgcs-patch/cgcs_patch/templates/query_agents.html create mode 100755 cgcs-patch/cgcs-patch/cgcs_patch/templates/query_hosts.xml create mode 100644 cgcs-patch/cgcs-patch/cgcs_patch/templates/show.html create mode 100755 cgcs-patch/cgcs-patch/cgcs_patch/templates/show.xml create mode 100644 cgcs-patch/cgcs-patch/cgcs_patch/utils.py create mode 100644 cgcs-patch/cgcs-patch/cgcs_patch_id/README.txt create mode 100755 cgcs-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator.py create mode 100755 cgcs-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator_client.py create 
mode 100644 cgcs-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator_server.conf create mode 100755 cgcs-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator_server.py create mode 100644 cgcs-patch/cgcs-patch/setup.py create mode 100755 cgcs-patch/restart-info.html create mode 100644 mwa-chilon.map create mode 100644 patch-alarm/LICENSE create mode 100644 patch-alarm/PKG-INFO create mode 100755 patch-alarm/centos/build_srpm create mode 100644 patch-alarm/centos/build_srpm.data create mode 100644 patch-alarm/centos/patch-alarm.spec create mode 100644 patch-alarm/patch-alarm/LICENSE create mode 100644 patch-alarm/patch-alarm/patch_alarm/__init__.py create mode 100644 patch-alarm/patch-alarm/patch_alarm/patch_alarm_manager.py create mode 100644 patch-alarm/patch-alarm/setup.py create mode 100644 patch-alarm/scripts/bin/patch-alarm-manager create mode 100644 patch-alarm/scripts/init.d/patch-alarm-manager create mode 100644 patch-boot-args/LICENSE create mode 100644 patch-scripts/EXAMPLE_0001/centos/EXAMPLE_0001.spec create mode 100644 patch-scripts/EXAMPLE_0001/centos/build_srpm.data create mode 100644 patch-scripts/EXAMPLE_0001/scripts/example-restart create mode 100644 patch-scripts/EXAMPLE_0002/centos/EXAMPLE_0002.spec create mode 100644 patch-scripts/EXAMPLE_0002/centos/build_srpm.data create mode 100644 patch-scripts/EXAMPLE_0002/scripts/example-cgcs-patch-restart create mode 100644 patch-scripts/EXAMPLE_0003/centos/EXAMPLE_0003.spec create mode 100644 patch-scripts/EXAMPLE_0003/centos/build_srpm.data create mode 100644 patch-scripts/EXAMPLE_0003/scripts/example-process-restart create mode 100644 patch-scripts/EXAMPLE_AODH/centos/EXAMPLE_AODH.spec create mode 100644 patch-scripts/EXAMPLE_AODH/centos/build_srpm.data create mode 100644 patch-scripts/EXAMPLE_AODH/scripts/aodh-restart-example create mode 100644 patch-scripts/EXAMPLE_HEAT/centos/EXAMPLE_HEAT.spec create mode 100644 patch-scripts/EXAMPLE_HEAT/centos/build_srpm.data create mode 100644 patch-scripts/EXAMPLE_HEAT/scripts/heat-restart-example create mode 100644 patch-scripts/EXAMPLE_MTCE/centos/EXAMPLE_MTCE.spec create mode 100644 patch-scripts/EXAMPLE_MTCE/centos/build_srpm.data create mode 100644 patch-scripts/EXAMPLE_MTCE/scripts/mtce-restart-example create mode 100644 patch-scripts/EXAMPLE_NEUTRON/centos/EXAMPLE_NEUTRON.spec create mode 100644 patch-scripts/EXAMPLE_NEUTRON/centos/build_srpm.data create mode 100644 patch-scripts/EXAMPLE_NEUTRON/scripts/neutron-restart-example create mode 100644 patch-scripts/EXAMPLE_NOVA/centos/EXAMPLE_NOVA.spec create mode 100644 patch-scripts/EXAMPLE_NOVA/centos/build_srpm.data create mode 100644 patch-scripts/EXAMPLE_NOVA/scripts/nova-restart-example create mode 100644 patch-scripts/EXAMPLE_RR/centos/EXAMPLE_RR.spec create mode 100644 patch-scripts/EXAMPLE_RR/centos/build_srpm.data create mode 100644 patch-scripts/EXAMPLE_SYSINV/centos/EXAMPLE_SYSINV.spec create mode 100644 patch-scripts/EXAMPLE_SYSINV/centos/build_srpm.data create mode 100644 patch-scripts/EXAMPLE_SYSINV/scripts/sysinv-restart-example create mode 100644 patch-scripts/EXAMPLE_VIM/centos/EXAMPLE_VIM.spec create mode 100644 patch-scripts/EXAMPLE_VIM/centos/build_srpm.data create mode 100644 patch-scripts/EXAMPLE_VIM/scripts/vim-restart-example create mode 100644 patch-scripts/test-patches-suite-b/SUITE_B_KERNEL/centos/SUITE_B_KERNEL.spec create mode 100644 patch-scripts/test-patches-suite-b/SUITE_B_KERNEL/centos/build_srpm.data create mode 100644 patch-scripts/test-patches-suite-b/SUITE_B_PATCH_A/centos/SUITE_B_PATCH_A.spec create 
mode 100644 patch-scripts/test-patches-suite-b/SUITE_B_PATCH_A/centos/build_srpm.data create mode 100644 patch-scripts/test-patches-suite-b/SUITE_B_PATCH_A/scripts/restart-script create mode 100644 patch-scripts/test-patches-suite-b/SUITE_B_PATCH_B/centos/SUITE_B_PATCH_B.spec create mode 100644 patch-scripts/test-patches-suite-b/SUITE_B_PATCH_B/centos/build_srpm.data create mode 100644 patch-scripts/test-patches-suite-b/SUITE_B_PATCH_B/scripts/restart-script create mode 100644 patch-scripts/test-patches-suite-b/SUITE_B_PATCH_C/centos/SUITE_B_PATCH_C.spec create mode 100644 patch-scripts/test-patches-suite-b/SUITE_B_PATCH_C/centos/build_srpm.data create mode 100644 patch-scripts/test-patches-suite-b/SUITE_B_PATCH_C/scripts/restart-script create mode 100644 patch-scripts/test-patches-suite-b/SUITE_B_PATCH_D/centos/SUITE_B_PATCH_D.spec create mode 100644 patch-scripts/test-patches-suite-b/SUITE_B_PATCH_D/centos/build_srpm.data create mode 100644 patch-scripts/test-patches-suite-b/SUITE_B_PATCH_D/scripts/restart-script create mode 100644 patch-scripts/test-patches-suite-b/SUITE_B_PATCH_E/centos/SUITE_B_PATCH_E.spec create mode 100644 patch-scripts/test-patches-suite-b/SUITE_B_PATCH_E/centos/build_srpm.data create mode 100644 patch-scripts/test-patches-suite-b/SUITE_B_PATCH_E/scripts/restart-script create mode 100644 patch-scripts/test-patches-suite-b/SUITE_B_PATCH_F/centos/SUITE_B_PATCH_F.spec create mode 100644 patch-scripts/test-patches-suite-b/SUITE_B_PATCH_F/centos/build_srpm.data create mode 100644 patch-scripts/test-patches-suite-b/SUITE_B_PATCH_F/scripts/restart-script create mode 100644 patch-scripts/test-patches/A/centos/A.spec create mode 100644 patch-scripts/test-patches/A/centos/build_srpm.data create mode 100644 patch-scripts/test-patches/B/centos/B.spec create mode 100644 patch-scripts/test-patches/B/centos/build_srpm.data create mode 100644 patch-scripts/test-patches/C/centos/C.spec create mode 100644 patch-scripts/test-patches/C/centos/build_srpm.data create mode 100644 patch-scripts/test-patches/INSVC_ALLNODES/centos/INSVC_ALLNODES.spec create mode 100644 patch-scripts/test-patches/INSVC_ALLNODES/centos/build_srpm.data create mode 100644 patch-scripts/test-patches/INSVC_ALLNODES/scripts/allnodes-restart create mode 100644 patch-scripts/test-patches/INSVC_COMPUTE/centos/INSVC_COMPUTE.spec create mode 100644 patch-scripts/test-patches/INSVC_COMPUTE/centos/build_srpm.data create mode 100644 patch-scripts/test-patches/INSVC_COMPUTE/scripts/compute-restart create mode 100644 patch-scripts/test-patches/INSVC_CONTROLLER/centos/INSVC_CONTROLLER.spec create mode 100644 patch-scripts/test-patches/INSVC_CONTROLLER/centos/build_srpm.data create mode 100644 patch-scripts/test-patches/INSVC_CONTROLLER/scripts/controller-restart create mode 100644 patch-scripts/test-patches/INSVC_NOVA/centos/INSVC_NOVA.spec create mode 100644 patch-scripts/test-patches/INSVC_NOVA/centos/build_srpm.data create mode 100644 patch-scripts/test-patches/INSVC_NOVA/scripts/nova-restart create mode 100644 patch-scripts/test-patches/INSVC_RESTART_FAILURE/centos/INSVC_RESTART_FAILURE.spec create mode 100644 patch-scripts/test-patches/INSVC_RESTART_FAILURE/centos/build_srpm.data create mode 100644 patch-scripts/test-patches/INSVC_RESTART_FAILURE/scripts/restart-failure create mode 100644 patch-scripts/test-patches/INSVC_STORAGE/centos/INSVC_STORAGE.spec create mode 100644 patch-scripts/test-patches/INSVC_STORAGE/centos/build_srpm.data create mode 100644 patch-scripts/test-patches/INSVC_STORAGE/scripts/storage-restart 
create mode 100644 patch-scripts/test-patches/LARGE/centos/LARGE.spec create mode 100644 patch-scripts/test-patches/LARGE/centos/build_srpm.data create mode 100644 patch-scripts/test-patches/RR_ALLNODES/centos/RR_ALLNODES.spec create mode 100644 patch-scripts/test-patches/RR_ALLNODES/centos/build_srpm.data create mode 100644 patch-scripts/test-patches/RR_COMPUTE/centos/RR_COMPUTE.spec create mode 100644 patch-scripts/test-patches/RR_COMPUTE/centos/build_srpm.data create mode 100644 patch-scripts/test-patches/RR_CONTROLLER/centos/RR_CONTROLLER.spec create mode 100644 patch-scripts/test-patches/RR_CONTROLLER/centos/build_srpm.data create mode 100644 patch-scripts/test-patches/RR_NOVA/centos/RR_NOVA.spec create mode 100644 patch-scripts/test-patches/RR_NOVA/centos/build_srpm.data create mode 100644 patch-scripts/test-patches/RR_STORAGE/centos/RR_STORAGE.spec create mode 100644 patch-scripts/test-patches/RR_STORAGE/centos/build_srpm.data create mode 100644 requests-toolbelt/PKG-INFO create mode 100644 requests-toolbelt/centos/build_srpm.data create mode 100644 requests-toolbelt/centos/requests-toolbelt.spec create mode 100644 smart-helper/LICENSE create mode 100644 smart-helper/files/etc.rpm.platform create mode 100644 smart-helper/files/etc.rpm.sysinfo.Dirnames create mode 100644 tsconfig/.gitignore create mode 100644 tsconfig/LICENSE create mode 100644 tsconfig/PKG-INFO create mode 100644 tsconfig/centos/build_srpm.data create mode 100644 tsconfig/centos/tsconfig.spec create mode 100644 tsconfig/scripts/tsconfig create mode 100644 tsconfig/tsconfig/LICENSE create mode 100644 tsconfig/tsconfig/setup.py create mode 100644 tsconfig/tsconfig/tsconfig/__init__.py create mode 100644 tsconfig/tsconfig/tsconfig/tsconfig.py diff --git a/CONTRIBUTORS.wrs b/CONTRIBUTORS.wrs new file mode 100644 index 00000000..3bb02f38 --- /dev/null +++ b/CONTRIBUTORS.wrs @@ -0,0 +1,7 @@ +The following contributors from Wind River have developed the seed code in this +repository. We look forward to community collaboration and contributions for +additional features, enhancements and refactoring. + +Contributors: +============= +Don Penney diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.rst b/README.rst new file mode 100644 index 00000000..941d7b02 --- /dev/null +++ b/README.rst @@ -0,0 +1,5 @@ +========== +stx-update +========== + +StarlingX Software Management diff --git a/cgcs-patch/LICENSE b/cgcs-patch/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/cgcs-patch/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/cgcs-patch/PKG-INFO b/cgcs-patch/PKG-INFO
new file mode 100644
index 00000000..835a7eff
--- /dev/null
+++ b/cgcs-patch/PKG-INFO
@@ -0,0 +1,13 @@
+Metadata-Version: 1.1
+Name: cgcs-patch
+Version: 1.0
+Summary: TIS Platform Patching
+Home-page:
+Author: Windriver
+Author-email: info@windriver.com
+License: Apache-2.0
+
+Description: TIS Platform Patching
+
+
+Platform: UNKNOWN
diff --git a/cgcs-patch/bin/make_patch b/cgcs-patch/bin/make_patch
new file mode 100755
index 00000000..95cf05b3
--- /dev/null
+++ b/cgcs-patch/bin/make_patch
@@ -0,0 +1,15 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2013-2014 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+
+import sys
+
+from cgcs_make_patch.make_patch_functions import make_patch
+
+if __name__ == "__main__":
+    sys.exit(make_patch())
diff --git a/cgcs-patch/bin/modify_patch b/cgcs-patch/bin/modify_patch
new file mode 100755
index 00000000..470f0fc1
--- /dev/null
+++ b/cgcs-patch/bin/modify_patch
@@ -0,0 +1,15 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2013-2014 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+
+import sys
+
+from cgcs_make_patch.make_patch_functions import modify_patch
+
+if __name__ == "__main__":
+    sys.exit(modify_patch())
diff --git a/cgcs-patch/bin/patch-functions b/cgcs-patch/bin/patch-functions
new file mode 100644
index 00000000..adb36a28
--- /dev/null
+++ b/cgcs-patch/bin/patch-functions
@@ -0,0 +1,52 @@
+#
+# Copyright (c) 2016 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+#
+# This bash source file provides variables and functions that
+# may be used by in-service patching scripts.
+#
+
+# Source platform.conf, for nodetype and subfunctions
+. /etc/platform/platform.conf
+
+declare PATCH_SCRIPTDIR=/run/patching/patch-scripts
+declare PATCH_FLAGDIR=/run/patching/patch-flags
+declare -i PATCH_STATUS_OK=0
+declare -i PATCH_STATUS_FAILED=1
+
+declare logfile=/var/log/patching.log
+declare NAME=$(basename $0)
+
+function loginfo()
+{
+    echo "`date "+%FT%T.%3N"`: $NAME: $*" >> $logfile
+}
+
+function is_controller()
+{
+    [[ $nodetype == "controller" ]]
+}
+
+function is_compute()
+{
+    [[ $nodetype == "compute" ]]
+}
+
+function is_storage()
+{
+    [[ $nodetype == "storage" ]]
+}
+
+function is_cpe()
+{
+    [[ $nodetype == "controller" && $subfunction =~ compute ]]
+}
+
+function is_locked()
+{
+    test -f /var/run/.node_locked
+}
+
diff --git a/cgcs-patch/bin/patch-tmpdirs.conf b/cgcs-patch/bin/patch-tmpdirs.conf
new file mode 100644
index 00000000..b30284f2
--- /dev/null
+++ b/cgcs-patch/bin/patch-tmpdirs.conf
@@ -0,0 +1,2 @@
+d /run/patching 0700 root root -
+
diff --git a/cgcs-patch/bin/patch_build b/cgcs-patch/bin/patch_build
new file mode 100755
index 00000000..580890d2
--- /dev/null
+++ b/cgcs-patch/bin/patch_build
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+
+"""
+Copyright (c) 2014 Wind River Systems, Inc.
+
+SPDX-License-Identifier: Apache-2.0
+
+"""
+
+import sys
+
+from cgcs_patch.patch_functions import patch_build
+
+if __name__ == "__main__":
+    sys.exit(patch_build())
+
diff --git a/cgcs-patch/bin/patch_check_goenabled.sh b/cgcs-patch/bin/patch_check_goenabled.sh
new file mode 100644
index 00000000..29fc16cd
--- /dev/null
+++ b/cgcs-patch/bin/patch_check_goenabled.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+#
+# Copyright (c) 2014 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+# Patching "goenabled" check.
+# If a patch has been applied on this node, it is now out-of-date and should be rebooted.
+
+NAME=$(basename $0)
+SYSTEM_CHANGED_FLAG=/var/run/node_is_patched
+
+logfile=/var/log/patching.log
+
+function LOG()
+{
+    logger "$NAME: $*"
+    echo "`date "+%FT%T.%3N"`: $NAME: $*" >> $logfile
+}
+
+if [ -f $SYSTEM_CHANGED_FLAG ]
+then
+    LOG "Node has been patched. Failing goenabled check."
+    exit 1
+fi
+
+exit 0
+
diff --git a/cgcs-patch/bin/patching.conf b/cgcs-patch/bin/patching.conf
new file mode 100644
index 00000000..b3c3adac
--- /dev/null
+++ b/cgcs-patch/bin/patching.conf
@@ -0,0 +1,7 @@
+[runtime]
+controller_multicast = 239.1.1.3
+agent_multicast = 239.1.1.4
+api_port = 5487
+controller_port = 5488
+agent_port = 5489
+
diff --git a/cgcs-patch/bin/patching.logrotate b/cgcs-patch/bin/patching.logrotate
new file mode 100644
index 00000000..2dbdeffa
--- /dev/null
+++ b/cgcs-patch/bin/patching.logrotate
@@ -0,0 +1,15 @@
+/var/log/patching.log
+/var/log/patching-api.log
+/var/log/patching-insvc.log
+{
+    nodateext
+    size 10M
+    start 1
+    rotate 10
+    missingok
+    notifempty
+    compress
+    delaycompress
+    copytruncate
+}
+
diff --git a/cgcs-patch/bin/pmon-sw-patch-agent.conf b/cgcs-patch/bin/pmon-sw-patch-agent.conf
new file mode 100644
index 00000000..09872dab
--- /dev/null
+++ b/cgcs-patch/bin/pmon-sw-patch-agent.conf
@@ -0,0 +1,19 @@
+[process]
+process = sw-patch-agent
+pidfile = /var/run/sw-patch-agent.pid
+script = /etc/init.d/sw-patch-agent
+style = lsb          ; ocf or lsb
+severity = major     ; Process failure severity
+                     ;     critical : host is failed
+                     ;     major    : host is degraded
+                     ;     minor    : log is generated
+restarts = 3         ; Number of back to back unsuccessful restarts before severity assertion
+interval = 5         ; Number of seconds to wait between back-to-back unsuccessful restarts
+debounce = 20        ; Number of seconds the process needs to run before declaring
+                     ; it as running O.K. after a restart.
+                     ; Time after which back-to-back restart count is cleared.
+startuptime = 10     ; Seconds to wait after process start before starting the debounce monitor
+mode = passive       ; Monitoring mode: passive (default) or active
+                     ;     passive: process death monitoring (default: always)
+                     ;     active : heartbeat monitoring, i.e. request / response messaging
+
diff --git a/cgcs-patch/bin/pmon-sw-patch-controller-daemon.conf b/cgcs-patch/bin/pmon-sw-patch-controller-daemon.conf
new file mode 100644
index 00000000..834cd201
--- /dev/null
+++ b/cgcs-patch/bin/pmon-sw-patch-controller-daemon.conf
@@ -0,0 +1,19 @@
+[process]
+process = sw-patch-controller-daemon
+pidfile = /var/run/sw-patch-controller-daemon.pid
+script = /etc/init.d/sw-patch-controller-daemon
+style = lsb          ; ocf or lsb
+severity = major     ; Process failure severity
+                     ;     critical : host is failed
+                     ;     major    : host is degraded
+                     ;     minor    : log is generated
+restarts = 3         ; Number of back to back unsuccessful restarts before severity assertion
+interval = 5         ; Number of seconds to wait between back-to-back unsuccessful restarts
+debounce = 20        ; Number of seconds the process needs to run before declaring
+                     ; it as running O.K. after a restart.
+                     ; Time after which back-to-back restart count is cleared.
+startuptime = 10     ; Seconds to wait after process start before starting the debounce monitor
+mode = passive       ; Monitoring mode: passive (default) or active
+                     ;     passive: process death monitoring (default: always)
+                     ;     active : heartbeat monitoring, i.e. request / response messaging
+
diff --git a/cgcs-patch/bin/policy.json b/cgcs-patch/bin/policy.json
new file mode 100644
index 00000000..94ac3a5b
--- /dev/null
+++ b/cgcs-patch/bin/policy.json
@@ -0,0 +1,5 @@
+{
+    "admin": "role:admin or role:administrator",
+    "admin_api": "is_admin:True",
+    "default": "rule:admin_api"
+}
diff --git a/cgcs-patch/bin/query_patch b/cgcs-patch/bin/query_patch
new file mode 100755
index 00000000..f6637d90
--- /dev/null
+++ b/cgcs-patch/bin/query_patch
@@ -0,0 +1,15 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2013-2014 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+
+
+import sys
+
+from cgcs_make_patch.make_patch_functions import query_patch
+
+if __name__ == "__main__":
+    sys.exit(query_patch())
diff --git a/cgcs-patch/bin/rpm-audit b/cgcs-patch/bin/rpm-audit
new file mode 100755
index 00000000..2d17201d
--- /dev/null
+++ b/cgcs-patch/bin/rpm-audit
@@ -0,0 +1,175 @@
+#!/bin/bash
+#
+# Copyright (c) 2016 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+if [[ $EUID -ne 0 ]]; then
+    echo "This utility must be run as root." >&2
+    exit 1
+fi
+
+function show_usage()
+{
+    cat <<EOF
[...]
diff --git a/cgcs-patch/bin/run-patch-scripts b/cgcs-patch/bin/run-patch-scripts
[...]
+cat <<EOF >>$SCRIPTLOG
+############################################################
+`date "+%FT%T.%3N"`: Running $NUM_SCRIPTS in-service patch scripts:
+
+$SCRIPTS
+
+############################################################
+EOF
+
+declare -i FAILURES=0
+for cmd in $SCRIPTS
+do
+    cat <<EOF >>$SCRIPTLOG
+############################################################
+`date "+%FT%T.%3N"`: Running $cmd
+
+EOF
+
+    bash -x $cmd >>$SCRIPTLOG 2>&1
+    rc=$?
+    if [ $rc -ne $PATCH_STATUS_OK ]
+    then
+        let -i FAILURES++
+    fi
+    cat <<EOF >>$SCRIPTLOG
+`date "+%FT%T.%3N"`: Completed running $cmd (rc=$rc)
+############################################################
+
+EOF
+done
+
+cat <<EOF >>$SCRIPTLOG
+
+`date "+%FT%T.%3N"`: Completed running scripts with $FAILURES failures
+############################################################
+EOF
+
+exit $FAILURES
+
diff --git a/cgcs-patch/bin/setup_patch_repo b/cgcs-patch/bin/setup_patch_repo
new file mode 100755
index 00000000..f878c18c
--- /dev/null
+++ b/cgcs-patch/bin/setup_patch_repo
@@ -0,0 +1,182 @@
+#!/usr/bin/env python
+
+"""
+Copyright (c) 2018 Wind River Systems, Inc.

+SPDX-License-Identifier: Apache-2.0
+
+"""
+
+import getopt
+import os
+import platform
+import rpm
+import shutil
+import subprocess
+import sys
+import tempfile
+
+import cgcs_patch.patch_functions as pf
+import cgcs_patch.patch_verify as pv
+import cgcs_patch.constants as constants
+
+import logging
+logging.getLogger('main_logger')
+logging.basicConfig(level=logging.INFO)
+
+# Override the pv.dev_certificate_marker so we can verify signatures off-box
+cgcs_patch_bindir = os.path.dirname(os.path.abspath(sys.argv[0]))
+dev_cert_path = os.path.abspath(os.path.join(cgcs_patch_bindir, '../../enable-dev-patch/enable-dev-patch/dev_certificate_enable.bin'))
+
+pv.dev_certificate_marker = dev_cert_path
+
+def usage():
+    print "Usage: %s -o ..." \
+        % os.path.basename(sys.argv[0])
+    exit(1)
+
+
+def main():
+    try:
+        opts, remainder = getopt.getopt(sys.argv[1:],
+                                        'o:',
+                                        ['output='])
+    except getopt.GetoptError:
+        usage()
+
+    output = None
+
+    for opt, arg in opts:
+        if opt == "--output" or opt == '-o':
+            output = arg
+
+    if output is None:
+        usage()
+
+    sw_version = os.environ['PLATFORM_RELEASE']
+
+    allpatches = pf.PatchData()
+
+    output = os.path.abspath(output)
+
+    pkgdir = os.path.join(output, 'Packages')
+    datadir = os.path.join(output, 'metadata')
+    committed_dir = os.path.join(datadir, 'committed')
+
+    if os.path.exists(output):
+        # Check to see if the expected structure already exists,
+        # maybe we're appending a patch.
+        if not os.path.exists(committed_dir) or not os.path.exists(pkgdir):
+            print "Packages or metadata dir missing from existing %s. Aborting..." % output
+            exit(1)
+
+        # Load the existing metadata
+        allpatches.load_all_metadata(committed_dir, constants.COMMITTED)
+    else:
+        os.mkdir(output, 0755)
+        os.mkdir(datadir, 0755)
+        os.mkdir(committed_dir, 0755)
+        os.mkdir(pkgdir, 0755)
+
+    # Save the current directory, so we can chdir back after
+    orig_wd = os.getcwd()
+
+    tmpdir = None
+    try:
+        for p in remainder:
+            fpath = os.path.abspath(p)
+
+            # Create a temporary working directory
+            tmpdir = tempfile.mkdtemp(prefix="patchrepo_")
+
+            # Change to the tmpdir
+            os.chdir(tmpdir)
+
+            print "Parsing %s" % fpath
+            pf.PatchFile.read_patch(fpath)
+
+            thispatch = pf.PatchData()
+            patch_id = thispatch.parse_metadata("metadata.xml", constants.COMMITTED)
+
+            if patch_id in allpatches.metadata:
+                print "Skipping %s as it's already in the repo" % patch_id
+                # Change back to original working dir
+                os.chdir(orig_wd)
+
+                shutil.rmtree(tmpdir)
+                tmpdir = None
+
+                continue
+
+            patch_sw_version = thispatch.query_line(patch_id, 'sw_version')
+            if patch_sw_version != sw_version:
+                print "%s is for release %s, not %s" % (patch_id, patch_sw_version, sw_version)
+
+            # Move the metadata to the "committed" dir, and the rpms to the Packages dir
+            shutil.move('metadata.xml', os.path.join(committed_dir, "%s-metadata.xml" % patch_id))
+            for f in thispatch.query_line(patch_id, 'contents'):
+                shutil.move(f, pkgdir)
+
+            allpatches.add_patch(patch_id, thispatch)
+
+            # Change back to original working dir
+            os.chdir(orig_wd)
+
+            shutil.rmtree(tmpdir)
+            tmpdir = None
+    except:
+        if tmpdir is not None:
+            # Change back to original working dir
+            os.chdir(orig_wd)
+
+            shutil.rmtree(tmpdir)
+            tmpdir = None
+        raise
+
+    allpatches.gen_release_groups_xml(sw_version, output)
+
+    # Purge unneeded RPMs
+    keep = {}
+    for patch_id in allpatches.metadata.keys():
+        for rpmname in allpatches.contents[patch_id]:
+            try:
+                pkgname, arch, pkgver = pf.parse_rpm_filename(rpmname)
+            except ValueError as e:
+                raise e
+
+            if pkgname not in keep:
+                keep[pkgname] = { arch: pkgver }
+                continue
+            elif arch not in keep[pkgname]:
+                keep[pkgname][arch] = pkgver
+                continue
+
+            # Compare versions
+            keep_pkgver = keep[pkgname][arch]
+            if pkgver > keep_pkgver:
+                # Find the rpmname
+                keep_rpmname = keep_pkgver.generate_rpm_filename(pkgname, arch)
+
+                filename = os.path.join(pkgdir, keep_rpmname)
+                if os.path.exists(filename):
+                    os.remove(filename)
+
+                # Keep the new pkgver
+                keep[pkgname][arch] = pkgver
+            else:
+                filename = os.path.join(pkgdir, rpmname)
+                if os.path.exists(filename):
+                    os.remove(filename)
+
+    # Create the repo metadata
+    if os.path.exists('/usr/bin/createrepo_c'):
+        createrepo = '/usr/bin/createrepo_c'
+    else:
+        createrepo = 'createrepo'
+
+    os.chdir(output)
diff --git a/cgcs-patch/bin/sw-patch b/cgcs-patch/bin/sw-patch
new file mode 100755
index 00000000..030cd796
--- /dev/null
+++ b/cgcs-patch/bin/sw-patch
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+
+"""
+Copyright (c) 2014 Wind River Systems, Inc.
+
+SPDX-License-Identifier: Apache-2.0
+
+"""
+
+import sys
+
+from cgcs_patch.patch_client import main
+
+if __name__ == "__main__":
+    main()
+
diff --git a/cgcs-patch/bin/sw-patch-agent b/cgcs-patch/bin/sw-patch-agent
new file mode 100755
index 00000000..37de539c
--- /dev/null
+++ b/cgcs-patch/bin/sw-patch-agent
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+
+"""
+Copyright (c) 2014 Wind River Systems, Inc.
+
+SPDX-License-Identifier: Apache-2.0
+
+"""
+
+import sys
+
+from cgcs_patch.patch_agent import main
+
+if __name__ == "__main__":
+    main()
+
diff --git a/cgcs-patch/bin/sw-patch-agent-init.sh b/cgcs-patch/bin/sw-patch-agent-init.sh
new file mode 100755
index 00000000..fe1b8f40
--- /dev/null
+++ b/cgcs-patch/bin/sw-patch-agent-init.sh
@@ -0,0 +1,97 @@
+#!/bin/sh
+#
+# Copyright (c) 2014-2015 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+# chkconfig: 345 26 30
+
+### BEGIN INIT INFO
+# Provides:          sw-patch-agent
+# Required-Start:    $syslog
+# Required-Stop:     $syslog
+# Default-Start:     2 3 5
+# Default-Stop:      0 1 6
+# Short-Description: sw-patch-agent
+# Description:       Provides the CGCS Patch Agent Daemon
+### END INIT INFO
+
+DESC="sw-patch-agent"
+DAEMON="/usr/sbin/sw-patch-agent"
+PIDFILE="/var/run/sw-patch-agent.pid"
+PATCH_INSTALLING_FILE="/var/run/patch_installing"
+
+start()
+{
+    if [ -e $PIDFILE ]; then
+        PIDDIR=/proc/$(cat $PIDFILE)
+        if [ -d ${PIDDIR} ]; then
+            echo "$DESC already running."
+            exit 1
+        else
+            echo "Removing stale PID file $PIDFILE"
+            rm -f $PIDFILE
+        fi
+    fi
+
+    echo -n "Starting $DESC..."
+
+    start-stop-daemon --start --quiet --background \
+        --pidfile ${PIDFILE} --make-pidfile --exec ${DAEMON}
+
+    if [ $? -eq 0 ]; then
+        echo "done."
+    else
+        echo "failed."
+    fi
+}
+
+stop()
+{
+    if [ -f $PATCH_INSTALLING_FILE ]
+    then
+        echo "Patches are installing. Waiting for install to complete."
+        while [ -f $PATCH_INSTALLING_FILE ]
+        do
+            # Verify the agent is still running
+            pid=$(cat $PATCH_INSTALLING_FILE)
+            cat /proc/$pid/cmdline 2>/dev/null | grep -q $DAEMON
+            if [ $? -ne 0 ]
+            then
+                echo "Patch agent not running."
+                break
+            fi
+            sleep 1
+        done
+        echo "Continuing with shutdown."
+    fi
+
+    echo -n "Stopping $DESC..."
+    start-stop-daemon --stop --quiet --pidfile $PIDFILE
+    if [ $? -eq 0 ]; then
+        echo "done."
+    else
+        echo "failed."
+    fi
+    rm -f $PIDFILE
+}
+
+case "$1" in
+    start)
+        start
+        ;;
+    stop)
+        stop
+        ;;
+    restart|force-reload)
+        stop
+        start
+        ;;
+    *)
+        echo "Usage: $0 {start|stop|force-reload|restart}"
+        exit 1
+        ;;
+esac
+
+exit 0
diff --git a/cgcs-patch/bin/sw-patch-agent-restart b/cgcs-patch/bin/sw-patch-agent-restart
new file mode 100644
index 00000000..45e86798
--- /dev/null
+++ b/cgcs-patch/bin/sw-patch-agent-restart
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (c) 2016 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+. /etc/patching/patch-functions
+
+#
+# Triggering a restart of the patching daemons is done by
+# creating a flag file and letting the daemon handle the restart.
+#
+loginfo "Requesting restart of patch-agent"
+
+restart_patch_agent_flag="/run/patching/.restart.patch-agent"
+touch $restart_patch_agent_flag
+
+exit 0
+
diff --git a/cgcs-patch/bin/sw-patch-agent.service b/cgcs-patch/bin/sw-patch-agent.service
new file mode 100644
index 00000000..c008fc12
--- /dev/null
+++ b/cgcs-patch/bin/sw-patch-agent.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=TIS Patching Agent
+After=syslog.target network.target sw-patch.service
+Before=pmon.service
+
+[Service]
+Type=forking
+User=root
+ExecStart=/etc/init.d/sw-patch-agent start
+ExecStop=/etc/init.d/sw-patch-agent stop
+ExecReload=/etc/init.d/sw-patch-agent restart
+PIDFile=/var/run/sw-patch-agent.pid
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/cgcs-patch/bin/sw-patch-controller-daemon b/cgcs-patch/bin/sw-patch-controller-daemon
new file mode 100755
index 00000000..282dbd88
--- /dev/null
+++ b/cgcs-patch/bin/sw-patch-controller-daemon
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+
+"""
+Copyright (c) 2014 Wind River Systems, Inc.
+
+SPDX-License-Identifier: Apache-2.0
+
+"""
+
+import sys
+
+from cgcs_patch.patch_controller import main
+
+if __name__ == "__main__":
+    main()
+
diff --git a/cgcs-patch/bin/sw-patch-controller-daemon-init.sh b/cgcs-patch/bin/sw-patch-controller-daemon-init.sh
new file mode 100755
index 00000000..2a0e8728
--- /dev/null
+++ b/cgcs-patch/bin/sw-patch-controller-daemon-init.sh
@@ -0,0 +1,78 @@
+#!/bin/sh
+#
+# Copyright (c) 2014-2015 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+# chkconfig: 345 25 30
+
+### BEGIN INIT INFO
+# Provides:          sw-patch-controller-daemon
+# Required-Start:    $syslog
+# Required-Stop:     $syslog
+# Default-Start:     2 3 5
+# Default-Stop:      0 1 6
+# Short-Description: sw-patch-controller-daemon
+# Description:       Provides the CGCS Patch Controller Daemon
+### END INIT INFO
+
+DESC="sw-patch-controller-daemon"
+DAEMON="/usr/sbin/sw-patch-controller-daemon"
+PIDFILE="/var/run/sw-patch-controller-daemon.pid"
+
+start()
+{
+    if [ -e $PIDFILE ]; then
+        PIDDIR=/proc/$(cat $PIDFILE)
+        if [ -d ${PIDDIR} ]; then
+            echo "$DESC already running."
+            exit 1
+        else
+            echo "Removing stale PID file $PIDFILE"
+            rm -f $PIDFILE
+        fi
+    fi
+
+    echo -n "Starting $DESC..."
+
+    start-stop-daemon --start --quiet --background \
+        --pidfile ${PIDFILE} --make-pidfile --exec ${DAEMON}
+
+    if [ $? -eq 0 ]; then
+        echo "done."
+    else
+        echo "failed."
+    fi
+}
+
+stop()
+{
+    echo -n "Stopping $DESC..."
+    start-stop-daemon --stop --quiet --pidfile $PIDFILE
+    if [ $? -eq 0 ]; then
+        echo "done."
+    else
+        echo "failed."
+    fi
+    rm -f $PIDFILE
+}
+
+case "$1" in
+    start)
+        start
+        ;;
+    stop)
+        stop
+        ;;
+    restart|force-reload)
+        stop
+        start
+        ;;
+    *)
+        echo "Usage: $0 {start|stop|force-reload|restart}"
+        exit 1
+        ;;
+esac
+
+exit 0
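The sw-patch-agent-restart helper above and the sw-patch-controller-daemon-restart helper below follow the convention described in their header comments: the helper only creates a flag file under /run/patching, and the corresponding daemon is expected to notice the flag and restart itself. The daemon-side loop is not part of this hunk; purely as an illustrative sketch (the restart_daemon callback and polling interval are assumptions, not code from this change), such a poller could look like:

    import os
    import time

    RESTART_FLAG = "/run/patching/.restart.patch-agent"

    def watch_for_restart_flag(restart_daemon, interval=1.0):
        # Poll for the flag file created by sw-patch-agent-restart;
        # consume it and invoke the daemon-specific restart callback.
        while True:
            if os.path.exists(RESTART_FLAG):
                os.remove(RESTART_FLAG)
                restart_daemon()  # hypothetical stand-in for the real restart logic
            time.sleep(interval)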
diff --git a/cgcs-patch/bin/sw-patch-controller-daemon-restart b/cgcs-patch/bin/sw-patch-controller-daemon-restart
new file mode 100644
index 00000000..129348f0
--- /dev/null
+++ b/cgcs-patch/bin/sw-patch-controller-daemon-restart
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (c) 2016 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+. /etc/patching/patch-functions
+
+#
+# Triggering a restart of the patching daemons is done by
+# creating a flag file and letting the daemon handle the restart.
+#
+loginfo "Requesting restart of patch-controller"
+
+restart_patch_controller_flag="/run/patching/.restart.patch-controller"
+touch $restart_patch_controller_flag
+
+exit 0
+
diff --git a/cgcs-patch/bin/sw-patch-controller-daemon.service b/cgcs-patch/bin/sw-patch-controller-daemon.service
new file mode 100644
index 00000000..b63a82ba
--- /dev/null
+++ b/cgcs-patch/bin/sw-patch-controller-daemon.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=TIS Patching Controller Daemon
+After=syslog.target network.target sw-patch.service sw-patch-controller.service
+Before=pmon.service
+
+[Service]
+Type=forking
+User=root
+ExecStart=/etc/init.d/sw-patch-controller-daemon start
+ExecStop=/etc/init.d/sw-patch-controller-daemon stop
+ExecReload=/etc/init.d/sw-patch-controller-daemon restart
+PIDFile=/var/run/sw-patch-controller-daemon.pid
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/cgcs-patch/bin/sw-patch-controller-init.sh b/cgcs-patch/bin/sw-patch-controller-init.sh
new file mode 100644
index 00000000..ee8348f0
--- /dev/null
+++ b/cgcs-patch/bin/sw-patch-controller-init.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+#
+# Copyright (c) 2014 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# CGCS Patching Controller setup
+# chkconfig: 345 20 24
+# description: CGCS Patching Controller init script
+
+. /usr/bin/tsconfig
+
+NAME=$(basename $0)
+
+REPO_ID=updates
+REPO_ROOT=/www/pages/${REPO_ID}
+REPO_DIR=${REPO_ROOT}/rel-${SW_VERSION}
+GROUPS_FILE=$REPO_DIR/comps.xml
+PATCHING_DIR=/opt/patching
+
+logfile=/var/log/patching.log
+
+function LOG()
+{
+    logger "$NAME: $*"
+    echo "`date "+%FT%T.%3N"`: $NAME: $*" >> $logfile
+}
+
+function LOG_TO_FILE()
+{
+    echo "`date "+%FT%T.%3N"`: $NAME: $*" >> $logfile
+}
+
+function create_groups()
+{
+    if [ -f $GROUPS_FILE ]
+    then
+        return 0
+    fi
+
+    cat >$GROUPS_FILE <<EOF
+<comps>
+</comps>
+EOF
+}
+
+function do_setup()
+{
+    # Does the repo exist?
+    if [ ! -d $REPO_DIR ]
+    then
+        LOG "Creating repo"
+        mkdir -p $REPO_DIR
+
+        # Setup the groups file
+        create_groups
+
+        createrepo -g $GROUPS_FILE $REPO_DIR >> $logfile 2>&1
+    fi
+
+    if [ ! -d $PATCHING_DIR ]
+    then
+        LOG "Creating $PATCHING_DIR"
+        mkdir -p $PATCHING_DIR
+    fi
+
+    # If we can ping the active controller, sync the repos
+    LOG_TO_FILE "ping -c 1 -w 1 controller"
+    ping -c 1 -w 1 controller >> $logfile 2>&1 || ping6 -c 1 -w 1 controller >> $logfile 2>&1
+    if [ $? -ne 0 ]
+    then
+        LOG "Cannot ping controller. Nothing to do"
+        return 0
+    fi
+
+    # Sync the patching dir
+    LOG_TO_FILE "rsync -acv --delete rsync://controller/patching/ ${PATCHING_DIR}/"
+    rsync -acv --delete rsync://controller/patching/ ${PATCHING_DIR}/ >> $logfile 2>&1
+
+    # Sync the repo dir
+    LOG_TO_FILE "rsync -acv --delete rsync://controller/repo/ ${REPO_ROOT}/"
+    rsync -acv --delete rsync://controller/repo/ ${REPO_ROOT}/ >> $logfile 2>&1
+}
+
+case "$1" in
+    start)
+        do_setup
+        ;;
+    status)
+        ;;
+    stop)
+        # Nothing to do here
+        ;;
+    restart)
+        do_setup
+        ;;
+    *)
+        echo "Usage: $0 {status|start|stop|restart}"
+        exit 1
+esac
+
+exit 0
+
diff --git a/cgcs-patch/bin/sw-patch-controller.service b/cgcs-patch/bin/sw-patch-controller.service
new file mode 100644
index 00000000..af4bc355
--- /dev/null
+++ b/cgcs-patch/bin/sw-patch-controller.service
@@ -0,0 +1,14 @@
+[Unit]
+Description=TIS Patching Controller
+After=syslog.service network.target sw-patch.service
+Before=sw-patch-agent.service sw-patch-controller-daemon.service
+
+[Service]
+Type=oneshot
+User=root
+ExecStart=/etc/init.d/sw-patch-controller start
+RemainAfterExit=yes
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/cgcs-patch/bin/sw-patch-init.sh b/cgcs-patch/bin/sw-patch-init.sh
new file mode 100644
index 00000000..1cddab96
--- /dev/null
+++ b/cgcs-patch/bin/sw-patch-init.sh
@@ -0,0 +1,147 @@
+#!/bin/bash
+#
+# Copyright (c) 2014-2015 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# CGCS Patching
+# chkconfig: 345 20 23
+# description: CGCS Patching init script
+
+NAME=$(basename $0)
+
+. /usr/bin/tsconfig
+. /etc/platform/platform.conf
+
+logfile=/var/log/patching.log
+
+function LOG_TO_FILE()
+{
+    echo "`date "+%FT%T.%3N"`: $NAME: $*" >> $logfile
+}
+
+function check_for_rr_patch()
+{
+    if [ -f /var/run/node_is_patched_rr ]
+    then
+        echo
+        echo "Node has been patched and requires an immediate reboot."
+        echo
+        LOG_TO_FILE "Node has been patched, with reboot-required flag set. Rebooting"
+        /sbin/reboot
+    fi
+}
+
+function check_install_uuid()
+{
+    # Check whether our installed load matches the active controller
+    CONTROLLER_UUID=`curl -sf http://controller/feed/rel-${SW_VERSION}/install_uuid`
+    if [ $? -ne 0 ]
+    then
+        if [ "$HOSTNAME" = "controller-1" ]
+        then
+            # If we're on controller-1, controller-0 may not have the install_uuid
+            # matching this release, if we're in an upgrade. If the file doesn't exist,
+            # bypass this check
+            return 0
+        fi
+
+        LOG_TO_FILE "Unable to retrieve installation uuid from active controller"
+        echo "Unable to retrieve installation uuid from active controller"
+        return 1
+    fi
+
+    if [ "$INSTALL_UUID" != "$CONTROLLER_UUID" ]
+    then
+        LOG_TO_FILE "This node is running a different load than the active controller and must be reinstalled"
+        echo "This node is running a different load than the active controller and must be reinstalled"
+        return 1
+    fi
+
+    return 0
+}
+
+# Check for installation failure
+if [ -f /etc/platform/installation_failed ] ; then
+    LOG_TO_FILE "/etc/platform/installation_failed flag is set. Aborting."
+    echo "$(basename $0): Detected installation failure. Aborting."
+    exit 1
+fi
+
+# Clean up the RPM DB
+if [ ! -f /var/run/.rpmdb_cleaned ]
+then
+    LOG_TO_FILE "Cleaning RPM DB"
+    rm -f /var/lib/rpm/__db*
+    touch /var/run/.rpmdb_cleaned
+fi
+
+# If the management interface is bonded, it may take some time
+# before communications can be properly setup.
+# Allow up to $DELAY_SEC seconds to reach controller.
+DELAY_SEC=120
+START=`date +%s`
+FOUND=0
+while [ $(date +%s) -lt $(( ${START} + ${DELAY_SEC} )) ]
+do
+    ping -c 1 controller > /dev/null 2>&1 || ping6 -c 1 controller > /dev/null 2>&1
+    if [ $? -eq 0 ]
+    then
+        FOUND=1
+        break
+    fi
+    sleep 1
+done
+
+if [ ${FOUND} -eq 0 ]
+then
+    # 'controller' is not available, just exit
+    LOG_TO_FILE "Unable to contact active controller (controller). Boot will continue."
+    exit 1
+fi
+
+case "$1" in
+    start)
+        if [ "${system_mode}" = "simplex" ]
+        then
+            # On a simplex CPE, we need to launch the http server first,
+            # before we can do the patch installation
+            LOG_TO_FILE "***** Launching lighttpd *****"
+            /etc/init.d/lighttpd start
+
+            LOG_TO_FILE "***** Starting patch operation *****"
+            /usr/sbin/sw-patch-agent --install 2>>$logfile
+            LOG_TO_FILE "***** Finished patch operation *****"
+
+            LOG_TO_FILE "***** Shutting down lighttpd *****"
+            /etc/init.d/lighttpd stop
+        else
+            check_install_uuid
+            if [ $? -ne 0 ]
+            then
+                # The INSTALL_UUID doesn't match the active controller, so exit
+                exit 1
+            fi
+
+            LOG_TO_FILE "***** Starting patch operation *****"
+            /usr/sbin/sw-patch-agent --install 2>>$logfile
+            LOG_TO_FILE "***** Finished patch operation *****"
+        fi
+
+        check_for_rr_patch
+        ;;
+    stop)
+        # Nothing to do here
+        ;;
+    restart)
+        LOG_TO_FILE "***** Starting patch operation *****"
+        /usr/sbin/sw-patch-agent --install 2>>$logfile
+        LOG_TO_FILE "***** Finished patch operation *****"
+        ;;
+    *)
+        echo "Usage: $0 {start|stop|restart}"
+        exit 1
+esac
+
+exit 0
+
diff --git a/cgcs-patch/bin/sw-patch.completion b/cgcs-patch/bin/sw-patch.completion
new file mode 100644
index 00000000..145e7638
--- /dev/null
+++ b/cgcs-patch/bin/sw-patch.completion
@@ -0,0 +1,124 @@
+#
+# Copyright (c) 2016 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+#
+# This file provides bash-completion functionality for the sw-patch CLI
+#
+
+function _swpatch()
+{
+    COMPREPLY=()
+    local cur="${COMP_WORDS[COMP_CWORD]}"
+    local prev="${COMP_WORDS[COMP_CWORD-1]}"
+    local subcommand=${COMP_WORDS[1]}
+
+    #
+    # The available sw-patch subcommands
+    #
+    local subcommands="
+        apply
+        commit
+        delete
+        query
+        query-dependencies
+        query-hosts
+        remove
+        show
+        upload
+        upload-dir
+        what-requires
+        drop-host
+    "
+    if [ -f /etc/platform/.initial_config_complete ]; then
+        # Post-config, so the host-install commands are accessible
+        subcommands="${subcommands} host-install host-install-async"
+    else
+        # Pre-config, so the install-local command is accessible
+        subcommands="${subcommands} install-local"
+    fi
+
+    # Appends the '/' when completing dir names
+    set mark-directories on
+
+    if [ $COMP_CWORD -gt 1 ]; then
+        #
+        # Complete the arguments to the subcommands.
+        #
+        case "$subcommand" in
+            apply|remove|delete|show|what-requires)
+                # Query the list of known patches
+                local patches=$(sw-patch completion patches 2>/dev/null)
+                COMPREPLY=( $(compgen -W "${patches}" -- ${cur}) )
+                return 0
+                ;;
+            host-install|host-install-async|drop-host)
+                if [ "${prev}" = "${subcommand}" -o "${prev}" = "--force" ]; then
+                    # Query the list of known hosts
+                    local names=$(sw-patch completion hosts 2>/dev/null)
+                    COMPREPLY=( $(compgen -W "${names}" -- ${cur}) )
+                else
+                    # Only one host can be specified, so no more completion
+                    COMPREPLY=( $(compgen -- ${cur}) )
+                fi
+                return 0
+                ;;
+            upload)
+                # Allow dirs and files with .patch extension for completion
+                COMPREPLY=( $(compgen -f -o plusdirs -X '!*.patch' -- ${cur}) )
+                return 0
+                ;;
+            upload-dir)
+                # Allow dirs only for completion
+                COMPREPLY=( $(compgen -d -- ${cur}) )
+                return 0
+                ;;
+            query)
+                if [ "${prev}" = "--release" ]; then
+                    # If --release has been specified, provide installed releases for completion
+                    local releases=$(/bin/ls -d /www/pages/feed/rel-* 2>/dev/null | sed 's#/www/pages/feed/rel-##')
+                    COMPREPLY=( $(compgen -W "${releases}" -- ${cur}) )
+                else
+                    # --release is only completion option for query
+                    COMPREPLY=( $(compgen -W "--release" -- ${cur}) )
+                fi
+                return 0
+                ;;
+            query-hosts|install-local)
+                # These subcommands have no options/arguments
+                COMPREPLY=( $(compgen -- ${cur}) )
+                return 0
+                ;;
+            query-dependencies)
+                # Query the list of known patches
+                local patches=$(sw-patch completion patches 2>/dev/null)
+                COMPREPLY=( $(compgen -W "--recursive ${patches}" -- ${cur}) )
+                return 0
+                ;;
+            commit)
+                if [ "${prev}" = "--release" ]; then
+                    # If --release has been specified, provide installed releases for completion
+                    local releases=$(/bin/ls -d /www/pages/feed/rel-* 2>/dev/null | sed 's#/www/pages/feed/rel-##')
+                    COMPREPLY=( $(compgen -W "${releases}" -- ${cur}) )
+                else
+                    # Query the list of known patches
+                    local patches=$(sw-patch completion patches 2>/dev/null)
+                    COMPREPLY=( $(compgen -W "--all --dry-run --release ${patches}" -- ${cur}) )
+                fi
+                return 0
+                ;;
+            *)
+                ;;
+        esac
+    fi
+
+    # Provide subcommands for completion
+    COMPREPLY=($(compgen -W "${subcommands}" -- ${cur}))
+    return 0
+}
+
+# Bind the above function to the sw-patch CLI
+complete -F _swpatch -o filenames sw-patch
+
diff --git a/cgcs-patch/bin/sw-patch.service b/cgcs-patch/bin/sw-patch.service
new file mode 100644
index 00000000..69acfa97
--- /dev/null
+++ b/cgcs-patch/bin/sw-patch.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=TIS Patching
+After=syslog.target network.target
+Before=sw-patch-agent.service
+
+[Service]
+Type=oneshot
+User=root
+ExecStart=/etc/init.d/sw-patch start
+RemainAfterExit=yes
+StandardOutput=syslog+console
+StandardError=syslog+console
+
+[Install]
+WantedBy=multi-user.target
+
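The completion script above is installed as /etc/bash_completion.d/sw-patch (see the cgcs-patch.spec %install section later in this patch), so login shells pick it up automatically. To try it in an existing shell, one can source it by hand and tab-complete; for example, "sw-patch up<TAB>" offers upload and upload-dir, and "sw-patch apply <TAB>" lists known patch IDs via the "sw-patch completion patches" call used in the function above.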
diff --git a/cgcs-patch/bin/upgrade-start-pkg-extract b/cgcs-patch/bin/upgrade-start-pkg-extract
new file mode 100644
index 00000000..f5cab7fd
--- /dev/null
+++ b/cgcs-patch/bin/upgrade-start-pkg-extract
@@ -0,0 +1,135 @@
+#!/bin/bash
+#
+# Copyright (c) 2018 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+function show_usage()
+{
+    cat >&2 <<EOF
+Usage: $(basename $0) -r <release>
+
+This tool will extract required packages to support upgrade-start
+
+Options:
+    -r <release>:  Release ID for target release.
+
+EOF
+    exit 1
+}
+
+. /etc/build.info
+if [ -z "${SW_VERSION}" ]; then
+    logger -t $0 "Unable to identify running release ID"
+    exit 1
+fi
+
+declare TGT_RELEASE=
+
+while getopts "r:h" opt; do
+    case $opt in
+        r)
+            TGT_RELEASE=$OPTARG
+            ;;
+        h)
+            show_usage
+            ;;
+        *)
+            logger -t $0 "Unsupported option"
+            show_usage
+            ;;
+    esac
+done
+
+if [ -z "${TGT_RELEASE}" ]; then
+    logger -t $0 "You must specify the target release."
+    exit 1
+fi
+
+if [ "${TGT_RELEASE}" = "${SW_VERSION}" ]; then
+    logger -t $0 "Target release cannot be running release."
+    exit 1
+fi
+
+declare TGT_BASE_REPO=/www/pages/feed/rel-${TGT_RELEASE}
+declare TGT_PATCHES_REPO=/www/pages/updates/rel-${TGT_RELEASE}
+
+if [ ! -d ${TGT_BASE_REPO} ]; then
+    logger -t $0 "Target release ${TGT_RELEASE} is not installed"
+    exit 1
+fi
+
+declare TGT_PATCHES_REPO_OPT=""
+if [ -d ${TGT_PATCHES_REPO} ]; then
+    TGT_PATCHES_REPO_OPT="--repofrompath updates,${TGT_PATCHES_REPO}"
+fi
+
+declare WORKDIR=
+
+function cleanup() {
+    if [ -n "${WORKDIR}" -a -d "${WORKDIR}" ]; then
+        rm -rf ${WORKDIR}
+    fi
+}
+
+trap cleanup EXIT
+
+function extract_pkg() {
+    local pkgname=$1
+
+    ORIG_PWD=$PWD
+    cd $WORKDIR
+
+    # Find the RPM
+    local pkgfile=$(repoquery --repofrompath base,${TGT_BASE_REPO} ${TGT_PATCHES_REPO_OPT} --location -q ${pkgname})
+    if [ -z "${pkgfile}" ]; then
+        logger -t $0 "Could not find ${pkgname}"
+        exit 1
+    fi
+
+    # Chop off the file: from the start of the file location
+    local rpmfile=${pkgfile/file://}
+
+    rpm2cpio ${rpmfile} | cpio -idm
+    if [ $? -ne 0 ]; then
+        logger -t $0 "Failed to extract $pkgname files from ${pkgfile/file://}"
+        exit 1
+    fi
+
+    cd ${ORIG_PWD}
+}
+
+# Extract files from pxe-network-installer
+WORKDIR=$(mktemp -d --tmpdir=/scratch pkgextract_XXXX)
+if [ -z "${WORKDIR}" -o ! -d "${WORKDIR}" ]; then
+    logger -t $0 "Failed to create workdir"
+    exit 1
+fi
+extract_pkg pxe-network-installer
+rsync -ac ${WORKDIR}/usr/ /usr/ &&
+rsync -ac ${WORKDIR}/pxeboot/rel-${TGT_RELEASE}/ /pxeboot/rel-${TGT_RELEASE}/ &&
+rsync -c ${WORKDIR}/pxeboot/pxelinux.cfg.files/*-${TGT_RELEASE} /pxeboot/pxelinux.cfg.files/ &&
+rsync -ac ${WORKDIR}/www/pages/feed/rel-${TGT_RELEASE}/ /www/pages/feed/rel-${TGT_RELEASE}/
+if [ $? -ne 0 ]; then
+    logger -t $0 "rsync command failed, extracting pxe-network-installer"
+    exit 1
+fi
+rm -rf ${WORKDIR}
+
+# Extract files from platform-kickstarts
+WORKDIR=$(mktemp -d --tmpdir=/scratch pkgextract_XXXX)
+if [ -z "${WORKDIR}" -o ! -d "${WORKDIR}" ]; then
+    logger -t $0 "Failed to create workdir"
+    exit 1
+fi
+extract_pkg platform-kickstarts
+rsync -ac ${WORKDIR}/www/pages/feed/rel-${TGT_RELEASE}/ /www/pages/feed/rel-${TGT_RELEASE}/
+if [ $? -ne 0 ]; then
+    logger -t $0 "rsync command failed, extracting platform-kickstarts"
+    exit 1
+fi
+rm -rf ${WORKDIR}
+
+exit 0
+
diff --git a/cgcs-patch/centos/build_srpm b/cgcs-patch/centos/build_srpm
new file mode 100755
index 00000000..1766e8fd
--- /dev/null
+++ b/cgcs-patch/centos/build_srpm
@@ -0,0 +1,101 @@
+source "$SRC_BASE/build-tools/spec-utils"
+
+if [ "x$DATA" == "x" ]; then
+    echo "ERROR: Environment variable 'DATA' not defined."
+    exit 1
+fi
+
+if [ ! -f "$DATA" ]; then
+    echo "ERROR: Couldn't find '$PWD/$DATA'"
+    exit 1
+fi
+
+unset TIS_PATCH_VER  # Ensure there's nothing in the env already
+
+source $DATA
+
+if [ -z "$TIS_PATCH_VER" ]; then
+    echo "ERROR: TIS_PATCH_VER must be defined"
+    exit 1
+fi
+
+SRC_DIR="cgcs-patch"
+EXTRA_DIR="bin"
+
+VERSION=$(grep '^Version:' PKG-INFO | awk -F ': ' '{print $2}' | sed -e 's/^[[:space:]]*//')
+TAR_NAME=$(grep '^Name:' PKG-INFO | awk -F ': ' '{print $2}' | sed -e 's/^[[:space:]]*//')
+CUR_DIR=`pwd`
+BUILD_DIR="$RPMBUILD_BASE"
+
+mkdir -p $BUILD_DIR/SRPMS
+
+TAR="$TAR_NAME-$VERSION.tar.gz"
+TAR_PATH="$BUILD_DIR/SOURCES/$TAR"
+
+# copy the LICENSE for rpm spec %license directive
+cp $SRC_DIR/LICENSE $BUILD_DIR/SOURCES/
+
+TAR_NEEDED=0
+if [ -f $TAR_PATH ]; then
+    n=`find . -cnewer $TAR_PATH -and ! -path './.git*' \
+        -and ! -path './build/*' \
+        -and ! -path './.pc/*' \
+        -and ! -path './patches/*' \
+        -and ! -path "./$DISTRO/*" \
+        -and ! -path './pbr-*.egg/*' \
+        | wc -l`
+    if [ $n -gt 0 ]; then
+        TAR_NEEDED=1
+    fi
+else
+    TAR_NEEDED=1
+fi
+
+if [ $TAR_NEEDED -gt 0 ]; then
+    tar czvf $TAR_PATH $SRC_DIR $EXTRA_DIR \
+        --exclude='cgcs-patch/cgcs_patch_id' \
+        --exclude='cgcs-patch/cgcs_make_patch' \
+        --exclude='.git*' \
+        --exclude='build' \
+        --exclude='.pc' \
+        --exclude='patches' \
+        --exclude="$DISTRO" \
+        --exclude='pbr-*.egg' \
+        --transform "s,^$SRC_DIR/LICENSE,LICENSE," \
+        --transform "s,^$SRC_DIR,$TAR_NAME-$VERSION,"
fi
+
+
+for SPEC in `ls $BUILD_DIR/SPECS`; do
+    SPEC_PATH="$BUILD_DIR/SPECS/$SPEC"
+    RELEASE=`spec_find_tag Release "$SPEC_PATH" 2>> /dev/null`
+    if [ $? -ne 0 ]; then
+        echo "ERROR: 'Release' not found in '$SPEC_PATH'"
+    fi
+    NAME=`spec_find_tag Name "$SPEC_PATH" 2>> /dev/null`
+    if [ $? -ne 0 ]; then
+        echo "ERROR: 'Name' not found in '$SPEC_PATH'"
+    fi
+    SRPM="$NAME-$VERSION-$RELEASE.src.rpm"
+    SRPM_PATH="$BUILD_DIR/SRPMS/$SRPM"
+
+    BUILD_NEEDED=0
+    if [ -f $SRPM_PATH ]; then
+        n=`find . -cnewer $SRPM_PATH | wc -l`
+        if [ $n -gt 0 ]; then
+            BUILD_NEEDED=1
+        fi
+    else
+        BUILD_NEEDED=1
+    fi
+
+    if [ $BUILD_NEEDED -gt 0 ]; then
+        echo "SPEC file: $SPEC_PATH"
+        echo "SRPM build directory: $BUILD_DIR"
+        echo "TIS_PATCH_VER: $TIS_PATCH_VER"
+
+        sed -i -e "1 i%define tis_patch_ver $TIS_PATCH_VER" $SPEC_PATH
+        rpmbuild -bs $SPEC_PATH --define="%_topdir $BUILD_DIR" --define="_tis_dist .tis"
+    fi
+done
+
diff --git a/cgcs-patch/centos/build_srpm.data b/cgcs-patch/centos/build_srpm.data
new file mode 100644
index 00000000..fa02dcbd
--- /dev/null
+++ b/cgcs-patch/centos/build_srpm.data
@@ -0,0 +1 @@
+TIS_PATCH_VER=25
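To make the versioning flow concrete: with TIS_PATCH_VER=25 from build_srpm.data above, the sed line in build_srpm prepends "%define tis_patch_ver 25" to the spec, so the "Release: %{tis_patch_ver}%{?_tis_dist}" tag in cgcs-patch.spec below expands to "25.tis" once rpmbuild runs with --define="_tis_dist .tis". The resulting source RPM is then named along the lines of cgcs-patch-1.0-25.tis.src.rpm; that exact name is an inference here, since build_srpm assembles it from the Name, Version, and Release values it reads out of the spec.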
diff --git a/cgcs-patch/centos/cgcs-patch.spec b/cgcs-patch/centos/cgcs-patch.spec
new file mode 100644
index 00000000..7a4672e1
--- /dev/null
+++ b/cgcs-patch/centos/cgcs-patch.spec
@@ -0,0 +1,180 @@
+Summary: TIS Platform Patching
+Name: cgcs-patch
+Version: 1.0
+Release: %{tis_patch_ver}%{?_tis_dist}
+License: Apache-2.0
+Group: base
+Packager: Wind River
+URL: unknown
+Source0: %{name}-%{version}.tar.gz
+Source1: LICENSE
+
+BuildRequires: python-setuptools
+BuildRequires: systemd-units
+BuildRequires: systemd-devel
+Requires: python-devel
+Requires: /bin/bash
+
+%description
+TIS Platform Patching
+
+%define pythonroot /usr/lib64/python2.7/site-packages
+
+%define debug_package %{nil}
+
+%prep
+%setup
+
+%build
+%{__python} setup.py build
+
+%install
+%{__python} setup.py install --root=$RPM_BUILD_ROOT \
+                             --install-lib=%{pythonroot} \
+                             --prefix=/usr \
+                             --install-data=/usr/share \
+                             --single-version-externally-managed
+
+    install -m 755 -d %{buildroot}%{_sbindir}
+    install -m 755 -d %{buildroot}%{_sysconfdir}/bash_completion.d
+    install -m 755 -d %{buildroot}%{_sysconfdir}/goenabled.d
+    install -m 755 -d %{buildroot}%{_sysconfdir}/init.d
+    install -m 755 -d %{buildroot}%{_sysconfdir}/logrotate.d
+    install -m 755 -d %{buildroot}%{_sysconfdir}/patching
+    install -m 700 -d %{buildroot}%{_sysconfdir}/patching/patch-scripts
+    install -m 755 -d %{buildroot}%{_sysconfdir}/pmon.d
+    install -m 755 -d %{buildroot}%{_unitdir}
+
+
+    install -m 500 ${RPM_BUILD_DIR}/bin/sw-patch-agent \
+        %{buildroot}%{_sbindir}/sw-patch-agent
+    install -m 500 ${RPM_BUILD_DIR}/bin/sw-patch-controller-daemon \
+        %{buildroot}%{_sbindir}/sw-patch-controller-daemon
+    install -m 555 ${RPM_BUILD_DIR}/bin/sw-patch \
+        %{buildroot}%{_sbindir}/sw-patch
+
+    install -m 555 ${RPM_BUILD_DIR}/bin/rpm-audit \
+        %{buildroot}%{_sbindir}/rpm-audit
+
+    install -m 500 ${RPM_BUILD_DIR}/bin/sw-patch-controller-daemon-init.sh \
+        %{buildroot}%{_sysconfdir}/init.d/sw-patch-controller-daemon
+    install -m 500 ${RPM_BUILD_DIR}/bin/sw-patch-agent-init.sh \
+        %{buildroot}%{_sysconfdir}/init.d/sw-patch-agent
+
+    install -m 600 ${RPM_BUILD_DIR}/bin/patching.conf \
+        %{buildroot}%{_sysconfdir}/patching/patching.conf
+    install -m 644 ${RPM_BUILD_DIR}/bin/policy.json \
+        %{buildroot}%{_sysconfdir}/patching/policy.json
+
+    install -m 444 ${RPM_BUILD_DIR}/bin/pmon-sw-patch-controller-daemon.conf \
+        %{buildroot}%{_sysconfdir}/pmon.d/sw-patch-controller-daemon.conf
+    install -m 444 ${RPM_BUILD_DIR}/bin/pmon-sw-patch-agent.conf \
+        %{buildroot}%{_sysconfdir}/pmon.d/sw-patch-agent.conf
+
+    install -m 444 ${RPM_BUILD_DIR}/bin/*.service %{buildroot}%{_unitdir}
+
+    install -m 444 ${RPM_BUILD_DIR}/bin/sw-patch.completion %{buildroot}%{_sysconfdir}/bash_completion.d/sw-patch
+
+    install -m 400 ${RPM_BUILD_DIR}/bin/patch-functions \
+        %{buildroot}%{_sysconfdir}/patching/patch-functions
+
+    install -D -m 444 ${RPM_BUILD_DIR}/bin/patch-tmpdirs.conf \
+        %{buildroot}%{_tmpfilesdir}/patch-tmpdirs.conf
+    install -m 500 ${RPM_BUILD_DIR}/bin/run-patch-scripts \
+        %{buildroot}%{_sbindir}/run-patch-scripts
+
+    install -m 500 ${RPM_BUILD_DIR}/bin/sw-patch-controller-daemon-restart \
+        %{buildroot}%{_sbindir}/sw-patch-controller-daemon-restart
+    install -m 500 ${RPM_BUILD_DIR}/bin/sw-patch-agent-restart \
+        %{buildroot}%{_sbindir}/sw-patch-agent-restart
+
+    install -m 500 ${RPM_BUILD_DIR}/bin/sw-patch-init.sh \
+        %{buildroot}%{_sysconfdir}/init.d/sw-patch
+    install -m 500 ${RPM_BUILD_DIR}/bin/sw-patch-controller-init.sh \
+        %{buildroot}%{_sysconfdir}/init.d/sw-patch-controller
+
+    install -m 555 ${RPM_BUILD_DIR}/bin/patch_check_goenabled.sh \
+        %{buildroot}%{_sysconfdir}/goenabled.d/patch_check_goenabled.sh
+
+    install -m 444 ${RPM_BUILD_DIR}/bin/patching.logrotate \
+        %{buildroot}%{_sysconfdir}/logrotate.d/patching
+
+    install -m 500 ${RPM_BUILD_DIR}/bin/upgrade-start-pkg-extract \
+        %{buildroot}%{_sbindir}/upgrade-start-pkg-extract
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+%package -n cgcs-patch-controller
+Summary: TIS Platform Patching
+Group: base
+Requires: /usr/bin/env
+Requires: /bin/sh
+Requires: requests-toolbelt
+Requires: createrepo
+Requires(post): /usr/bin/env
+Requires(post): /bin/sh
+
+%description -n cgcs-patch-controller
+TIS Platform Patching
+
+%post -n cgcs-patch-controller
+/usr/bin/systemctl enable sw-patch-controller.service
+/usr/bin/systemctl enable sw-patch-controller-daemon.service
+
+
+%package -n cgcs-patch-agent
+Summary: TIS Platform Patching
+Group: base
+Requires: /usr/bin/env
+Requires: /bin/sh
+Requires(post): /usr/bin/env
+Requires(post): /bin/sh
+
+%description -n cgcs-patch-agent
+TIS Platform Patching
+
+%post -n cgcs-patch-agent
+/usr/bin/systemctl enable sw-patch-agent.service
+
+%post
+/usr/bin/systemctl enable sw-patch.service
+
+%files
+%license ../LICENSE
+%defattr(-,root,root,-)
+%{pythonroot}/cgcs_patch
+%{pythonroot}/cgcs_patch-*.egg-info
+%{_sbindir}/rpm-audit
+%config(noreplace) %{_sysconfdir}/patching/policy.json
+%config(noreplace) %{_sysconfdir}/patching/patching.conf
+%dir %{_sysconfdir}/patching/patch-scripts
+%{_sysconfdir}/patching/patch-functions
+%{_tmpfilesdir}/patch-tmpdirs.conf
+%{_sbindir}/run-patch-scripts
+%{_sysconfdir}/init.d/sw-patch
+%{_unitdir}/sw-patch.service
+%{_sysconfdir}/goenabled.d/patch_check_goenabled.sh
+%{_sysconfdir}/logrotate.d/patching
+
+%files -n cgcs-patch-controller
+%defattr(-,root,root,-)
+%{_sbindir}/sw-patch
+%{_sbindir}/sw-patch-controller-daemon
+%{_sbindir}/sw-patch-controller-daemon-restart
+%{_sbindir}/upgrade-start-pkg-extract
+%{_sysconfdir}/pmon.d/sw-patch-controller-daemon.conf
+%{_sysconfdir}/init.d/sw-patch-controller-daemon
+%{_unitdir}/sw-patch-controller-daemon.service
+%{_sysconfdir}/bash_completion.d/sw-patch
+%{_sysconfdir}/init.d/sw-patch-controller
+%{_unitdir}/sw-patch-controller.service
+
+%files -n cgcs-patch-agent
+%defattr(-,root,root,-)
+%{_sbindir}/sw-patch-agent
+%{_sbindir}/sw-patch-agent-restart
+%{_sysconfdir}/pmon.d/sw-patch-agent.conf
+%{_sysconfdir}/init.d/sw-patch-agent
+%{_unitdir}/sw-patch-agent.service
+
diff --git a/cgcs-patch/cgcs-patch/LICENSE b/cgcs-patch/cgcs-patch/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/cgcs-patch/cgcs-patch/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/cgcs-patch/cgcs-patch/cgcs_make_patch/__init__.py b/cgcs-patch/cgcs-patch/cgcs_make_patch/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/cgcs-patch/cgcs-patch/cgcs_make_patch/make_patch_functions.py b/cgcs-patch/cgcs-patch/cgcs_make_patch/make_patch_functions.py
new file mode 100644
index 00000000..76b389c9
--- /dev/null
+++ b/cgcs-patch/cgcs-patch/cgcs_make_patch/make_patch_functions.py
@@ -0,0 +1,1988 @@
+"""
+Copyright (c) 2014 Wind River Systems, Inc.
+
+SPDX-License-Identifier: Apache-2.0
+
+"""
+
+import sys
+import os
+import shutil
+import tempfile
+import platform
+import collections
+import logging
+import fnmatch
+import getopt
+import subprocess
+import time
+import re
+from cgcs_patch.patch_functions import PatchFile
+# import twisted.python.lockfile
+
+import xml.etree.ElementTree as ElementTree
+from xml.dom import minidom
+
+STATUS_OBSOLETE = 'OBS'
+STATUS_RELEASED = 'REL'
+STATUS_DEVELOPEMENT = 'DEV'
+
+RPM_DIR = "rpmbuild/RPMS"
+RPM_ARCHIVE_DIR = "rpm_archive/RPMS"
+REMOTE_COPY_PATCH_DATA_DIR = "export/remote_patch_data"
+PATCH_DATA_GIT = "cgcs-patches"
+LOCAL_PATCH_DATA_DIR = "export/patch_data"
+ORDER_FILE = "patch_order"
+ARCH_DEFAULT = "x86_64"
+
+METADATA_TAGS = [ 'ID', 'SW_VERSION', 'SUMMARY', 'DESCRIPTION',
+                  'INSTALL_INSTRUCTIONS', 'WARNINGS', 'STATUS',
+                  'UNREMOVABLE', 'REBOOT_REQUIRED' ]
+RMP_EXCLUDES = [ '-dev-', '-dbg-', '-doc-' ]
+BUILD_TYPES = [ 'std', 'rt' ]
+
+
+SAME = 0
+MINOR_DIFF = 1
+MAJOR_DIFF = 2
+
+# These from environment
+MY_REPO = None
+MY_WORKSPACE = None
+PROJECT = None
+SRC_BUILD_ENVIRONMENT = None
+MY_SRC_RPM_BUILD_DIR = None
+MY_BUILD_CFG = None
+MY_BUILD_DIR = None
+
+WORKDIR_BUILD_INFO_LOCATION = "build.info"
+SRCDIR_UNBUILT_PATTERN_FILE = "build-data/unbuilt_rpm_patterns"
+SRCDIR_IMAGE_INC_FILE = "build-tools/build_iso/image.inc"
+
+build_info = {}
+
+temp_rpm_db_dir = None
+workdir = None
+srcdir = None
+branch = None
+sw_version = None
+formal_flag = False
+pre_compiled_flag = False
+pre_clean_flag = False
+all_flag = False
+capture_source_flag = False
+capture_rpms_flag = False
+
+capture_source_path = None
+
+logfile = "/var/log/patching.log"
+
+LOG = logging.getLogger(__name__)
+
+
+def configure_logging(logtofile=True, level=logging.DEBUG):
+    if logtofile:
+        my_exec = os.path.basename(sys.argv[0])
+
+        log_format = '%(asctime)s: ' \
+                     + my_exec + '[%(process)s]: ' \
+                     + '%(filename)s(%(lineno)s): ' \
+                     + '%(levelname)s: %(message)s'
+
+        logging.basicConfig(filename=logfile, level=level, format=log_format, datefmt="%FT%T")
+
+        # Log uncaught exceptions to file
+        sys.excepthook = handle_exception
+    else:
+        logging.basicConfig(level=level)
+
+def rev_lt(num1, num2):
+    n1w=num1.split('.')
+    n2w=num2.split('.')
+    while True:
+        try:
+            n1=int(n1w.pop(0))
+        except:
+            return True
+        try:
+            n2=int(n2w.pop(0))
+        except:
+            return False
+        if n1 < n2:
+            return True
+        if n1 > n2:
+            return False
+
+
+def add_text_tag_to_xml(parent, name, text):
+    """
+    Utility function for adding a text tag to an XML object
+    :param parent: Parent element
+    :param name: Element name
+    :param text: Text value
+    :return: The created element
+    """
+    tag = ElementTree.SubElement(parent, name)
+    tag.text = text
+    return tag
+
+def handle_exception(exc_type, exc_value, exc_traceback):
+    """
+    Exception handler to log any uncaught exceptions
+    """
+    LOG.error("Uncaught exception",
+              exc_info=(exc_type, exc_value, exc_traceback))
+    sys.__excepthook__(exc_type, exc_value, exc_traceback)
+
+def write_xml_file(top, fname):
+    # Generate the file, in a readable format if possible
+    outfile = open(fname, 'w')
+    rough_xml = ElementTree.tostring(top, 'utf-8')
+    if platform.python_version() == "2.7.2":
+        # The 2.7.2 toprettyxml() function unnecessarily indents
+        # childless tags, adding whitespace. In the case of the
+        # yum comps.xml file, it makes the file unusable, so just
+        # write the rough xml
+        outfile.write(rough_xml)
+    else:
+        outfile.write(minidom.parseString(rough_xml).toprettyxml(indent="  "))
+
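A note on the rev_lt() helper near the top of this file: it compares release strings as dotted integer sequences, and it returns True as soon as the left side runs out of numeric fields. A few illustrative calls (not from the original source) under that reading:

    rev_lt("3.2", "3.10")    # True: 2 < 10 numerically, unlike a string compare
    rev_lt("3.10", "3.2")    # False
    rev_lt("3.2", "3.2.1")   # True: the left side exhausts its fields first
    rev_lt("3.2", "3.2")     # Also True, for the same reason - callers should not
                             # rely on it to distinguish equal revisions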
+class PatchRecipeError(Exception):
+    """Base class for patch recipe exceptions."""
+
+    def __init__(self, message=None):
+        self.message = message
+
+    def __str__(self):
+        return self.message or ""
+
+class PatchRecipeXMLFail(PatchRecipeError):
+    """Problem parsing XML of patch recipe."""
+    pass
+
+class PatchBuildFail(PatchRecipeError):
+    """Problem Compiling the patch."""
+    pass
+
+class PatchPackagingFail(PatchRecipeError):
+    """Problem assembling the patch."""
+    pass
+
+class PatchPackagingMiss(PatchRecipeError):
+    """Problem assembling the patch - might be correctable."""
+    pass
+
+class PatchRequirementFail(PatchRecipeError):
+    """Missing Requirement."""
+    pass
+
+class PatchRecipeCmdFail(PatchRecipeError):
+    """Shell command Failure."""
+    pass
+
+
+class PatchList:
+    """
+    Patch List
+    """
+    def __init__(self, patch_xml_list):
+        self.data_path = "%s/%s" % (workdir, LOCAL_PATCH_DATA_DIR)
+        self.remote_copy_data_path = "%s/%s" % (workdir, REMOTE_COPY_PATCH_DATA_DIR)
+        self.order_file = "%s" % ORDER_FILE
+        self.patch_git = "%s-%s" % (PATCH_DATA_GIT, sw_version)
+        self.patch_data = {}          # map patch name to PatchRecipeData
+        self.xml_to_patch = {}        # map xml path to patch name
+        self.patch_to_xml = {}        # map patch name to xml
+        self.patches_to_build = []    # list of patches to build
+        self.patches_built = []       # patches already built
+        self.patches_to_deliver = []
+
+        self._prep_workspace()
+        self._obtain_official_patches()
+        self._validate_patch_order()
+        self._load_built_patches()
+        self._load_official_patches()
+        if patch_xml_list is not None:
+            for patch_xml in patch_xml_list:
+                self.add(patch_xml, built=False, rebuild=True, require_context=False)
+
+    def __str__(self):
+        return "[ data_path: %s, order_file: %s, patches_built: %s, patches_to_build: %s, xml_to_patch: %s, patch_to_xml: %s ]" % (str(self.data_path), str(self.order_file), str(self.patches_built), str(self.patches_to_build), str(self.xml_to_patch), str(self.patch_to_xml))
+
+    def myprint(self, indent=""):
+        print "%s%s" % (indent, str(self))
+
+    def _std_xml_patch_recipe_name(self, patch_id):
+        xml_name = "%s.xml" % patch_id
+        return xml_name
+
+    def _std_local_path(self, name):
+        xml_path = "%s/%s" % (self.data_path, name)
+        return xml_path
+
+    def _std_remote_copy_path(self, name):
+        xml_path = "%s/%s" % (self.remote_copy_data_path, name)
+        return xml_path
+
+    def _std_patch_git_path(self, name=None):
+        git_path = "%s/%s/%s/%s" % (self.remote_copy_data_path, self.patch_git, sw_version, name)
+        return git_path
+
+    def _prep_workspace(self):
+        os.chdir(workdir)
+        issue_cmd("mkdir -p %s" % self._std_local_path(""))
+        issue_cmd("touch %s" % self._std_local_path(self.order_file))
+
+    def find_patch_id(self, patch_id):
+        for patch in self.patches_built:
+            if patch == patch_id:
+                return self.patch_data[patch]
+        for patch in self.patches_to_build:
+            if patch == patch_id:
+                return self.patch_data[patch]
+        return None
+
+    def _validate_patch_order(self):
+        fix_local_order = False
+        remote_order = []
+        local_order = []
+        validated_order = []
+        with open(self._std_patch_git_path(self.order_file)) as f:
+            for line in f:
+                remote_order.append(line.strip())
+        with open(self._std_local_path(self.order_file)) as f:
+            for line in f:
+                local_order.append(line.strip())
+        while len(remote_order) and len(local_order):
+            remote_patch = remote_order.pop(0)
+            local_patch = local_order.pop(0)
+            if remote_patch == local_patch:
+                print "_validate_patch_order: %s ok" % local_patch
+                validated_order.append(remote_patch)
+            else:
+                fix_local_order = True
+                print "_validate_patch_order: %s vs %s fail" % (local_patch, remote_patch)
+                local_order.insert(0, local_patch)
+                break
+        if fix_local_order:
+            print "_validate_patch_order: fix patch order"
+            f = open(self._std_local_path(self.order_file),'w')
+            for patch_id in validated_order:
+                f.write("%s\n" % patch_id)
+                print "_validate_patch_order: %s" % patch_id
+            f.close()
+
+            # remove remaining local patches
+            for patch_id in local_order:
+                xml_path = self._std_local_path(self._std_xml_patch_recipe_name(patch_id))
+                print "_validate_patch_order: rm %s" % xml_path
+                os.remove(xml_path)
+
+    def _obtain_official_patches(self):
+        os.chdir(workdir)
+        issue_cmd("mkdir -p %s" % self._std_remote_copy_path(""))
+        os.chdir(self._std_remote_copy_path(""))
+
+        if not os.path.isdir(self.patch_git):
+            issue_cmd("git clone ssh://%s@vxgit.wrs.com:7999/cgcs/%s.git" % (os.environ['USER'], self.patch_git))
+            os.chdir(self.patch_git)
+            issue_cmd("git checkout master")
+        else:
+            os.chdir(self.patch_git)
+            issue_cmd("git checkout master")
+            issue_cmd("git pull")
+
+        try:
+            issue_cmd("git checkout %s" % sw_version)
+        except PatchRecipeCmdFail:
+            issue_cmd("git checkout -b %s master" % sw_version)
+            issue_cmd("git push -u origin %s:%s" % (sw_version, sw_version))
+            issue_cmd("git checkout master")
+            issue_cmd("git pull")
+            issue_cmd("git checkout %s" % sw_version)
+
+        issue_cmd("git pull")
+
+        os.chdir(workdir)
+        if not os.path.isdir(self._std_patch_git_path("")):
+            issue_cmd("mkdir -p %s" % self._std_patch_git_path(""))
+            os.chdir(self._std_patch_git_path(".."))
+            issue_cmd("git add %s" % self._std_patch_git_path(""))
+            os.chdir(workdir)
+        if not os.path.isfile(self._std_patch_git_path(self.order_file)):
+            issue_cmd("touch %s" % self._std_patch_git_path(self.order_file))
+            os.chdir(self._std_patch_git_path(".."))
+            issue_cmd("git add %s" % self._std_patch_git_path(self.order_file))
+            os.chdir(workdir)
+
+    def _load_official_patches(self):
+        with open(self._std_patch_git_path(self.order_file)) as f:
+            for line in f:
+                patch_id = line.strip()
+                print "remote patch_id = '%s'" % patch_id
+                xml_path = self._std_patch_git_path(self._std_xml_patch_recipe_name(patch_id))
+                self.add(xml_path, built=False, fix=True)
+
+    def sign_official_patches(self):
+        for patch_id in self.patches_to_deliver:
+            os.chdir(workdir)
+            patch = "%s.patch" % patch_id
+            print "signing patch '%s'" % self._std_local_path(patch)
+
+            try:
+                subprocess.check_call(["sign_patch_formal.sh", self._std_local_path(patch)])
+            except subprocess.CalledProcessError as e:
+                print "Failed to sign official patch. Call to sign_patch_formal.sh returned non-zero exit status %i" % e.returncode
+                raise SystemExit(e.returncode)
+
+    def deliver_official_patch(self):
+        something_to_push = False
+        os.chdir(workdir)
+        issue_cmd("cp %s %s" % (self._std_local_path(self.order_file), self._std_patch_git_path(self.order_file)))
+        os.chdir(self._std_patch_git_path("."))
+        issue_cmd("git add %s" % self.order_file)
+
+        for patch_id in self.patches_to_deliver:
+            prevent_overwrite = False
+            os.chdir(workdir)
+            patch = "%s.patch" % patch_id
+            xml = "%s.xml" % patch_id
+            if os.path.isfile(self._std_patch_git_path(patch)):
+                answer = PatchFile.query_patch(self._std_patch_git_path(patch), field="status")
+                if answer is not None and "status" in answer:
+                    if answer["status"] == "REL":
+                        prevent_overwrite = True
+                        print "Warning: '%s' already exists in git repo and is in released state! Cowardly refusing to overwrite it." % patch
+
+            if not prevent_overwrite:
+                issue_cmd("cp %s %s" % (self._std_local_path(patch), self._std_patch_git_path(".")))
+                issue_cmd("cp %s %s" % (self._std_local_path(xml), self._std_patch_git_path(".")))
+                os.chdir(self._std_patch_git_path("."))
+                issue_cmd("git add %s" % patch)
+                issue_cmd("git add %s" % xml)
+                issue_cmd("git commit -m \"%s\"" % patch_id)
+                something_to_push = True
+
+        if something_to_push:
+            os.chdir(workdir)
+            os.chdir(self._std_patch_git_path(".."))
+            issue_cmd("git push --dry-run --set-upstream origin %s:%s" % (sw_version, sw_version))
+            issue_cmd("git push --set-upstream origin %s:%s" % (sw_version, sw_version))
+
+    def _load_built_patches(self):
+        with open(self._std_local_path(self.order_file)) as f:
+            for line in f:
+                patch_id = line.strip()
+                print "local patch_id = '%s'" % patch_id
+                xml_path = self._std_local_path(self._std_xml_patch_recipe_name(patch_id))
+                self.add(xml_path, built=True, fix=False)
+
+
+    def get_implicit_requires(self, patch_id, recipies):
+        list = []
+        for r in recipies:
+            print "get_implicit_requires r=%s" % r
+        for patch in self.patches_built:
+            if patch == patch_id:
+                continue
+            if self.patch_data[patch].has_common_recipies(recipies):
+                print "get_implicit_requires built patch '%s' provides one of %s" % (patch, str(recipies))
+                list.append(patch)
+        for patch in self.patches_to_build:
+            if patch == patch_id:
+                continue
+            if self.patch_data[patch].has_common_recipies(recipies):
+                print "get_implicit_requires unbuilt patch '%s' provides one of %s" % (patch, str(recipies))
+                list.append(patch)
+        return list
+
+    def is_built(self, patch):
+        if patch not in self.patches_built:
+            print "Queried patch '%s' is not built" % patch
+            return False
+        return True
+
+    def is_known(self, patch):
+        if patch not in self.patches_built:
+            if patch not in self.patches_to_build:
+                print "Queried patch '%s' is not known" % patch
+                return False
+        return True
+
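The patch_order file managed by the methods above is just a newline-separated list of patch IDs in build order; the IDs below are hypothetical examples, but the format follows directly from the f.write("%s\n" % patch_id) calls in this class:

    CGCS_18.03_PATCH_0001
    CGCS_18.03_PATCH_0002
    CGCS_18.03_PATCH_0003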
"Invalid patch '%s' contains no context" % prd.patch_id + LOG.exception(msg) + print msg + raise PatchRecipeXMLFail(msg) + sys.exit(2) + if not rebuild: + if prd.patch_id in self.patch_to_xml: + if self.patch_to_xml[prd.patch_id] == patch_xml: + msg = "Previously added patch '%s' from same xml '%s'" % (prd.patch_id, patch_xml) + LOG.warn(msg) + print "%s\n" % msg + return + rc = issue_cmd_rc("diff %s %s" % (self.patch_to_xml[prd.patch_id], patch_xml)) + if rc != 0: + msg = "Previously added patch '%s' from different xml '%s' and different content" % (prd.patch_id, patch_xml) + LOG.exception(msg) + print "%s\n" % msg + raise PatchRecipeXMLFail(msg) + sys.exit(2) + else: + msg = "Previously added patch '%s' from different xml '%s' but same content" % (prd.patch_id, patch_xml) + LOG.warn(msg) + print "%s\n" % msg + return + if prd.patch_id in self.patch_data.keys(): + if not rebuild: + # Already know about this patch, perhaps local vs remote + rc2 = prd.compare(self.patch_data[prd.patch_id]) + if (fix and (rc2 > MAJOR_DIFF)) or (not fix and (rc2 > MINOR_DIFF)): + msg = "Patch '%s' added twice with differing content" + LOG.exception(msg) + print msg + raise PatchRequirementFail(msg) + sys.exit(2) + if fix and (rc2 > MINOR_DIFF): + new_status = self.get_status() + old_status = prd.get_status() + # TODO should we update status + prd.set_status(new_status) + rc2 = prd.compare(self.patch_data[prd.patch_id]) + if rc2 > MINOR_DIFF: + msg = "Failed to resolve patch difference by status update for patch '%s'" % prd.patch_id + LOG.exception(msg) + print msg + raise PatchRequirementFail(msg) + sys.exit(2) + # TODO write revised xml to local/remote ? + # patch is already knwon and has same content + # nothing more to do since rebuild is not requested + return + + self.patch_to_xml[prd.patch_id] = patch_xml + self.xml_to_patch[patch_xml] = prd.patch_id + self.patch_data[prd.patch_id] = prd + + prd.set_implicit_requires(self) + + rc = prd.check_requires_known(self) + if not rc: + msg = "Can't proceed because patch %s has requirements on an unknown patch." % prd.patch_id + LOG.exception(msg) + print msg + raise PatchRequirementFail(msg) + sys.exit(2) + rc = prd.check_requires_built(self) + if built and not rc: + msg = "Patch %s claims to be built yet it requires a patch that is unbuilt." + LOG.exception(msg) + print msg + raise PatchRequirementFail(msg) + sys.exit(2) + + rc = prd.check_requires_buildable(self) + if not rc: + msg = "Can't proceed because patch %s has requirements on a patch that lacks a build context." % prd.patch_id + LOG.exception(msg) + print msg + raise PatchRequirementFail(msg) + sys.exit(2) + + if built: + self.patches_built.append(prd.patch_id) + else: + self.patches_to_build.append(prd.patch_id) + + prd.gen_xml(fname=self._std_local_path(self._std_xml_patch_recipe_name(prd.patch_id))) + + + def build_patches(self): + global capture_source_flag + # While unbuild patches exist + while len(self.patches_to_build) > 0: + built = 0 + # Search for a buildable patch, i.e. one for whom all requirements are built + for patch_id in self.patches_to_build: + prd = self.patch_data[patch_id] + rc = prd.check_requires_built(self) + print "check_requires_built(%s) -> %s" % (patch_id, str(rc)) + if rc: + # This patch is ready to build, build it now + print "Ready to build patch %s." 
% patch_id + rc = prd.build_patch() + if rc: + # append newly built patch to order file + issue_cmd("sed -i '/^%s$/d' %s" % (patch_id, self._std_local_path(self.order_file))) + issue_cmd("echo %s >> %s" % (patch_id, self._std_local_path(self.order_file))) + print "Built patch %s." % patch_id + self.patches_built.append(patch_id) + self.patches_to_deliver.append(patch_id) + self.patches_to_build.remove(patch_id) + built += 1 + + if capture_rpms_flag: + capture_rpms() + + if capture_source_flag: + prd.capture_source() + + # It is important to break here. + # We just edited patches_to_build, which the enclosing for loop is iterating over. + # Without the break, the result is skipped patches and/or patches built out of order. + break + else: + msg = "Failed to build patch %s" % patch_id + LOG.exception(msg) + print msg + raise PatchBuildFail(msg) + sys.exit(2) + if built == 0: + msg = "No patches are buildable. Remaining patches: %s" % str(self.patches_to_build) + LOG.exception(msg) + print msg + raise PatchBuildFail(msg) + sys.exit(2) + print "All patches built." + + +class PackageData: + """ + Package data + """ + def __init__(self, e): + self.name = None + self.personalities = [] + self.architectures = [] + self._parse_package(e) + + def __str__(self): + return "[ name: %s, personalities: %s, architectures: %s ]" % (str(self.name), str(self.personalities), str(self.architectures)) + + def myprint(self, indent=""): + print "%s%s" % (indent, str(self)) + + def compare(self, package): + rc = SAME + if self.name != package.name: + return MAJOR_DIFF + if len(self.personalities) != len(package.personalities): + return MAJOR_DIFF + if len(self.architectures) != len(package.architectures): + return MAJOR_DIFF + for personality in self.personalities: + if personality not in package.personalities: + return MAJOR_DIFF + for arch in self.architectures: + if arch not in package.architectures: + return MAJOR_DIFF + return rc + + def _parse_package(self, e): + for key in e.attrib: + val = e.attrib[key] + # DBG print "_parse_package attr %s" % key + if key == "name": + self.name = val + else: + msg = "Unknown attribute '%s' in <PACKAGE>" % key + LOG.exception(msg) + print msg + raise PatchRecipeXMLFail(msg) + sys.exit(2) + + for child in e: + # DBG print "_parse_package child %s" % child.tag + if child.tag == "PERSONALITY": + txt = child.text and child.text.strip() or None + if txt is None: + msg = "personality missing under <PACKAGE>" + LOG.exception(msg) + print msg + raise PatchRecipeXMLFail(msg) + sys.exit(2) + self.personalities.append(txt) + elif child.tag == "ARCH": + txt = child.text and child.text.strip() or None + if txt is None: + msg = "architecture missing under <PACKAGE>" + LOG.exception(msg) + print msg + raise PatchRecipeXMLFail(msg) + sys.exit(2) + self.architectures.append(txt) + else: + msg = "Unknown tag '%s' under <PACKAGE>" % child.tag + LOG.exception(msg) + print msg + raise PatchRecipeXMLFail(msg) + sys.exit(2) + + + def gen_xml(self, e_package): + for personality in self.personalities: + add_text_tag_to_xml(e_package, 'PERSONALITY', personality) + for arch in self.architectures: + add_text_tag_to_xml(e_package, 'ARCH', arch) + + def _get_rpm_dir(self, build_type='std', arch=ARCH_DEFAULT, prebuilt=False): + if prebuilt: + if build_type == 'std': + rpm_dir = "%s/%s/repo/cgcs-centos-repo/Binary/%s" % (workdir, build_type, arch) + else: + # Any directory with no RPMs would do + rpm_dir = "%s/%s/repo/cgcs-centos-repo/Data" % (workdir, build_type) + else: + rpm_dir = "%s/%s/%s" % (workdir, build_type, RPM_DIR) + print 
"================= rpm_dir=%s ============" % rpm_dir + return rpm_dir + + def _clean_rpms(self, prebuilt=False): + global BUILD_TYPES + + print "cleaning self.name %s\n" % self.name + for build_type in BUILD_TYPES: + for arch in self.architectures: + rpm_dir = self._get_rpm_dir(build_type=build_type, arch=arch, prebuilt=prebuilt) + rpm_search_pattern = "%s-*%s.rpm" % (self.name, arch) + print "cleaning arch %s\n" % arch + print "cleaning dir %s\n" % rpm_dir + print "cleaning rpm_search_pattern %s\n" % rpm_search_pattern + for file in os.listdir(rpm_dir): + if fnmatch.fnmatch(file, rpm_search_pattern): + file_path = "%s/%s" % (rpm_dir, file) + if os.path.isfile(file_path): + print "cleaning match %s\n" % file + rpm_name_cmd = [ "rpm", "-qp", "--dbpath", temp_rpm_db_dir, "--queryformat", "%{NAME}", "%s" % file_path ] + rpm_name = issue_cmd_w_stdout(rpm_name_cmd) + if rpm_name == self.name: + rpm_release_cmd = [ "rpm", "-qp", "--dbpath", temp_rpm_db_dir, "--queryformat", "%{RELEASE}", "%s" % file_path ] + rpm_release = issue_cmd_w_stdout(rpm_release_cmd) + print "cleaning release %s" % rpm_release + rm_cmd = "rm -f %s/%s-*-%s.%s.rpm" % (rpm_dir, self.name, rpm_release, arch) + issue_cmd(rm_cmd) + + def clean(self, prebuilt=False): + print "package clean" + self._clean_rpms(prebuilt=prebuilt) + + def _add_rpms(self, pf, arch=ARCH_DEFAULT, fatal=True, prebuilt=False): + global BUILD_TYPES + + added = 0 + for build_type in BUILD_TYPES: + rpm_dir = self._get_rpm_dir(build_type=build_type, arch=arch, prebuilt=prebuilt) + rpm_search_pattern = "%s*%s.rpm" % (self.name, arch) + for file in os.listdir(rpm_dir): + if fnmatch.fnmatch(file, rpm_search_pattern): + reject = False + with open("%s/%s" % (srcdir, SRCDIR_UNBUILT_PATTERN_FILE)) as myfile: + for line in myfile: + line = line.strip() + if line.startswith('#'): + continue + if len(line) == 0: + continue + exclude = line + exclude_search_pattern = "" + if exclude[0] == '^': + if exclude[-1] == '$': + exclude_search_pattern = "%s" % (exclude[1:-1]) + else: + exclude_search_pattern = "%s*" % (exclude[1:]) + else: + if exclude[-1] == '$': + exclude_search_pattern = "*%s" % (exclude[:-1]) + else: + exclude_search_pattern = "*%s*" % (exclude) + if fnmatch.fnmatch(file, exclude_search_pattern): + print "reject file '%s' due to pattern '%s' -> '%s'" % (file, exclude, exclude_search_pattern) + reject = True + break + if reject: + with open("%s/%s" % (srcdir, SRCDIR_IMAGE_INC_FILE)) as myfile: + for line in myfile: + line = line.strip() + if line.startswith('#'): + continue + if len(line) == 0: + continue + include_search_pattern = "%s-[0-9]*.rpm" % (line) + if fnmatch.fnmatch(file, include_search_pattern): + print "Including file '%s' due to match in IMAGE_INC_FILE '%s'" % (file, SRCDIR_IMAGE_INC_FILE) + reject = False + break + + # for exclude in RMP_EXCLUDES: + # exclude_search_pattern = "%s%s*.rpm" % (self.name, exclude) + # if fnmatch.fnmatch(file, exclude_search_pattern): + # print "reject file '%s' due to pattern '%s'" % (file, exclude) + # reject = True + # break + + if not reject: + rpm_name_cmd = [ "rpm", "-qp", "--dbpath", temp_rpm_db_dir, "--queryformat", "%{NAME}", "%s/%s" % (rpm_dir, file) ] + rpm_name = issue_cmd_w_stdout(rpm_name_cmd) + if rpm_name != self.name: + print "reject file '%s' due to rpm_name '%s'" % (file, rpm_name) + reject = True + if reject: + # proceed to next matching file + continue + print "accept file '%s'" % file + rpm_path = "%s/%s" % (rpm_dir, file) + if len(self.personalities) > 0: + print "pf.add_rpm(%s, 
personality=%s)" % (rpm_path, str(self.personalities)) + pf.add_rpm(rpm_path, personality=self.personalities) + added += 1 + else: + print "pf.add_rpm(%s)" % (rpm_path) + pf.add_rpm(rpm_path) + added += 1 + if added == 0: + if fatal: + msg = "No rpms found matching %s/%s" % (rpm_dir, rpm_search_pattern) + LOG.exception(msg) + print msg + raise PatchPackagingFail(msg) + sys.exit(2) + msg = "No rpms found matching %s/%s" % (rpm_dir, rpm_search_pattern) + print msg + raise PatchPackagingMiss(msg) + + def build_patch(self, pf, fatal=True, prebuilt=False): + if len(self.architectures) > 0: + for arch in self.architectures: + self._add_rpms(pf, arch=arch, fatal=fatal, prebuilt=prebuilt) + else: + self._add_rpms(pf, fatal=fatal, prebuilt=prebuilt) + + def check_release(self, recipe_name, release_map, prev_release_map): + if self.name in release_map.keys(): + if self.name in prev_release_map.keys(): + if not rev_lt(prev_release_map[self.name], release_map[self.name]): + msg = "Failed to upversion rpm %s in recipe %s: old release %s, new release %s" % (self.name, recipe_name, prev_release_map[self.name], release_map[self.name]) + LOG.exception(msg) + print msg + raise PatchPackagingFail(msg) + sys.exit(2) + +class RecipeData: + """ + Recipe data + """ + def __init__(self, e): + self.name = None + self.prebuilt = False + self.packages = collections.OrderedDict() # map package name to PackageData + self._parse_recipe(e) + + def __str__(self): + return "name: %s, packages: %s" % (self.name, str(self.packages.keys())) + + def myprint(self, indent=""): + print "%sname: %s" % (indent, self.name) + for key in self.packages: + self.packages[key].myprint("%s " % indent) + + def compare(self, recipe): + rc = SAME + if self.name != recipe.name: + return MAJOR_DIFF + if len(self.packages) != len(recipe.packages): + return MAJOR_DIFF + if self.prebuilt != recipe.prebuilt: + return MAJOR_DIFF + for key in self.packages.keys(): + if key not in recipe.packages.keys(): + return MAJOR_DIFF + rc2 = self.packages[key].compare(recipe.packages[key]) + if rc2 >= MAJOR_DIFF: + return MAJOR_DIFF + if rc2 >= rc: + rc = rc2 + return rc + + def in_list(self, recipies): + for recipe in recipies: + if self.name == recipe.name: + return True + return False + + def _parse_recipe(self, e): + for key in e.attrib: + val = e.attrib[key] + # DBG print "_parse_recipe attr %s" % key + if key == "name": + self.name = val + else: + msg = "Unknow attribute '%s' in " % key + LOG.exception(msg) + print msg + raise PatchRecipeXMLFail(msg) + sys.exit(2) + + for child in e: + # DBG print "_parse_recipe child %s" % child.tag + if child.tag == "PACKAGE": + p = PackageData(child) + self.packages[p.name] = p + elif child.tag == "PREBUILT": + self.prebuilt = True + print "=========== set prebuilt=%s for %s =============" % (self.prebuilt, self.name) + else: + msg = "Unknow tag '%s' under " % child.tag + LOG.exception(msg) + print msg + raise PatchRecipeXMLFail(msg) + sys.exit(2) + + def gen_xml(self, e_recipe): + if self.prebuilt: + ElementTree.SubElement(e_recipe, 'PREBUILT') + + for package in self.packages.keys(): + e_package = ElementTree.SubElement(e_recipe, 'PACKAGE', attrib={'name': package}) + self.packages[package].gen_xml(e_package) + + def clean(self): + print "recipe clean" + if not self.prebuilt: + for package in self.packages: + self.packages[package].clean(prebuilt=self.prebuilt) + + def capture_source(self): + self.name + my_repo = None + path = capture_source_path + extra_arg = "" + + if 'MY_REPO' in os.environ.keys(): + my_repo = 
os.environ['MY_REPO'] + + if 'MY_PATCH_REPO' in os.environ.keys(): + my_repo = os.environ['MY_PATCH_REPO'] + + if my_repo is not None: + altpath = "%s/addons/wr-cgcs/layers/cgcs/extras.ND/scripts/source_collect_package" % my_repo + if os.path.isfile(altpath): + path = altpath + + if self.prebuilt: + extra_arg = "--prebuilt" + + if os.path.isfile(path): + rc = issue_cmd_rc("%s %s %s >> %s/%s.log" % (path, self.name, extra_arg, os.environ['DEST'], os.environ['PREFIX'])) + + def build_patch(self, pf, fatal=True): + for package in self.packages: + self.packages[package].build_patch(pf, fatal=fatal, prebuilt=self.prebuilt) + + def check_release(self, release_map, prev_release_map): + for package in self.packages: + self.packages[package].check_release(self.name, release_map, prev_release_map) + + def is_prebuilt(self): + print "=========== is_prebuilt prebuilt=%s for %s =============" % (self.prebuilt, self.name) + return self.prebuilt + +class PatchRecipeData: + """ + Patch recipe data + """ + def __init__(self, built=False, pl=None): + self.patch_id = None + self.sw_version = None + self.built = built + self.build_context = None + self.metadata = collections.OrderedDict() + self.requires = [] + self.auto_requires = [] + self.recipies = collections.OrderedDict() # recipe name to RecipeData + self.pl = pl + + def compare(self, prd): + rc = SAME + if self.patch_id != prd.patch_id: + return MAJOR_DIFF + if self.built != prd.built: + rc = MINOR_DIFF + if len(self.metadata) != len(prd.metadata): + return MAJOR_DIFF + if len(self.requires) != len(prd.requires): + return MAJOR_DIFF + if len(self.recipies) != len(prd.recipies): + return MAJOR_DIFF + for require in self.requires: + if require not in prd.requires: + return MAJOR_DIFF + for item in self.metadata.keys(): + if item not in prd.metadata.keys(): + return MAJOR_DIFF + if self.metadata[item] != prd.metadata[item]: + if item == "STATUS": + rc = MINOR_DIFF + else: + return MAJOR_DIFF + for recipe in self.recipies.keys(): + if recipe not in prd.recipies.keys(): + return MAJOR_DIFF + rc2 = self.recipies[recipe].compare(prd.recipies[recipe]) + if rc2 >= MAJOR_DIFF: + return MAJOR_DIFF + if rc2 >= rc: + rc = rc2 + return rc + + + def set_implicit_requires(self, patch_list): + self.auto_requires = patch_list.get_implicit_requires(self.patch_id, self.recipies.keys()) + + def get_build_context(self): + return self.build_context + + def check_requires_known(self, patch_list): + rc = True + for patch in self.requires: + if not patch_list.is_known(patch): + print "patch '%s' is missing required patch '%s'" % (self.patch_id, patch) + rc = False + for patch in self.auto_requires: + if not patch_list.is_known(patch): + print "patch '%s' is missing implicitly required patch '%s'" % (self.patch_id, patch) + rc = False + return rc + + def check_requires_buildable(self, patch_list): + rc = True + for patch in self.requires: + if not patch_list.is_built(patch): + ctx = patch_list.patch_data[patch].get_build_context() + if ctx is None: + print "patch '%s' requires patch '%s' to be built first, but lacks a context to do so" % (self.patch_id, patch) + rc = False + for patch in self.auto_requires: + if not patch_list.is_built(patch): + ctx = patch_list.patch_data[patch].get_build_context() + if ctx is None: + print "patch '%s' requires patch '%s' to be built first, but lacks a context to do so" % (self.patch_id, patch) + rc = False + return rc + + def check_requires_built(self, patch_list): + rc = True + for patch in self.requires: + if not patch_list.is_built(patch): + 
print "patch '%s' requires patch '%s' to be built first" % (self.patch_id, patch) + rc = False + for patch in self.auto_requires: + if not patch_list.is_built(patch): + print "patch '%s' requires patch '%s' to be built first" % (self.patch_id, patch) + rc = False + return rc + + def has_common_recipies(self, recipies): + for recipe in self.recipies.keys(): + if recipe in recipies: + return True + return False + + def build(self): + if self.built: + return 0 + return 0 + + def _parse_requires(self, e): + for child in e: + # DBG print "_parse_requires %s" % child.tag + if child.tag == "ID": + req = child.text and child.text.strip() or None + if req is None: + msg = "Patch id missing under " + LOG.exception(msg) + print msg + raise PatchRecipeXMLFail(msg) + sys.exit(2) + self.requires.append(req) + else: + msg = "Unknow tag '%s' under " % child.tag + LOG.exception(msg) + print msg + raise PatchRecipeXMLFail(msg) + sys.exit(2) + + def _parse_metadata(self, e): + for child in e: + # DBG print "_parse_metadata %s" % child.tag + if child.tag == "REQUIRES": + self._parse_requires(child.getchildren()) + elif child.tag in METADATA_TAGS: + self.metadata[child.tag] = child.text and child.text.strip() or "" + else: + msg = "Unknow tag '%s' under " % child.tag + LOG.exception(msg) + print msg + raise PatchRecipeXMLFail(msg) + sys.exit(2) + + def _parse_build(self, e): + for child in e: + # DBG print "_parse_build %s" % child.tag + if child.tag == "RECIPE": + r = RecipeData(child) + self.recipies[r.name] = r + elif child.tag == "CONTEXT": + self.build_context = child.text and child.text.strip() or None + print "====== CONTEXT = %s ========" % self.build_context + else: + msg = "Unknow tag '%s' under " % child.tag + LOG.exception(msg) + print msg + raise PatchRecipeXMLFail(msg) + sys.exit(2) + + def _parse_root(self, e): + for child in e: + # DBG print "_parse_root %s" % child.tag + if child.tag == "METADATA": + self._parse_metadata(child.getchildren()) + elif child.tag == "BUILD": + self._parse_build(child.getchildren()) + else: + msg = "Unknow tag '%s' under " % child.tag + LOG.exception(msg) + print msg + raise PatchRecipeXMLFail(msg) + sys.exit(2) + if 'ID' in self.metadata: + self.patch_id = self.metadata['ID'] + else: + msg = "patch is missing required field " + LOG.exception(msg) + print msg + raise PatchRecipeXMLFail(msg) + sys.exit(2) + + if 'SW_VERSION' in self.metadata: + self.sw_version = self.metadata['SW_VERSION'] + if self.sw_version != build_info['SW_VERSION']: + msg = "patch '%s' SW_VERSION is inconsistent with that of workdir '%s'" % (self.patch_id, workdir) + LOG.exception(msg) + print msg + raise PatchRecipeXMLFail(msg) + sys.exit(2) + + else: + msg = "patch '%s' is missing required field " % self.patch_id + LOG.exception(msg) + print msg + raise PatchRecipeXMLFail(msg) + sys.exit(2) + + print "_parse_root patch_id = '%s'" % self.patch_id + + def recursive_print(self, e, depth=0): + for child in e: + print "%sTag: %s, attr: %s, text: %s" % (" "*depth, child.tag, child.attrib, child.text and child.text.strip() or "") + self.recursive_print(child.getchildren(), depth+1) + # for child in e.iter('BUILD'): + # print "Tag: %s, attr: %s" % (child.tag, child.attrib) + + def parse_xml(self, + filename, + adminstate=None): + """ + Parse an individual patch recipe XML file + :param filename: XML file + :param adminstate: Indicates Applied or Available + :return: Patch ID + """ + tree = ElementTree.parse(filename) + root = tree.getroot() + + # DBG print("tree: %r" % dir(tree)) + # DBG print("root: 
%r" % dir(root)) + # DBG self.recursive_print(root) + self._parse_root(root) + self.myprint() + + def write_xml_file(self, top, fname): + # Generate the file, in a readable format if possible + outfile = open(fname, 'w') + rough_xml = ElementTree.tostring(top, 'utf-8') + if platform.python_version() == "2.7.2": + # The 2.7.2 toprettyxml() function unnecessarily indents + # childless tags, adding whitespace. In the case of the + # yum comps.xml file, it makes the file unusable, so just + # write the rough xml + outfile.write(rough_xml) + else: + outfile.write(minidom.parseString(rough_xml).toprettyxml(indent=" ")) + + def gen_xml(self, fname="metadata.xml"): + """ + Generate patch recipe XML file + :param fname: Path to output file + :return: + """ + e_top = ElementTree.Element('PATCH_RECIPE') + e_metadata = ElementTree.SubElement(e_top, 'METADATA') + for key in self.metadata.keys(): + add_text_tag_to_xml(e_metadata, key, self.metadata[key]) + if len(self.requires) > 0: + e_requires = ElementTree.SubElement(e_metadata, 'REQUIRES') + for require in self.requires: + add_text_tag_to_xml(e_requires, 'ID', require) + e_build = ElementTree.SubElement(e_top, 'BUILD') + if self.build_context: + add_text_tag_to_xml(e_build, 'CONTEXT', self.build_context) + else: + add_text_tag_to_xml(e_build, 'CONTEXT', patch_id_to_tag(self.patch_id)) + for recipe in self.recipies.keys(): + e_recipe = ElementTree.SubElement(e_build, 'RECIPE', attrib={'name': recipe}) + self.recipies[recipe].gen_xml(e_recipe) + + write_xml_file(e_top, fname) + + def __str__(self): + return "[ patch_id: %s, context: %s, metadata: %s, requires: %s, recipies: %s ]" % (str(self.patch_id), str(self.build_context), str(self.metadata), str(self.requires), str(self.recipies,keys())) + + def myprint(self, indent=""): + print "patch_id: %s" % str(self.patch_id) + print "context: %s" % str(self.build_context) + print "metadata: %s" % str(self.metadata) + print "requires: %s" % str(self.requires) + for key in self.recipies: + self.recipies[key].myprint("%s " % indent) + + def _configure(self): + if workdir is None: + msg = "workdir not provided" + LOG.exception(msg) + print msg + raise PatchBuildFail(msg) + sys.exit(2) + return False + + os.chdir(workdir) + + def _set_context(self): + global pre_compiled_flag + + if pre_compiled_flag: + return + + if (self.build_context is None) and (branch is None): + # Nothing to do + return + + if srcdir is None: + msg = "srcdir not provided" + LOG.exception(msg) + print msg + raise PatchBuildFail(msg) + sys.exit(2) + return False + + os.chdir(srcdir) + + if self.build_context is not None: + # Before checkout, make sure there are no untracked temporary files + # left by a previous build that may prevent the checkout... + # e.g. horizon's pbr-2015.1.0-py2.7.egg directory is a build artifact + issue_cmd("for d in $(find . -type d -name .git | xargs --max-args=1 dirname); do (cd $d; echo $d; git clean -df; git reset --hard; git ls-files --others --exclude-standard | xargs --no-run-if-empty rm; if [ ! 
-f .subgits ]; then if [ -f .gitignore ]; then git ls-files --others --ignored --exclude-from=.gitignore | xargs --no-run-if-empty rm; fi; fi); done") + issue_cmd("wrgit checkout %s" % self.build_context) + elif branch is not None: + issue_cmd("wrgit checkout %s" % branch) + else: + msg = "Don't know what build context to use for patch %s" % self.patch_id + LOG.exception(msg) + print msg + raise PatchBuildFail(msg) + sys.exit(2) + return False + + if workdir is None: + msg = "workdir not provided" + LOG.exception(msg) + print msg + raise PatchBuildFail(msg) + sys.exit(2) + return False + + return True + + + def _get_prev_patch_id(self, patch_id): + patch_order_file = self.pl._std_local_path(self.pl.order_file) + prev_patch_id = None + with open(patch_order_file) as f: + for line in f: + this_patch_id = line.strip() + if patch_id == this_patch_id: + return prev_patch_id + prev_patch_id = this_patch_id + return prev_patch_id + + def _get_rpm_db_path(self, patch_id): + rpm_db = self.pl._std_local_path("%s.rpm_db" % patch_id) + return rpm_db + + def _write_rpm_db(self): + global BUILD_TYPES + + for build_type in BUILD_TYPES: + rpm_dir = "%s/%s/%s" % (workdir, build_type, RPM_DIR) + rpm_db = self._get_rpm_db_path(self.patch_id) + issue_cmd("echo > %s" % rpm_db) + for subdir in os.walk(rpm_dir).next()[1]: + rpm_sub_dir = "%s/%s" % (rpm_dir, subdir) + issue_cmd("rpm -qp --dbpath %s --queryformat '%s %%{NAME} %%{RELEASE}\n' %s/*rpm >> %s 2> /dev/null" % (temp_rpm_db_dir, subdir, rpm_sub_dir, rpm_db)) + + def _read_rpm_db(self, patch_id): + release_map={} + rpm_db_dir = "export/patch_data" + rpm_db = self._get_rpm_db_path(patch_id) + with open(rpm_db) as f: + for line in f: + words = line.split() + if len(words) == 3: + arch = words[0] + rpm = words[1] + release = words[2] + release_map[rpm] = release[1:] + return release_map + + def check_release(self): + prev_patch_id = self._get_prev_patch_id(self.patch_id) + if prev_patch_id is None: + delim = "_" + words = self.patch_id.split(delim) + l = len(words[-1]) + words[-1] = '0'*l + prev_patch_id = delim.join(words) + prev_release_map = self._read_rpm_db(prev_patch_id) + release_map = self._read_rpm_db(self.patch_id) + for recipe in self.recipies.keys(): + self.recipies[recipe].check_release(release_map, prev_release_map) + + def capture_source(self): + os.environ['PREFIX'] = self.patch_id + os.environ['MY_REPO'] = os.environ['MY_PATCH_REPO'] + os.environ['MY_WORKSPACE'] = os.environ['MY_PATCH_WORKSPACE'] + os.environ['DEST'] = "%s/export/patch_source/%s" % (os.environ['MY_PATCH_WORKSPACE'], self.patch_id) + issue_cmd("mkdir -p %s" % os.environ['DEST']) + for recipe in self.recipies.keys(): + print "capture source of recipe %s" % recipe + self.recipies[recipe].capture_source() + + def build_patch(self, local_path="."): + global pre_compiled_flag + global pre_clean_flag + self._set_context() + self._configure() + + recipe_str = "" + build_recipe_str = "" + for recipe in self.recipies.keys(): + recipe_str += recipe + " " + if not self.recipies[recipe].is_prebuilt(): + build_recipe_str += recipe + " " + print "recipe_str = %s" % recipe_str + print "build_recipe_str = %s" % build_recipe_str + if recipe_str == "": + msg = "No recipies for patch %s" % self.patch_id + LOG.exception(msg) + print msg + raise PatchBuildFail(msg) + sys.exit(2) + return False + + if pre_compiled_flag and pre_clean_flag: + print "pre clean" + for recipe in self.recipies.keys(): + print "pre clean recipe %s" % recipe + self.recipies[recipe].clean() + print "done" + sys.exit(0) + + 
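# The steps below run in three phases: compile the affected recipes with build-pkgs (skipped when --pre-compiled), snapshot the resulting RPM release numbers and check that every packaged RPM was upversioned, then assemble the .patch file via PatchFile.gen_patch(). + 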
if not pre_compiled_flag: + # compile patch + os.chdir(workdir) + print "pre clean" + if build_recipe_str == "": + print " ... nothing to clean" + else: + issue_cmd("build-pkgs --no-build-info --clean %s" % build_recipe_str) + for recipe in self.recipies.keys(): + print "pre clean recipe %s" % recipe + self.recipies[recipe].clean() + print "Build" + if build_recipe_str == "": + print " ... nothing to build" + else: + issue_cmd("build-pkgs --no-build-info --careful %s" % build_recipe_str) + + # create rpm release number db + self._write_rpm_db() + + if not pre_compiled_flag: + # check rpm release numbers + self.check_release() + + # assemble patch + pf = PatchFile() + if self.patch_id: + pf.meta.id = self.patch_id + if 'STATUS' in self.metadata: + pf.meta.status = self.metadata['STATUS'] + else: + pf.meta.status = STATUS_DEVELOPEMENT + if 'UNREMOVABLE' in self.metadata: + pf.meta.removable = self.metadata['UNREMOVABLE'] + if 'SUMMARY' in self.metadata: + pf.meta.summary = self.metadata['SUMMARY'] + if 'DESCRIPTION' in self.metadata: + pf.meta.description = self.metadata['DESCRIPTION'] + if 'INSTALL_INSTRUCTIONS' in self.metadata: + pf.meta.install_instructions = self.metadata['INSTALL_INSTRUCTIONS'] + if 'WARNINGS' in self.metadata: + pf.meta.warnings = self.metadata['WARNINGS'] + if 'SW_VERSION' in self.metadata: + pf.meta.sw_version = self.metadata['SW_VERSION'] + if 'REBOOT_REQUIRED' in self.metadata: + pf.meta.reboot_required = self.metadata['REBOOT_REQUIRED'] + + for patch in list(set(self.requires) | set(self.auto_requires)): + pf.meta.requires.append(patch) + + for recipe in self.recipies.keys(): + if not pre_compiled_flag: + self.recipies[recipe].build_patch(pf, fatal=True) + else: + try: + self.recipies[recipe].build_patch(pf, fatal=False) + except PatchPackagingMiss: + print "Warning: attempting rebuild of recipe %s" % self.recipies[recipe].name + if not self.recipies[recipe].is_prebuilt(): + issue_cmd("build-pkgs --no-build-info --careful %s" % self.recipies[recipe].name) + self.recipies[recipe].build_patch(pf, fatal=True) + + + local_path = self.pl._std_local_path("") + print "=== local_path = %s ===" % local_path + pf.gen_patch(outdir=local_path) + + return True + +def _tag_build_context(patch_id): + os.chdir(srcdir) + issue_cmd("for e in . 
`wrgit all-core-gits` ; do (cd $e ; git tag v%s) done" % patch_id) + +def read_build_info(): + try: + build_info_find_cmd = [ "find", "std/rpmbuild/RPMS/", "-name", "build-info-[0-9]*.x86_64.rpm" ] + build_info_path = issue_cmd_w_stdout(build_info_find_cmd) + if build_info_path == "": + issue_cmd("build-pkgs --no-descendants build-info") + + issue_cmd("rpm2cpio std/rpmbuild/RPMS/build-info-[0-9]*.x86_64.rpm | cpio -i --to-stdout --quiet ./etc/build.info > %s" % WORKDIR_BUILD_INFO_LOCATION) + with open(WORKDIR_BUILD_INFO_LOCATION) as myfile: + for line in myfile: + line = line.strip() + if line.startswith('#'): + continue + if len(line) == 0: + continue + + name, var = line.partition("=")[::2] + name = name.strip() + var = var.strip() + if var.startswith('"') and var.endswith('"'): + var = var[1:-1] + build_info[name] = var + except: + return False + return True + +def patch_id_to_tag(patch_id): + tag = "v%s" % patch_id + return tag + +def validate_tag(tag): + try: + cmd = "git tag | grep %s" % tag + issue_cmd(cmd) + except PatchRecipeCmdFail: + msg = "TAG '%s' is invalid" % tag + LOG.exception(msg) + print msg + return False + return True + +def issue_cmd_w_stdout(cmd): + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out = p.communicate()[0] + rc = p.returncode + if rc != 0: + msg = "CMD failed: %s" % str(cmd) + LOG.exception(msg) + print msg + raise PatchRecipeCmdFail(msg) + return out + + +def issue_cmd(cmd): + print "CMD: %s" % cmd + rc = subprocess.call(cmd, shell=True) + if rc != 0: + msg = "CMD failed: %s" % cmd + LOG.exception(msg) + print msg + raise PatchRecipeCmdFail(msg) + +def issue_cmd_no_raise(cmd): + print "CMD: %s" % cmd + rc = subprocess.call(cmd, shell=True) + if rc != 0: + msg = "CMD failed: %s" % cmd + LOG.exception(msg) + print msg + +def issue_cmd_rc(cmd): + print "CMD: %s" % cmd + rc = subprocess.call(cmd, shell=True) + return rc + +def set_capture_source_path(): + global capture_source_path + my_repo = None + new_dir = "/tmp/%s" % os.environ['USER'] + new_path = "%s/source_collect_package" % new_dir + + if 'MY_REPO' in os.environ.keys(): + my_repo = os.environ['MY_REPO'] + + if 'MY_PATCH_REPO' in os.environ.keys(): + my_repo = os.environ['MY_PATCH_REPO'] + + if my_repo is not None: + old_path = "%s/addons/wr-cgcs/layers/cgcs/extras.ND/scripts/source_collect_package" % my_repo + if os.path.isfile(old_path): + rc = issue_cmd_rc("mkdir -p %s" % new_dir) + rc = issue_cmd_rc("\cp -f %s %s" % (old_path, new_path)) + if rc == 0: + capture_source_path = new_path + +def capture_rpms(): + for build_type in BUILD_TYPES: + src_rpm_dir = "%s/%s/%s" % (workdir, build_type, RPM_DIR) + if os.path.isdir(src_rpm_dir): + dest_rpm_dir = "%s/%s/%s" % (workdir, build_type, RPM_ARCHIVE_DIR) + issue_cmd("mkdir -p %s" % dest_rpm_dir) + issue_cmd("rsync -avu %s/*.rpm %s" % (src_rpm_dir, dest_rpm_dir)) + +def modify_patch_usage(): + msg = "modify_patch [ --obsolete | --released | --development ] [ --sw_version <version> --id <patch_id> | --file <patch_file> ]" + LOG.exception(msg) + print msg + sys.exit(1) + +def modify_patch(): + global workdir + global temp_rpm_db_dir + global sw_version + global build_info + + configure_logging(logtofile=False) + + try: + opts, remainder = getopt.getopt(sys.argv[1:], + 'h', + ['help', + 'obsolete', + 'released', + 'development', + 'sw_version=', + 'id=', + 'file=', + ]) + except getopt.GetoptError as e: + print str(e) + modify_patch_usage() + + + patch_path = None + cwd = os.getcwd() + + status_set = False + + for opt, arg in opts: + if opt == 
"--obsolete": + if status_set: + modify_patch_usage() + status_set = True + new_status = STATUS_OBSOLETE + elif opt == "--released": + if status_set: + modify_patch_usage() + status_set = True + new_status = STATUS_RELEASED + elif opt == "--development": + if status_set: + modify_patch_usage() + status_set = True + new_status = STATUS_DEVELOPEMENT + elif opt == "--file": + patch_path = os.path.normpath(os.path.join(cwd, os.path.expanduser(arg))) + elif opt == "--sw_version": + sw_version = arg + elif opt == "--id": + patch_id = arg + elif opt in ("-h", "--help"): + modify_patch_usage() + else: + print "unknown option '%s'" % opt + modify_patch_usage() + + if not status_set: + print "new status not specified" + modify_patch_usage() + + workdir = tempfile.mkdtemp(prefix="patch_modify_") + os.chdir(workdir) + try: + temp_rpm_db_dir = "%s/%s" % (workdir, ".rpmdb") + if patch_path is not None: + PatchFile.modify_patch(patch_path, "status", new_status) + print "Patch '%s' has been modified to status '%s'" % (patch_path, new_status) + else: + if sw_version is None or patch_id is None: + print "--sw_version and --id are required" + shutil.rmtree(workdir) + modify_patch_usage() + + build_info['SW_VERSION'] = sw_version + pl = PatchList([]) + patch_file_name = "%s.patch" % patch_id + patch_path = pl._std_patch_git_path(patch_file_name) + print "patch_id = %s" % patch_id + print "patch_file_name = %s" % patch_file_name + print "patch_path = %s" % patch_path + PatchFile.modify_patch(patch_path, "status", new_status) + os.chdir(pl._std_patch_git_path("..")) + issue_cmd("git add %s" % patch_path) + issue_cmd("git commit -m \"Modify status of patch '%s' to '%s'\"" % (patch_id, new_status)) + issue_cmd("git push --dry-run --set-upstream origin %s:%s" % (sw_version, sw_version)) + issue_cmd("git push --set-upstream origin %s:%s" % (sw_version, sw_version)) + print "Patch '%s' has been modified to status '%s'" % (patch_id, new_status) + + if new_status == STATUS_RELEASED: + tm = time.localtime(time.time()) + ts = time.strftime("%Y%m%d", tm) + munged_patch_id = re.sub('[_.]', '-', patch_id.lower()) + swv = sw_version.split(".") + sw_mjr = swv[0] + local_dest = "" + deliver_dest = "" + + local_dest = "/folk/cgts/rel-ops/%s/patches/" % sw_version + deliver_dest = "/folk/prj-wrlinux/release/tis/tis-%s/update/ti%s-%s/Titanium-Cloud-%s/patches" % (sw_mjr, ts, munged_patch_id, sw_mjr) + human_release = "Titanium Cloud %s" % sw_mjr + windshare_folder = "Titanium-Cloud-%s" % sw_mjr + + if sw_version == "14.10": + local_dest = "/folk/cgts/rel-ops/Titanium-Server-14/patches/%s" % sw_version + deliver_dest = "/folk/prj-wrlinux/release/tis/tis-14/update/ti%s-%s/Titanium-Server-14/patches" % (ts, munged_patch_id) + human_release = "Titanium server 14" + windshare_folder = "Titanium-server-14" + + if sw_version == "15.04" or sw_version == "15.10": + local_dest = "/folk/cgts/rel-ops/%s/patches/" % sw_version + deliver_dest = "" + human_release = "Titanium server 15" + windshare_folder = "" + + if sw_version == "15.05": + local_dest = "/folk/cgts/rel-ops/%s/patches/" % sw_version + deliver_dest = "/folk/prj-wrlinux/release/tis/tis-15/update/ti%s-%s/Titanium-Server-15.05-ER/patches" % (ts, munged_patch_id) + human_release = "Titanium server 15" + windshare_folder = "Titanium-server-15.05-ER" + + if sw_version == "15.09": + local_dest = "/folk/cgts/rel-ops/%s/patches/" % sw_version + deliver_dest = "/folk/prj-wrlinux/release/tis/tis-15/update/ti%s-%s/Titanium-Server-15.09-ER/patches" % (ts, munged_patch_id) + human_release = 
"Titanium server 15" + windshare_folder = "Titanium-server-15.09-ER" + + if sw_version == "15.12": + local_dest = "/folk/cgts/rel-ops/%s/patches/" % sw_version + deliver_dest = "/folk/prj-wrlinux/release/tis/tis-2/update/ti%s-%s/Titanium-Server-2/patches" % (ts, munged_patch_id) + human_release = "Titanium Cloud 2" + windshare_folder = "Titanium-Cloud-2" + + if sw_version == "16.10": + local_dest = "/folk/cgts/rel-ops/%s/patches/" % sw_version + deliver_dest = "/folk/prj-wrlinux/release/tis/tis-3/update/ti%s-%s/Titanium-Server-3/patches" % (ts, munged_patch_id) + human_release = "Titanium Cloud 3" + windshare_folder = "Titanium-Cloud-3" + + if sw_version == "17.06": + local_dest = "/folk/cgts/rel-ops/%s/patches/" % sw_version + deliver_dest = "/folk/prj-wrlinux/release/tis/tis-4/update/ti%s-%s/Titanium-Cloud-4/patches" % (ts, munged_patch_id) + human_release = "Titanium Cloud 4" + windshare_folder = "Titanium-Cloud-4" + + if sw_version == "18.03" || sw_version == "18.03" + local_dest = "/folk/cgts/rel-ops/%s/patches/" % sw_version + deliver_dest = "/folk/prj-wrlinux/release/tis/tis-5/update/ti%s-%s/Titanium-Cloud-5/patches" % (ts, munged_patch_id) + human_release = "Titanium Cloud 5" + windshare_folder = "Titanium-Cloud-5" + + if local_dest != "": + issue_cmd("mkdir -p %s" % local_dest) + issue_cmd_no_raise("chmod 775 %s" % os.path.dirname(os.path.dirname(local_dest))) + issue_cmd_no_raise("chmod 775 %s" % os.path.dirname(local_dest)) + issue_cmd_no_raise("chmod 775 %s" % local_dest) + issue_cmd("cp %s %s" % (patch_path, local_dest)) + issue_cmd("md5sum %s | sed 's:%s:%s:' > %s/%s.md5" % (patch_path, patch_path, patch_file_name, local_dest, patch_file_name)) + issue_cmd_no_raise("chmod 664 %s/%s" % (local_dest, patch_file_name)) + issue_cmd_no_raise("chmod 664 %s/%s.md5" % (local_dest, patch_file_name)) + + if deliver_dest != "": + issue_cmd("mkdir -p %s" % deliver_dest) + issue_cmd_no_raise("chmod 775 %s" % os.path.dirname(os.path.dirname(deliver_dest))) + issue_cmd_no_raise("chmod 775 %s" % os.path.dirname(deliver_dest)) + issue_cmd_no_raise("chmod 775 %s" % deliver_dest) + issue_cmd("cp %s %s" % (patch_path, deliver_dest)) + issue_cmd("md5sum %s | sed 's:%s:%s:' > %s/%s.md5" % (patch_path, patch_path, patch_file_name, deliver_dest, patch_file_name)) + issue_cmd_no_raise("chmod 664 %s/%s" % (deliver_dest, patch_file_name)) + issue_cmd_no_raise("chmod 664 %s/%s.md5" % (deliver_dest, patch_file_name)) + + print "" + print "Go here to deliver the patch" + print " http://deliveryplus.windriver.com/update/release" + print "Login if required" + print "" + print "Release to be updated:" + print " select '%s'" % human_release + print "press 'select' and wait for next page to load." + print "" + print "Windshare folder to be uploaded:" + print " select '%s'" % windshare_folder + print "Subdirectory of WindShare folder in which to place updates:" + print " select 'patches'" + print "Pathname from which to copy update content:" + print " %s" % deliver_dest + print "press 'Release to Production'" + print "" + + except: + print "Failed to modify patch!" 
+ finally: + shutil.rmtree(workdir) + + # PatchRecipeData + # prd.metadata['STATUS'] = new_status + + +def query_patch_usage(): + msg = "query_patch [ --sw_version <version> --id <patch_id> | --file <patch_file> ] [ --field <field_name> ]" + LOG.exception(msg) + print msg + msg = " field_name = [ status | summary | description | install_instructions | warnings | contents | requires ]" + LOG.exception(msg) + print msg + sys.exit(1) + +def query_patch(): + global workdir + global temp_rpm_db_dir + global sw_version + global build_info + + configure_logging(logtofile=False) + + try: + opts, remainder = getopt.getopt(sys.argv[1:], + 'h', + ['help', + 'sw_version=', + 'id=', + 'file=', + 'field=', + ]) + except getopt.GetoptError as e: + print str(e) + query_patch_usage() + + + patch_path = None + cwd = os.getcwd() + field = None + + for opt, arg in opts: + if opt == "--file": + patch_path = os.path.normpath(os.path.join(cwd, os.path.expanduser(arg))) + elif opt == "--sw_version": + sw_version = arg + elif opt == "--id": + patch_id = arg + elif opt == "--field": + field = arg + elif opt in ("-h", "--help"): + query_patch_usage() + else: + print "unknown option '%s'" % opt + query_patch_usage() + + workdir = tempfile.mkdtemp(prefix="patch_query_") + os.chdir(workdir) + try: + temp_rpm_db_dir = "%s/%s" % (workdir, ".rpmdb") + if patch_path is not None: + answer = PatchFile.query_patch(patch_path, field=field) + print str(answer) + else: + if sw_version is None or patch_id is None: + print "--sw_version and --id are required" + shutil.rmtree(workdir) + query_patch_usage() + + build_info['SW_VERSION'] = sw_version + pl = PatchList([]) + patch_file_name = "%s.patch" % patch_id + patch_path = pl._std_patch_git_path(patch_file_name) + print "patch_id = %s" % patch_id + print "patch_file_name = %s" % patch_file_name + print "patch_path = %s" % patch_path + answer = PatchFile.query_patch(patch_path, field=field) + print str(answer) + + except: + print "Failed to query patch!" 
+ finally: + shutil.rmtree(workdir) + + +def make_patch_usage(): + msg = "make_patch [--formal | --pre-compiled [--pre-clean]] [--workdir <dir>] [--srcdir <dir>] [--branch <branch>] [--capture_source] [--capture_rpms] [ --all --sw_version <version> | <patch.xml> ]" + LOG.exception(msg) + print msg + sys.exit(1) + +def make_patch(): + global workdir + global temp_rpm_db_dir + global srcdir + global branch + global sw_version + global formal_flag + global pre_compiled_flag + global pre_clean_flag + global all_flag + global capture_source_flag + global capture_rpms_flag + patch_list = [] + + configure_logging(logtofile=False) + + try: + opts, remainder = getopt.getopt(sys.argv[1:], + 'h', + ['help', + 'all', + 'capture_source', + 'capture_rpms', + 'formal', + 'pre-compiled', + 'pre-clean', + 'release=', + 'workdir=', + 'srcdir=', + 'branch=', + 'sw_version=', + ]) + except getopt.GetoptError as e: + print str(e) + make_patch_usage() + + cwd = os.getcwd() + + for opt, arg in opts: + if opt == "--formal": + formal_flag = True + elif opt == "--pre-compiled": + pre_compiled_flag = True + elif opt == "--pre-clean": + pre_clean_flag = True + elif opt == "--all": + all_flag = True + elif opt == "--capture_source": + capture_source_flag = True + set_capture_source_path() + elif opt == "--capture_rpms": + capture_rpms_flag = True + elif opt == "--workdir": + workdir = os.path.normpath(os.path.join(cwd, os.path.expanduser(arg))) + elif opt == "--srcdir": + srcdir = os.path.normpath(os.path.join(cwd, os.path.expanduser(arg))) + elif opt == "--branch": + branch = arg + elif opt == "--sw_version": + sw_version = arg + elif opt in ("-h", "--help"): + make_patch_usage() + else: + print "unknown option '%s'" % opt + make_patch_usage() + + for x in remainder: + patch_list.append(os.path.normpath(os.path.join(cwd, os.path.expanduser(x)))) + + if len(patch_list) <= 0 and not all_flag: + print "Either '--all' or a patch.xml must be specified" + make_patch_usage() + + if all_flag and len(patch_list) > 0: + print "only specify one of '--all' or a patch.xml" + make_patch_usage() + + if len(patch_list) > 1: + print "only one patch.xml can be specified" + make_patch_usage() + + if all_flag: + if sw_version is None: + print "'--sw_version' must be specified when using '--all'" + make_patch_usage() + + if branch is not None: + if workdir is None or srcdir is None: + print "If --branch is specified, then a srcdir and workdir must also be specified" + make_patch_usage() + + if pre_compiled_flag: + print "pre_compiled_flag = %s" % str(pre_compiled_flag) + + if formal_flag: + os.environ["FORMAL_BUILD"] = "1" + print "formal_flag = %s" % str(formal_flag) + # TODO if branch is not None or workdir is not None or srcdir is not None: + # TODO print "If --formal is specified, then srcdir, workdir and branch are automatic and must not be specified" + # TODO make_patch_usage() + + if pre_compiled_flag and formal_flag: + print "invalid options: --formal and --pre-compiled can't be used together." 
+ make_patch_usage() + + if workdir is not None: + if not os.path.isdir(workdir): + print "invalid directory: workdir = '%s'" % workdir + make_patch_usage() + + temp_rpm_db_dir = "%s/%s" % (workdir, ".rpmdb") + + if srcdir is not None: + if not os.path.isdir(srcdir): + print "invalid directory: srcdir = '%s'" % srcdir + make_patch_usage() + + for patch in patch_list: + if not os.path.isfile(patch): + print "invalid patch file path: '%s'" % patch + make_patch_usage() + + + if 'MY_REPO' in os.environ: + MY_REPO = os.path.normpath(os.path.join(cwd, os.path.expanduser(os.environ['MY_REPO']))) + else: + print "ERROR: environment variable 'MY_REPO' is not defined" + sys.exit(1) + + if 'MY_WORKSPACE' in os.environ: + MY_WORKSPACE = os.path.normpath(os.path.join(cwd, os.path.expanduser(os.environ['MY_WORKSPACE']))) + else: + print "ERROR: environment variable 'MY_WORKSPACE' is not defined" + sys.exit(1) + + if 'PROJECT' in os.environ: + PROJECT = os.path.normpath(os.path.join(cwd, os.path.expanduser(os.environ['PROJECT']))) + else: + print "ERROR: environment variable 'PROJECT' is not defined" + sys.exit(1) + + if 'SRC_BUILD_ENVIRONMENT' in os.environ: + SRC_BUILD_ENVIRONMENT = os.path.normpath(os.path.join(cwd, os.path.expanduser(os.environ['SRC_BUILD_ENVIRONMENT']))) + else: + print "ERROR: environment variable 'SRC_BUILD_ENVIRONMENT' is not defined" + sys.exit(1) + + if 'MY_SRC_RPM_BUILD_DIR' in os.environ: + MY_SRC_RPM_BUILD_DIR = os.path.normpath(os.path.join(cwd, os.path.expanduser(os.environ['MY_SRC_RPM_BUILD_DIR']))) + else: + print "ERROR: environment variable 'MY_SRC_RPM_BUILD_DIR' is not defined" + sys.exit(1) + + if 'MY_BUILD_CFG' in os.environ: + MY_BUILD_CFG = os.path.normpath(os.path.join(cwd, os.path.expanduser(os.environ['MY_BUILD_CFG']))) + else: + print "ERROR: environment variable 'MY_BUILD_CFG' is not defined" + sys.exit(1) + + if 'MY_BUILD_DIR' in os.environ: + MY_BUILD_DIR = os.path.normpath(os.path.join(cwd, os.path.expanduser(os.environ['MY_BUILD_DIR']))) + else: + print "ERROR: environment variable 'MY_BUILD_DIR' is not defined" + sys.exit(1) + + print "formal: %s" % formal_flag + print "pre_compiled_flag: %s" % pre_compiled_flag + print "pre_clean_flag: %s" % pre_clean_flag + print "capture_source_flag: %s" % capture_source_flag + print "capture_rpms_flag: %s" % capture_rpms_flag + print "workdir: %s" % workdir + print "srcdir: %s" % srcdir + print "branch: %s" % branch + print "sw_version: %s" % sw_version + print "patch_list: %s" % patch_list + print "" + + if workdir is not None: + os.chdir(workdir) + + if not read_build_info(): + print "build.info is missing. 
workdir is invalid, or has never completed initial loadbuild: workdir = '%s'" % workdir + make_patch_usage() + + # Capture initial state before any patches are built + if capture_rpms_flag: + capture_rpms() + + pl = PatchList(patch_list) + pl.myprint() + pl.build_patches() + if formal_flag: + + # sign formal patch + pl.sign_official_patches() + # deliver to git repo + pl.deliver_official_patch() + diff --git a/cgcs-patch/cgcs-patch/cgcs_make_patch/make_patching_branch b/cgcs-patch/cgcs-patch/cgcs_make_patch/make_patching_branch new file mode 100755 index 00000000..ecfb049b --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_make_patch/make_patching_branch @@ -0,0 +1,226 @@ +#!/bin/bash + +# repos +UPSTREAM_CLONE="git://vxgit.wrs.com/git" +# UPSTREAM_PUSH="ssh://vxgit.wrs.com/git" +ROOT_REPO=cgcs-root + +JENKINS_HOST=yow-cgts3-lx.wrs.com + +# SOURCE_CONTEXT=TC_17.06 +# SOURCE_JENKINS_BUILD=TC_17.06_Pull +# PREFIX=TC +# SW_VERSION=18.03 +# JOB=patching +USAGE=0 + +if [ x"$1" = x ] ; then + echo "ERROR: You must specify a source context" + USAGE=1 +fi +SOURCE_CONTEXT=$1 + +if [ x"$2" = x ] ; then + echo "ERROR: You must specify a source jenkins build" + USAGE=1 +fi +SOURCE_JENKINS_BUILD=$2 + +if [ x"$3" = x ] ; then + echo "ERROR: You must specify a prefix for patch and patch branch names" + USAGE=1 +fi +PREFIX=$3 + +if [ x"$4" = x ] ; then + echo "ERROR: You must specify a sw_version" + USAGE=1 +fi +SW_VERSION=$4 + +if [ x"$5" = x ] ; then + echo "ERROR: You must specify a job directory" + USAGE=1 +fi +JOB=$5 + +if [ $USAGE -ne 0 ] ; then + echo "USAGE: make_patching_branch <source_context> <source_jenkins_build> <prefix> <sw_version> <job>" + echo " e.g. make_patching_branch CGCS_DEV_0007 Secure_Src_Pull_CGCS_DEV_0007 CGCS 14.10 testpatch" + exit 1 +fi + +PATCH_BRANCH=$PREFIX"_"$SW_VERSION"_PATCHING" +PATCH_TAG0=v$PREFIX"_"$SW_VERSION"_PATCH_0000" +MY_LOCAL_DISK=/localdisk/designer/$USER/$JOB +MY_REPO=$MY_LOCAL_DISK + +if [[ "$JOB" = /* ]] +then + MY_LOCAL_DISK=$JOB + MY_REPO=$JOB +fi + + +echo "PREFIX=$PREFIX" +echo "SW_VERSION=$SW_VERSION" +echo "JOB=$JOB" +echo "SOURCE_CONTEXT=$SOURCE_CONTEXT" +echo "MY_LOCAL_DISK=$MY_LOCAL_DISK" +echo "MY_REPO=$MY_REPO" +echo "SOURCE_JENKINS_BUILD=$SOURCE_JENKINS_BUILD" +echo "PATCH_BRANCH=$PATCH_BRANCH" +echo "UPSTREAM_CLONE=$UPSTREAM_CLONE" + +mkdir -p $MY_LOCAL_DISK +if [ $? -ne 0 ] ; then + echo "ERROR: failed to create directory '$MY_LOCAL_DISK'" + exit 1 +fi + +cd $MY_LOCAL_DISK +if [ $? -ne 0 ] ; then + echo "ERROR: failed to change to directory '$MY_LOCAL_DISK'" + exit 1 +fi + +# Get latest tools +if [ ! -d bin ] ; then + echo "clone bin" + git clone git://git.wrs.com/git/bin +else + echo "pull bin" + cd bin + git pull + cd $MY_LOCAL_DISK +fi + + +export PATH=$MY_REPO/build-tools/branching:$PATH + + +# Create repo +mkdir -p $MY_REPO +if [ $? -ne 0 ] ; then + echo "ERROR: failed to create directory '$MY_REPO'" + exit 1 +fi + +chgrp cgts $MY_REPO +if [ $? -ne 0 ] ; then + echo "ERROR: failed to chgrp '$MY_REPO'" + exit 1 +fi + +chmod 750 $MY_REPO +if [ $? -ne 0 ] ; then + echo "ERROR: failed to chmod '$MY_REPO'" + exit 1 +fi + +cd $MY_REPO +if [ $? -ne 0 ] ; then + echo "ERROR: failed to change to directory '$MY_REPO'" + exit 1 +fi + +if [ ! -d $ROOT_REPO ] ; then + WRGIT_ALL_ADDONS=1 wrgit clone $UPSTREAM_CLONE/$ROOT_REPO $ROOT_REPO $SOURCE_CONTEXT + if [ $? -ne 0 ] ; then + echo "ERROR: failed to clone from repo '$UPSTREAM_CLONE' with context '$SOURCE_CONTEXT'" + exit 1 + fi + + MY_REPO=$MY_REPO/$ROOT_REPO +else + MY_REPO=$MY_REPO/$ROOT_REPO + cd $MY_REPO + if [ $? 
-ne 0 ] ; then + echo "ERROR: failed to change to directory '$MY_REPO'" + exit 1 + fi + + wrgit checkout $SOURCE_CONTEXT + + if [ $? -ne 0 ] ; then + echo "ERROR: wrgit checkout '$SOURCE_CONTEXT' failed" + exit 1 + fi + + wrgit pull + + if [ $? -ne 0 ] ; then + echo "ERROR: wrgit pull failed" + exit 1 + fi +fi + +cd $MY_REPO +if [ $? -ne 0 ] ; then + echo "ERROR: failed to change to directory '$MY_REPO'" + exit 1 +fi + + +git branch | grep $PATCH_BRANCH +if [ $? -ne 0 ] ; then + # Set context + CONTEXT_PATH="/localdisk/designer/jenkins/$SOURCE_JENKINS_BUILD/$ROOT_REPO/CONTEXT" + if [ -f $CONTEXT_PATH ]; then + cp $CONTEXT_PATH ../ + else + CONTEXT_PROVIDER=$JENKINS_HOST:$CONTEXT_PATH + scp $CONTEXT_PROVIDER ../ + if [ $? -ne 0 ] ; then + echo "ERROR: failed to obtain context from '$CONTEXT_PROVIDER'" + exit 1 + fi + fi + + source ../CONTEXT + if [ $? -ne 0 ] ; then + echo "ERROR: failed to set context" + exit 1 + fi + + git branch | grep $PATCH_BRANCH + if [ $? -ne 0 ] ; then + # create patching branch and tag + create_branches_and_tags.sh $PATCH_BRANCH . + if [ $? -ne 0 ] ; then + echo "ERROR: failed to create branch '$PATCH_BRANCH'" + exit 1 + fi + + push_branches_tags.sh $PATCH_BRANCH + if [ $? -ne 0 ] ; then + echo "ERROR: failed to push branch '$PATCH_BRANCH' to '$UPSTREAM_PUSH'" + exit 1 + fi + fi +else + wrgit checkout $PATCH_BRANCH + + if [ $? -ne 0 ] ; then + echo "ERROR: wrgit checkout '$PATCH_BRANCH' failed" + exit 1 + fi +fi + + +git tag | grep $PATCH_TAG0 +if [ $? -ne 0 ] ; then + create_tags.sh $PATCH_TAG0 + if [ $? -ne 0 ] ; then + echo "ERROR: failed to create tag '$PATCH_TAG0'" + exit 1 + fi + + push_tags.sh $PATCH_TAG0 + if [ $? -ne 0 ] ; then + echo "ERROR: failed to push tag '$PATCH_TAG0' to '$UPSTREAM_PUSH'" + exit 1 + fi +fi + + diff --git a/cgcs-patch/cgcs-patch/cgcs_make_patch/make_patching_tag b/cgcs-patch/cgcs-patch/cgcs_make_patch/make_patching_tag new file mode 100755 index 00000000..304a3fb6 --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_make_patch/make_patching_tag @@ -0,0 +1,98 @@ +#!/bin/bash + +# Real upstream repo +# UPSTREAM="git://vxgit.wrs.com/git/cgcs-root" +# Testing upstream repo +UPSTREAM="git://vxgit.wrs.com/git/users/cgcs/cgcs-root" + +JENKINS_HOST=yow-cgts4-lx.wrs.com + +# TAG=CGCS_14.10_PATCH_0001 +# JOB=playground +USAGE=0 + + +if [ x"$1" = x ] ; then + echo "ERROR: You must specify a PATCH_ID" + USAGE=1 +fi +PATCH_ID=$1 + +if [ x"$2" = x ] ; then + echo "ERROR: You must specify a job directory" + USAGE=1 +fi +JOB=$2 + +if [ $USAGE -ne 0 ] ; then + echo "USAGE: make_patching_tag <patch_id> <job>" + echo " e.g. make_patching_tag CGCS_14.10_PATCH_0001 testpatch" + exit 1 +fi + +TAG="v$PATCH_ID" +MY_LOCAL_DISK=/localdisk/designer/$USER/$JOB +MY_REPO=$MY_LOCAL_DISK + +if [[ "$JOB" = /* ]] +then + MY_LOCAL_DISK=$JOB + MY_REPO=$JOB +fi + + +echo "TAG=$TAG" +echo "JOB=$JOB" +echo "MY_LOCAL_DISK=$MY_LOCAL_DISK" +echo "MY_REPO=$MY_REPO" + +# Get latest tools +if [ ! -d bin ] ; then + echo "clone bin" + git clone git://git.wrs.com/git/bin +else + echo "pull bin" + cd bin + git pull + cd $MY_LOCAL_DISK +fi + + +export PATH=$MY_REPO/build-tools/branching:$PATH + + +MY_REPO=$MY_REPO/cgcs-root +cd $MY_REPO +if [ $? -ne 0 ] ; then + echo "ERROR: failed to change to directory '$MY_REPO'" + exit 1 +fi + +### wrgit pull +### if [ $? -ne 0 ] ; then +### echo "ERROR: wrgit pull failed" +### exit 1 +### fi + +# create patching tag +create_tags.sh $TAG . +if [ $? 
-ne 0 ] ; then + echo "ERROR: failed to create tag '$TAG'" + exit 1 +fi + +if [ "$USER" = "jenkins" ] ; then + sh /localdisk/designer/slittle1/proxy/push_tags.sh $TAG + if [ $? -ne 0 ] ; then + echo "ERROR: failed to push tag '$TAG' to '$UPSTREAM'" + exit 1 + fi +else + push_tags.sh $TAG + if [ $? -ne 0 ] ; then + echo "ERROR: failed to push tag '$TAG' to '$UPSTREAM'" + exit 1 + fi +fi + + diff --git a/cgcs-patch/cgcs-patch/cgcs_make_patch/make_patching_workspace b/cgcs-patch/cgcs-patch/cgcs_make_patch/make_patching_workspace new file mode 100755 index 00000000..104fdf20 --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_make_patch/make_patching_workspace @@ -0,0 +1,181 @@ +#!/bin/bash + +USAGE=0 + +if [ x"$1" = x ] ; then + echo "ERROR: You must specify a prefix for patch and patch branch names" + USAGE=1 +fi +PREFIX=$1 + +if [ x"$2" = x ] ; then + echo "ERROR: You must specify a sw_version" + USAGE=1 +fi +SW_VERSION=$2 + +if [ x"$3" = x ] ; then + echo "ERROR: You must specify a job directory" + USAGE=1 +fi +JOB=$3 + +PATCH_BRANCH=$PREFIX"_"$SW_VERSION"_PATCHING" +PATCH_ID0=$PREFIX"_"$SW_VERSION"_PATCH_0000" +PATCH_TAG0=v$PATCH_ID0 +MY_LOCAL_DISK=/localdisk/designer/$USER/$JOB +MY_REPO=$MY_LOCAL_DISK/cgcs-root +MY_WORKSPACE=/localdisk/loadbuild/$USER/$JOB + +if [[ "$JOB" = /* ]] +then + MY_LOCAL_DISK=$JOB + MY_REPO=$JOB/cgcs-root + MY_WORKSPACE=$JOB +fi + +if [ x"$4" != x ] ; then + MY_WORKSPACE=$4 +fi + +if [ x"$5" != x ] ; then + MY_LOCAL_DISK=$(realpath $5/..) +fi +MY_REPO=$MY_LOCAL_DISK/cgcs-root + +if [ $USAGE -ne 0 ] ; then + echo "USAGE: make_patching_workspace <prefix> <sw_version> <job> [ <workspace> [ <repo> ]]" + echo " e.g. make_patching_workspace TC 18.03 testpatch" + echo " e.g. make_patching_workspace TC 18.03 mypatch $MY_WORKSPACE $MY_REPO" + exit 1 +fi + +RPM_DB_DIR=$MY_WORKSPACE/export/patch_data +RPM_DB=$RPM_DB_DIR/$PATCH_ID0.rpm_db + +echo "PREFIX=$PREFIX" +echo "SW_VERSION=$SW_VERSION" +echo "JOB=$JOB" +echo "MY_LOCAL_DISK=$MY_LOCAL_DISK" +echo "MY_REPO=$MY_REPO" +echo "MY_WORKSPACE=$MY_WORKSPACE" +echo "PATCH_BRANCH=$PATCH_BRANCH" +echo "PATCH_TAG0=$PATCH_TAG0" +echo "RPM_DB_DIR=$RPM_DB_DIR" +echo "RPM_DB=$RPM_DB" + + +cd $MY_LOCAL_DISK/bin +if [ $? -ne 0 ] ; then + echo "ERROR: failed to change to directory '$MY_LOCAL_DISK/bin'" + exit 1 +fi + +export PATH=`pwd`:$PATH + +cd $MY_REPO +if [ $? -ne 0 ] ; then + echo "ERROR: failed to change to directory '$MY_REPO'" + exit 1 +fi + +# Make sure gits are free of changes that would prevent checkout or pull +for d in $(find . -type d -name .git | xargs --max-args=1 dirname) +do + (cd $d + echo $d + git clean -df + git reset --hard + git ls-files --others --exclude-standard | xargs --no-run-if-empty rm + if [ ! -f .subgits ]; then + if [ -f .gitignore ]; then + git ls-files --others --ignored --exclude-from=.gitignore | xargs --no-run-if-empty rm + fi + fi + ) +done + +wrgit checkout $PATCH_TAG0 +if [ $? -ne 0 ] ; then + echo "ERROR: wrgit checkout '$PATCH_TAG0' failed" + exit 1 +fi + +# Create workspace +echo "01: mkdir $MY_WORKSPACE" +if [ ! -d $MY_WORKSPACE ] ; then + mkdir -p $MY_WORKSPACE + if [ $? -ne 0 ] ; then + echo "ERROR: failed to create directory '$MY_WORKSPACE'" + exit 1 + fi +fi + +echo "02: cd $MY_WORKSPACE" +cd $MY_WORKSPACE +if [ $? -ne 0 ] ; then + echo "ERROR: failed to change to directory '$MY_WORKSPACE'" + exit 1 +fi + +echo "03: build" +if [ ! -f export/bootimage.iso ] ; then + echo "04: build pkgs" + nice -n 20 ionice -c Idle build-pkgs + if [ $? 
-ne 0 ] ; then + echo "ERROR: build-pkgs failed" + exit 1 + fi + + echo "05: build iso" + nice -n 20 ionice -c Idle build-iso + if [ $? -ne 0 ] ; then + echo "ERROR: build-iso failed " + exit 1 + fi +fi + +echo "06: rpm db" +mkdir -p $RPM_DB_DIR +if [ $? -ne 0 ] ; then + echo "ERROR: failed to make directory '$RPM_DB_DIR'" + exit 1 +fi + +echo "" > $RPM_DB +if [ $? -ne 0 ] ; then + echo "ERROR: failed to write file '$RPM_DB'" + exit 1 +fi + +for build_type in std rt; do + RPM_ROOT_DIR=$MY_WORKSPACE/$build_type/rpmbuild/RPMS + echo "RPM_ROOT_DIR=$RPM_ROOT_DIR" + cd $RPM_ROOT_DIR + if [ $? -ne 0 ] ; then + echo "ERROR: failed to change to directory '$RPM_ROOT_DIR'" + exit 1 + fi + + for d in `find * -type d`; do + pushd $d > /dev/null + if [ $? -ne 0 ] ; then + echo "ERROR: failed to change to directory '$d'" + exit 1 + fi + + rpm -qp --queryformat "$d %{NAME} %{RELEASE}\n" *rpm >> $RPM_DB 2> /dev/null + if [ $? -ne 0 ] ; then + echo "ERROR: rpm query failed in directory '$d'" + exit 1 + fi + + popd > /dev/null + done +done + + +echo 'Build is complete' + + + diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/__init__.py b/cgcs-patch/cgcs-patch/cgcs_patch/__init__.py new file mode 100644 index 00000000..f7d7e19e --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/__init__.py @@ -0,0 +1,7 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/api/__init__.py b/cgcs-patch/cgcs-patch/cgcs_patch/api/__init__.py new file mode 100644 index 00000000..0df917f9 --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/api/__init__.py @@ -0,0 +1,30 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +from oslo_config import cfg + + +API_SERVICE_OPTS = [ + cfg.StrOpt('api_bind_ip', + default='127.0.0.1', + help='IP for the Patching controller API server to bind to', + ), + cfg.IntOpt('api_port', + default=5487, + help='The port for the Patching controller API server', + ), + cfg.IntOpt('api_limit_max', + default=1000, + help='the maximum number of items returned in a single ' + 'response from a collection resource'), + ] + +CONF = cfg.CONF +opt_group = cfg.OptGroup(name='api', + title='Options for the Patching controller api service') +CONF.register_group(opt_group) +CONF.register_opts(API_SERVICE_OPTS) diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/api/app.py b/cgcs-patch/cgcs-patch/cgcs_patch/api/app.py new file mode 100644 index 00000000..5e0f12d2 --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/api/app.py @@ -0,0 +1,45 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. 
+ +SPDX-License-Identifier: Apache-2.0 + +""" + +#from oslo.config import cfg +import pecan + +from cgcs_patch.api import config + +#CONF = cfg.CONF + +def get_pecan_config(): + # Set up the pecan configuration + filename = config.__file__.replace('.pyc', '.py') + return pecan.configuration.conf_from_file(filename) + + +def setup_app(pecan_config=None): + if not pecan_config: + pecan_config = get_pecan_config() + + pecan.configuration.set_config(dict(pecan_config), overwrite=True) + + app = pecan.make_app( + pecan_config.app.root, + static_root=pecan_config.app.static_root, + template_path=pecan_config.app.template_path, + debug=False, + force_canonical=getattr(pecan_config.app, 'force_canonical', True), + guess_content_type_from_ext=False, # Avoid mime-type lookup + ) + + return app + + +class VersionSelectorApplication(object): + def __init__(self): + pc = get_pecan_config() + self.v1 = setup_app(pecan_config=pc) + + def __call__(self, environ, start_response): + return self.v1(environ, start_response) diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/api/config.py b/cgcs-patch/cgcs-patch/cgcs_patch/api/config.py new file mode 100644 index 00000000..91d449e7 --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/api/config.py @@ -0,0 +1,23 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +# Server Specific Configurations +server = { + 'port': '5487', + 'host': '127.0.0.1' +} + +# Pecan Application Configurations +app = { + 'root': 'cgcs_patch.api.controllers.root.RootController', + 'modules': ['cgcs_patch.authapi'], + 'static_root': '%(confdir)s/public', + 'template_path': '%(confdir)s/../templates', + 'debug': False, + 'enable_acl': True, + 'acl_public_routes': [], +} diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/api/controllers/__init__.py b/cgcs-patch/cgcs-patch/cgcs_patch/api/controllers/__init__.py new file mode 100644 index 00000000..f7d7e19e --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/api/controllers/__init__.py @@ -0,0 +1,7 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/api/controllers/root.py b/cgcs-patch/cgcs-patch/cgcs_patch/api/controllers/root.py new file mode 100644 index 00000000..05483d68 --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/api/controllers/root.py @@ -0,0 +1,266 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. 
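+
+Pecan REST controllers for the patching API; for example,
+GET /v1/query returns the cached patch state.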
+ +SPDX-License-Identifier: Apache-2.0 + +""" + +import os +from pecan import expose, request +import cgi +import glob + +from cgcs_patch.exceptions import PatchError +from cgcs_patch.patch_controller import pc + +import logging +from cgcs_patch.patch_functions import LOG + +class PatchAPIController(object): + + @expose('json') + @expose('query.xml', content_type='application/xml') + def index(self): + return self.query() + + @expose('json') + @expose('query.xml', content_type='application/xml') + def query(self, **kwargs): + try: + pd = pc.patch_query_cached(**kwargs) + except PatchError as e: + return dict(error="Error: %s" % e.message) + + return dict(pd=pd) + + @expose('json') + @expose('show.xml', content_type='application/xml') + def show(self, *args): + try: + result = pc.patch_query_specific_cached(list(args)) + except PatchError as e: + return dict(error="Error: %s" % e.message) + + return result + + @expose('json') + @expose('query.xml', content_type='application/xml') + def apply(self, *args): + if pc.any_patch_host_installing(): + return dict(error="Rejected: One or more nodes are installing patches.") + + try: + result = pc.patch_apply_api(list(args)) + except PatchError as e: + return dict(error="Error: %s" % e.message) + + pc.patch_sync() + + return result + + @expose('json') + @expose('query.xml', content_type='application/xml') + def remove(self, *args, **kwargs): + if pc.any_patch_host_installing(): + return dict(error="Rejected: One or more nodes are installing patches.") + + try: + result = pc.patch_remove_api(list(args), **kwargs) + except PatchError as e: + return dict(error="Error: %s" % e.message) + + pc.patch_sync() + + return result + + @expose('json') + @expose('query.xml', content_type='application/xml') + def delete(self, *args): + try: + result = pc.patch_delete_api(list(args)) + except PatchError as e: + return dict(error="Error: %s" % e.message) + + pc.patch_sync() + + return result + + @expose('json') + @expose('query.xml', content_type='application/xml') + def upload(self): + assert isinstance(request.POST['file'], cgi.FieldStorage) + fileitem = request.POST['file'] + + if not fileitem.filename: + return dict(error="Error: No file uploaded") + + fn = '/scratch/' + os.path.basename(fileitem.filename) + # This technique cannot copy a very large file. It + # requires a lot of memory as all data from the + # source file is read into memory then written to + # the destination file one chunk + # open(fn, 'wb').write(fileitem.file.read()) + + # Copying file by chunks using OS system calls + # requires much less memory. 
A larger chunk + # size can be used to improve the copy speed; + # currently 64K chunk size is selected + dst = os.open(fn, os.O_WRONLY | os.O_CREAT) + src = fileitem.file.fileno() + size = 64*1024 + n = size + while n >= size: + s = os.read(src, size) + n = os.write(dst, s) + os.close(dst) + + try: + result = pc.patch_import_api([fn]) + except PatchError as e: + os.remove(fn) + return dict(error=e.message) + + os.remove(fn) + + pc.patch_sync() + + return result + + @expose('json') + def upload_dir(self, **kwargs): + files = [] + for key, path in kwargs.iteritems(): + LOG.info("upload-dir: Retrieving patches from %s" % path) + for f in glob.glob(path + '/*.patch'): + if os.path.isfile(f): + files.append(f) + + if len(files) == 0: + return dict(error="No patches found") + + try: + result = pc.patch_import_api(sorted(files)) + except PatchError as e: + return dict(error=e.message) + + pc.patch_sync() + + return result + + @expose('json') + def init_release(self, *args): + if len(list(args)) == 0: + return dict(error="Release must be specified") + + try: + result = pc.patch_init_release_api(list(args)[0]) + except PatchError as e: + return dict(error=e.message) + + pc.patch_sync() + + return result + + @expose('json') + def del_release(self, *args): + if len(list(args)) == 0: + return dict(error="Release must be specified") + + try: + result = pc.patch_del_release_api(list(args)[0]) + except PatchError as e: + return dict(error=e.message) + + pc.patch_sync() + + return result + + @expose('json') + @expose('query_hosts.xml', content_type='application/xml') + def query_hosts(self, *args): + return dict(data=pc.query_host_cache()) + + @expose('json') + @expose('query.xml', content_type='application/xml') + def what_requires(self, *args): + try: + result = pc.patch_query_what_requires(list(args)) + except PatchError as e: + return dict(error="Error: %s" % e.message) + + return result + + @expose('json') + @expose('query.xml', content_type='application/xml') + def host_install(self, *args): + return dict(error="Deprecated: Use host_install_async") + + @expose('json') + @expose('query.xml', content_type='application/xml') + def host_install_async(self, *args): + if len(list(args)) == 0: + return dict(error="Host must be specified for install") + force = False + if len(list(args)) > 1 and 'force' in list(args)[1:]: + force = True + + try: + result = pc.patch_host_install(list(args)[0], force, async=True) + except PatchError as e: + return dict(error="Error: %s" % e.message) + + return result + + @expose('json') + @expose('query.xml', content_type='application/xml') + def drop_host(self, *args): + if len(list(args)) == 0: + return dict(error="Host must be specified") + + try: + result = pc.drop_host(list(args)[0]) + except PatchError as e: + return dict(error="Error: %s" % e.message) + + return result + + @expose('json') + def query_dependencies(self, *args, **kwargs): + try: + result = pc.patch_query_dependencies(list(args), **kwargs) + except PatchError as e: + return dict(error=e.message) + + return result + + @expose('json') + def commit(self, *args): + try: + result = pc.patch_commit(list(args)) + except PatchError as e: + return dict(error=e.message) + + pc.patch_sync() + + return result + + @expose('json') + def commit_dry_run(self, *args): + try: + result = pc.patch_commit(list(args), dry_run=True) + except PatchError as e: + return dict(error=e.message) + + return result + + +class RootController(object): + + @expose() + @expose('json') + def index(self): + return "Titanium Cloud Patching 
API, Available versions: /v1" + + patch = PatchAPIController() + v1 = PatchAPIController() + diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/app.py b/cgcs-patch/cgcs-patch/cgcs_patch/app.py new file mode 100644 index 00000000..d4719424 --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/app.py @@ -0,0 +1,27 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +from pecan import make_app +from cgcs_patch import model + + +def setup_app(config): + + model.init_model() + + return make_app( + config.app.root, + static_root=config.app.static_root, + template_path=config.app.template_path, + logging=getattr(config, 'logging', {}), + debug=getattr(config.app, 'debug', False), + force_canonical=getattr(config.app, 'force_canonical', True), + guess_content_type_from_ext=getattr( + config.app, + 'guess_content_type_from_ext', + True), + ) diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/authapi/__init__.py b/cgcs-patch/cgcs-patch/cgcs_patch/authapi/__init__.py new file mode 100755 index 00000000..00dc7290 --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/authapi/__init__.py @@ -0,0 +1,25 @@ +# Copyright (c) 2013-2017 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +from oslo_config import cfg + +API_SERVICE_OPTS = [ + cfg.StrOpt('auth_api_bind_ip', + default=None, + help='IP for the authenticated Patching API server to bind to'), + cfg.IntOpt('auth_api_port', + default=5491, + help='The port for the authenticated Patching API server'), + cfg.IntOpt('api_limit_max', + default=1000, + help='the maximum number of items returned in a single ' + 'response from a collection resource') +] + +CONF = cfg.CONF +opt_group = cfg.OptGroup(name='api', + title='Options for the patch-api service') +CONF.register_group(opt_group) +CONF.register_opts(API_SERVICE_OPTS) diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/authapi/acl.py b/cgcs-patch/cgcs-patch/cgcs_patch/authapi/acl.py new file mode 100755 index 00000000..b820fdb7 --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/authapi/acl.py @@ -0,0 +1,28 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" +"""Access Control Lists (ACL's) control access the API server.""" + +from cgcs_patch.authapi import auth_token + +OPT_GROUP_NAME = 'keystone_authtoken' + + +def install(app, conf, public_routes): + """Install ACL check on application. + + :param app: A WSGI application. + :param conf: Settings. Must include OPT_GROUP_NAME section. + :param public_routes: The list of the routes which will be allowed + access without authentication. + :return: The same WSGI application with ACL installed. + + """ + + keystone_config = dict(conf.items(OPT_GROUP_NAME)) + return auth_token.AuthTokenMiddleware(app, + conf=keystone_config, + public_api_routes=public_routes) diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/authapi/app.py b/cgcs-patch/cgcs-patch/cgcs_patch/authapi/app.py new file mode 100755 index 00000000..aba29299 --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/authapi/app.py @@ -0,0 +1,77 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. 
+
+SPDX-License-Identifier: Apache-2.0
+
+"""
+
+from oslo_config import cfg
+import pecan
+
+from cgcs_patch.authapi import acl
+from cgcs_patch.authapi import config
+from cgcs_patch.authapi import hooks
+from cgcs_patch.authapi import policy
+
+import ConfigParser
+
+auth_opts = [
+    cfg.StrOpt('auth_strategy',
+               default='keystone',
+               help='Method to use for auth: noauth or keystone.'),
+    ]
+
+CONF = cfg.CONF
+CONF.register_opts(auth_opts)
+
+
+def get_pecan_config():
+    # Set up the pecan configuration
+    filename = config.__file__.replace('.pyc', '.py')
+    return pecan.configuration.conf_from_file(filename)
+
+
+def setup_app(pecan_config=None, extra_hooks=None):
+    config = ConfigParser.RawConfigParser()
+    config.read('/etc/patching/patching.conf')
+
+    policy.init()
+
+    # Load the default pecan config before it is dereferenced below
+    if not pecan_config:
+        pecan_config = get_pecan_config()
+
+    app_hooks = [hooks.ConfigHook(),
+                 hooks.ContextHook(pecan_config.app.acl_public_routes),
+                 ]
+    if extra_hooks:
+        app_hooks.extend(extra_hooks)
+
+    if pecan_config.app.enable_acl:
+        app_hooks.append(hooks.AdminAuthHook())
+
+    pecan.configuration.set_config(dict(pecan_config), overwrite=True)
+
+    app = pecan.make_app(
+        pecan_config.app.root,
+        static_root=pecan_config.app.static_root,
+        template_path=pecan_config.app.template_path,
+        debug=False,
+        force_canonical=getattr(pecan_config.app, 'force_canonical', True),
+        hooks=app_hooks,
+        guess_content_type_from_ext=False,  # Avoid mime-type lookup
+    )
+
+    if pecan_config.app.enable_acl:
+        return acl.install(app, config, pecan_config.app.acl_public_routes)
+
+    return app
+
+
+class VersionSelectorApplication(object):
+    def __init__(self):
+        pc = get_pecan_config()
+        pc.app.enable_acl = (CONF.auth_strategy == 'keystone')
+        self.v1 = setup_app(pecan_config=pc)
+
+    def __call__(self, environ, start_response):
+        return self.v1(environ, start_response)
diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/authapi/auth_token.py b/cgcs-patch/cgcs-patch/cgcs_patch/authapi/auth_token.py
new file mode 100755
index 00000000..3ca399a3
--- /dev/null
+++ b/cgcs-patch/cgcs-patch/cgcs_patch/authapi/auth_token.py
@@ -0,0 +1,38 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# -*- encoding: utf-8 -*-
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystonemiddleware import auth_token
+from sysinv.common import utils
+
+
+class AuthTokenMiddleware(auth_token.AuthProtocol):
+    """A wrapper on Keystone auth_token middleware.
+
+    Does not perform verification of authentication tokens
+    for public routes in the API.
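+
+    Requests whose path matches an entry in public_api_routes are
+    passed straight through to the wrapped application.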
+ + """ + def __init__(self, app, conf, public_api_routes=[]): + self.public_api_routes = set(public_api_routes) + + super(AuthTokenMiddleware, self).__init__(app, conf) + + def __call__(self, env, start_response): + path = utils.safe_rstrip(env.get('PATH_INFO'), '/') + + if path in self.public_api_routes: + return self.app(env, start_response) + + return super(AuthTokenMiddleware, self).__call__(env, start_response) diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/authapi/config.py b/cgcs-patch/cgcs-patch/cgcs_patch/authapi/config.py new file mode 100755 index 00000000..796fb8dc --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/authapi/config.py @@ -0,0 +1,23 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +# Server Specific Configurations +server = { + 'port': '5491', + 'host': '0.0.0.0' +} + +# Pecan Application Configurations +app = { + 'root': 'cgcs_patch.api.controllers.root.RootController', + 'modules': ['cgcs_patch.api'], + 'static_root': '%(confdir)s/public', + 'template_path': '%(confdir)s/../templates', + 'debug': False, + 'enable_acl': True, + 'acl_public_routes': [], +} diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/authapi/hooks.py b/cgcs-patch/cgcs-patch/cgcs_patch/authapi/hooks.py new file mode 100755 index 00000000..c4d2353e --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/authapi/hooks.py @@ -0,0 +1,100 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2012 New Dream Network, LLC (DreamHost) +# +# Author: Doug Hellmann +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Copyright (c) 2013-2017 Wind River Systems, Inc. +# + + +from oslo_config import cfg +from pecan import hooks + +from sysinv.common import context +from sysinv.common import utils +from sysinv.openstack.common import policy +from webob import exc + + +class ConfigHook(hooks.PecanHook): + """Attach the config object to the request so controllers can get to it.""" + + def before(self, state): + state.request.cfg = cfg.CONF + + +class ContextHook(hooks.PecanHook): + """Configures a request context and attaches it to the request. + + The following HTTP request headers are used: + + X-User-Id or X-User: + Used for context.user_id. + + X-Tenant-Id or X-Tenant: + Used for context.tenant. + + X-Auth-Token: + Used for context.auth_token. + + X-Roles: + Used for setting context.is_admin flag to either True or False. + The flag is set to True, if X-Roles contains either an administrator + or admin substring. Otherwise it is set to False. 
+ + """ + def __init__(self, public_api_routes): + self.public_api_routes = public_api_routes + super(ContextHook, self).__init__() + + def before(self, state): + user_id = state.request.headers.get('X-User-Id') + user_id = state.request.headers.get('X-User', user_id) + tenant = state.request.headers.get('X-Tenant-Id') + tenant = state.request.headers.get('X-Tenant', tenant) + domain_id = state.request.headers.get('X-User-Domain-Id') + domain_name = state.request.headers.get('X-User-Domain-Name') + auth_token = state.request.headers.get('X-Auth-Token', None) + creds = {'roles': state.request.headers.get('X-Roles', '').split(',')} + + is_admin = policy.check('admin', state.request.headers, creds) + + path = utils.safe_rstrip(state.request.path, '/') + is_public_api = path in self.public_api_routes + + state.request.context = context.RequestContext( + auth_token=auth_token, + user=user_id, + tenant=tenant, + domain_id=domain_id, + domain_name=domain_name, + is_admin=is_admin, + is_public_api=is_public_api) + + +class AdminAuthHook(hooks.PecanHook): + """Verify that the user has admin rights. + + Checks whether the request context is an admin context and + rejects the request otherwise. + + """ + def before(self, state): + ctx = state.request.context + is_admin_api = policy.check('admin_api', {}, ctx.to_dict()) + + if not is_admin_api and not ctx.is_public_api: + raise exc.HTTPForbidden() diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/authapi/policy.py b/cgcs-patch/cgcs-patch/cgcs_patch/authapi/policy.py new file mode 100755 index 00000000..69150778 --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/authapi/policy.py @@ -0,0 +1,117 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Copyright (c) 2014-2017 Wind River Systems, Inc. +# + +"""Policy Engine For Patching.""" + +import os.path + +from sysinv.common import exception +from sysinv.common import utils +from sysinv.openstack.common import policy + + +_POLICY_PATH = None +_POLICY_CACHE = {} + +def reset(): + global _POLICY_PATH + global _POLICY_CACHE + _POLICY_PATH = None + _POLICY_CACHE = {} + policy.reset() + + +def init(): + global _POLICY_PATH + global _POLICY_CACHE + if not _POLICY_PATH: + _POLICY_PATH = '/etc/patching/policy.json' + if not os.path.exists(_POLICY_PATH): + raise exception.ConfigNotFound(message='/etc/patching/policy.json') + utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE, + reload_func=_set_rules) + + +def _set_rules(data): + default_rule = "rule:admin_api" + policy.set_rules(policy.Rules.load_json(data, default_rule)) + + +def enforce(context, action, target, do_raise=True): + """Verifies that the action is valid on the target in this context. + + :param context: sysinv context + :param action: string representing the action to be checked + this should be colon separated for clarity. + i.e. 
``compute:create_instance``, + ``compute:attach_volume``, + ``volume:attach_volume`` + :param target: dictionary representing the object of the action + for object creation this should be a dictionary representing the + location of the object e.g. ``{'project_id': context.project_id}`` + :param do_raise: if True (the default), raises PolicyNotAuthorized; + if False, returns False + + :raises sysinv.exception.PolicyNotAuthorized: if verification fails + and do_raise is True. + + :return: returns a non-False value (not necessarily "True") if + authorized, and the exact value False if not authorized and + do_raise is False. + """ + init() + + credentials = context.to_dict() + + # Add the exception arguments if asked to do a raise + extra = {} + if do_raise: + extra.update(exc=exception.PolicyNotAuthorized, action=action) + + return policy.check(action, target, credentials, **extra) + + +def check_is_admin(context): + """Whether or not role contains 'admin' role according to policy setting. + + """ + init() + + credentials = context.to_dict() + target = credentials + + return policy.check('context_is_admin', target, credentials) + + +@policy.register('context_is_admin') +class IsAdminCheck(policy.Check): + """An explicit check for is_admin.""" + + def __init__(self, kind, match): + """Initialize the check.""" + + self.expected = (match.lower() == 'true') + + super(IsAdminCheck, self).__init__(kind, str(self.expected)) + + def __call__(self, target, creds): + """Determine whether is_admin matches the requested value.""" + + return creds['is_admin'] == self.expected diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/base.py b/cgcs-patch/cgcs-patch/cgcs_patch/base.py new file mode 100644 index 00000000..8ed43a67 --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/base.py @@ -0,0 +1,166 @@ +""" +Copyright (c) 2017-2017 Wind River Systems, Inc. 
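+
+Common multicast socket setup and auditing for the patching daemons.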
+ +SPDX-License-Identifier: Apache-2.0 + +""" + +import socket +import struct +import subprocess +import time + +import cgcs_patch.utils as utils +import cgcs_patch.config as cfg +import cgcs_patch.constants as constants +from cgcs_patch.patch_functions import LOG + + +class PatchService: + def __init__(self): + self.sock_out = None + self.sock_in = None + self.service_type = None + self.port = None + self.mcast_addr = None + self.socket_lock = None + + def update_config(self): + # Implemented in subclass + pass + + def socket_lock_acquire(self): + pass + + def socket_lock_release(self): + pass + + def setup_socket_ipv4(self): + mgmt_ip = cfg.get_mgmt_ip() + if mgmt_ip is None: + # Don't setup socket unless we have a mgmt ip + return None + + self.update_config() + + interface_addr = socket.inet_pton(socket.AF_INET, mgmt_ip) + + # Close sockets, if necessary + for s in [self.sock_out, self.sock_in]: + if s is not None: + s.close() + + self.sock_out = socket.socket(socket.AF_INET, + socket.SOCK_DGRAM) + self.sock_in = socket.socket(socket.AF_INET, + socket.SOCK_DGRAM) + + self.sock_out.setblocking(0) + self.sock_in.setblocking(0) + + self.sock_out.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + self.sock_in.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + + self.sock_out.bind((mgmt_ip, 0)) + self.sock_in.bind(('', self.port)) + + # These options are for outgoing multicast messages + self.sock_out.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, interface_addr) + self.sock_out.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 1) + # Since only the controllers are sending to this address, + # we want the loopback so the local agent can receive it + self.sock_out.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 1) + + # Register the multicast group + group = socket.inet_pton(socket.AF_INET, self.mcast_addr) + mreq = struct.pack('=4s4s', group, interface_addr) + + self.sock_in.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq) + + return self.sock_in + + def setup_socket_ipv6(self): + mgmt_ip = cfg.get_mgmt_ip() + if mgmt_ip is None: + # Don't setup socket unless we have a mgmt ip + return None + + self.update_config() + + # Close sockets, if necessary + for s in [self.sock_out, self.sock_in]: + if s is not None: + s.close() + + self.sock_out = socket.socket(socket.AF_INET6, + socket.SOCK_DGRAM) + self.sock_in = socket.socket(socket.AF_INET6, + socket.SOCK_DGRAM) + + self.sock_out.setblocking(0) + self.sock_in.setblocking(0) + + self.sock_out.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + self.sock_in.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + + self.sock_out.bind((mgmt_ip, 0)) + self.sock_in.bind(('', self.port)) + + # These options are for outgoing multicast messages + mgmt_ifindex = utils.if_nametoindex(cfg.get_mgmt_iface()) + self.sock_out.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_IF, mgmt_ifindex) + self.sock_out.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, 1) + # Since only the controllers are sending to this address, + # we want the loopback so the local agent can receive it + self.sock_out.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_LOOP, 1) + + # Register the multicast group + if_index_packed = struct.pack('I', mgmt_ifindex) + group = socket.inet_pton(socket.AF_INET6, self.mcast_addr) + if_index_packed + self.sock_in.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, group) + + return self.sock_in + + def setup_socket(self): + self.socket_lock_acquire() + + try: + sock_in = None + 
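+            # Select the IPv4 or IPv6 socket setup path based on the
+            # management network's address version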
if utils.get_management_version() == constants.ADDRESS_VERSION_IPV6: + sock_in = self.setup_socket_ipv6() + else: + sock_in = self.setup_socket_ipv4() + self.socket_lock_release() + return sock_in + except: + LOG.exception("Failed to setup socket") + + # Close sockets, if necessary + for s in [self.sock_out, self.sock_in]: + if s is not None: + s.close() + + self.socket_lock_release() + + return None + + def audit_socket(self): + # Ensure multicast address is still allocated + cmd = "ip maddr show %s | awk 'BEGIN { ORS=\"\" }; {if ($2 == \"%s\") print $2}'" % \ + (cfg.get_mgmt_iface(), self.mcast_addr) + try: + result = subprocess.check_output(cmd, shell=True) + + if result == self.mcast_addr: + return + except subprocess.CalledProcessError as e: + LOG.error("Command output: %s" % e.output) + return + + # Close the socket and set it up again + LOG.info("Detected missing multicast addr (%s). Reconfiguring" % self.mcast_addr) + while self.setup_socket() is None: + LOG.info("Unable to setup sockets. Waiting to retry") + time.sleep(5) + LOG.info("Multicast address reconfigured") + diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/certificates.py b/cgcs-patch/cgcs-patch/cgcs_patch/certificates.py new file mode 100644 index 00000000..d94def69 --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/certificates.py @@ -0,0 +1,51 @@ +""" +Copyright (c) 2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +dev_certificate = b"""-----BEGIN CERTIFICATE----- + MIIDejCCAmKgAwIBAgICEAQwDQYJKoZIhvcNAQELBQAwQjELMAkGA1UEBhMCQ0Ex + EDAOBgNVBAgMB09udGFyaW8xITAfBgNVBAoMGFdpbmQgUml2ZXIgU3lzdGVtcywg + SW5jLjAeFw0xNzA4MTgxNDM3MjlaFw0yNzA4MTYxNDM3MjlaMEExCzAJBgNVBAYT + AkNBMRAwDgYDVQQIDAdPbnRhcmlvMSAwHgYDVQQKDBdXaW5kIFJpdmVyIFN5c3Rl + bXMsIEluYzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALcs0/Te6x69 + lxQOxudrF+uSC5F9r5bKUnZNWUKHyXKlN4SzZgWGs+fb/DqXIm7piuoQ6GH7GEQd + BEN1j/bwp30LZlv0Ur+8jhCvEdqsIP3vUXfv7pv0bomVs0Q8ZRI/FYZhjxYlyFKr + gZFV9WPP8S9SwfClHjaYRUudvwvjHHnnnkZ9blVFbXU0Xe83A8fWd0HNqAU1TlmK + 4CeSi4FI4aRKiXJnOvgv2UoJMI57rBIVKYRUH8uuFpPofOwjOM/Rd6r3Ir+4/CX6 + +/NALOBIEN6M05ZzoiyiH8NHELknQBqzNs0cXObJWpaSinAOcBnPCc7DNRwgQzjR + SdcE9FG1+LcCAwEAAaN7MHkwCQYDVR0TBAIwADAsBglghkgBhvhCAQ0EHxYdT3Bl + blNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFDRbal2KxU0hQyv4 + MVnWrW96+aWoMB8GA1UdIwQYMBaAFJaLO1x8+jti7V6pLGbUyqpy0M36MA0GCSqG + SIb3DQEBCwUAA4IBAQBmcPFZzEoPtuMPCFvJ/0cmngp8yvCGxWz3JEDkdGYSCVGs + TG5e9DeltaHOk6yLvZSRY1so30GQnyB9q8v4DwEGVslKg8u9w/WEU81wl6Q2FZ5s + XRP6TASQ0Lbg9e4b3bnTITJJ8jT/zF29NaohgC2fg0UwVuldZLfa7FihJB4//OC1 + UdNEcmdqTVRqN2oco1n3ZUWKXvG2AvGsoiqu+lsWX1MXacoFvJexSACLrUvOoXMW + i38Ofp7XMCAm3rM0cXv7Uc9WCrgnTWbEvDgjGfRAmcM9moWGoWX6E46Xkojpkfle + Ss6CHAMK42aZ/+MWQlZEzNK49PtomGMjn5SuoK8u + -----END CERTIFICATE-----""" + +formal_certificate=b"""-----BEGIN CERTIFICATE----- + MIIDezCCAmOgAwIBAgICEAMwDQYJKoZIhvcNAQELBQAwQjELMAkGA1UEBhMCQ0Ex + EDAOBgNVBAgMB09udGFyaW8xITAfBgNVBAoMGFdpbmQgUml2ZXIgU3lzdGVtcywg + SW5jLjAeFw0xNzA4MTgxNDM1MTJaFw0yNzA4MTYxNDM1MTJaMEIxCzAJBgNVBAYT + AkNBMRAwDgYDVQQIDAdPbnRhcmlvMSEwHwYDVQQKDBhXaW5kIFJpdmVyIFN5c3Rl + bXMsIEluYy4wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC+0fS8ybg8 + M37lW+lcR9LmQAR2zUJdbnl2L0fj3W/7W+PMm3mJWeQDTf19wf+qHHrgEkjxGp10 + BSXWZYdPyCdOjAay/Ew1s/waFeAQZpf4vv/9D1Y/4sVkqct9ibo5NVgvVsjqKVnX + IVhyzHlhBSUqYhZlS/SOx8JcLQWSUMJoP2XR4Tv28xIXi0Fuyp8QBwUmSwmvfPy4 + 0yxzfON/b8kHld5aTY353KLXh/5YWsn1zRlOYfS1OuJk4LGjm6HvmZtxPNUZk4vI + NA24rH4FKkuxyM3x8aPi3LE4G6GSrJDuNi28xzOj864rlFoyLODy/mov1YMR/g4k + 
d3mG6UbRckPxAgMBAAGjezB5MAkGA1UdEwQCMAAwLAYJYIZIAYb4QgENBB8WHU9w + ZW5TU0wgR2VuZXJhdGVkIENlcnRpZmljYXRlMB0GA1UdDgQWBBTjyMN/AX07rEmB + 6sz6pnyt/m+eSzAfBgNVHSMEGDAWgBSWiztcfPo7Yu1eqSxm1MqqctDN+jANBgkq + hkiG9w0BAQsFAAOCAQEASpyCu/adGTvNjyy/tV+sL/kaVEKLA7q36HUrzQkTjMPX + y8L8PVZoeWprkz7cvYTyHmVTPLBvFkGEFVn8LWi9fTTp/UrHnxw6fvb+V78mOypi + 4A1aU9+dh3L6arpd4jZ4hDiLhEClesGCYVTVBdsrh3zSOc51nT4hosyBVpRd/VgQ + jhGJBBMEXASZceady4ajK5jnR3wF8oW/he4NYF97qh8WWKVsIYbwgLS0rT58q7qq + vpjPxMOahUdACkyPyt/XJICTlkanVD7KgG3oLWpc+3FWPHGr+F7mspPLZqUcEFDV + bGF+oDJ7p/tqHsNvPlRDVGqh0QdiAkKeS/SJC9jmAw== + -----END CERTIFICATE----- + """ diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/config.py b/cgcs-patch/cgcs-patch/cgcs_patch/config.py new file mode 100644 index 00000000..a2ce8ce8 --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/config.py @@ -0,0 +1,126 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +import os +import ConfigParser +import StringIO +import subprocess +import logging +import socket +import cgcs_patch.utils as utils +import cgcs_patch.constants as constants +import tsconfig.tsconfig as tsc + +controller_mcast_group = None +agent_mcast_group = None +controller_port = 0 +agent_port = 0 +api_port = 0 +mgmt_if = None +nodetype = None +platform_conf_mtime = 0 +patching_conf_mtime = 0 +patching_conf = '/etc/patching/patching.conf' + +def read_config(): + global patching_conf_mtime + global patching_conf + + if patching_conf_mtime == os.stat(patching_conf).st_mtime: + # The file has not changed since it was last read + return + + defaults = { + 'controller_mcast_group': "239.1.1.3", + 'agent_mcast_group': "239.1.1.4", + 'api_port': "5487", + 'controller_port': "5488", + 'agent_port': "5489", + } + + global controller_mcast_group + global agent_mcast_group + global api_port + global controller_port + global agent_port + + config = ConfigParser.SafeConfigParser(defaults) + config.read(patching_conf) + patching_conf_mtime = os.stat(patching_conf).st_mtime + + controller_mcast_group = config.get('runtime', + 'controller_multicast') + agent_mcast_group = config.get('runtime', 'agent_multicast') + + api_port = config.getint('runtime', 'api_port') + controller_port = config.getint('runtime', 'controller_port') + agent_port = config.getint('runtime', 'agent_port') + + # The platform.conf file has no section headers, which causes problems + # for ConfigParser. So we'll fake it out. + ini_str = '[platform_conf]\n' + open(tsc.PLATFORM_CONF_FILE, 'r').read() + ini_fp = StringIO.StringIO(ini_str) + config.readfp(ini_fp) + + try: + value = config.get('platform_conf', 'nodetype') + + global nodetype + nodetype = value + except ConfigParser.Error: + logging.exception("Failed to read nodetype from config") + return False + + +def get_mgmt_ip(): + # Check if initial config is complete + if not os.path.exists('/etc/platform/.initial_config_complete'): + return None + mgmt_hostname = socket.gethostname() + return utils.gethostbyname(mgmt_hostname) + + +# Because the patching daemons are launched before manifests are +# applied, the content of some settings in platform.conf can change, +# such as the management interface. 
As such, we can't just directly +# use tsc.management_interface +# +def get_mgmt_iface(): + # Check if initial config is complete + if not os.path.exists(constants.INITIAL_CONFIG_COMPLETE_FLAG): + return None + + global mgmt_if + global platform_conf_mtime + + if mgmt_if is not None and \ + platform_conf_mtime == os.stat(tsc.PLATFORM_CONF_FILE).st_mtime: + # The platform.conf file hasn't been modified since we read it, + # so return the cached value. + return mgmt_if + + config = ConfigParser.SafeConfigParser() + + # The platform.conf file has no section headers, which causes problems + # for ConfigParser. So we'll fake it out. + ini_str = '[platform_conf]\n' + open(tsc.PLATFORM_CONF_FILE, 'r').read() + ini_fp = StringIO.StringIO(ini_str) + config.readfp(ini_fp) + + try: + value = config.get('platform_conf', 'management_interface') + + global nodetype + mgmt_if = value + + platform_conf_mtime = os.stat(tsc.PLATFORM_CONF_FILE).st_mtime + except ConfigParser.Error: + logging.exception("Failed to read management_interface from config") + return None + return mgmt_if + + diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/constants.py b/cgcs-patch/cgcs-patch/cgcs_patch/constants.py new file mode 100644 index 00000000..ad699d04 --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/constants.py @@ -0,0 +1,43 @@ +""" +Copyright (c) 2015-2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +import os +try: + # The tsconfig module is only available at runtime + import tsconfig.tsconfig as tsc + + INITIAL_CONFIG_COMPLETE_FLAG = os.path.join( + tsc.PLATFORM_CONF_PATH, ".initial_config_complete") +except: + pass + +PATCH_AGENT_STATE_IDLE = "idle" +PATCH_AGENT_STATE_INSTALLING = "installing" +PATCH_AGENT_STATE_INSTALL_FAILED = "install-failed" +PATCH_AGENT_STATE_INSTALL_REJECTED = "install-rejected" + +ADDRESS_VERSION_IPV4 = 4 +ADDRESS_VERSION_IPV6 = 6 +CONTROLLER_FLOATING_HOSTNAME = "controller" + +AVAILABLE = 'Available' +APPLIED = 'Applied' +PARTIAL_APPLY = 'Partial-Apply' +PARTIAL_REMOVE = 'Partial-Remove' +COMMITTED = 'Committed' +UNKNOWN = 'n/a' + +STATUS_OBSOLETE = 'OBS' +STATUS_RELEASED = 'REL' +STATUS_DEVELOPEMENT = 'DEV' + +CLI_OPT_ALL = '--all' +CLI_OPT_DRY_RUN = '--dry-run' +CLI_OPT_RECURSIVE = '--recursive' +CLI_OPT_RELEASE = '--release' + +ENABLE_DEV_CERTIFICATE_PATCH_IDENTIFIER = 'ENABLE_DEV_CERTIFICATE' diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/exceptions.py b/cgcs-patch/cgcs-patch/cgcs_patch/exceptions.py new file mode 100644 index 00000000..1d9ff62c --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/exceptions.py @@ -0,0 +1,45 @@ +""" +Copyright (c) 2017 Wind River Systems, Inc. 
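+
+Exception classes for the patching services.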
+ +SPDX-License-Identifier: Apache-2.0 + +""" + +class PatchError(Exception): + """Base class for patching exceptions.""" + + def __init__(self, message=None): + self.message = message + + def __str__(self): + return self.message or "" + + +class MetadataFail(PatchError): + """Metadata error.""" + pass + + +class RpmFail(PatchError): + """RPM error.""" + pass + + +class RepoFail(PatchError): + """Repo error.""" + pass + + +class PatchFail(PatchError): + """General patching error.""" + pass + + +class PatchValidationFailure(PatchError): + """Patch validation error.""" + pass + + +class PatchMismatchFailure(PatchError): + """Patch validation error.""" + pass \ No newline at end of file diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/messages.py b/cgcs-patch/cgcs-patch/cgcs_patch/messages.py new file mode 100644 index 00000000..a57ea28d --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/messages.py @@ -0,0 +1,64 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +from cgcs_patch.patch_functions import LOG + +PATCHMSG_UNKNOWN = 0 +PATCHMSG_HELLO = 1 +PATCHMSG_HELLO_ACK = 2 +PATCHMSG_SYNC_REQ = 3 +PATCHMSG_SYNC_COMPLETE = 4 +PATCHMSG_HELLO_AGENT = 5 +PATCHMSG_HELLO_AGENT_ACK = 6 +PATCHMSG_QUERY_DETAILED = 7 +PATCHMSG_QUERY_DETAILED_RESP = 8 +PATCHMSG_AGENT_INSTALL_REQ = 9 +PATCHMSG_AGENT_INSTALL_RESP = 10 +PATCHMSG_DROP_HOST_REQ = 11 + +PATCHMSG_STR = { + PATCHMSG_UNKNOWN: "unknown", + PATCHMSG_HELLO: "hello", + PATCHMSG_HELLO_ACK: "hello-ack", + PATCHMSG_SYNC_REQ: "sync-req", + PATCHMSG_SYNC_COMPLETE: "sync-complete", + PATCHMSG_HELLO_AGENT: "hello-agent", + PATCHMSG_HELLO_AGENT_ACK: "hello-agent-ack", + PATCHMSG_QUERY_DETAILED: "query-detailed", + PATCHMSG_QUERY_DETAILED_RESP: "query-detailed-resp", + PATCHMSG_AGENT_INSTALL_REQ: "agent-install-req", + PATCHMSG_AGENT_INSTALL_RESP: "agent-install-resp", + PATCHMSG_DROP_HOST_REQ: "drop-host-req", +} + + +class PatchMessage(object): + def __init__(self, msgtype=PATCHMSG_UNKNOWN): + self.msgtype = msgtype + self.msgversion = 1 + self.message = {} + + def decode(self, data): + if 'msgtype' in data: + self.msgtype = data['msgtype'] + if 'msgversion' in data: + self.msgversion = data['msgversion'] + + def encode(self): + self.message['msgtype'] = self.msgtype + self.message['msgversion'] = self.msgversion + + def data(self): + return {'msgtype': self.msgtype} + + def msgtype_str(self): + if self.msgtype in PATCHMSG_STR: + return PATCHMSG_STR[self.msgtype] + return "invalid-type" + + def handle(self, sock, addr): + LOG.info("Unhandled message type: %s" % self.msgtype) diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/patch_agent.py b/cgcs-patch/cgcs-patch/cgcs_patch/patch_agent.py new file mode 100644 index 00000000..3ff72a76 --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/patch_agent.py @@ -0,0 +1,1060 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. 
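+
+The patch agent daemon: audits the smart package configuration and
+installs or removes patch RPMs on request from the patch controller.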
+ +SPDX-License-Identifier: Apache-2.0 + +""" + +import os +import time +import socket +import json +import select +import subprocess +import random +import requests +import xml.etree.ElementTree as ElementTree +import rpm +import sys +import yaml +import shutil + +from rpmUtils.miscutils import stringToVersion + +from cgcs_patch.patch_functions import (configure_logging, LOG) +import cgcs_patch.config as cfg +from cgcs_patch.base import PatchService +import cgcs_patch.utils as utils +import cgcs_patch.messages as messages +import cgcs_patch.constants as constants + +from tsconfig.tsconfig import (SW_VERSION, subfunctions, install_uuid) + +pidfile_path = "/var/run/patch_agent.pid" +node_is_patched_file = "/var/run/node_is_patched" +node_is_patched_rr_file = "/var/run/node_is_patched_rr" +patch_installing_file = "/var/run/patch_installing" +patch_failed_file = "/var/run/patch_install_failed" +node_is_locked_file = "/var/run/.node_locked" + +insvc_patch_scripts = "/run/patching/patch-scripts" +insvc_patch_flags = "/run/patching/patch-flags" +insvc_patch_restart_agent = "/run/patching/.restart.patch-agent" + +run_insvc_patch_scripts_cmd = "/usr/sbin/run-patch-scripts" + +pa = None + +# Smart commands +smart_cmd = [ "/usr/bin/smart" ] +smart_quiet = smart_cmd + [ "--quiet" ] +smart_update = smart_quiet + [ "update" ] +smart_newer = smart_quiet + [ "newer" ] +smart_orphans = smart_quiet + [ "query", "--orphans", "--show-format", "$name\n" ] +smart_query = smart_quiet + [ "query" ] +smart_query_repos = smart_quiet + [ "query", "--channel=base", "--channel=updates" ] +smart_install_cmd = smart_cmd + [ "install", "--yes", "--explain" ] +smart_remove_cmd = smart_cmd + [ "remove", "--yes", "--explain" ] +smart_query_installed = smart_quiet + [ "query", "--installed", "--show-format", "$name $version\n" ] +smart_query_base = smart_quiet + [ "query", "--channel=base", "--show-format", "$name $version\n" ] +smart_query_updates = smart_quiet + [ "query", "--channel=updates", "--show-format", "$name $version\n" ] + + +def setflag(fname): + try: + with open(fname, "w") as f: + f.write("%d\n" % os.getpid()) + except: + LOG.exception("Failed to update %s flag" % fname) + + +def clearflag(fname): + if os.path.exists(fname): + try: + os.remove(fname) + except: + LOG.exception("Failed to clear %s flag" % fname) + + +def check_install_uuid(): + controller_install_uuid_url = "http://controller/feed/rel-%s/install_uuid" % SW_VERSION + try: + req = requests.get(controller_install_uuid_url) + if req.status_code != 200: + # If we're on controller-1, controller-0 may not have the install_uuid + # matching this release, if we're in an upgrade. 
If the file doesn't exist, + # bypass this check + if socket.gethostname() == "controller-1": + return True + + LOG.error("Failed to get install_uuid from controller") + return False + except requests.ConnectionError: + LOG.error("Failed to connect to controller") + return False + + controller_install_uuid = str(req.text).rstrip() + + if install_uuid != controller_install_uuid: + LOG.error("Local install_uuid=%s doesn't match controller=%s" % (install_uuid, controller_install_uuid)) + return False + + return True + + +class PatchMessageHelloAgent(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_HELLO_AGENT) + self.patch_op_counter = 0 + + def decode(self, data): + messages.PatchMessage.decode(self, data) + if 'patch_op_counter' in data: + self.patch_op_counter = data['patch_op_counter'] + + def encode(self): + messages.PatchMessage.encode(self) + + def handle(self, sock, addr): + # Send response + + # Run the smart config audit + global pa + pa.timed_audit_smart_config() + + # + # If a user tries to do a host-install on an unlocked node, + # without bypassing the lock check (either via in-service + # patch or --force option), the agent will set its state + # to Install-Rejected in order to report back the rejection. + # However, since this should just be a transient state, + # we don't want the client reporting the Install-Rejected + # state indefinitely, so reset it to Idle after a minute or so. + # + if pa.state == constants.PATCH_AGENT_STATE_INSTALL_REJECTED: + if os.path.exists(node_is_locked_file): + # Node has been locked since rejected attempt. Reset the state + pa.state = constants.PATCH_AGENT_STATE_IDLE + elif (time.time() - pa.rejection_timestamp) > 60: + # Rejected state for more than a minute. Reset it. 
+ pa.state = constants.PATCH_AGENT_STATE_IDLE + + if self.patch_op_counter > 0: + pa.handle_patch_op_counter(self.patch_op_counter) + + resp = PatchMessageHelloAgentAck() + resp.send(sock) + + def send(self, sock): + LOG.error("Should not get here") + + +class PatchMessageHelloAgentAck(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_HELLO_AGENT_ACK) + + def encode(self): + global pa + messages.PatchMessage.encode(self) + self.message['query_id'] = pa.query_id + self.message['out_of_date'] = pa.changes + self.message['hostname'] = socket.gethostname() + self.message['requires_reboot'] = pa.node_is_patched + self.message['patch_failed'] = pa.patch_failed + self.message['sw_version'] = SW_VERSION + self.message['state'] = pa.state + + def handle(self, sock, addr): + LOG.error("Should not get here") + + def send(self, sock): + self.encode() + message = json.dumps(self.message) + sock.sendto(message, (cfg.controller_mcast_group, cfg.controller_port)) + + +class PatchMessageQueryDetailed(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_QUERY_DETAILED) + + def decode(self, data): + messages.PatchMessage.decode(self, data) + + def encode(self): + # Nothing to add to the HELLO_AGENT, so just call the super class + messages.PatchMessage.encode(self) + + def handle(self, sock, addr): + # Send response + LOG.info("Handling detailed query") + resp = PatchMessageQueryDetailedResp() + resp.send(sock) + + def send(self, sock): + LOG.error("Should not get here") + + +class PatchMessageQueryDetailedResp(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_QUERY_DETAILED_RESP) + + def encode(self): + global pa + messages.PatchMessage.encode(self) + self.message['installed'] = pa.installed + self.message['to_remove'] = pa.to_remove + self.message['missing_pkgs'] = pa.missing_pkgs + self.message['nodetype'] = cfg.nodetype + self.message['sw_version'] = SW_VERSION + self.message['subfunctions'] = subfunctions + self.message['state'] = pa.state + + def handle(self, sock, addr): + LOG.error("Should not get here") + + def send(self, sock): + self.encode() + message = json.dumps(self.message) + sock.sendall(message) + + +class PatchMessageAgentInstallReq(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_AGENT_INSTALL_REQ) + self.force = False + + def decode(self, data): + messages.PatchMessage.decode(self, data) + if 'force' in data: + self.force = data['force'] + + def encode(self): + # Nothing to add to the HELLO_AGENT, so just call the super class + messages.PatchMessage.encode(self) + + def handle(self, sock, addr): + LOG.info("Handling host install request, force=%s" % self.force) + global pa + resp = PatchMessageAgentInstallResp() + + if not os.path.exists(node_is_locked_file): + if self.force: + LOG.info("Installing on unlocked node, with force option") + else: + LOG.info("Rejecting install request on unlocked node") + pa.state = constants.PATCH_AGENT_STATE_INSTALL_REJECTED + pa.rejection_timestamp = time.time() + resp.status = False + resp.reject_reason = 'Node must be locked.' 
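+                # Report the rejection back to the requesting controller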
+ resp.send(sock, addr) + return + + resp.status = pa.handle_install() + resp.send(sock, addr) + + def send(self, sock): + LOG.error("Should not get here") + + +class PatchMessageAgentInstallResp(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_AGENT_INSTALL_RESP) + self.status = False + self.reject_reason = None + + def encode(self): + global pa + messages.PatchMessage.encode(self) + self.message['status'] = self.status + if self.reject_reason is not None: + self.message['reject_reason'] = self.reject_reason + + def handle(self, sock, addr): + LOG.error("Should not get here") + + def send(self, sock, addr): + address = (addr[0], cfg.controller_port) + self.encode() + message = json.dumps(self.message) + sock.sendto(message, address) + + # Send a hello ack to follow it + resp = PatchMessageHelloAgentAck() + resp.send(sock) + + +class PatchAgent(PatchService): + def __init__(self): + PatchService.__init__(self) + self.sock_out = None + self.sock_in = None + self.listener = None + self.changes = False + self.installed = {} + self.to_install = {} + self.to_remove = [] + self.missing_pkgs = [] + self.patch_op_counter = 0 + self.node_is_patched = os.path.exists(node_is_patched_file) + self.node_is_patched_timestamp = 0 + self.query_id = 0 + self.state = constants.PATCH_AGENT_STATE_IDLE + self.last_config_audit = 0 + + # Check state flags + if os.path.exists(patch_installing_file): + # We restarted while installing. Change to failed + setflag(patch_failed_file) + os.remove(patch_installing_file) + + if os.path.exists(patch_failed_file): + self.state = constants.PATCH_AGENT_STATE_INSTALL_FAILED + + self.patch_failed = os.path.exists(patch_failed_file) + + def update_config(self): + cfg.read_config() + + if self.port != cfg.agent_port: + self.port = cfg.agent_port + + if self.mcast_addr != cfg.agent_mcast_group: + self.mcast_addr = cfg.agent_mcast_group + + def setup_tcp_socket(self): + address_family = utils.get_management_family() + self.listener = socket.socket(address_family, socket.SOCK_STREAM) + self.listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + self.listener.bind(('', self.port)) + self.listener.listen(2) # Allow two connections, for two controllers + + def audit_smart_config(self): + LOG.info("Auditing smart configuration") + + # Get the current channel config + try: + output = subprocess.check_output(smart_cmd + + [ "channel", "--yaml" ], + stderr=subprocess.STDOUT) + config = yaml.load(output) + except subprocess.CalledProcessError as e: + LOG.exception("Failed to query channels") + LOG.error("Command output: %s" % e.output) + return False + except Exception: + LOG.exception("Failed to query channels") + return False + + expected = [ { 'channel': 'rpmdb', + 'type': 'rpm-sys', + 'name': 'RPM Database', + 'baseurl': None }, + { 'channel': 'base', + 'type': 'rpm-md', + 'name': 'Base', + 'baseurl': "http://controller/feed/rel-%s" % SW_VERSION}, + { 'channel': 'updates', + 'type': 'rpm-md', + 'name': 'Patches', + 'baseurl': "http://controller/updates/rel-%s" % SW_VERSION} ] + + updated = False + + for item in expected: + channel = item['channel'] + ch_type = item['type'] + ch_name = item['name'] + ch_baseurl = item['baseurl'] + + add_channel = False + + if channel in config: + # Verify existing channel config + if (config[channel].get('type') != ch_type or + config[channel].get('name') != ch_name or + config[channel].get('baseurl') != ch_baseurl): + # Config is invalid + add_channel = True + LOG.warning("Invalid smart 
config found for %s" % channel) + try: + output = subprocess.check_output(smart_cmd + + [ "channel", "--yes", + "--remove", channel ], + stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + LOG.exception("Failed to configure %s channel" % channel) + LOG.error("Command output: %s" % e.output) + return False + else: + # Channel is missing + add_channel = True + LOG.warning("Channel %s is missing from config" % channel) + + if add_channel: + LOG.info("Adding channel %s" % channel) + cmd_args = [ "channel", "--yes", "--add", channel, + "type=%s" % ch_type, + "name=%s" % ch_name ] + if ch_baseurl is not None: + cmd_args += [ "baseurl=%s" % ch_baseurl ] + + try: + output = subprocess.check_output(smart_cmd + cmd_args, + stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + LOG.exception("Failed to configure %s channel" % channel) + LOG.error("Command output: %s" % e.output) + return False + + updated = True + + # Validate the smart config + try: + output = subprocess.check_output(smart_cmd + + [ "config", "--yaml" ], + stderr=subprocess.STDOUT) + config = yaml.load(output) + except subprocess.CalledProcessError as e: + LOG.exception("Failed to query smart config") + LOG.error("Command output: %s" % e.output) + return False + except Exception: + LOG.exception("Failed to query smart config") + return False + + # Check for the rpm-nolinktos flag + nolinktos = 'rpm-nolinktos' + if config.get(nolinktos) is not True: + # Set the flag + LOG.warning("Setting %s option" % nolinktos) + try: + output = subprocess.check_output(smart_cmd + + [ "config", "--set", + "%s=true" % nolinktos ], + stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + LOG.exception("Failed to configure %s option" % nolinktos) + LOG.error("Command output: %s" % e.output) + return False + + updated = True + + # Check for the rpm-check-signatures flag + nosignature = 'rpm-check-signatures' + if config.get(nosignature) is not False: + # Set the flag + LOG.warning("Setting %s option" % nosignature) + try: + output = subprocess.check_output(smart_cmd + + [ "config", "--set", + "%s=false" % nosignature], + stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + LOG.exception("Failed to configure %s option" % nosignature) + LOG.error("Command output: %s" % e.output) + return False + + updated = True + + if updated: + try: + subprocess.check_output(smart_update, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + LOG.exception("Failed to update smartpm") + LOG.error("Command output: %s" % e.output) + return False + + # Reset the patch op counter to force a detailed query + self.patch_op_counter = 0 + + self.last_config_audit = time.time() + return True + + def timed_audit_smart_config(self): + rc = True + if (time.time() - self.last_config_audit) > 1800: + # It's been 30 minutes since the last completed audit + LOG.info("Kicking timed audit") + rc = self.audit_smart_config() + + return rc + + @staticmethod + def parse_smart_pkglist(output): + pkglist = {} + for line in output.splitlines(): + if line == '': + continue + + fields = line.split() + pkgname = fields[0] + (version, arch) = fields[1].split('@') + + if pkgname not in pkglist: + pkglist[pkgname] = {} + pkglist[pkgname][arch] = version + elif arch not in pkglist[pkgname]: + pkglist[pkgname][arch] = version + else: + stored_ver = pkglist[pkgname][arch] + + # The rpm.labelCompare takes version broken into 3 components + # It returns: + # 1, if first arg is higher version + # 0, if versions 
are same + # -1, if first arg is lower version + rc = rpm.labelCompare(stringToVersion(version), + stringToVersion(stored_ver)) + + if rc > 0: + # Update version + pkglist[pkgname][arch] = version + + return pkglist + + @staticmethod + def get_pkg_version(pkglist, pkg, arch): + if pkg not in pkglist: + return None + if arch not in pkglist[pkg]: + return None + return pkglist[pkg][arch] + + def parse_smart_newer(self, output): + # Skip the first two lines, which are headers + for line in output.splitlines()[2:]: + if line == '': + continue + + fields = line.split() + pkgname = fields[0] + installedver = fields[2] + newver = fields[5] + + self.installed[pkgname] = installedver + self.to_install[pkgname] = newver + + def parse_smart_orphans(self, output): + for pkgname in output.splitlines(): + if pkgname == '': + continue + + highest_version = None + + try: + query = subprocess.check_output(smart_query_repos + [ "--show-format", '$version\n', pkgname ]) + # The last non-blank version is the highest + for version in query.splitlines(): + if version == '': + continue + highest_version = version.split('@')[0] + + except subprocess.CalledProcessError: + # Package is not in the repo + highest_version = None + + if highest_version is None: + # Package is to be removed + self.to_remove.append(pkgname) + else: + # Rollback to the highest version + self.to_install[pkgname] = highest_version + + # Get the installed version + try: + query = subprocess.check_output(smart_query + [ "--installed", "--show-format", '$version\n', pkgname ]) + for version in query.splitlines(): + if version == '': + continue + self.installed[pkgname] = version.split('@')[0] + break + except subprocess.CalledProcessError: + LOG.error("Failed to query installed version of %s" % pkgname) + + self.changes = True + + def check_groups(self): + # Get the groups file + mygroup = "updates-%s" % "-".join(subfunctions) + self.missing_pkgs = [] + installed_pkgs = [] + + groups_url = "http://controller/updates/rel-%s/comps.xml" % SW_VERSION + try: + req = requests.get(groups_url) + if req.status_code != 200: + LOG.error("Failed to get groups list from server") + return False + except requests.ConnectionError: + LOG.error("Failed to connect to server") + return False + + # Get list of installed packages + try: + query = subprocess.check_output(["rpm", "-qa", "--queryformat", "%{NAME}\n"]) + installed_pkgs = query.split() + except subprocess.CalledProcessError: + LOG.exception("Failed to query RPMs") + return False + + root = ElementTree.fromstring(req.text) + for child in root: + group_id = child.find('id') + if group_id is None or group_id.text != mygroup: + continue + + pkglist = child.find('packagelist') + if pkglist is None: + continue + + for pkg in pkglist.findall('packagereq'): + if pkg.text not in installed_pkgs and pkg.text not in self.missing_pkgs: + self.missing_pkgs.append(pkg.text) + self.changes = True + + def query(self): + """ Check current patch state """ + if not check_install_uuid(): + LOG.info("Failed install_uuid check. Skipping query") + return False + + if not self.audit_smart_config(): + # Set a state to "unknown"? + return False + + try: + subprocess.check_output(smart_update, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + LOG.error("Failed to update smartpm") + LOG.error("Command output: %s" % e.output) + # Set a state to "unknown"? 
+ return False + + # Generate a unique query id + self.query_id = random.random() + + self.changes = False + self.installed = {} + self.to_install = {} + self.to_remove = [] + self.missing_pkgs = [] + + # Get the repo data + pkgs_installed = {} + pkgs_base = {} + pkgs_updates = {} + + try: + output = subprocess.check_output(smart_query_installed) + pkgs_installed = self.parse_smart_pkglist(output) + except subprocess.CalledProcessError as e: + LOG.error("Failed to query installed pkgs: %s" % e.output) + # Set a state to "unknown"? + return False + + try: + output = subprocess.check_output(smart_query_base) + pkgs_base = self.parse_smart_pkglist(output) + except subprocess.CalledProcessError as e: + LOG.error("Failed to query base pkgs: %s" % e.output) + # Set a state to "unknown"? + return False + + try: + output = subprocess.check_output(smart_query_updates) + pkgs_updates = self.parse_smart_pkglist(output) + except subprocess.CalledProcessError as e: + LOG.error("Failed to query patched pkgs: %s" % e.output) + # Set a state to "unknown"? + return False + + # There are four possible actions: + # 1. If installed pkg is not in base or updates, remove it. + # 2. If installed pkg version is higher than highest in base + # or updates, downgrade it. + # 3. If installed pkg version is lower than highest in updates, + # upgrade it. + # 4. If pkg in grouplist is not in installed, install it. + + for pkg in pkgs_installed: + for arch in pkgs_installed[pkg]: + installed_version = pkgs_installed[pkg][arch] + updates_version = self.get_pkg_version(pkgs_updates, pkg, arch) + base_version = self.get_pkg_version(pkgs_base, pkg, arch) + + if updates_version is None and base_version is None: + # Remove it + self.to_remove.append(pkg) + self.changes = True + continue + + compare_version = updates_version + if compare_version is None: + compare_version = base_version + + # Compare the installed version to what's in the repo + rc = rpm.labelCompare(stringToVersion(installed_version), + stringToVersion(compare_version)) + if rc == 0: + # Versions match, nothing to do. + continue + else: + # Install the version from the repo + self.to_install[pkg] = "@".join([compare_version, arch]) + self.installed[pkg] = "@".join([installed_version, arch]) + self.changes = True + + # Look for new packages + self.check_groups() + + LOG.info("Patch state query returns %s" % self.changes) + LOG.info("Installed: %s" % self.installed) + LOG.info("To install: %s" % self.to_install) + LOG.info("To remove: %s" % self.to_remove) + LOG.info("Missing: %s" % self.missing_pkgs) + + return True + + def handle_install(self, verbose_to_stdout=False, disallow_insvc_patch=False): + # + # The disallow_insvc_patch parameter is set when we're installing + # the patch during init. At that time, we don't want to deal with + # in-service patch scripts, so instead we'll treat any patch as + # a reboot-required when this parameter is set. Rather than running + # any scripts, the RR flag will be set, which will result in the node + # being rebooted immediately upon completion of the installation. + # + + LOG.info("Handling install") + + # Check the INSTALL_UUID first. If it doesn't match the active + # controller, we don't want to install patches. + if not check_install_uuid(): + LOG.error("Failed install_uuid check. 
Skipping install") + + self.patch_failed = True + setflag(patch_failed_file) + self.state = constants.PATCH_AGENT_STATE_INSTALL_FAILED + + # Send a hello to provide a state update + if self.sock_out is not None: + hello_ack = PatchMessageHelloAgentAck() + hello_ack.send(self.sock_out) + + return False + + self.state = constants.PATCH_AGENT_STATE_INSTALLING + setflag(patch_installing_file) + + try: + # Create insvc patch directories + if os.path.exists(insvc_patch_scripts): + shutil.rmtree(insvc_patch_scripts, ignore_errors=True) + if os.path.exists(insvc_patch_flags): + shutil.rmtree(insvc_patch_flags, ignore_errors=True) + os.mkdir(insvc_patch_scripts, 0700) + os.mkdir(insvc_patch_flags, 0700) + except: + LOG.exception("Failed to create in-service patch directories") + + # Send a hello to provide a state update + if self.sock_out is not None: + hello_ack = PatchMessageHelloAgentAck() + hello_ack.send(self.sock_out) + + # Build up the install set + if verbose_to_stdout: + print "Checking for software updates..." + self.query() + install_set = [] + for pkg, version in self.to_install.iteritems(): + install_set.append("%s-%s" % (pkg, version)) + + install_set += self.missing_pkgs + + changed = False + rc = True + + if len(install_set) > 0: + try: + if verbose_to_stdout: + print "Installing software updates..." + LOG.info("Installing: %s" % ", ".join(install_set)) + output = subprocess.check_output(smart_install_cmd + install_set, stderr=subprocess.STDOUT) + changed = True + for line in output.split('\n'): + LOG.info("INSTALL: %s" % line) + if verbose_to_stdout: + print "Software updated." + except subprocess.CalledProcessError as e: + LOG.exception("Failed to install RPMs") + LOG.error("Command output: %s" % e.output) + rc = False + if verbose_to_stdout: + print "WARNING: Software update failed." + else: + if verbose_to_stdout: + print "Nothing to install." + LOG.info("Nothing to install") + + if rc: + self.query() + remove_set = self.to_remove + + if len(remove_set) > 0: + try: + if verbose_to_stdout: + print "Handling patch removal..." + LOG.info("Removing: %s" % ", ".join(remove_set)) + output = subprocess.check_output(smart_remove_cmd + remove_set, stderr=subprocess.STDOUT) + changed = True + for line in output.split('\n'): + LOG.info("REMOVE: %s" % line) + if verbose_to_stdout: + print "Patch removal complete." + except subprocess.CalledProcessError as e: + LOG.exception("Failed to remove RPMs") + LOG.error("Command output: %s" % e.output) + rc = False + if verbose_to_stdout: + print "WARNING: Patch removal failed." + else: + if verbose_to_stdout: + print "Nothing to remove." + LOG.info("Nothing to remove") + + if changed: + # Update the node_is_patched flag + setflag(node_is_patched_file) + + self.node_is_patched = True + if verbose_to_stdout: + print "This node has been patched." + + if os.path.exists(node_is_patched_rr_file): + LOG.info("Reboot is required. Skipping patch-scripts") + elif disallow_insvc_patch: + LOG.info("Disallowing patch-scripts. 
Treating as reboot-required") + setflag(node_is_patched_rr_file) + else: + LOG.info("Running in-service patch-scripts") + + try: + subprocess.check_output(run_insvc_patch_scripts_cmd, stderr=subprocess.STDOUT) + + # Clear the node_is_patched flag, since we've handled it in-service + clearflag(node_is_patched_file) + self.node_is_patched = False + except subprocess.CalledProcessError as e: + LOG.exception("In-Service patch scripts failed") + LOG.error("Command output: %s" % e.output) + # Fail the patching operation + rc = False + + # Clear the in-service patch dirs + if os.path.exists(insvc_patch_scripts): + shutil.rmtree(insvc_patch_scripts, ignore_errors=True) + if os.path.exists(insvc_patch_flags): + shutil.rmtree(insvc_patch_flags, ignore_errors=True) + + if rc: + self.patch_failed = False + clearflag(patch_failed_file) + self.state = constants.PATCH_AGENT_STATE_IDLE + else: + # Update the patch_failed flag + self.patch_failed = True + setflag(patch_failed_file) + self.state = constants.PATCH_AGENT_STATE_INSTALL_FAILED + + clearflag(patch_installing_file) + self.query() + + # Send a hello to provide a state update + if self.sock_out is not None: + hello_ack = PatchMessageHelloAgentAck() + hello_ack.send(self.sock_out) + + return rc + + def handle_patch_op_counter(self, counter): + changed = False + if os.path.exists(node_is_patched_file): + # The node has been patched. Run a query if: + # - node_is_patched didn't exist previously + # - node_is_patched timestamp changed + timestamp = os.path.getmtime(node_is_patched_file) + if not self.node_is_patched: + self.node_is_patched = True + self.node_is_patched_timestamp = timestamp + changed = True + elif self.node_is_patched_timestamp != timestamp: + self.node_is_patched_timestamp = timestamp + changed = True + elif self.node_is_patched: + self.node_is_patched = False + self.node_is_patched_timestamp = 0 + changed = True + + if self.patch_op_counter < counter: + self.patch_op_counter = counter + changed = True + + if changed: + rc = self.query() + if not rc: + # Query failed. Reset the op counter + self.patch_op_counter = 0 + + def run(self): + self.setup_socket() + + while self.sock_out is None: + # Check every thirty seconds? + # Once we've got a conf file, tied into packstack, + # we'll get restarted when the file is updated, + # and this should be unnecessary. + time.sleep(30) + self.setup_socket() + + self.setup_tcp_socket() + + # Ok, now we've got our socket. 
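+        # The loop below multiplexes the UDP socket, the TCP listener
+        # and any accepted TCP connections via select(), using a
+        # 30-second timeout to drive the periodic socket audit.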
+ # Let's let the controllers know we're here + hello_ack = PatchMessageHelloAgentAck() + hello_ack.send(self.sock_out) + + first_hello = True + + connections = [] + + timeout = time.time() + 30.0 + remaining = 30 + + while True: + inputs = [ self.sock_in, self.listener ] + connections + outputs = [ ] + + rlist, wlist, xlist = select.select(inputs, outputs, inputs, remaining) + + remaining = int(timeout - time.time()) + if remaining <= 0 or remaining > 30: + timeout = time.time() + 30.0 + remaining = 30 + + if (len(rlist) == 0 and + len(wlist) == 0 and + len(xlist) == 0): + # Timeout hit + self.audit_socket() + continue + + for s in rlist: + if s == self.listener: + conn, addr = s.accept() + connections.append(conn) + continue + + data = '' + addr = None + msg = None + + if s == self.sock_in: + # Receive from UDP + data, addr = s.recvfrom(1024) + else: + # Receive from TCP + while True: + try: + packet = s.recv(1024) + except socket.error: + LOG.exception("Socket error on recv") + data = '' + break + + if packet: + data += packet + + if data == '': + break + + try: + datachk = json.loads(data) + break + except ValueError: + # Message is incomplete + continue + else: + # End of TCP message received + break + + if data == '': + # Connection dropped + connections.remove(s) + s.close() + continue + + msgdata = json.loads(data) + + # For now, discard any messages that are not msgversion==1 + if 'msgversion' in msgdata and msgdata['msgversion'] != 1: + continue + + if 'msgtype' in msgdata: + if msgdata['msgtype'] == messages.PATCHMSG_HELLO_AGENT: + if first_hello: + self.query() + first_hello = False + + msg = PatchMessageHelloAgent() + elif msgdata['msgtype'] == messages.PATCHMSG_QUERY_DETAILED: + msg = PatchMessageQueryDetailed() + elif msgdata['msgtype'] == messages.PATCHMSG_AGENT_INSTALL_REQ: + msg = PatchMessageAgentInstallReq() + + if msg is None: + msg = messages.PatchMessage() + + msg.decode(msgdata) + if s == self.sock_in: + msg.handle(self.sock_out, addr) + else: + msg.handle(s, addr) + + for s in xlist: + if s in connections: + connections.remove(s) + s.close() + + # Check for in-service patch restart flag + if os.path.exists(insvc_patch_restart_agent): + # Make sure it's safe to restart, ie. no reqs queued + rlist, wlist, xlist = select.select(inputs, outputs, inputs, 0) + if (len(rlist) == 0 and + len(wlist) == 0 and + len(xlist) == 0): + # Restart + LOG.info("In-service patch restart flag detected. Exiting.") + os.remove(insvc_patch_restart_agent) + exit(0) + + +def main(): + global pa + + configure_logging() + + cfg.read_config() + + pa = PatchAgent() + pa.query() + + if len(sys.argv) <= 1: + pa.run() + elif sys.argv[1] == "--install": + pa.handle_install(verbose_to_stdout=True, disallow_insvc_patch=True) + elif sys.argv[1] == "--status": + rc = 0 + if pa.changes: + rc = 1 + exit(rc) diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/patch_client.py b/cgcs-patch/cgcs-patch/cgcs_patch/patch_client.py new file mode 100644 index 00000000..527b8d03 --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/patch_client.py @@ -0,0 +1,1308 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. 
+ +SPDX-License-Identifier: Apache-2.0 + +""" + +import requests +import json +import os +import sys +import shutil +import re +import time +import signal + +import subprocess +import textwrap + +# noinspection PyUnresolvedReferences +from requests_toolbelt import MultipartEncoder + +import cgcs_patch.constants as constants +import cgcs_patch.utils as utils + +from tsconfig.tsconfig import SW_VERSION as RUNNING_SW_VERSION + +api_addr = "127.0.0.1:5487" +auth_token = None + +TERM_WIDTH = 72 +VIRTUAL_REGION = 'SystemController' +IPV6_FAMILY = 6 + + +help_upload = "Upload one or more patches to the patching system." +help_upload_dir = "Upload patches from one or more directories to the patching system." +help_apply = "Apply one or more patches. This adds the specified patches " + \ + "to the repository, making the update(s) available to the " + \ + "hosts in the system. Use --all to apply all available patches." +help_remove = "Remove one or more patches. This removes the specified " + \ + "patches from the repository." +help_delete = "Delete one or more patches from the patching system." +help_query = "Query system patches. Optionally, specify 'query applied' " + \ + "to query only those patches that are applied, or 'query available' " + \ + "to query those that are not." +help_show = "Show details for specified patches." +help_what_requires = "List patches that require the specified patches." +help_query_hosts = "Query patch states for hosts in the system." +help_host_install = "Trigger patch install/remove on specified host. " + \ + "To force install on unlocked node, use the --force option." +help_host_install_async = "Trigger patch install/remove on specified host. " + \ + "To force install on unlocked node, use the --force option." + \ + " Note: This command returns immediately upon dispatching installation request." +help_patch_args = "Patches are specified as a space-separated list of patch IDs." +help_install_local = "Trigger patch install/remove on the local host. " + \ + "This command can only be used for patch installation prior to initial " + \ + "configuration." +help_drop_host = "Drop specified host from table." +help_query_dependencies = "List dependencies for specified patch. Use " + \ + constants.CLI_OPT_RECURSIVE + " for recursive query." +help_commit = "Commit patches to free disk space. WARNING: This action " + \ + "is irreversible!" +help_region_name = "Send the request to a specified region" + + +def set_term_width(): + global TERM_WIDTH + + try: + with open(os.devnull, 'w') as NULL: + output = subprocess.check_output(["tput", "cols"], stderr=NULL) + width = int(output) + if width > 60: + TERM_WIDTH = width - 4 + except: + pass + + +def print_help(): + print "usage: sw-patch [--debug]" + print " ..." 
+ print "" + print "Subcomands:" + print "" + print textwrap.fill(" {0:<15} ".format("upload:") + help_upload, + width=TERM_WIDTH, subsequent_indent=' '*20) + print "" + print textwrap.fill(" {0:<15} ".format("upload-dir:") + help_upload_dir, + width=TERM_WIDTH, subsequent_indent=' '*20) + print "" + print textwrap.fill(" {0:<15} ".format("apply:") + help_apply, + width=TERM_WIDTH, subsequent_indent=' '*20) + print textwrap.fill(help_patch_args, + width=TERM_WIDTH, initial_indent=' '*20, subsequent_indent=' '*20) + print "" + print textwrap.fill(" {0:<15} ".format("remove:") + help_remove, + width=TERM_WIDTH, subsequent_indent=' '*20) + print textwrap.fill(help_patch_args, + width=TERM_WIDTH, initial_indent=' '*20, subsequent_indent=' '*20) + print "" + print textwrap.fill(" {0:<15} ".format("delete:") + help_delete, + width=TERM_WIDTH, subsequent_indent=' '*20) + print textwrap.fill(help_patch_args, + width=TERM_WIDTH, initial_indent=' '*20, subsequent_indent=' '*20) + print "" + print textwrap.fill(" {0:<15} ".format("query:") + help_query, + width=TERM_WIDTH, subsequent_indent=' '*20) + print "" + print textwrap.fill(" {0:<15} ".format("show:") + help_show, + width=TERM_WIDTH, subsequent_indent=' '*20) + print "" + print textwrap.fill(" {0:<15} ".format("what-requires:") + help_what_requires, + width=TERM_WIDTH, subsequent_indent=' '*20) + print "" + print textwrap.fill(" {0:<15} ".format("query-hosts:") + help_query_hosts, + width=TERM_WIDTH, subsequent_indent=' '*20) + print "" + print textwrap.fill(" {0:<15} ".format("host-install:") + help_host_install, + width=TERM_WIDTH, subsequent_indent=' '*20) + print "" + print textwrap.fill(" {0:<15} ".format("host-install-async:") + help_host_install_async, + width=TERM_WIDTH, subsequent_indent=' '*20) + print "" + print textwrap.fill(" {0:<15} ".format("install-local:") + help_install_local, + width=TERM_WIDTH, subsequent_indent=' '*20) + print "" + print textwrap.fill(" {0:<15} ".format("drop-host:") + help_drop_host, + width=TERM_WIDTH, subsequent_indent=' '*20) + print "" + print textwrap.fill(" {0:<15} ".format("query-dependencies:") + help_query_dependencies, + width=TERM_WIDTH, subsequent_indent=' '*20) + print "" + print textwrap.fill(" {0:<15} ".format("commit:") + help_commit, + width=TERM_WIDTH, subsequent_indent=' '*20) + print "" + print textwrap.fill(" {0:<15} ".format("--os-region-name:") + help_region_name, + width=TERM_WIDTH, subsequent_indent=' '*20) + print "" + + exit(1) + + +def check_rc(req): + rc = 0 + if req.status_code == 200: + data = json.loads(req.text) + if 'error' in data and data["error"] != "": + rc = 1 + else: + rc = 1 + + return rc + + +def print_result_debug(req): + if req.status_code == 200: + data = json.loads(req.text) + if 'pd' in data: + print json.dumps(data['pd'], + sort_keys=True, + indent=4, + separators=(',', ': ')) + elif 'data' in data: + print json.dumps(data['data'], + sort_keys=True, + indent=4, + separators=(',', ': ')) + else: + print json.dumps(data, + sort_keys=True, + indent=4, + separators=(',', ': ')) + elif req.status_code == 500: + print "An internal error has occurred. 
Please check /var/log/patching.log for details" + else: + m = re.search("(Error message:.*)", req.text, re.MULTILINE) + print m.group(0) + + +def print_patch_op_result(req): + if req.status_code == 200: + data = json.loads(req.text) + + if 'pd' in data: + pd = data['pd'] + + # Calculate column widths + hdr_id = "Patch ID" + hdr_rr = "RR" + hdr_rel = "Release" + hdr_repo = "Repo State" + hdr_state = "Patch State" + + width_id = len(hdr_id) + width_rr = len(hdr_rr) + width_rel = len(hdr_rel) + width_repo = len(hdr_repo) + width_state = len(hdr_state) + + show_repo = False + + for patch_id in pd.keys(): + if len(patch_id) > width_id: + width_id = len(patch_id) + if len(pd[patch_id]["sw_version"]) > width_rel: + width_rel = len(pd[patch_id]["sw_version"]) + if len(pd[patch_id]["repostate"]) > width_repo: + width_repo = len(pd[patch_id]["repostate"]) + if len(pd[patch_id]["patchstate"]) > width_state: + width_state = len(pd[patch_id]["patchstate"]) + if pd[patch_id]["patchstate"] == "n/a": + show_repo = True + + if show_repo: + print "{0:^{width_id}} {1:^{width_rr}} {2:^{width_rel}} {3:^{width_repo}} {4:^{width_state}}".format( + hdr_id, hdr_rr, hdr_rel, hdr_repo, hdr_state, + width_id=width_id, width_rr=width_rr, + width_rel=width_rel, width_repo=width_repo, width_state=width_state) + + print "{0} {1} {2} {3} {4}".format( + '=' * width_id, '=' * width_rr, '=' * width_rel, '=' * width_repo, '=' * width_state) + + for patch_id in sorted(pd.keys()): + if "reboot_required" in pd[patch_id]: + rr = pd[patch_id]["reboot_required"] + else: + rr = "Y" + + print "{0:<{width_id}} {1:^{width_rr}} {2:^{width_rel}} {3:^{width_repo}} {4:^{width_state}}".format( + patch_id, + rr, + pd[patch_id]["sw_version"], + pd[patch_id]["repostate"], + pd[patch_id]["patchstate"], + width_id=width_id, width_rr=width_rr, + width_rel=width_rel, width_repo=width_repo, width_state=width_state) + else: + print "{0:^{width_id}} {1:^{width_rr}} {2:^{width_rel}} {3:^{width_state}}".format( + hdr_id, hdr_rr, hdr_rel, hdr_state, + width_id=width_id, width_rr=width_rr, width_rel=width_rel, width_state=width_state) + + print "{0} {1} {2} {3}".format( + '=' * width_id, '=' * width_rr, '=' * width_rel, '=' * width_state) + + for patch_id in sorted(pd.keys()): + if "reboot_required" in pd[patch_id]: + rr = pd[patch_id]["reboot_required"] + else: + rr = "Y" + + print "{0:<{width_id}} {1:^{width_rr}} {2:^{width_rel}} {3:^{width_state}}".format( + patch_id, + rr, + pd[patch_id]["sw_version"], + pd[patch_id]["patchstate"], + width_id=width_id, width_rr=width_rr, width_rel=width_rel, width_state=width_state) + + print "" + + if 'info' in data and data["info"] != "": + print data["info"] + + if 'warning' in data and data["warning"] != "": + print "Warning:" + print data["warning"] + + if 'error' in data and data["error"] != "": + print "Error:" + print data["error"] + + elif req.status_code == 500: + print "An internal error has occurred. 
Please check /var/log/patching.log for details" + + +def print_patch_show_result(req): + if req.status_code == 200: + data = json.loads(req.text) + + if 'metadata' in data: + pd = data['metadata'] + for patch_id in sorted(pd.keys()): + print "%s:" % patch_id + + if "sw_version" in pd[patch_id] and pd[patch_id]["sw_version"] != "": + print textwrap.fill(" {0:<15} ".format("Release:") + pd[patch_id]["sw_version"], + width=TERM_WIDTH, subsequent_indent=' '*20) + + if "patchstate" in pd[patch_id] and pd[patch_id]["patchstate"] != "": + print textwrap.fill(" {0:<15} ".format("Patch State:") + pd[patch_id]["patchstate"], + width=TERM_WIDTH, subsequent_indent=' '*20) + + if pd[patch_id]["patchstate"] == "n/a": + if "repostate" in pd[patch_id] and pd[patch_id]["repostate"] != "": + print textwrap.fill(" {0:<15} ".format("Repo State:") + pd[patch_id]["repostate"], + width=TERM_WIDTH, subsequent_indent=' '*20) + + if "status" in pd[patch_id] and pd[patch_id]["status"] != "": + print textwrap.fill(" {0:<15} ".format("Status:") + pd[patch_id]["status"], + width=TERM_WIDTH, subsequent_indent=' '*20) + + if "unremovable" in pd[patch_id] and pd[patch_id]["unremovable"] != "": + print textwrap.fill(" {0:<15} ".format("Unremovable:") + pd[patch_id]["unremovable"], + width=TERM_WIDTH, subsequent_indent=' '*20) + + if "reboot_required" in pd[patch_id] and pd[patch_id]["reboot_required"] != "": + print textwrap.fill(" {0:<15} ".format("RR:") + pd[patch_id]["reboot_required"], + width=TERM_WIDTH, subsequent_indent=' '*20) + + if "summary" in pd[patch_id] and pd[patch_id]["summary"] != "": + print textwrap.fill(" {0:<15} ".format("Summary:") + pd[patch_id]["summary"], + width=TERM_WIDTH, subsequent_indent=' '*20) + + if "description" in pd[patch_id] and pd[patch_id]["description"] != "": + first_line = True + for line in pd[patch_id]["description"].split('\n'): + if first_line: + print textwrap.fill(" {0:<15} ".format("Description:") + line, + width=TERM_WIDTH, subsequent_indent=' '*20) + first_line = False + else: + print textwrap.fill(line, + width=TERM_WIDTH, subsequent_indent=' '*20, + initial_indent=' '*20) + + if "install_instructions" in pd[patch_id] and pd[patch_id]["install_instructions"] != "": + print " Install Instructions:" + for line in pd[patch_id]["install_instructions"].split('\n'): + print textwrap.fill(line, + width=TERM_WIDTH, subsequent_indent=' '*20, + initial_indent=' '*20) + + if "warnings" in pd[patch_id] and pd[patch_id]["warnings"] != "": + first_line = True + for line in pd[patch_id]["warnings"].split('\n'): + if first_line: + print textwrap.fill(" {0:<15} ".format("Warnings:") + line, + width=TERM_WIDTH, subsequent_indent=' '*20) + first_line = False + else: + print textwrap.fill(line, + width=TERM_WIDTH, subsequent_indent=' '*20, + initial_indent=' '*20) + + if "requires" in pd[patch_id] and len(pd[patch_id]["requires"]) > 0: + print " Requires:" + for req_patch in sorted(pd[patch_id]["requires"]): + print ' '*20 + req_patch + + if "contents" in data and patch_id in data["contents"]: + print " Contents:" + for pkg in sorted(data["contents"][patch_id]): + print ' '*20 + pkg + + print "\n" + + if 'info' in data and data["info"] != "": + print data["info"] + + if 'warning' in data and data["warning"] != "": + print "Warning:" + print data["warning"] + + if 'error' in data and data["error"] != "": + print "Error:" + print data["error"] + + elif req.status_code == 500: + print "An internal error has occurred. 
Please check /var/log/patching.log for details" + + +def patch_upload_req(debug, args): + rc = 0 + + if len(args) == 0: + print_help() + + # Ignore interrupts during this function + signal.signal(signal.SIGINT, signal.SIG_IGN) + + for patchfile in sorted(list(set(args))): + if os.path.isdir(patchfile): + print "Error: %s is a directory. Please use upload-dir" % patchfile + continue + + if not os.path.isfile(patchfile): + print "Error: File does not exist: %s" % patchfile + continue + + enc = MultipartEncoder(fields={'file': (patchfile, + open(patchfile, 'rb'), + )}) + url = "http://%s/patch/upload" % api_addr + headers = {'Content-Type': enc.content_type} + append_auth_token_if_required(headers) + req = requests.post(url, + data=enc, + headers=headers) + + if debug: + print_result_debug(req) + else: + print_patch_op_result(req) + + if check_rc(req) != 0: + rc = 1 + + return rc + + +def patch_apply_req(debug, args): + if len(args) == 0: + print_help() + + # Ignore interrupts during this function + signal.signal(signal.SIGINT, signal.SIG_IGN) + + patches = "/".join(args) + + url = "http://%s/patch/apply/%s" % (api_addr, patches) + + headers = {} + append_auth_token_if_required(headers) + req = requests.post(url, headers=headers) + + if debug: + print_result_debug(req) + else: + print_patch_op_result(req) + + return check_rc(req) + + +def patch_remove_req(debug, args): + extra_opts = "" + + if len(args) == 0: + print_help() + + # Ignore interrupts during this function + signal.signal(signal.SIGINT, signal.SIG_IGN) + + # The removeunremovable option is hidden and should not be added to help + # text or customer documentation. It is for emergency use only - under + # supervision of the design team. + if "--removeunremovable" in args: + idx = args.index("--removeunremovable") + + # Get rid of the --removeunremovable + args.pop(idx) + + # Format the extra opts + extra_opts = "?removeunremovable=yes" + + patches = "/".join(args) + + url = "http://%s/patch/remove/%s%s" % (api_addr, patches, extra_opts) + + headers = {} + append_auth_token_if_required(headers) + req = requests.post(url, headers=headers) + + if debug: + print_result_debug(req) + else: + print_patch_op_result(req) + + return check_rc(req) + + +def patch_delete_req(debug, args): + if len(args) == 0: + print_help() + + # Ignore interrupts during this function + signal.signal(signal.SIGINT, signal.SIG_IGN) + + patches = "/".join(args) + + url = "http://%s/patch/delete/%s" % (api_addr, patches) + + headers = {} + append_auth_token_if_required(headers) + req = requests.post(url, headers=headers) + + if debug: + print_result_debug(req) + else: + print_patch_op_result(req) + + return check_rc(req) + + +def patch_commit_req(debug, args): + if len(args) == 0: + print_help() + + # Ignore interrupts during this function + signal.signal(signal.SIGINT, signal.SIG_IGN) + + dry_run = False + if constants.CLI_OPT_DRY_RUN in args: + dry_run = True + args.remove(constants.CLI_OPT_DRY_RUN) + + all = False + if constants.CLI_OPT_ALL in args: + all = True + args.remove(constants.CLI_OPT_ALL) + + # Default to running release + relopt = RUNNING_SW_VERSION + + release = False + if constants.CLI_OPT_RELEASE in args: + release = True + idx = args.index(constants.CLI_OPT_RELEASE) + # There must be at least one more arg + if len(args) < (idx + 1): + print_help() + + # Get rid of the --release + args.pop(idx) + # Pop off the release arg + relopt = args.pop(idx) + + headers = {} + append_auth_token_if_required(headers) + if release and not all: + # Disallow + 
print "Use of --release option requires --all" + return 1 + elif all: + # Get a list of all patches + extra_opts = "&release=%s" % relopt + url = "http://%s/patch/query?show=all%s" % (api_addr, extra_opts) + + req = requests.get(url, headers=headers) + + patch_list = [] + if req.status_code == 200: + data = json.loads(req.text) + + if 'pd' in data: + patch_list = sorted(data['pd'].keys()) + elif req.status_code == 500: + print "Failed to get patch list. Aborting..." + return 1 + + if len(patch_list) == 0: + print "There are no %s patches to commit." % relopt + return 0 + + print "The following patches will be committed:" + for patch_id in patch_list: + print " %s" % patch_id + print + + patches = "/".join(patch_list) + else: + patches = "/".join(args) + + # First, get a list of dependencies and ask for confirmation + url = "http://%s/patch/query_dependencies/%s?recursive=yes" % (api_addr, patches) + + req = requests.get(url, headers=headers) + + if req.status_code == 200: + data = json.loads(req.text) + + if 'patches' in data: + print "The following patches will be committed:" + for patch_id in sorted(data['patches']): + print " %s" % patch_id + print + else: + print "No patches found to commit" + return 1 + + elif req.status_code == 500: + print "An internal error has occurred. Please check /var/log/patching.log for details" + return 1 + + # Run dry-run + url = "http://%s/patch/commit_dry_run/%s" % (api_addr, patches) + + req = requests.post(url, headers=headers) + print_patch_op_result(req) + + if check_rc(req) != 0: + print "Aborting..." + return 1 + + if dry_run: + return 0 + + print + commit_warning = "WARNING: Committing a patch is an irreversible operation. " + \ + "Committed patches cannot be removed." + print textwrap.fill(commit_warning, width=TERM_WIDTH, subsequent_indent=' '*9) + print + + user_input = raw_input("Would you like to continue? [y/N]: ") + if user_input.lower() != 'y': + print "Aborting..." 
+        return 1
+
+    url = "http://%s/patch/commit/%s" % (api_addr, patches)
+    req = requests.post(url, headers=headers)
+
+    if debug:
+        print_result_debug(req)
+    else:
+        print_patch_op_result(req)
+
+    return check_rc(req)
+
+
+def patch_query_req(debug, args):
+    state = "all"
+    extra_opts = ""
+
+    if "--release" in args:
+        idx = args.index("--release")
+        # There must be at least one more arg, holding the release value
+        if len(args) < (idx + 2):
+            print_help()
+
+        # Get rid of the --release
+        args.pop(idx)
+        # Pop off the release arg
+        relopt = args.pop(idx)
+
+        # Format the query string
+        extra_opts = "&release=%s" % relopt
+
+    if len(args) > 1:
+        # Support 1 additional arg at most, currently
+        print_help()
+
+    if len(args) > 0:
+        state = args[0]
+
+    url = "http://%s/patch/query?show=%s%s" % (api_addr, state, extra_opts)
+
+    headers = {}
+    append_auth_token_if_required(headers)
+    req = requests.get(url, headers=headers)
+
+    if debug:
+        print_result_debug(req)
+    else:
+        print_patch_op_result(req)
+
+    return check_rc(req)
+
+
+def print_query_hosts_result(req):
+    if req.status_code == 200:
+        data = json.loads(req.text)
+        if 'data' not in data:
+            print "Invalid data returned:"
+            print_result_debug(req)
+            return
+
+        agents = data['data']
+
+        # Calculate column widths
+        hdr_hn = "Hostname"
+        hdr_ip = "IP Address"
+        hdr_pc = "Patch Current"
+        hdr_rr = "Reboot Required"
+        hdr_rel = "Release"
+        hdr_state = "State"
+
+        width_hn = len(hdr_hn)
+        width_ip = len(hdr_ip)
+        width_pc = len(hdr_pc)
+        width_rr = len(hdr_rr)
+        width_rel = len(hdr_rel)
+        width_state = len(hdr_state)
+
+        for agent in sorted(agents, key=lambda a: a["hostname"]):
+            if len(agent["hostname"]) > width_hn:
+                width_hn = len(agent["hostname"])
+            if len(agent["ip"]) > width_ip:
+                width_ip = len(agent["ip"])
+            if len(agent["sw_version"]) > width_rel:
+                width_rel = len(agent["sw_version"])
+            if len(agent["state"]) > width_state:
+                width_state = len(agent["state"])
+
+        print "{0:^{width_hn}} {1:^{width_ip}} {2:^{width_pc}} {3:^{width_rr}} {4:^{width_rel}} {5:^{width_state}}".format(
+            hdr_hn, hdr_ip, hdr_pc, hdr_rr, hdr_rel, hdr_state,
+            width_hn=width_hn, width_ip=width_ip, width_pc=width_pc, width_rr=width_rr, width_rel=width_rel, width_state=width_state)
+
+        print "{0} {1} {2} {3} {4} {5}".format(
+            '=' * width_hn, '=' * width_ip, '=' * width_pc, '=' * width_rr, '=' * width_rel, '=' * width_state)
+
+        for agent in sorted(agents, key=lambda a: a["hostname"]):
+            patch_current_field = "Yes" if agent["patch_current"] else "No"
+            if agent.get("interim_state") is True:
+                patch_current_field = "Pending"
+
+            if agent["patch_failed"]:
+                patch_current_field = "Failed"
+
+            print "{0:<{width_hn}} {1:<{width_ip}} {2:^{width_pc}} {3:^{width_rr}} {4:^{width_rel}} {5:^{width_state}}".format(
+                agent["hostname"],
+                agent["ip"],
+                patch_current_field,
+                "Yes" if agent["requires_reboot"] else "No",
+                agent["sw_version"],
+                agent["state"],
+                width_hn=width_hn, width_ip=width_ip, width_pc=width_pc, width_rr=width_rr, width_rel=width_rel, width_state=width_state)
+
+    elif req.status_code == 500:
+        print "An internal error has occurred. 
Please check /var/log/patching.log for details" + + +def patch_query_hosts_req(debug, args): + if len(args) > 0: + # Support 0 arg at most, currently + print_help() + + url = "http://%s/patch/query_hosts" % api_addr + + req = requests.get(url) + + if debug: + print_result_debug(req) + else: + print_query_hosts_result(req) + + return check_rc(req) + + +def patch_show_req(debug, args): + if len(args) == 0: + print_help() + + patches = "/".join(args) + + url = "http://%s/patch/show/%s" % (api_addr, patches) + + headers = {} + append_auth_token_if_required(headers) + req = requests.post(url, headers=headers) + + if debug: + print_result_debug(req) + else: + print_patch_show_result(req) + + return check_rc(req) + + +def what_requires(debug, args): + if len(args) == 0: + print_help() + + patches = "/".join(args) + + url = "http://%s/patch/what_requires/%s" % (api_addr, patches) + + headers = {} + append_auth_token_if_required(headers) + req = requests.get(url, headers=headers) + + if debug: + print_result_debug(req) + else: + print_patch_op_result(req) + + return check_rc(req) + + +def query_dependencies(debug, args): + if len(args) == 0: + print_help() + + extra_opts = "" + if constants.CLI_OPT_RECURSIVE in args: + args.remove(constants.CLI_OPT_RECURSIVE) + extra_opts = "?recursive=yes" + + patches = "/".join(args) + + url = "http://%s/patch/query_dependencies/%s%s" % (api_addr, patches, extra_opts) + + headers = {} + append_auth_token_if_required(headers) + req = requests.get(url, headers=headers) + + if debug: + print_result_debug(req) + else: + if req.status_code == 200: + data = json.loads(req.text) + + if 'patches' in data: + for patch_id in sorted(data['patches']): + print patch_id + + elif req.status_code == 500: + print "An internal error has occurred. Please check /var/log/patching.log for details" + + return check_rc(req) + + +def wait_for_install_complete(agent_ip): + url = "http://%s/patch/query_hosts" % api_addr + rc = 0 + + max_retries = 4 + retriable_count = 0 + + while True: + # Sleep on the first pass as well, to allow time for the + # agent to respond + time.sleep(5) + + try: + req = requests.get(url) + except requests.exceptions.ConnectionError: + # The local patch-controller may have restarted. + retriable_count += 1 + if retriable_count <= max_retries: + continue + else: + print "Lost communications with the patch controller" + rc = 1 + break + + if req.status_code == 200: + data = json.loads(req.text) + if 'data' not in data: + print "Invalid query-hosts data returned:" + print_result_debug(req) + rc = 1 + break + + state = None + agents = data['data'] + interim_state = None + for agent in agents: + if agent['hostname'] == agent_ip \ + or agent['ip'] == agent_ip: + state = agent.get('state') + interim_state = agent.get('interim_state') + + if state is None: + # If the patching daemons have restarted, there's a + # window after the patch-controller restart that the + # hosts table will be empty. + retriable_count += 1 + if retriable_count <= max_retries: + continue + else: + print "%s agent has timed out." % agent_ip + rc = 1 + break + + if state == constants.PATCH_AGENT_STATE_INSTALLING or \ + interim_state == True: + # Still installing + sys.stdout.write(".") + sys.stdout.flush() + elif state == constants.PATCH_AGENT_STATE_INSTALL_REJECTED: + print "\nInstallation rejected. Node must be locked" + rc = 1 + break + elif state == constants.PATCH_AGENT_STATE_INSTALL_FAILED: + print "\nInstallation failed. Please check logs for details." 
+ rc = 1 + break + elif state == constants.PATCH_AGENT_STATE_IDLE: + print "\nInstallation was successful." + rc = 0 + break + else: + print "\nPatch agent is reporting unknown state: %s" % state + rc = 1 + break + + elif req.status_code == 500: + print "An internal error has occurred. Please check /var/log/patching.log for details" + rc = 1 + break + else: + m = re.search("(Error message:.*)", req.text, re.MULTILINE) + print m.group(0) + rc = 1 + break + + return rc + + +def host_install(debug, args): + force = False + rc = 0 + + if "--force" in args: + force = True + args.remove("--force") + + if len(args) != 1: + print_help() + + agent_ip = args[0] + + # Issue host_install_async request and poll for results + url = "http://%s/patch/host_install_async/%s" % (api_addr, agent_ip) + + if force: + url += "/force" + + req = requests.post(url) + + if req.status_code == 200: + data = json.loads(req.text) + if 'error' in data and data["error"] != "": + print "Error:" + print data["error"] + rc = 1 + else: + rc = wait_for_install_complete(agent_ip) + elif req.status_code == 500: + print "An internal error has occurred. Please check /var/log/patching.log for details" + rc = 1 + else: + m = re.search("(Error message:.*)", req.text, re.MULTILINE) + print m.group(0) + rc = 1 + + return rc + + +def host_install_async(debug, args): + force = False + + if "--force" in args: + force = True + args.remove("--force") + + if len(args) != 1: + print_help() + + agent_ip = args[0] + + url = "http://%s/patch/host_install_async/%s" % (api_addr, agent_ip) + + if force: + url += "/force" + + req = requests.post(url) + + if debug: + print_result_debug(req) + else: + print_patch_op_result(req) + + return check_rc(req) + + +def drop_host(debug, args): + force = False + + if len(args) != 1: + print_help() + + host_ip = args[0] + + url = "http://%s/patch/drop_host/%s" % (api_addr, host_ip) + + req = requests.post(url) + + if debug: + print_result_debug(req) + else: + print_patch_op_result(req) + + return check_rc(req) + + +def patch_upload_dir_req(debug, args): + if len(args) == 0: + print_help() + + # Ignore interrupts during this function + signal.signal(signal.SIGINT, signal.SIG_IGN) + + dirlist = {} + i = 0 + for d in sorted(list(set(args))): + dirlist["dir%d" % i] = os.path.abspath(d) + i += 1 + + url = "http://%s/patch/upload_dir" % api_addr + + headers = {} + append_auth_token_if_required(headers) + req = requests.post(url, params=dirlist, headers=headers) + + if debug: + print_result_debug(req) + else: + print_patch_op_result(req) + + return check_rc(req) + + +def patch_install_local(debug, args): + """ This function is used to trigger patch installation prior to configuration """ + # First, check to see if the controller hostname is already known. + if utils.gethostbyname(constants.CONTROLLER_FLOATING_HOSTNAME): + # If this is successful, disallow the install + print >>sys.stderr, "Error: This function can only be used before initial system configuration." + return 1 + + # Ignore interrupts during this function + signal.signal(signal.SIGINT, signal.SIG_IGN) + + # To allow patch installation to occur before configuration, we need + # to alias controller to localhost so that the smartpm channels work. + # There is a HOSTALIASES feature that would be preferred here, but it + # unfortunately requires dnsmasq to be running, which it is not at this point. 
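+    # The alias is appended to a backed-up /etc/hosts below; the saved
+    # copy is restored once the install attempt finishes, whether or
+    # not it succeeded.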
+ + rc = 0 + + # Make a backup of /etc/hosts + shutil.copy2('/etc/hosts', '/etc/hosts.patchbak') + + # Update /etc/hosts + with open('/etc/hosts', 'a') as f: + f.write("127.0.0.1 controller\n") + + # Run the patch install + try: + # Use the restart option of the sw-patch init script, which will + # install patches but won't automatically reboot if the RR flag is set + subprocess.check_output(['/etc/init.d/sw-patch', 'restart']) + except subprocess.CalledProcessError: + print >>sys.stderr, "Error: Failed to install patches. Please check /var/log/patching.log for details" + rc = 1 + + # Restore /etc/hosts + os.rename('/etc/hosts.patchbak', '/etc/hosts') + + if rc == 0: + print "Patch installation is complete." + print "Please reboot before continuing with configuration." + + return rc + + +def patch_init_release(debug, args): + if len(args) != 1: + print_help() + + # Ignore interrupts during this function + signal.signal(signal.SIGINT, signal.SIG_IGN) + + release = args[0] + + url = "http://%s/patch/init_release/%s" % (api_addr, release) + + req = requests.post(url) + + if debug: + print_result_debug(req) + else: + print_patch_op_result(req) + + return check_rc(req) + + +def patch_del_release(debug, args): + if len(args) != 1: + print_help() + + # Ignore interrupts during this function + signal.signal(signal.SIGINT, signal.SIG_IGN) + + release = args[0] + + url = "http://%s/patch/del_release/%s" % (api_addr, release) + + req = requests.post(url) + + if debug: + print_result_debug(req) + else: + print_patch_op_result(req) + + return check_rc(req) + + +def completion_opts(args): + if len(args) != 1: + return 1 + + if args[0] == "patches": + url = "http://%s/patch/query" % api_addr + req = requests.get(url) + # Just list patch IDs + if req.status_code == 200: + data = json.loads(req.text) + + if 'pd' in data: + print " ".join(data['pd'].keys()) + return 0 + + elif args[0] == "hosts": + url = "http://%s/patch/query_hosts" % api_addr + req = requests.get(url) + + # Just list hostnames + if req.status_code == 200: + data = json.loads(req.text) + + if 'data' in data: + for agent in data['data']: + print agent["hostname"] + return 0 + + return 1 + + +def check_env(env, var): + if env not in os.environ: + print "You must provide a %s via env[%s]" % (var, env) + exit(-1) + + +def get_auth_token_and_endpoint(region_name): + from keystoneauth1 import identity + from keystoneauth1 import session + from keystoneauth1 import exceptions + + user_env_map = {'OS_USERNAME': 'username', + 'OS_PASSWORD': 'password', + 'OS_PROJECT_NAME': 'project_name', + 'OS_AUTH_URL': 'auth_url', + 'OS_USER_DOMAIN_NAME': 'user_domain_name', + 'OS_PROJECT_DOMAIN_NAME': 'project_domain_name'} + + for k, v in user_env_map.items(): + check_env(k, v) + + user = dict() + for k, v in user_env_map.items(): + user[v] = os.environ.get(k) + + auth = identity.V3Password(**user) + sess = session.Session(auth=auth) + try: + token = auth.get_token(sess) + endpoint = auth.get_endpoint(sess, service_type='patching', + interface='internal', + region_name=region_name) + except (exceptions.http.Unauthorized, exceptions.EndpointNotFound) as e: + print e.message + exit(-1) + + return token, endpoint + + +def append_auth_token_if_required(headers): + global auth_token + if auth_token is not None: + headers['X-Auth-Token'] = auth_token + + +def format_url_address(address): + import netaddr + try: + ip_addr = netaddr.IPAddress(address) + if ip_addr.version == IPV6_FAMILY: + return "[%s]" % address + else: + return address + except 
netaddr.AddrFormatError:
+        return address
+
+
+def check_for_os_region_name():
+    region_option = "--os-region-name"
+    if region_option not in sys.argv:
+        return False
+
+    for c, value in enumerate(sys.argv, 1):
+        if value == region_option:
+            if c == len(sys.argv):
+                print "Please specify a region name"
+                print_help()
+
+            # enumerate starts at 1, so sys.argv[c] is the argument
+            # following the --os-region-name option
+            region = sys.argv[c]
+            global VIRTUAL_REGION
+            if region != VIRTUAL_REGION:
+                print "Unsupported region name: %s" % region
+                exit(1)
+
+            # Check that we are running on the active controller.
+            # We cannot use sm-query here, since it requires sudo.
+            try:
+                subprocess.check_output("pgrep -f dcorch-api-proxy", shell=True)
+            except subprocess.CalledProcessError:
+                print "Command must be run from the active controller."
+                exit(1)
+
+            # get a token and fetch the internal endpoint in SystemController
+            global auth_token
+            auth_token, endpoint = get_auth_token_and_endpoint(region)
+            if endpoint is not None:
+                global api_addr
+                from urlparse import urlparse
+                url = urlparse(endpoint)
+                address = format_url_address(url.hostname)
+                api_addr = '{}:{}'.format(address, url.port)
+
+    sys.argv.remove("--os-region-name")
+    sys.argv.remove(region)
+    return True
+
+
+def main():
+    set_term_width()
+
+    if len(sys.argv) <= 1:
+        print_help()
+
+    debug = False
+    if "--debug" in sys.argv:
+        debug = True
+        sys.argv.remove("--debug")
+
+    dc_request = check_for_os_region_name()
+
+    rc = 0
+
+    action = sys.argv[1]
+
+    # Reject the commands that are not supported in the virtual region
+    if (dc_request and action in ["query-hosts", "host-install",
+                                  "host-install-async",
+                                  "install-local", "drop-host"]):
+        global VIRTUAL_REGION
+        print "\n%s command is not allowed in %s region" % (action,
+                                                            VIRTUAL_REGION)
+        exit(1)
+
+    if auth_token is None and os.geteuid() != 0:
+        # Restrict non-root/sudo users to these commands
+        if action == "query":
+            rc = patch_query_req(debug, sys.argv[2:])
+        elif action == "query-hosts":
+            rc = patch_query_hosts_req(debug, sys.argv[2:])
+        elif action == "what-requires":
+            rc = what_requires(debug, sys.argv[2:])
+        elif action == "completion":
+            rc = completion_opts(sys.argv[2:])
+        elif action == "--help" or action == "-h":
+            print_help()
+        else:
+            print >>sys.stderr, "Error: Command must be run as sudo or root"
+            rc = 1
+    else:
+        if action == "upload":
+            rc = patch_upload_req(debug, sys.argv[2:])
+        elif action == "apply":
+            rc = patch_apply_req(debug, sys.argv[2:])
+        elif action == "remove":
+            rc = patch_remove_req(debug, sys.argv[2:])
+        elif action == "delete":
+            rc = patch_delete_req(debug, sys.argv[2:])
+        elif action == "commit":
+            rc = patch_commit_req(debug, sys.argv[2:])
+        elif action == "query":
+            rc = patch_query_req(debug, sys.argv[2:])
+        elif action == "query-hosts":
+            rc = patch_query_hosts_req(debug, sys.argv[2:])
+        elif action == "show":
+            rc = patch_show_req(debug, sys.argv[2:])
+        elif action == "what-requires":
+            rc = what_requires(debug, sys.argv[2:])
+        elif action == "query-dependencies":
+            rc = query_dependencies(debug, sys.argv[2:])
+        elif action == "host-install":
+            rc = host_install(debug, sys.argv[2:])
+        elif action == "host-install-async":
+            rc = host_install_async(debug, sys.argv[2:])
+        elif action == "drop-host":
+            rc = drop_host(debug, sys.argv[2:])
+        elif action == "upload-dir":
+            rc = patch_upload_dir_req(debug, sys.argv[2:])
+        elif action == "install-local":
+            rc = patch_install_local(debug, sys.argv[2:])
+        elif action == "init-release":
+            rc = patch_init_release(debug, sys.argv[2:])
+        elif action == "del-release":
+            rc = patch_del_release(debug, sys.argv[2:])
+        elif 
action == "completion": + rc = completion_opts(sys.argv[2:]) + else: + print_help() + + exit(rc) diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/patch_controller.py b/cgcs-patch/cgcs-patch/cgcs_patch/patch_controller.py new file mode 100644 index 00000000..66775c02 --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/patch_controller.py @@ -0,0 +1,2468 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" +import shutil +import threading +import time +import socket +import json +import select +import subprocess +import ConfigParser +import rpm +import os + +from rpmUtils.miscutils import stringToVersion + +from wsgiref import simple_server +from cgcs_patch.api import app +from cgcs_patch.authapi import app as auth_app +from cgcs_patch.patch_functions import \ + configure_logging, BasePackageData, \ + avail_dir, applied_dir, committed_dir, \ + PatchFile, parse_rpm_filename, \ + package_dir, repo_dir, SW_VERSION, root_package_dir +from cgcs_patch.exceptions import MetadataFail, RpmFail, PatchFail, PatchValidationFailure, PatchMismatchFailure +from cgcs_patch.patch_functions import LOG +from cgcs_patch.patch_functions import audit_log_info +from cgcs_patch.patch_functions import patch_dir, repo_root_dir +from cgcs_patch.patch_functions import PatchData +from cgcs_patch.base import PatchService + +import cgcs_patch.config as cfg +import cgcs_patch.utils as utils +# noinspection PyUnresolvedReferences +from oslo_config import cfg as oslo_cfg + +import cgcs_patch.messages as messages +import cgcs_patch.constants as constants + +CONF = oslo_cfg.CONF + +pidfile_path = "/var/run/patch_controller.pid" + +pc = None +state_file = "/opt/patching/.controller.state" + +insvc_patch_restart_controller = "/run/patching/.restart.patch-controller" + +stale_hosts = [] +pending_queries = [] + +thread_death = None +keep_running = True + +# Limit socket blocking to 5 seconds to allow for thread to shutdown +api_socket_timeout = 5.0 + + +class ControllerNeighbour(object): + def __init__(self): + self.last_ack = 0 + self.synced = False + + def rx_ack(self): + self.last_ack = time.time() + + def get_age(self): + return int(time.time() - self.last_ack) + + def rx_synced(self): + self.synced = True + + def clear_synced(self): + self.synced = False + + def get_synced(self): + return self.synced + + +class AgentNeighbour(object): + def __init__(self, ip): + self.ip = ip + self.last_ack = 0 + self.last_query_id = 0 + self.out_of_date = False + self.hostname = "n/a" + self.requires_reboot = False + self.patch_failed = False + self.stale = False + self.pending_query = False + self.installed = {} + self.to_remove = [] + self.missing_pkgs = [] + self.nodetype = None + self.sw_version = "unknown" + self.subfunctions = [] + self.state = None + + def rx_ack(self, + hostname, + out_of_date, + requires_reboot, + query_id, + patch_failed, + sw_version, + state): + self.last_ack = time.time() + self.hostname = hostname + self.patch_failed = patch_failed + self.sw_version = sw_version + self.state = state + + if out_of_date != self.out_of_date or requires_reboot != self.requires_reboot: + self.out_of_date = out_of_date + self.requires_reboot = requires_reboot + LOG.info("Agent %s (%s) reporting out_of_date=%s, requires_reboot=%s" % ( + self.hostname, + self.ip, + self.out_of_date, + self.requires_reboot)) + + if self.last_query_id != query_id: + self.last_query_id = query_id + self.stale = True + if self.ip not in stale_hosts and self.ip not in pending_queries: + 
stale_hosts.append(self.ip) + + def get_age(self): + return int(time.time() - self.last_ack) + + def handle_query_detailed_resp(self, + installed, + to_remove, + missing_pkgs, + nodetype, + sw_version, + subfunctions, + state): + self.installed = installed + self.to_remove = to_remove + self.missing_pkgs = missing_pkgs + self.nodetype = nodetype + self.stale = False + self.pending_query = False + self.sw_version = sw_version + self.subfunctions = subfunctions + self.state = state + + if self.ip in pending_queries: + pending_queries.remove(self.ip) + + if self.ip in stale_hosts: + stale_hosts.remove(self.ip) + + def get_dict(self): + d = {"ip": self.ip, + "hostname": self.hostname, + "patch_current": not self.out_of_date, + "secs_since_ack": self.get_age(), + "patch_failed": self.patch_failed, + "stale_details": self.stale, + "installed": self.installed, + "to_remove": self.to_remove, + "missing_pkgs": self.missing_pkgs, + "nodetype": self.nodetype, + "subfunctions": self.subfunctions, + "sw_version": self.sw_version, + "state": self.state} + + global pc + if self.out_of_date and not pc.allow_insvc_patching: + d["requires_reboot"] = True + else: + d["requires_reboot"] = self.requires_reboot + + # Included for future enhancement, to allow per-node determination + # of in-service patching + d["allow_insvc_patching"] = pc.allow_insvc_patching + + return d + + +class PatchMessageHello(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_HELLO) + self.patch_op_counter = 0 + + def decode(self, data): + messages.PatchMessage.decode(self, data) + if 'patch_op_counter' in data: + self.patch_op_counter = data['patch_op_counter'] + + def encode(self): + global pc + messages.PatchMessage.encode(self) + self.message['patch_op_counter'] = pc.patch_op_counter + + def handle(self, sock, addr): + global pc + host = addr[0] + if host == cfg.get_mgmt_ip(): + # Ignore messages from self + return + + # Send response + if self.patch_op_counter > 0: + pc.handle_nbr_patch_op_counter(host, self.patch_op_counter) + + resp = PatchMessageHelloAck() + resp.send(sock) + + def send(self, sock): + self.encode() + message = json.dumps(self.message) + sock.sendto(message, (cfg.controller_mcast_group, cfg.controller_port)) + + +class PatchMessageHelloAck(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_HELLO_ACK) + + def encode(self): + # Nothing to add, so just call the super class + messages.PatchMessage.encode(self) + + def handle(self, sock, addr): + global pc + + pc.controller_neighbours_lock.acquire() + if not addr[0] in pc.controller_neighbours: + pc.controller_neighbours[addr[0]] = ControllerNeighbour() + + pc.controller_neighbours[addr[0]].rx_ack() + pc.controller_neighbours_lock.release() + + def send(self, sock): + self.encode() + message = json.dumps(self.message) + sock.sendto(message, (cfg.controller_mcast_group, cfg.controller_port)) + + +class PatchMessageSyncReq(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_SYNC_REQ) + + def encode(self): + # Nothing to add to the SYNC_REQ, so just call the super class + messages.PatchMessage.encode(self) + + def handle(self, sock, addr): + global pc + host = addr[0] + if host == cfg.get_mgmt_ip(): + # Ignore messages from self + return + + # We may need to do this in a separate thread, so that we continue to process hellos + LOG.info("Handling sync req") + + pc.sync_from_nbr(host) + + resp = 
PatchMessageSyncComplete() + resp.send(sock) + + def send(self, sock): + LOG.info("sending sync req") + self.encode() + message = json.dumps(self.message) + sock.sendto(message, (cfg.controller_mcast_group, cfg.controller_port)) + + +class PatchMessageSyncComplete(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_SYNC_COMPLETE) + + def encode(self): + # Nothing to add to the SYNC_COMPLETE, so just call the super class + messages.PatchMessage.encode(self) + + def handle(self, sock, addr): + global pc + LOG.info("Handling sync complete") + + pc.controller_neighbours_lock.acquire() + if not addr[0] in pc.controller_neighbours: + pc.controller_neighbours[addr[0]] = ControllerNeighbour() + + pc.controller_neighbours[addr[0]].rx_synced() + pc.controller_neighbours_lock.release() + + def send(self, sock): + LOG.info("sending sync complete") + self.encode() + message = json.dumps(self.message) + sock.sendto(message, (cfg.controller_mcast_group, cfg.controller_port)) + + +class PatchMessageHelloAgent(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_HELLO_AGENT) + + def encode(self): + global pc + messages.PatchMessage.encode(self) + self.message['patch_op_counter'] = pc.patch_op_counter + + def handle(self, sock, addr): + LOG.error("Should not get here") + + def send(self, sock): + self.encode() + message = json.dumps(self.message) + local_hostname = utils.ip_to_versioned_localhost(cfg.agent_mcast_group) + sock.sendto(message, (cfg.agent_mcast_group, cfg.agent_port)) + sock.sendto(message, (local_hostname, cfg.agent_port)) + + +class PatchMessageHelloAgentAck(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_HELLO_AGENT_ACK) + self.query_id = 0 + self.agent_out_of_date = False + self.agent_hostname = "n/a" + self.agent_requires_reboot = False + self.agent_patch_failed = False + self.agent_sw_version = "unknown" + self.agent_state = "unknown" + + def decode(self, data): + messages.PatchMessage.decode(self, data) + if 'query_id' in data: + self.query_id = data['query_id'] + if 'out_of_date' in data: + self.agent_out_of_date = data['out_of_date'] + if 'hostname' in data: + self.agent_hostname = data['hostname'] + if 'requires_reboot' in data: + self.agent_requires_reboot = data['requires_reboot'] + if 'patch_failed' in data: + self.agent_patch_failed = data['patch_failed'] + if 'sw_version' in data: + self.agent_sw_version = data['sw_version'] + if 'state' in data: + self.agent_state = data['state'] + + def encode(self): + # Nothing to add, so just call the super class + messages.PatchMessage.encode(self) + + def handle(self, sock, addr): + global pc + + pc.hosts_lock.acquire() + if not addr[0] in pc.hosts: + pc.hosts[addr[0]] = AgentNeighbour(addr[0]) + + pc.hosts[addr[0]].rx_ack(self.agent_hostname, + self.agent_out_of_date, + self.agent_requires_reboot, + self.query_id, + self.agent_patch_failed, + self.agent_sw_version, + self.agent_state) + pc.hosts_lock.release() + + def send(self, sock): + LOG.error("Should not get here") + + +class PatchMessageQueryDetailed(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_QUERY_DETAILED) + + def encode(self): + # Nothing to add to the message, so just call the super class + messages.PatchMessage.encode(self) + + def handle(self, sock, addr): + LOG.error("Should not get here") + + def send(self, sock): + self.encode() + message = 
json.dumps(self.message) + sock.sendall(message) + + +class PatchMessageQueryDetailedResp(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_QUERY_DETAILED_RESP) + self.agent_sw_version = "unknown" + self.installed = {} + self.to_install = {} + self.to_remove = [] + self.missing_pkgs = [] + self.subfunctions = [] + self.nodetype = "unknown" + self.agent_sw_version = "unknown" + self.agent_state = "unknown" + + def decode(self, data): + messages.PatchMessage.decode(self, data) + if 'installed' in data: + self.installed = data['installed'] + if 'to_remove' in data: + self.to_remove = data['to_remove'] + if 'missing_pkgs' in data: + self.missing_pkgs = data['missing_pkgs'] + if 'nodetype' in data: + self.nodetype = data['nodetype'] + if 'sw_version' in data: + self.agent_sw_version = data['sw_version'] + if 'subfunctions' in data: + self.subfunctions = data['subfunctions'] + if 'state' in data: + self.agent_state = data['state'] + + def encode(self): + LOG.error("Should not get here") + + def handle(self, sock, addr): + global pc + + ip = addr[0] + pc.hosts_lock.acquire() + if ip in pc.hosts: + pc.hosts[ip].handle_query_detailed_resp(self.installed, + self.to_remove, + self.missing_pkgs, + self.nodetype, + self.agent_sw_version, + self.subfunctions, + self.agent_state) + for patch_id in pc.interim_state.keys(): + if ip in pc.interim_state[patch_id]: + pc.interim_state[patch_id].remove(ip) + if len(pc.interim_state[patch_id]) == 0: + del pc.interim_state[patch_id] + pc.hosts_lock.release() + pc.check_patch_states() + else: + pc.hosts_lock.release() + + def send(self, sock): + LOG.error("Should not get here") + + +class PatchMessageAgentInstallReq(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_AGENT_INSTALL_REQ) + self.ip = None + self.force = False + + def encode(self): + global pc + messages.PatchMessage.encode(self) + self.message['force'] = self.force + + def handle(self, sock, addr): + LOG.error("Should not get here") + + def send(self, sock): + LOG.info("sending install request to node: %s" % self.ip) + self.encode() + message = json.dumps(self.message) + sock.sendto(message, (self.ip, cfg.agent_port)) + + +class PatchMessageAgentInstallResp(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_AGENT_INSTALL_RESP) + self.status = False + self.reject_reason = None + + def decode(self, data): + messages.PatchMessage.decode(self, data) + if 'status' in data: + self.status = data['status'] + if 'reject_reason' in data: + self.reject_reason = data['reject_reason'] + + def encode(self): + # Nothing to add, so just call the super class + messages.PatchMessage.encode(self) + + def handle(self, sock, addr): + LOG.info("Handling install resp from %s" % addr[0]) + global pc + # LOG.info("Handling hello ack") + + pc.hosts_lock.acquire() + if not addr[0] in pc.hosts: + pc.hosts[addr[0]] = AgentNeighbour(addr[0]) + + pc.hosts[addr[0]].install_status = self.status + pc.hosts[addr[0]].install_pending = False + pc.hosts[addr[0]].install_reject_reason = self.reject_reason + pc.hosts_lock.release() + + def send(self, sock): + LOG.error("Should not get here") + + +class PatchMessageDropHostReq(messages.PatchMessage): + def __init__(self): + messages.PatchMessage.__init__(self, messages.PATCHMSG_DROP_HOST_REQ) + self.ip = None + + def encode(self): + messages.PatchMessage.encode(self) + self.message['ip'] = self.ip + + def decode(self, data): + 
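
# Illustrative sketch, not part of the original patch: the interim_state
# handling in handle() above is a dict-of-lists prune. Given a map of
# patch_id to the hosts still expected to re-query, remove one host
# everywhere and drop any entry that becomes empty (hypothetical data,
# same shape as pc.interim_state).
interim_state = {"PATCH_0001": ["192.168.204.3", "192.168.204.4"],
                 "PATCH_0002": ["192.168.204.4"]}

def prune_host(state, ip):
    for patch_id in list(state.keys()):  # copy keys; entries may be deleted
        if ip in state[patch_id]:
            state[patch_id].remove(ip)
        if len(state[patch_id]) == 0:
            del state[patch_id]

prune_host(interim_state, "192.168.204.4")
# interim_state is now {"PATCH_0001": ["192.168.204.3"]}
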
messages.PatchMessage.decode(self, data)
+        if 'ip' in data:
+            self.ip = data['ip']
+
+    def handle(self, sock, addr):
+        global pc
+        host = addr[0]
+        if host == cfg.get_mgmt_ip():
+            # Ignore messages from self
+            return
+
+        if self.ip is None:
+            LOG.error("Received PATCHMSG_DROP_HOST_REQ with no ip: %s" % json.dumps(self.data))
+            return
+
+        pc.drop_host(self.ip, sync_nbr=False)
+        return
+
+    def send(self, sock):
+        self.encode()
+        message = json.dumps(self.message)
+        sock.sendto(message, (cfg.controller_mcast_group, cfg.controller_port))
+
+
+class PatchController(PatchService):
+    def __init__(self):
+        PatchService.__init__(self)
+
+        # Locks
+        self.socket_lock = threading.RLock()
+        self.controller_neighbours_lock = threading.RLock()
+        self.hosts_lock = threading.RLock()
+        self.patch_data_lock = threading.RLock()
+
+        self.hosts = {}
+        self.controller_neighbours = {}
+
+        # interim_state is used to track hosts that have not responded
+        # with fresh queries since a patch was applied or removed, on
+        # a per-patch basis. This allows the patch controller to move
+        # patches immediately into a "Partial" state until all nodes
+        # have responded.
+        #
+        self.interim_state = {}
+
+        self.sock_out = None
+        self.sock_in = None
+        self.patch_op_counter = 1
+        self.patch_data = PatchData()
+        self.patch_data.load_all()
+        self.check_patch_states()
+        self.base_pkgdata = BasePackageData()
+
+        self.allow_insvc_patching = True
+
+        if os.path.isfile(state_file):
+            self.read_state_file()
+        else:
+            self.write_state_file()
+
+    def update_config(self):
+        cfg.read_config()
+
+        if self.port != cfg.controller_port:
+            self.port = cfg.controller_port
+
+        if self.mcast_addr != cfg.controller_mcast_group:
+            self.mcast_addr = cfg.controller_mcast_group
+
+    def socket_lock_acquire(self):
+        self.socket_lock.acquire()
+
+    def socket_lock_release(self):
+        try:
+            self.socket_lock.release()
+        except:
+            pass
+
+    def write_state_file(self):
+        config = ConfigParser.ConfigParser()
+
+        cfgfile = open(state_file, 'w')
+
+        config.add_section('runtime')
+        config.set('runtime', 'patch_op_counter', self.patch_op_counter)
+        config.write(cfgfile)
+        cfgfile.close()
+
+    def read_state_file(self):
+        config = ConfigParser.ConfigParser()
+
+        config.read(state_file)
+
+        try:
+            counter = config.getint('runtime', 'patch_op_counter')
+            self.patch_op_counter = counter
+
+            LOG.info("patch_op_counter is: %d" % self.patch_op_counter)
+        except ConfigParser.Error:
+            LOG.exception("Failed to read state info")
+
+    def handle_nbr_patch_op_counter(self, host, nbr_patch_op_counter):
+        if self.patch_op_counter >= nbr_patch_op_counter:
+            return
+
+        self.sync_from_nbr(host)
+
+    def sync_from_nbr(self, host):
+        # Sync the patching repo
+        host_url = utils.ip_to_url(host)
+        try:
+            output = subprocess.check_output(["rsync",
+                                              "-acv",
+                                              "--delete",
+                                              "--exclude", "tmp",
+                                              "rsync://%s/patching/" % host_url,
+                                              "%s/" % patch_dir],
+                                             stderr=subprocess.STDOUT)
+            LOG.info("Synced to mate patching via rsync: %s" % output)
+        except subprocess.CalledProcessError as e:
+            LOG.error("Failed to rsync: %s" % e.output)
+            return False
+
+        try:
+            output = subprocess.check_output(["rsync",
+                                              "-acv",
+                                              "--delete",
+                                              "rsync://%s/repo/" % host_url,
+                                              "%s/" % repo_root_dir],
+                                             stderr=subprocess.STDOUT)
+            LOG.info("Synced to mate repo via rsync: %s" % output)
+        except subprocess.CalledProcessError as e:
+            LOG.error("Failed to rsync: %s" % e.output)
+            return False
+
+        self.read_state_file()
+
+        self.patch_data_lock.acquire()
+        self.hosts_lock.acquire()
+        self.interim_state = {}
+
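
# Illustrative sketch, not part of the original patch: sync_from_nbr above
# relies on check_output() raising CalledProcessError, whose .output carries
# the combined stdout/stderr because stderr=subprocess.STDOUT was passed.
# The same pattern in isolation; the command is an assumed example.
import subprocess

def run_logged(cmd):
    try:
        out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        return (True, out)
    except subprocess.CalledProcessError as e:
        # e.output holds whatever the failed command printed
        return (False, e.output)

ok, output = run_logged(["rsync", "--version"])  # assumed example command
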
self.patch_data.load_all() + self.check_patch_states() + self.hosts_lock.release() + self.patch_data_lock.release() + + return True + + def inc_patch_op_counter(self): + self.patch_op_counter += 1 + self.write_state_file() + + def check_patch_states(self): + # If we have no hosts, we can't be sure of the current patch state + if len(self.hosts) == 0: + for patch_id in self.patch_data.metadata: + self.patch_data.metadata[patch_id]["patchstate"] = constants.UNKNOWN + return + + # Default to allowing in-service patching + self.allow_insvc_patching = True + + # Take the detailed query results from the hosts and merge with the patch data + + self.hosts_lock.acquire() + + # Initialize patch state data based on repo state and interim_state presence + for patch_id in self.patch_data.metadata: + if patch_id in self.interim_state: + if self.patch_data.metadata[patch_id]["repostate"] == constants.AVAILABLE: + self.patch_data.metadata[patch_id]["patchstate"] = constants.PARTIAL_REMOVE + elif self.patch_data.metadata[patch_id]["repostate"] == constants.APPLIED: + self.patch_data.metadata[patch_id]["patchstate"] = constants.PARTIAL_APPLY + else: + self.patch_data.metadata[patch_id]["patchstate"] = \ + self.patch_data.metadata[patch_id]["repostate"] + + any_out_of_date = False + for ip in self.hosts.keys(): + if not self.hosts[ip].out_of_date: + continue + + any_out_of_date = True + + for pkg in self.hosts[ip].installed.keys(): + for patch_id in self.patch_data.content_versions.keys(): + if pkg not in self.patch_data.content_versions[patch_id]: + continue + + if patch_id not in self.patch_data.metadata: + LOG.error("Patch data missing for %s" % patch_id) + continue + + # If the patch is on a different release than the host, skip it. + if self.patch_data.metadata[patch_id]["sw_version"] != self.hosts[ip].sw_version: + continue + + # Is the installed pkg higher or lower version? + # The rpm.labelCompare takes version broken into 3 components + installed_ver = self.hosts[ip].installed[pkg].split('@')[0] + if ":" in installed_ver: + # Ignore epoch + installed_ver = installed_ver.split(':')[1] + + patch_ver = self.patch_data.content_versions[patch_id][pkg] + if ":" in patch_ver: + # Ignore epoch + patch_ver = patch_ver.split(':')[1] + + rc = rpm.labelCompare(stringToVersion(installed_ver), + stringToVersion(patch_ver)) + + if self.patch_data.metadata[patch_id]["repostate"] == constants.AVAILABLE: + # The RPM is not expected to be installed. + # If the installed version is the same or higher, + # this patch is in a Partial-Remove state + if rc >= 0 or patch_id in self.interim_state: + self.patch_data.metadata[patch_id]["patchstate"] = constants.PARTIAL_REMOVE + if self.patch_data.metadata[patch_id].get("reboot_required") != "N": + self.allow_insvc_patching = False + continue + elif self.patch_data.metadata[patch_id]["repostate"] == constants.APPLIED: + # The RPM is expected to be installed. 
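
# Illustrative sketch, not part of the original patch: the version test above
# reduces to rpm.labelCompare() over (epoch, version, release) tuples, which
# returns -1, 0, or 1. stringToVersion is the same helper the controller code
# calls (provided by yum's rpmUtils.miscutils on these hosts).
import rpm
from rpmUtils.miscutils import stringToVersion

installed_ver = "2.1-r5"   # hypothetical installed package version
patch_ver = "2.2-r1"       # hypothetical version carried by the patch
rc = rpm.labelCompare(stringToVersion(installed_ver),
                      stringToVersion(patch_ver))
# rc == -1 here: the installed version is lower, the Partial-Apply case above
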
+ # If the installed version is the lower, + # this patch is in a Partial-Apply state + if rc == -1 or patch_id in self.interim_state: + self.patch_data.metadata[patch_id]["patchstate"] = constants.PARTIAL_APPLY + if self.patch_data.metadata[patch_id].get("reboot_required") != "N": + self.allow_insvc_patching = False + continue + + if self.hosts[ip].sw_version == "14.10": + # For Release 1 + personality = "personality-%s" % self.hosts[ip].nodetype + else: + personality = "personality-%s" % "-".join(self.hosts[ip].subfunctions) + + # Check the to_remove list + for pkg in self.hosts[ip].to_remove: + for patch_id in self.patch_data.content_versions.keys(): + if pkg not in self.patch_data.content_versions[patch_id]: + continue + + if patch_id not in self.patch_data.metadata: + LOG.error("Patch data missing for %s" % patch_id) + continue + + if personality not in self.patch_data.metadata[patch_id]: + continue + + if pkg not in self.patch_data.metadata[patch_id][personality]: + continue + + if self.patch_data.metadata[patch_id]["repostate"] == constants.AVAILABLE: + # The RPM is not expected to be installed. + # This patch is in a Partial-Remove state + self.patch_data.metadata[patch_id]["patchstate"] = constants.PARTIAL_REMOVE + if self.patch_data.metadata[patch_id].get("reboot_required") != "N": + self.allow_insvc_patching = False + continue + + # Check the missing_pkgs list + for pkg in self.hosts[ip].missing_pkgs: + for patch_id in self.patch_data.content_versions.keys(): + if pkg not in self.patch_data.content_versions[patch_id]: + continue + + if patch_id not in self.patch_data.metadata: + LOG.error("Patch data missing for %s" % patch_id) + continue + + if personality not in self.patch_data.metadata[patch_id]: + continue + + if pkg not in self.patch_data.metadata[patch_id][personality]: + continue + + if self.patch_data.metadata[patch_id]["repostate"] == constants.APPLIED: + # The RPM is expected to be installed. 
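
# Illustrative sketch, not part of the original patch: the patch API methods
# in this file (import, apply, remove, and delete below) report per-patch
# problems by returning dict(info=..., warning=..., error=...) rather than
# raising. A hypothetical caller renders such a result like this:
def report(result):
    for line in result["info"].splitlines():
        print(line)
    for line in result["warning"].splitlines():
        print("WARNING: " + line)
    for line in result["error"].splitlines():
        print("ERROR: " + line)

report({"info": "PATCH_0001 is now available\n", "warning": "", "error": ""})
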
+ # This patch is in a Partial-Apply state + self.patch_data.metadata[patch_id]["patchstate"] = constants.PARTIAL_APPLY + if self.patch_data.metadata[patch_id].get("reboot_required") != "N": + self.allow_insvc_patching = False + continue + + self.hosts_lock.release() + + def get_store_filename(self, patch_sw_version, rpmname): + rpm_dir = package_dir[patch_sw_version] + rpmfile = "%s/%s" % (rpm_dir, rpmname) + return rpmfile + + def get_repo_filename(self, patch_sw_version, rpmname): + rpmfile = self.get_store_filename(patch_sw_version, rpmname) + if not os.path.isfile(rpmfile): + msg = "Could not find rpm: %s" % rpmfile + LOG.error(msg) + return None + + repo_filename = None + + try: + # Get the architecture from the RPM + pkgarch = subprocess.check_output(["rpm", + "-qp", + "--queryformat", + "%{ARCH}", + "--nosignature", + rpmfile]) + + repo_filename = "%s/Packages/%s/%s" % (repo_dir[patch_sw_version], pkgarch, rpmname) + except subprocess.CalledProcessError: + msg = "RPM query failed for %s" % rpmfile + LOG.exception(msg) + return None + + return repo_filename + + def patch_import_api(self, patches): + """ + Import patches + :return: + """ + msg_info = "" + msg_warning = "" + msg_error = "" + + # Refresh data, if needed + self.base_pkgdata.loaddirs() + + # Protect against duplications + patch_list = sorted(list(set(patches))) + + # First, make sure the specified files exist + for patch in patch_list: + if not os.path.isfile(patch): + raise PatchFail("File does not exist: %s" % patch) + + try: + if not os.path.exists(avail_dir): + os.makedirs(avail_dir) + if not os.path.exists(applied_dir): + os.makedirs(applied_dir) + if not os.path.exists(committed_dir): + os.makedirs(committed_dir) + except os.error: + msg = "Failed to create directories" + LOG.exception(msg) + raise PatchFail(msg) + + msg = "Importing patches: %s" % ",".join(patch_list) + LOG.info(msg) + audit_log_info(msg) + + repo_changed = False + + for patch in patch_list: + msg = "Importing patch: %s" % patch + LOG.info(msg) + audit_log_info(msg) + + # Get the patch_id from the filename + # and check to see if it's already imported + (patch_id, ext) = os.path.splitext(os.path.basename(patch)) + if patch_id in self.patch_data.metadata: + if self.patch_data.metadata[patch_id]["repostate"] == constants.APPLIED: + mdir = applied_dir + elif self.patch_data.metadata[patch_id]["repostate"] == constants.COMMITTED: + msg = "%s is committed. Metadata not updated" % patch_id + LOG.info(msg) + msg_info += msg + "\n" + continue + else: + mdir = avail_dir + + try: + thispatch = PatchFile.extract_patch(patch, + metadata_dir=mdir, + metadata_only=True, + existing_content=self.patch_data.contents[patch_id], + allpatches=self.patch_data, + base_pkgdata=self.base_pkgdata) + self.patch_data.update_patch(thispatch) + msg = "%s is already imported. 
Updated metadata only" % patch_id
+                    LOG.info(msg)
+                    msg_info += msg + "\n"
+                except PatchMismatchFailure:
+                    msg = "Contents of %s do not match re-imported patch" % patch_id
+                    LOG.exception(msg)
+                    msg_error += msg + "\n"
+                    continue
+                except PatchValidationFailure as e:
+                    msg = "Patch validation failed for %s" % patch_id
+                    if e.message is not None and e.message != '':
+                        msg += ":\n%s" % e.message
+                    LOG.exception(msg)
+                    msg_error += msg + "\n"
+                    continue
+                except PatchFail:
+                    msg = "Failed to import patch %s" % patch_id
+                    LOG.exception(msg)
+                    msg_error += msg + "\n"
+
+                continue
+
+            if ext != ".patch":
+                msg = "File must end in .patch extension: %s" \
+                      % os.path.basename(patch)
+                LOG.exception(msg)
+                msg_error += msg + "\n"
+                continue
+
+            repo_changed = True
+
+            try:
+                thispatch = PatchFile.extract_patch(patch,
+                                                    metadata_dir=avail_dir,
+                                                    allpatches=self.patch_data,
+                                                    base_pkgdata=self.base_pkgdata)
+
+                msg_info += "%s is now available\n" % patch_id
+                self.patch_data.add_patch(patch_id, thispatch)
+
+                self.patch_data.metadata[patch_id]["repostate"] = constants.AVAILABLE
+                if len(self.hosts) > 0:
+                    self.patch_data.metadata[patch_id]["patchstate"] = constants.AVAILABLE
+                else:
+                    self.patch_data.metadata[patch_id]["patchstate"] = constants.UNKNOWN
+            except PatchValidationFailure as e:
+                msg = "Patch validation failed for %s" % patch_id
+                if e.message is not None and e.message != '':
+                    msg += ":\n%s" % e.message
+                LOG.exception(msg)
+                msg_error += msg + "\n"
+                continue
+            except PatchFail:
+                msg = "Failed to import patch %s" % patch_id
+                LOG.exception(msg)
+                msg_error += msg + "\n"
+                continue
+
+        return dict(info=msg_info, warning=msg_warning, error=msg_error)
+
+    def patch_apply_api(self, patch_ids):
+        """
+        Apply patches, moving patches from available to applied and updating repo
+        :return:
+        """
+        msg_info = ""
+        msg_warning = ""
+        msg_error = ""
+
+        # Protect against duplications
+        patch_list = sorted(list(set(patch_ids)))
+
+        msg = "Applying patches: %s" % ",".join(patch_list)
+        LOG.info(msg)
+        audit_log_info(msg)
+
+        if "--all" in patch_list:
+            # Set patch_ids to list of all available patches
+            # We're getting this list now, before we load the applied patches
+            patch_list = []
+            for patch_id in sorted(self.patch_data.metadata.keys()):
+                if self.patch_data.metadata[patch_id]["repostate"] == constants.AVAILABLE:
+                    patch_list.append(patch_id)
+
+            if len(patch_list) == 0:
+                msg_info += "There are no available patches to be applied.\n"
+                return dict(info=msg_info, warning=msg_warning, error=msg_error)
+
+        repo_changed = False
+
+        # First, verify that all specified patches exist
+        id_verification = True
+        for patch_id in patch_list:
+            if patch_id not in self.patch_data.metadata:
+                msg = "Patch %s does not exist" % patch_id
+                LOG.error(msg)
+                msg_error += msg + "\n"
+                id_verification = False
+
+        if not id_verification:
+            return dict(info=msg_info, warning=msg_warning, error=msg_error)
+
+        # Next, check the patch dependencies
+        # required_patches will map the required patch to the patches that need it
+        required_patches = {}
+        for patch_id in patch_list:
+            for req_patch in self.patch_data.metadata[patch_id]["requires"]:
+                # Ignore patches in the op set
+                if req_patch in patch_list:
+                    continue
+
+                if req_patch not in required_patches:
+                    required_patches[req_patch] = []
+
+                required_patches[req_patch].append(patch_id)
+
+        # Now verify the state of the required patches.
+        # Note: the loop variable is req_by, not patch_list, so the list of
+        # patches being applied is not clobbered before it is iterated again below.
+        req_verification = True
+        for req_patch, req_by in required_patches.iteritems():
+            if req_patch not in self.patch_data.metadata \
+                    or self.patch_data.metadata[req_patch]["repostate"] == constants.AVAILABLE:
+                msg = "%s is required by: %s" % (req_patch, ", ".join(sorted(req_by)))
+                msg_error += msg + "\n"
+                LOG.info(msg)
+                req_verification = False
+
+        if not req_verification:
+            return dict(info=msg_info, warning=msg_warning, error=msg_error)
+
+        # Start applying the patches
+        for patch_id in patch_list:
+            msg = "Applying patch: %s" % patch_id
+            LOG.info(msg)
+            audit_log_info(msg)
+
+            if self.patch_data.metadata[patch_id]["repostate"] == constants.APPLIED \
+                    or self.patch_data.metadata[patch_id]["repostate"] == constants.COMMITTED:
+                msg = "%s is already in the repo" % patch_id
+                LOG.info(msg)
+                msg_info += msg + "\n"
+                continue
+
+            # To allow for easy cleanup, we're going to first iterate
+            # through the rpm list to determine where to copy the file.
+            # As a second step, we'll go through the list and copy each file.
+            # If there are problems querying any RPMs, none will be copied.
+            rpmlist = {}
+            for rpmname in self.patch_data.contents[patch_id]:
+                patch_sw_version = self.patch_data.metadata[patch_id]["sw_version"]
+
+                rpmfile = self.get_store_filename(patch_sw_version, rpmname)
+                if not os.path.isfile(rpmfile):
+                    msg = "Could not find rpm: %s" % rpmfile
+                    LOG.error(msg)
+                    raise RpmFail(msg)
+
+                repo_filename = self.get_repo_filename(patch_sw_version, rpmname)
+                if repo_filename is None:
+                    msg = "Failed to determine repo path for %s" % rpmfile
+                    LOG.exception(msg)
+                    raise RpmFail(msg)
+
+                repo_pkg_dir = os.path.dirname(repo_filename)
+                if not os.path.exists(repo_pkg_dir):
+                    os.makedirs(repo_pkg_dir)
+                rpmlist[rpmfile] = repo_filename
+
+            # Copy the RPMs. If a failure occurs, clean up copied files.
+            copied = []
+            for rpmfile in rpmlist:
+                LOG.info("Copy %s to %s" % (rpmfile, rpmlist[rpmfile]))
+                try:
+                    shutil.copy(rpmfile, rpmlist[rpmfile])
+                    copied.append(rpmlist[rpmfile])
+                except IOError:
+                    msg = "Failed to copy %s" % rpmfile
+                    LOG.exception(msg)
+                    # Clean up files
+                    for filename in copied:
+                        LOG.info("Cleaning up %s" % filename)
+                        os.remove(filename)
+
+                    raise RpmFail(msg)
+
+            try:
+                # Move the metadata to the applied dir
+                shutil.move("%s/%s-metadata.xml" % (avail_dir, patch_id),
+                            "%s/%s-metadata.xml" % (applied_dir, patch_id))
+
+                msg_info += "%s is now in the repo\n" % patch_id
+            except shutil.Error:
+                msg = "Failed to move the metadata for %s" % patch_id
+                LOG.exception(msg)
+                raise MetadataFail(msg)
+
+            self.patch_data.metadata[patch_id]["repostate"] = constants.APPLIED
+            if len(self.hosts) > 0:
+                self.patch_data.metadata[patch_id]["patchstate"] = constants.PARTIAL_APPLY
+            else:
+                self.patch_data.metadata[patch_id]["patchstate"] = constants.UNKNOWN
+
+            self.hosts_lock.acquire()
+            self.interim_state[patch_id] = self.hosts.keys()
+            self.hosts_lock.release()
+
+            repo_changed = True
+
+        if repo_changed:
+            # Update the repo
+            self.patch_data.gen_groups_xml()
+            for ver, rdir in repo_dir.iteritems():
+                try:
+                    output = subprocess.check_output(["createrepo",
+                                                      "--update",
+                                                      "-g",
+                                                      "comps.xml",
+                                                      rdir],
+                                                     stderr=subprocess.STDOUT)
+                    LOG.info("Repo[%s] updated:\n%s" % (ver, output))
+                except subprocess.CalledProcessError:
+                    msg = "Failed to update the repo for %s" % ver
+                    LOG.exception(msg)
+                    raise PatchFail(msg)
+        else:
+            LOG.info("Repository is unchanged")
+
+        return dict(info=msg_info, warning=msg_warning, error=msg_error)
+
+    def patch_remove_api(self, patch_ids, **kwargs):
+        """
+        Remove patches, moving patches from applied to available and updating repo
+        :return:
+        """
+        msg_info = ""
+        msg_warning = 
"" + msg_error = "" + remove_unremovable = False + + repo_changed = False + + # Protect against duplications + patch_list = sorted(list(set(patch_ids))) + + msg = "Removing patches: %s" % ",".join(patch_list) + LOG.info(msg) + audit_log_info(msg) + + if kwargs.get("removeunremovable") == "yes": + remove_unremovable = True + + # First, verify that all specified patches exist + id_verification = True + for patch_id in patch_list: + if patch_id not in self.patch_data.metadata: + msg = "Patch %s does not exist" % patch_id + LOG.error(msg) + msg_error += msg + "\n" + id_verification = False + + if not id_verification: + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + # See if any of the patches are marked as unremovable + unremovable_verification = True + for patch_id in patch_list: + if self.patch_data.metadata[patch_id].get("unremovable") == "Y": + if remove_unremovable: + msg = "Unremovable patch %s being removed" % patch_id + LOG.warning(msg) + msg_warning += msg + "\n" + else: + msg = "Patch %s is not removable" % patch_id + LOG.error(msg) + msg_error += msg + "\n" + unremovable_verification = False + elif self.patch_data.metadata[patch_id]['repostate'] == constants.COMMITTED: + msg = "Patch %s is committed and cannot be removed" % patch_id + LOG.error(msg) + msg_error += msg + "\n" + unremovable_verification = False + + if not unremovable_verification: + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + # Next, see if any of the patches are required by applied patches + # required_patches will map the required patch to the patches that need it + required_patches = {} + for patch_iter in self.patch_data.metadata.keys(): + # Ignore patches in the op set + if patch_iter in patch_list: + continue + + # Only check applied patches + if self.patch_data.metadata[patch_iter]["repostate"] == constants.AVAILABLE: + continue + + for req_patch in self.patch_data.metadata[patch_iter]["requires"]: + if req_patch not in patch_list: + continue + + if req_patch not in required_patches: + required_patches[req_patch] = [] + + required_patches[req_patch].append(patch_iter) + + if len(required_patches) > 0: + for req_patch, patch_list in required_patches.iteritems(): + msg = "%s is required by: %s" % (req_patch, ", ".join(sorted(patch_list))) + msg_error += msg + "\n" + LOG.info(msg) + + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + for patch_id in patch_list: + msg = "Removing patch: %s" % patch_id + LOG.info(msg) + audit_log_info(msg) + + if self.patch_data.metadata[patch_id]["repostate"] == constants.AVAILABLE: + msg = "%s is not in the repo" % patch_id + LOG.info(msg) + msg_info += msg + "\n" + continue + + repo_changed = True + + for rpmname in self.patch_data.contents[patch_id]: + patch_sw_version = self.patch_data.metadata[patch_id]["sw_version"] + rpmfile = self.get_store_filename(patch_sw_version, rpmname) + if not os.path.isfile(rpmfile): + msg = "Could not find rpm: %s" % rpmfile + LOG.error(msg) + raise RpmFail(msg) + + repo_filename = self.get_repo_filename(patch_sw_version, rpmname) + if repo_filename is None: + msg = "Failed to determine repo path for %s" % rpmfile + LOG.exception(msg) + raise RpmFail(msg) + + try: + os.remove(repo_filename) + except OSError: + msg = "Failed to remove RPM" + LOG.exception(msg) + raise RpmFail(msg) + + try: + # Move the metadata to the available dir + shutil.move("%s/%s-metadata.xml" % (applied_dir, patch_id), + "%s/%s-metadata.xml" % (avail_dir, patch_id)) + msg_info += "%s has been removed from the 
repo\n" % patch_id + except shutil.Error: + msg = "Failed to move the metadata for %s" % patch_id + LOG.exception(msg) + raise MetadataFail(msg) + + self.patch_data.metadata[patch_id]["repostate"] = constants.AVAILABLE + if len(self.hosts) > 0: + self.patch_data.metadata[patch_id]["patchstate"] = constants.PARTIAL_REMOVE + else: + self.patch_data.metadata[patch_id]["patchstate"] = constants.UNKNOWN + + self.hosts_lock.acquire() + self.interim_state[patch_id] = self.hosts.keys() + self.hosts_lock.release() + + if repo_changed: + # Update the repo + self.patch_data.gen_groups_xml() + for ver, rdir in repo_dir.iteritems(): + try: + output = subprocess.check_output(["createrepo", + "--update", + "-g", + "comps.xml", + rdir], + stderr=subprocess.STDOUT) + LOG.info("Repo[%s] updated:\n%s" % (ver, output)) + except subprocess.CalledProcessError: + msg = "Failed to update the repo for %s" % ver + LOG.exception(msg) + raise PatchFail(msg) + else: + LOG.info("Repository is unchanged") + + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + def patch_delete_api(self, patch_ids): + """ + Delete patches + :return: + """ + msg_info = "" + msg_warning = "" + msg_error = "" + + # Protect against duplications + patch_list = sorted(list(set(patch_ids))) + + msg = "Deleting patches: %s" % ",".join(patch_list) + LOG.info(msg) + audit_log_info(msg) + + # Verify patches exist and are in proper state first + id_verification = True + for patch_id in patch_list: + if patch_id not in self.patch_data.metadata: + msg = "Patch %s does not exist" % patch_id + LOG.error(msg) + msg_error += msg + "\n" + id_verification = False + continue + + # Get the aggregated patch state, if possible + patchstate = constants.UNKNOWN + if patch_id in self.patch_data.metadata: + patchstate = self.patch_data.metadata[patch_id]["patchstate"] + + if self.patch_data.metadata[patch_id]["repostate"] != constants.AVAILABLE or \ + (patchstate != constants.AVAILABLE and patchstate != constants.UNKNOWN): + msg = "Patch %s not in Available state" % patch_id + LOG.error(msg) + msg_error += msg + "\n" + id_verification = False + continue + + if not id_verification: + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + # Handle operation + for patch_id in patch_list: + for rpmname in self.patch_data.contents[patch_id]: + patch_sw_version = self.patch_data.metadata[patch_id]["sw_version"] + rpmfile = self.get_store_filename(patch_sw_version, rpmname) + if not os.path.isfile(rpmfile): + # We're deleting the patch anyway, so the missing file + # doesn't really matter + continue + + try: + os.remove(rpmfile) + except OSError: + msg = "Failed to remove RPM %s" % rpmfile + LOG.exception(msg) + raise RpmFail(msg) + + try: + # Delete the metadata + os.remove("%s/%s-metadata.xml" % (avail_dir, patch_id)) + except OSError: + msg = "Failed to remove metadata for %s" % patch_id + LOG.exception(msg) + raise MetadataFail(msg) + + self.patch_data.delete_patch(patch_id) + msg = "%s has been deleted" % patch_id + LOG.info(msg) + msg_info += msg + "\n" + + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + def patch_init_release_api(self, release): + """ + Create an empty repo for a new release + :return: + """ + msg_info = "" + msg_warning = "" + msg_error = "" + + msg = "Initializing repo for: %s" % release + LOG.info(msg) + audit_log_info(msg) + + if release == SW_VERSION: + msg = "Rejected: Requested release %s is running release" % release + msg_error += msg + "\n" + LOG.info(msg) + return dict(info=msg_info, 
warning=msg_warning, error=msg_error) + + # Refresh data + self.base_pkgdata.loaddirs() + + self.patch_data.load_all_metadata(avail_dir, repostate=constants.AVAILABLE) + self.patch_data.load_all_metadata(applied_dir, repostate=constants.APPLIED) + self.patch_data.load_all_metadata(committed_dir, repostate=constants.COMMITTED) + + repo_dir[release] = "%s/rel-%s" % (repo_root_dir, release) + + # Verify the release doesn't already exist + if os.path.exists(repo_dir[release]): + msg = "Patch repository for %s already exists" % release + msg_info += msg + "\n" + LOG.info(msg) + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + # Generate the groups xml + self.patch_data.gen_release_groups_xml(release) + + # Create the repo + try: + output = subprocess.check_output(["createrepo", + "--update", + "-g", + "comps.xml", + repo_dir[release]], + stderr=subprocess.STDOUT) + LOG.info("Repo[%s] updated:\n%s" % (release, output)) + except subprocess.CalledProcessError: + msg = "Failed to update the repo for %s" % release + LOG.exception(msg) + + # Wipe out what was created + shutil.rmtree(repo_dir[release]) + del repo_dir[release] + + raise PatchFail(msg) + + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + def patch_del_release_api(self, release): + """ + Delete the repo and patches for second release + :return: + """ + msg_info = "" + msg_warning = "" + msg_error = "" + + msg = "Deleting repo and patches for: %s" % release + LOG.info(msg) + audit_log_info(msg) + + if release == SW_VERSION: + msg = "Rejected: Requested release %s is running release" % release + msg_error += msg + "\n" + LOG.info(msg) + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + # Delete patch XML files + for patch_id in self.patch_data.metadata.keys(): + if self.patch_data.metadata[patch_id]["sw_version"] != release: + continue + + if self.patch_data.metadata[patch_id]["repostate"] == constants.APPLIED: + mdir = applied_dir + elif self.patch_data.metadata[patch_id]["repostate"] == constants.COMMITTED: + mdir = committed_dir + else: + mdir = avail_dir + + try: + # Delete the metadata + os.remove("%s/%s-metadata.xml" % (mdir, patch_id)) + except OSError: + msg = "Failed to remove metadata for %s" % patch_id + LOG.exception(msg) + + # Refresh patch data + self.patch_data = PatchData() + self.patch_data.load_all_metadata(avail_dir, repostate=constants.AVAILABLE) + self.patch_data.load_all_metadata(applied_dir, repostate=constants.APPLIED) + self.patch_data.load_all_metadata(committed_dir, repostate=constants.COMMITTED) + + raise MetadataFail(msg) + + # Delete the packages dir + package_dir[release] = "%s/%s" % (root_package_dir, release) + if os.path.exists(package_dir[release]): + try: + shutil.rmtree(package_dir[release]) + except shutil.Error: + msg = "Failed to delete package dir for %s" % release + LOG.exception(msg) + + del package_dir[release] + + # Verify the release exists + repo_dir[release] = "%s/rel-%s" % (repo_root_dir, release) + if not os.path.exists(repo_dir[release]): + # Nothing to do + msg = "Patch repository for %s does not exist" % release + msg_info += msg + "\n" + LOG.info(msg) + del repo_dir[release] + + # Refresh patch data + self.patch_data = PatchData() + self.patch_data.load_all_metadata(avail_dir, repostate=constants.AVAILABLE) + self.patch_data.load_all_metadata(applied_dir, repostate=constants.APPLIED) + self.patch_data.load_all_metadata(committed_dir, repostate=constants.COMMITTED) + + return dict(info=msg_info, warning=msg_warning, 
error=msg_error) + + # Delete the repo + try: + shutil.rmtree(repo_dir[release]) + except shutil.Error: + msg = "Failed to delete repo for %s" % release + LOG.exception(msg) + + del repo_dir[release] + + if self.base_pkgdata is not None: + del self.base_pkgdata.pkgs[release] + + # Refresh patch data + self.patch_data = PatchData() + self.patch_data.load_all_metadata(avail_dir, repostate=constants.AVAILABLE) + self.patch_data.load_all_metadata(applied_dir, repostate=constants.APPLIED) + self.patch_data.load_all_metadata(committed_dir, repostate=constants.COMMITTED) + + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + def patch_query_what_requires(self, patch_ids): + """ + Query the known patches to see which have dependencies on the specified patches + :return: + """ + msg_info = "" + msg_warning = "" + msg_error = "" + + msg = "Querying what requires patches: %s" % ",".join(patch_ids) + LOG.info(msg) + audit_log_info(msg) + + # First, verify that all specified patches exist + id_verification = True + for patch_id in patch_ids: + if patch_id not in self.patch_data.metadata: + msg = "Patch %s does not exist" % patch_id + LOG.error(msg) + msg_error += msg + "\n" + id_verification = False + + if not id_verification: + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + required_patches = {} + for patch_iter in self.patch_data.metadata.keys(): + for req_patch in self.patch_data.metadata[patch_iter]["requires"]: + if req_patch not in patch_ids: + continue + + if req_patch not in required_patches: + required_patches[req_patch] = [] + + required_patches[req_patch].append(patch_iter) + + for patch_id in patch_ids: + if patch_id in required_patches: + patch_list = required_patches[patch_id] + msg_info += "%s is required by: %s\n" % (patch_id, ", ".join(sorted(patch_list))) + else: + msg_info += "%s is not required by any patches.\n" % patch_id + + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + def patch_sync(self): + # Increment the patch_op_counter here + self.inc_patch_op_counter() + + self.patch_data_lock.acquire() + #self.patch_data.load_all() + self.check_patch_states() + self.patch_data_lock.release() + + if self.sock_out is None: + return True + + # Send the sync requests + + self.controller_neighbours_lock.acquire() + for n in self.controller_neighbours: + self.controller_neighbours[n].clear_synced() + self.controller_neighbours_lock.release() + + msg = PatchMessageSyncReq() + self.socket_lock.acquire() + msg.send(self.sock_out) + self.socket_lock.release() + + # Now we wait, up to two mins... 
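
# Illustrative sketch, not part of the original patch: the sync wait below is
# a poll-until-deadline loop (the TODO notes a condition variable would be
# better). The shape of that loop, extracted into a small helper; the
# predicate passed in is a hypothetical stand-in.
import time

def wait_for(predicate, timeout, interval=0.5):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False

# e.g. wait_for(all_neighbours_synced, 120) with a suitable predicate
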
TODO: Wait on a condition + my_ip = cfg.get_mgmt_ip() + sync_rc = False + max_time = time.time() + 120 + while time.time() < max_time: + all_done = True + self.controller_neighbours_lock.acquire() + for n in self.controller_neighbours: + if n != my_ip and not self.controller_neighbours[n].get_synced(): + all_done = False + self.controller_neighbours_lock.release() + + if all_done: + LOG.info("Sync complete") + sync_rc = True + break + + time.sleep(0.5) + + # Send hellos to the hosts now, to get queries performed + hello_agent = PatchMessageHelloAgent() + self.socket_lock.acquire() + hello_agent.send(self.sock_out) + self.socket_lock.release() + + if not sync_rc: + LOG.info("Timed out waiting for sync completion") + return sync_rc + + def patch_query_cached(self, **kwargs): + query_state = None + if "show" in kwargs: + if kwargs["show"] == "available": + query_state = constants.AVAILABLE + elif kwargs["show"] == "applied": + query_state = constants.APPLIED + elif kwargs["show"] == "committed": + query_state = constants.COMMITTED + + query_release = None + if "release" in kwargs: + query_release = kwargs["release"] + + results = {} + self.patch_data_lock.acquire() + if query_state is None and query_release is None: + # Return everything + results = self.patch_data.metadata + else: + # Filter results + for patch_id, data in self.patch_data.metadata.iteritems(): + if query_state is not None and data["repostate"] != query_state: + continue + if query_release is not None and data["sw_version"] != query_release: + continue + results[patch_id] = data + self.patch_data_lock.release() + + return results + + def patch_query_specific_cached(self, patch_ids): + audit_log_info("Patch show") + + results = {"metadata": {}, + "contents": {}, + "error": ""} + + self.patch_data_lock.acquire() + + for patch_id in patch_ids: + if patch_id not in self.patch_data.metadata.keys(): + results["error"] += "%s is unrecognized\n" % patch_id + + for patch_id, data in self.patch_data.metadata.iteritems(): + if patch_id in patch_ids: + results["metadata"][patch_id] = data + for patch_id, data in self.patch_data.contents.iteritems(): + if patch_id in patch_ids: + results["contents"][patch_id] = data + + self.patch_data_lock.release() + + return results + + def get_dependencies(self, patch_ids, recursive): + dependencies = set() + patch_added = False + + self.patch_data_lock.acquire() + + # Add patches to workset + for patch_id in sorted(patch_ids): + dependencies.add(patch_id) + patch_added = True + + while patch_added: + patch_added = False + for patch_id in sorted(dependencies): + for req in self.patch_data.metadata[patch_id]["requires"]: + if req not in dependencies: + dependencies.add(req) + patch_added = recursive + + self.patch_data_lock.release() + + return sorted(dependencies) + + def patch_query_dependencies(self, patch_ids, **kwargs): + msg = "Patch query-dependencies %s" % patch_ids + LOG.info(msg) + audit_log_info(msg) + + failure = False + + results = {"patches": [], + "error": ""} + + recursive = False + if kwargs.get("recursive") == "yes": + recursive = True + + self.patch_data_lock.acquire() + + # Verify patch IDs + for patch_id in sorted(patch_ids): + if patch_id not in self.patch_data.metadata.keys(): + errormsg = "%s is unrecognized\n" % patch_id + LOG.info("patch_query_dependencies: %s" % errormsg) + results["error"] += errormsg + failure = True + self.patch_data_lock.release() + + if failure: + LOG.info("patch_query_dependencies failed") + return results + + results["patches"] = 
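
# Illustrative sketch, not part of the original patch: get_dependencies()
# above grows a working set until a pass adds nothing new, i.e. a transitive
# closure over the "requires" lists. The same algorithm over a hypothetical
# requires map:
requires = {"P3": ["P2"], "P2": ["P1"], "P1": []}

def closure(patch_ids, recursive=True):
    deps = set(patch_ids)
    added = True
    while added:
        added = False
        for patch_id in sorted(deps):
            for req in requires[patch_id]:
                if req not in deps:
                    deps.add(req)
                    added = recursive  # stop after one pass when not recursive
    return sorted(deps)

# closure(["P3"]) == ["P1", "P2", "P3"]; closure(["P3"], False) == ["P2", "P3"]
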
self.get_dependencies(patch_ids, recursive) + + return results + + def patch_commit(self, patch_ids, dry_run=False): + msg = "Patch commit %s" % patch_ids + LOG.info(msg) + audit_log_info(msg) + + try: + if not os.path.exists(committed_dir): + os.makedirs(committed_dir) + except os.error: + msg = "Failed to create %s" % committed_dir + LOG.exception(msg) + raise PatchFail(msg) + + release = None + all = False + patch_added = False + failure = False + recursive = True + + keep = {} + cleanup = {} + cleanup_files = set() + + results = {"info": "", + "error": ""} + + # Ensure there are only REL patches + non_rel_list = [] + self.patch_data_lock.acquire() + for patch_id in self.patch_data.metadata: + if self.patch_data.metadata[patch_id]['status'] != constants.STATUS_RELEASED: + non_rel_list.append(patch_id) + self.patch_data_lock.release() + + if len(non_rel_list) > 0: + errormsg = "A commit cannot be performed with non-REL status patches in the system:\n" + for patch_id in non_rel_list: + errormsg += " %s\n" % patch_id + LOG.info("patch_commit rejected: %s" % errormsg) + results["error"] += errormsg + return results + + # Verify patch IDs + self.patch_data_lock.acquire() + for patch_id in sorted(patch_ids): + if patch_id not in self.patch_data.metadata.keys(): + errormsg = "%s is unrecognized\n" % patch_id + LOG.info("patch_commit: %s" % errormsg) + results["error"] += errormsg + failure = True + self.patch_data_lock.release() + + if failure: + LOG.info("patch_commit: Failed patch ID check") + return results + + commit_list = self.get_dependencies(patch_ids, recursive) + + # Check patch states + avail_list = [] + self.patch_data_lock.acquire() + for patch_id in commit_list: + if self.patch_data.metadata[patch_id]['patchstate'] != constants.APPLIED \ + and self.patch_data.metadata[patch_id]['patchstate'] != constants.COMMITTED: + avail_list.append(patch_id) + self.patch_data_lock.release() + + if len(avail_list) > 0: + errormsg = "The following patches are not applied and cannot be committed:\n" + for patch_id in avail_list: + errormsg += " %s\n" % patch_id + LOG.info("patch_commit rejected: %s" % errormsg) + results["error"] += errormsg + return results + + # Get list of packages + self.patch_data_lock.acquire() + for patch_id in commit_list: + patch_sw_version = self.patch_data.metadata[patch_id]["sw_version"] + + if patch_sw_version not in keep: + keep[patch_sw_version] = {} + if patch_sw_version not in cleanup: + cleanup[patch_sw_version] = {} + + for rpmname in self.patch_data.contents[patch_id]: + try: + pkgname, arch, pkgver = parse_rpm_filename(rpmname) + except ValueError as e: + self.patch_data_lock.release() + raise e + + if pkgname not in keep[patch_sw_version]: + keep[patch_sw_version][pkgname] = { arch: pkgver } + continue + elif arch not in keep[patch_sw_version][pkgname]: + keep[patch_sw_version][pkgname][arch] = pkgver + continue + + # Compare versions + keep_pkgver = keep[patch_sw_version][pkgname][arch] + if pkgver > keep_pkgver: + if pkgname not in cleanup[patch_sw_version]: + cleanup[patch_sw_version][pkgname] = { arch: [ keep_pkgver ] } + elif arch not in cleanup[patch_sw_version][pkgname]: + cleanup[patch_sw_version][pkgname][arch] = [ keep_pkgver ] + else: + cleanup[patch_sw_version][pkgname][arch].append(keep_pkgver) + + # Find the rpmname + keep_rpmname = keep_pkgver.generate_rpm_filename(pkgname, arch) + + store_filename = self.get_store_filename(patch_sw_version, keep_rpmname) + if store_filename is not None and os.path.exists(store_filename): + 
cleanup_files.add(store_filename) + + repo_filename = self.get_repo_filename(patch_sw_version, keep_rpmname) + if repo_filename is not None and os.path.exists(repo_filename): + cleanup_files.add(repo_filename) + + # Keep the new pkgver + keep[patch_sw_version][pkgname][arch] = pkgver + else: + # Put this pkg in the cleanup list + if pkgname not in cleanup[patch_sw_version]: + cleanup[patch_sw_version][pkgname] = { arch: [ pkgver ] } + elif arch not in cleanup[patch_sw_version][pkgname]: + cleanup[patch_sw_version][pkgname][arch] = [ pkgver ] + else: + cleanup[patch_sw_version][pkgname][arch].append(pkgver) + + store_filename = self.get_store_filename(patch_sw_version, rpmname) + if store_filename is not None and os.path.exists(store_filename): + cleanup_files.add(store_filename) + + repo_filename = self.get_repo_filename(patch_sw_version, rpmname) + if repo_filename is not None and os.path.exists(repo_filename): + cleanup_files.add(repo_filename) + + self.patch_data_lock.release() + + # Calculate disk space + disk_space = 0 + for rpmfile in cleanup_files: + statinfo = os.stat(rpmfile) + disk_space += statinfo.st_size + + if dry_run: + results["info"] = "This commit operation would free %0.2f MiB" % (disk_space/(1024.0*1024.0)) + return results + + # Do the commit + + # Move the metadata to the committed dir + for patch_id in commit_list: + metadata_fname = "%s-metadata.xml" % patch_id + applied_fname = os.path.join(applied_dir, metadata_fname) + committed_fname = os.path.join(committed_dir, metadata_fname) + if os.path.exists(applied_fname): + try: + shutil.move(applied_fname, committed_fname) + except shutil.Error: + msg = "Failed to move the metadata for %s" % patch_id + LOG.exception(msg) + raise MetadataFail(msg) + + # Delete the files + for rpmfile in cleanup_files: + try: + os.remove(rpmfile) + except OSError: + msg = "Failed to remove: %s" % rpmfile + LOG.exception(msg) + raise MetadataFail(msg) + + # Update the repo + self.patch_data.gen_groups_xml() + for ver, rdir in repo_dir.iteritems(): + try: + output = subprocess.check_output(["createrepo", + "--update", + "-g", + "comps.xml", + rdir], + stderr=subprocess.STDOUT) + LOG.info("Repo[%s] updated:\n%s" % (ver, output)) + except subprocess.CalledProcessError: + msg = "Failed to update the repo for %s" % ver + LOG.exception(msg) + raise PatchFail(msg) + + self.patch_data.load_all() + + results["info"] = "The patches have been committed." + return results + + def query_host_cache(self): + output = [] + + self.hosts_lock.acquire() + for nbr in self.hosts.keys(): + host = self.hosts[nbr].get_dict() + host["interim_state"] = False + for patch_id in pc.interim_state.keys(): + if nbr in pc.interim_state[patch_id]: + host["interim_state"] = True + + output.append(host) + + self.hosts_lock.release() + + return output + + def any_patch_host_installing(self): + rc = False + + self.hosts_lock.acquire() + for ip, host in self.hosts.iteritems(): + if host.state == constants.PATCH_AGENT_STATE_INSTALLING: + rc = True + break + + self.hosts_lock.release() + + return rc + + def patch_host_install(self, host_ip, force, async=False): + msg_info = "" + msg_warning = "" + msg_error = "" + + ip = host_ip + + self.hosts_lock.acquire() + # If not in hosts table, maybe a hostname was used instead + if host_ip not in self.hosts: + try: + ip = utils.gethostbyname(host_ip) + if ip not in self.hosts: + # Translated successfully, but IP isn't in the table. 
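
# Illustrative sketch, not part of the original patch: the dry-run accounting
# above is a sum of os.stat() sizes over the files slated for deletion,
# reported in MiB. Standalone, over a hypothetical file list:
import os

def space_freed_mib(paths):
    total = 0
    for path in paths:
        total += os.stat(path).st_size
    return total / (1024.0 * 1024.0)

# print("This commit operation would free %0.2f MiB" % space_freed_mib(files))
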
+ # Raise an exception to drop out to the failure handling + raise + except: + self.hosts_lock.release() + msg = "Unknown host specified: %s" % host_ip + msg_error += msg + "\n" + LOG.error("Error in host-install: " + msg) + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + msg = "Running host-install for %s (%s), force=%s, async=%s" % (host_ip, ip, force, async) + LOG.info(msg) + audit_log_info(msg) + + if self.allow_insvc_patching: + LOG.info("Allowing in-service patching") + force = True + + self.hosts[ip].install_pending = True + self.hosts[ip].install_status = False + self.hosts[ip].install_reject_reason = None + self.hosts_lock.release() + + installreq = PatchMessageAgentInstallReq() + installreq.ip = ip + installreq.force = force + installreq.encode() + self.socket_lock.acquire() + installreq.send(self.sock_out) + self.socket_lock.release() + + if async: + # async install requested, so return now + msg = "Patch installation request sent to %s." % self.hosts[ip].hostname + msg_info += msg + "\n" + LOG.info("host-install async: " + msg) + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + # Now we wait, up to ten mins... TODO: Wait on a condition + resp_rx = False + max_time = time.time() + 600 + while time.time() < max_time: + self.hosts_lock.acquire() + if ip not in self.hosts: + # The host aged out while we were waiting + self.hosts_lock.release() + msg = "Agent expired while waiting: %s" % ip + msg_error += msg + "\n" + LOG.error("Error in host-install: " + msg) + break + + if not self.hosts[ip].install_pending: + # We got a response + resp_rx = True + if self.hosts[ip].install_status: + msg = "Patch installation was successful on %s." % self.hosts[ip].hostname + msg_info += msg + "\n" + LOG.info("host-install: " + msg) + elif self.hosts[ip].install_reject_reason: + msg = "Patch installation rejected by %s. %s" % ( + self.hosts[ip].hostname, + self.hosts[ip].install_reject_reason) + msg_error += msg + "\n" + LOG.error("Error in host-install: " + msg) + else: + msg = "Patch installation failed on %s." % self.hosts[ip].hostname + msg_error += msg + "\n" + LOG.error("Error in host-install: " + msg) + + self.hosts_lock.release() + break + + self.hosts_lock.release() + + time.sleep(0.5) + + if not resp_rx: + msg = "Timeout occurred while waiting response from %s." % ip + msg_error += msg + "\n" + LOG.error("Error in host-install: " + msg) + + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + def drop_host(self, host_ip, sync_nbr=True): + msg_info = "" + msg_warning = "" + msg_error = "" + + ip = host_ip + + self.hosts_lock.acquire() + # If not in hosts table, maybe a hostname was used instead + if host_ip not in self.hosts: + try: + # Because the host may be getting dropped due to deletion, + # we may be unable to do a hostname lookup. Instead, we'll + # iterate through the table here. + for host in self.hosts.keys(): + if host_ip == self.hosts[host].hostname: + ip = host + break + + if ip not in self.hosts: + # Translated successfully, but IP isn't in the table. 
+ # Raise an exception to drop out to the failure handling + raise + except: + self.hosts_lock.release() + msg = "Unknown host specified: %s" % host_ip + msg_error += msg + "\n" + LOG.error("Error in drop-host: " + msg) + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + msg = "Running drop-host for %s (%s)" % (host_ip, ip) + LOG.info(msg) + audit_log_info(msg) + + del self.hosts[ip] + for patch_id in self.interim_state.keys(): + if ip in self.interim_state[patch_id]: + self.interim_state[patch_id].remove(ip) + + self.hosts_lock.release() + + if sync_nbr: + sync_msg = PatchMessageDropHostReq() + sync_msg.ip = ip + self.socket_lock.acquire() + sync_msg.send(self.sock_out) + self.socket_lock.release() + + return dict(info=msg_info, warning=msg_warning, error=msg_error) + + +# The wsgiref.simple_server module has an error handler that catches +# and prints any exceptions that occur during the API handling to stderr. +# This means the patching sys.excepthook handler that logs uncaught +# exceptions is never called, and those exceptions are lost. +# +# To get around this, we're subclassing the simple_server.ServerHandler +# in order to replace the handle_error method with a custom one that +# logs the exception instead, and will set a global flag to shutdown +# the server and reset. +# +class MyServerHandler(simple_server.ServerHandler): + def handle_error(self): + LOG.exception('An uncaught exception has occurred:') + if not self.headers_sent: + self.result = self.error_output(self.environ, self.start_response) + self.finish_response() + global keep_running + keep_running = False + + +def get_handler_cls(): + cls = simple_server.WSGIRequestHandler + + # old-style class doesn't support super + class MyHandler(cls, object): + def address_string(self): + # In the future, we could provide a config option to allow reverse DNS lookup + return self.client_address[0] + + # Overload the handle function to use our own MyServerHandler + def handle(self): + """Handle a single HTTP request""" + + self.raw_requestline = self.rfile.readline() + if not self.parse_request(): # An error code has been sent, just exit + return + + handler = MyServerHandler( + self.rfile, self.wfile, self.get_stderr(), self.get_environ() + ) + handler.request_handler = self # backpointer for logging + handler.run(self.server.get_app()) + + return MyHandler + + +class PatchControllerApiThread(threading.Thread): + def __init__(self): + threading.Thread.__init__(self) + self.wsgi = None + + def run(self): + host = "127.0.0.1" + port = cfg.api_port + + try: + # In order to support IPv6, server_class.address_family must be + # set to the correct address family. Because the unauthenticated + # API always uses IPv4 for the loopback address, the address_family + # variable cannot be set directly in the WSGIServer class, so a + # local subclass needs to be created for the call to make_server, + # where the correct address_family can be specified. 
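
# Illustrative sketch, not part of the original patch: the address_family
# trick described in the comment above, reduced to a standalone toy server.
# The subclass exists only so the address family can be pinned before
# make_server() binds its socket; the host and port are assumed examples.
import socket
from wsgiref import simple_server

def demo_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return ['ok']

class V6Server(simple_server.WSGIServer):
    pass

V6Server.address_family = socket.AF_INET6
httpd = simple_server.make_server('::1', 8080, demo_app, server_class=V6Server)
# httpd.handle_request() would now serve one request over IPv6 loopback
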
+ class server_class(simple_server.WSGIServer): + pass + + server_class.address_family = socket.AF_INET + self.wsgi = simple_server.make_server( + host, port, + app.VersionSelectorApplication(), + server_class=server_class, + handler_class=get_handler_cls()) + + self.wsgi.socket.settimeout(api_socket_timeout) + global keep_running + while keep_running: + self.wsgi.handle_request() + except: + # Log all exceptions + LOG.exception("Error occurred during request processing") + + global thread_death + thread_death.set() + + def kill(self): + # Must run from other thread + if self.wsgi is not None: + self.wsgi.shutdown() + + +class PatchControllerAuthApiThread(threading.Thread): + def __init__(self): + threading.Thread.__init__(self) + # LOG.info ("Initializing Authenticated API thread") + self.wsgi = None + + def run(self): + host = CONF.auth_api_bind_ip + port = CONF.auth_api_port + if host is None: + host = utils.get_versioned_address_all() + try: + # Can only launch authenticated server post-config + while not os.path.exists('/etc/platform/.initial_config_complete'): + time.sleep(5) + + # In order to support IPv6, server_class.address_family must be + # set to the correct address family. Because the unauthenticated + # API always uses IPv4 for the loopback address, the address_family + # variable cannot be set directly in the WSGIServer class, so a + # local subclass needs to be created for the call to make_server, + # where the correct address_family can be specified. + class server_class(simple_server.WSGIServer): + pass + + server_class.address_family = utils.get_management_family() + self.wsgi = simple_server.make_server( + host, port, + auth_app.VersionSelectorApplication(), + server_class=server_class, + handler_class=get_handler_cls()) + + # self.wsgi.serve_forever() + self.wsgi.socket.settimeout(api_socket_timeout) + + global keep_running + while keep_running: + self.wsgi.handle_request() + except: + # Log all exceptions + LOG.exception("Authorized API failure: Error occurred during request processing") + + def kill(self): + # Must run from other thread + if self.wsgi is not None: + self.wsgi.shutdown() + + +class PatchControllerMainThread(threading.Thread): + def __init__(self): + threading.Thread.__init__(self) + # LOG.info ("Initializing Main thread") + + def run(self): + global pc + global thread_death + + # LOG.info ("In Main thread") + + try: + sock_in = pc.setup_socket() + + while sock_in is None: + # Check every thirty seconds? + # Once we've got a conf file, tied into packstack, + # we'll get restarted when the file is updated, + # and this should be unnecessary. + time.sleep(30) + sock_in = pc.setup_socket() + + # Ok, now we've got our socket. Let's start with a hello! + pc.socket_lock.acquire() + + hello = PatchMessageHello() + hello.send(pc.sock_out) + + hello_agent = PatchMessageHelloAgent() + hello_agent.send(pc.sock_out) + + pc.socket_lock.release() + + # Send hello every thirty seconds + hello_timeout = time.time() + 30.0 + remaining = 30 + + agent_query_conns = [] + + while True: + # Check to see if any other thread has died + if thread_death.is_set(): + LOG.info("Detected thread death. Terminating") + return + + # Check for in-service patch restart flag + if os.path.exists(insvc_patch_restart_controller): + LOG.info("In-service patch restart flag detected. 
Exiting.") + global keep_running + keep_running = False + os.remove(insvc_patch_restart_controller) + return + + inputs = [pc.sock_in] + agent_query_conns + outputs = [] + + # LOG.info("Running select, remaining=%d" % remaining) + rlist, wlist, xlist = select.select(inputs, outputs, inputs, remaining) + + if (len(rlist) == 0 and + len(wlist) == 0 and + len(xlist) == 0): + # Timeout hit + pc.audit_socket() + + # LOG.info("Checking sockets") + for s in rlist: + data = '' + addr = None + msg = None + + if s == pc.sock_in: + # Receive from UDP + pc.socket_lock.acquire() + data, addr = s.recvfrom(1024) + pc.socket_lock.release() + else: + # Receive from TCP + while True: + try: + packet = s.recv(1024) + except socket.error: + LOG.exception("Socket error on recv") + data = '' + break + + if packet: + data += packet + + if data == '': + break + try: + datachk = json.loads(data) + break + except ValueError: + # Message is incomplete + continue + else: + LOG.info('End of TCP message received') + break + + if data == '': + # Connection dropped + agent_query_conns.remove(s) + s.close() + continue + + # Get the TCP endpoint address + addr = s.getpeername() + + msgdata = json.loads(data) + + # For now, discard any messages that are not msgversion==1 + if 'msgversion' in msgdata and msgdata['msgversion'] != 1: + continue + + if 'msgtype' in msgdata: + if msgdata['msgtype'] == messages.PATCHMSG_HELLO: + msg = PatchMessageHello() + elif msgdata['msgtype'] == messages.PATCHMSG_HELLO_ACK: + msg = PatchMessageHelloAck() + elif msgdata['msgtype'] == messages.PATCHMSG_SYNC_REQ: + msg = PatchMessageSyncReq() + elif msgdata['msgtype'] == messages.PATCHMSG_SYNC_COMPLETE: + msg = PatchMessageSyncComplete() + elif msgdata['msgtype'] == messages.PATCHMSG_HELLO_AGENT_ACK: + msg = PatchMessageHelloAgentAck() + elif msgdata['msgtype'] == messages.PATCHMSG_QUERY_DETAILED_RESP: + msg = PatchMessageQueryDetailedResp() + elif msgdata['msgtype'] == messages.PATCHMSG_AGENT_INSTALL_RESP: + msg = PatchMessageAgentInstallResp() + elif msgdata['msgtype'] == messages.PATCHMSG_DROP_HOST_REQ: + msg = PatchMessageDropHostReq() + + if msg is None: + msg = messages.PatchMessage() + + msg.decode(msgdata) + if s == pc.sock_in: + msg.handle(pc.sock_out, addr) + else: + msg.handle(s, addr) + + # We can drop the connection after a query response + if msg.msgtype == messages.PATCHMSG_QUERY_DETAILED_RESP and s != pc.sock_in: + agent_query_conns.remove(s) + s.shutdown(socket.SHUT_RDWR) + s.close() + + while len(stale_hosts) > 0 and len(agent_query_conns) <= 5: + ip = stale_hosts.pop() + try: + agent_sock = socket.create_connection((ip, cfg.agent_port)) + query = PatchMessageQueryDetailed() + query.send(agent_sock) + agent_query_conns.append(agent_sock) + except: + # Put it back on the list + stale_hosts.append(ip) + + remaining = int(hello_timeout - time.time()) + if remaining <= 0 or remaining > 30: + hello_timeout = time.time() + 30.0 + remaining = 30 + + pc.socket_lock.acquire() + + hello = PatchMessageHello() + hello.send(pc.sock_out) + + hello_agent = PatchMessageHelloAgent() + hello_agent.send(pc.sock_out) + + pc.socket_lock.release() + + # Age out neighbours + pc.controller_neighbours_lock.acquire() + nbrs = pc.controller_neighbours.keys() + for n in nbrs: + # Age out controllers after 2 minutes + if pc.controller_neighbours[n].get_age() >= 120: + LOG.info("Aging out controller %s from table" % n) + del pc.controller_neighbours[n] + pc.controller_neighbours_lock.release() + + pc.hosts_lock.acquire() + nbrs = pc.hosts.keys() + for n in 
nbrs: + # Age out hosts after 1 hour + if pc.hosts[n].get_age() >= 3600: + LOG.info("Aging out host %s from table" % n) + del pc.hosts[n] + for patch_id in pc.interim_state.keys(): + if n in pc.interim_state[patch_id]: + pc.interim_state[patch_id].remove(n) + + pc.hosts_lock.release() + except: + # Log all exceptions + LOG.exception("Error occurred during request processing") + thread_death.set() + + +def main(): + configure_logging() + + cfg.read_config() + + # daemon.pidlockfile.write_pid_to_pidfile(pidfile_path) + + global thread_death + thread_death = threading.Event() + + # Set the TMPDIR environment variable to /scratch so that any modules + # that create directories with tempfile will not use /tmp + os.environ['TMPDIR'] = '/scratch' + + global pc + pc = PatchController() + + LOG.info("launching") + api_thread = PatchControllerApiThread() + auth_api_thread = PatchControllerAuthApiThread() + main_thread = PatchControllerMainThread() + + api_thread.start() + auth_api_thread.start() + main_thread.start() + + thread_death.wait() + global keep_running + keep_running = False + + api_thread.join() + auth_api_thread.join() + main_thread.join() diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/patch_functions.py b/cgcs-patch/cgcs-patch/cgcs_patch/patch_functions.py new file mode 100644 index 00000000..8308d582 --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/patch_functions.py @@ -0,0 +1,1205 @@ +""" +Copyright (c) 2014-2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +import getopt +import glob +import hashlib +import logging +import os +import platform +import re +import shutil +import subprocess +import sys +import tarfile +import tempfile +import xml.etree.ElementTree as ElementTree +from xml.dom import minidom + +from cgcs_patch.patch_verify import verify_files +from cgcs_patch.patch_signing import sign_files +from cgcs_patch.exceptions import MetadataFail, PatchFail, PatchValidationFailure, PatchMismatchFailure + +import cgcs_patch.constants as constants +import rpm + +try: + # The tsconfig module is only available at runtime + from tsconfig.tsconfig import SW_VERSION +except: + SW_VERSION = "unknown" + +# Constants +patch_dir = "/opt/patching" +avail_dir = "%s/metadata/available" % patch_dir +applied_dir = "%s/metadata/applied" % patch_dir +committed_dir = "%s/metadata/committed" % patch_dir + +repo_root_dir = "/www/pages/updates" +repo_dir = {SW_VERSION: "%s/rel-%s" % (repo_root_dir, SW_VERSION)} + +root_package_dir = "%s/packages" % patch_dir +package_dir = {SW_VERSION: "%s/%s" % (root_package_dir, SW_VERSION)} + +logfile = "/var/log/patching.log" +apilogfile = "/var/log/patching-api.log" + +LOG = logging.getLogger('main_logger') +auditLOG = logging.getLogger('audit_logger') +audit_log_msg_prefix = 'User: wrsroot/admin Action: ' + +detached_signature_file = "signature.v2" + + +def handle_exception(exc_type, exc_value, exc_traceback): + """ + Exception handler to log any uncaught exceptions + """ + LOG.error("Uncaught exception", + exc_info=(exc_type, exc_value, exc_traceback)) + sys.__excepthook__(exc_type, exc_value, exc_traceback) + + +def configure_logging(logtofile=True, level=logging.INFO): + if logtofile: + my_exec = os.path.basename(sys.argv[0]) + + log_format = '%(asctime)s: ' \ + + my_exec + '[%(process)s]: ' \ + + '%(filename)s(%(lineno)s): ' \ + + '%(levelname)s: %(message)s' + + formatter = logging.Formatter(log_format, datefmt="%FT%T") + + LOG.setLevel(level) + main_log_handler = logging.FileHandler(logfile) + 
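+        # With the format defined above and datefmt "%FT%T", a typical
+        # entry looks like (hypothetical values):
+        #   2018-05-30T16:15:53: sw-patch[1234]: patch_functions.py(42): INFO: message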
main_log_handler.setFormatter(formatter) + LOG.addHandler(main_log_handler) + + auditLOG.setLevel(level) + api_log_handler = logging.FileHandler(apilogfile) + api_log_handler.setFormatter(formatter) + auditLOG.addHandler(api_log_handler) + + # Log uncaught exceptions to file + sys.excepthook = handle_exception + else: + logging.basicConfig(level=level) + + +def audit_log_info(msg=''): + msg = audit_log_msg_prefix + msg + auditLOG.info(msg) + + +def get_md5(path): + """ + Utility function for generating the md5sum of a file + :param path: Path to file + """ + md5 = hashlib.md5() + block_size = 8192 + with open(path, 'rb') as f: + for chunk in iter(lambda: f.read(block_size), b''): + md5.update(chunk) + return int(md5.hexdigest(), 16) + + +def add_text_tag_to_xml(parent, + name, + text): + """ + Utility function for adding a text tag to an XML object + :param parent: Parent element + :param name: Element name + :param text: Text value + :return: The created element + """ + tag = ElementTree.SubElement(parent, name) + tag.text = text + return tag + + +def write_xml_file(top, + fname): + # Generate the file, in a readable format if possible + outfile = open(fname, 'w') + rough_xml = ElementTree.tostring(top, 'utf-8') + if platform.python_version() == "2.7.2": + # The 2.7.2 toprettyxml() function unnecessarily indents + # childless tags, adding whitespace. In the case of the + # yum comps.xml file, it makes the file unusable, so just + # write the rough xml + outfile.write(rough_xml) + else: + outfile.write(minidom.parseString(rough_xml).toprettyxml(indent="  ")) + + +def parse_rpm_filename(filename): + + # Drop the extension + (basename, ext) = os.path.splitext(os.path.basename(filename)) + + # RPM name format is: + # [<epoch>:]<pkgname>-<version>-<release>.<arch> + # + pattern = re.compile('((([^:]):)?)(.*)-([^-]+)-(.*)\.([^\.]*)$') + + m = pattern.match(basename) + + if m is None: + raise ValueError("Filename does not match expected RPM format: %s" % basename) + + epoch = m.group(3) + pkgname = m.group(4) + version = m.group(5) + release = m.group(6) + arch = m.group(7) + + return (pkgname, arch, PackageVersion(epoch, version, release)) + + +class PackageVersion(object): + """ + The PackageVersion class provides a structure for RPM version information, + along with support for comparison operators. + """ + def __init__(self, epoch, version, release): + self.epoch = epoch + self.version = version + self.release = release + + def __cmp__(self, other): + """ + This function is called by comparison operators to compare + two versions. The rpm.labelCompare() function takes two versions, + specified in a list structure, and returns -1, 0, or 1. + """ + return rpm.labelCompare((self.epoch, self.version, self.release), + (other.epoch, other.version, other.release)) + + def __str__(self): + """ + This function is called by str() and print to compute the + informal string representation of a PackageVersion object.
+ """ + prefix = "" + if self.epoch is not None and self.epoch != '': + # Prefix the version with epoch, if specified + prefix = "%s:" % self.epoch + + return "%s%s-%s" % (prefix, self.version, self.release) + + def __hash__(self): + return hash(self.__str__()) + + def generate_rpm_filename(self, pkgname, arch): + prefix = "" + if self.epoch is not None and self.epoch != '': + # Prefix the version with epoch, if specified + prefix = "%s:" % self.epoch + + return "%s%s-%s-%s.%s.rpm" % (prefix, pkgname, self.version, self.release, arch) + + +class BasePackageData: + """ + Information about the base package data provided by the load + """ + def __init__(self): + self.pkgs = {} + self.loaddirs() + + def loaddirs(self): + # Load up available package info + base_dir = "/www/pages/feed" + if not os.path.exists(base_dir): + # Return, since this could be running off-box + return + + # Look for release dirs + for reldir in glob.glob("%s/rel-*" % base_dir): + pattern = re.compile("%s/rel-(.*)" % base_dir) + m = pattern.match(reldir) + sw_rel = m.group(1) + + if sw_rel in self.pkgs: + # We've already parsed this dir once + continue + + self.pkgs[sw_rel] = {} + for root, dirs, files in os.walk("%s/Packages" % reldir): + for name in files: + if name.endswith(".rpm"): + try: + pkgname, arch, pkgver = parse_rpm_filename(name) + except ValueError as e: + raise e + + if pkgname not in self.pkgs[sw_rel]: + self.pkgs[sw_rel][pkgname] = {} + self.pkgs[sw_rel][pkgname][arch] = pkgver + + # Clean up deleted data + for sw_rel in self.pkgs: + if not os.path.exists("%s/rel-%s" % (base_dir, sw_rel)): + del self.pkgs[sw_rel] + + def check_release(self, sw_rel): + return (sw_rel in self.pkgs) + + def find_version(self, sw_rel, pkgname, arch): + if sw_rel not in self.pkgs or \ + pkgname not in self.pkgs[sw_rel] or \ + arch not in self.pkgs[sw_rel][pkgname]: + return None + + return self.pkgs[sw_rel][pkgname][arch] + + +class PatchData: + """ + Aggregated patch data + """ + def __init__(self): + # + # The groups dict provides information about targetted (new) packages, + # identifying the software group in which to include the package. + # This allows the patch agent to identify new packages to install + # (or erase) as appropriate. + # This dict is nested as follows: + # [ patch_sw_version ] - Release associated with the patch + # [ group/ptype ] - Group (personality) in which the pkg belongs + # [ patch_id ] + # [ package ] + # + self.groups = {} + + # + # The metadata dict stores all metadata associated with a patch. + # This dict is keyed on patch_id, with metadata for each patch stored + # in a nested dict. (See parse_metadata method for more info) + # + self.metadata = {} + + # + # The contents dict stores the lists of RPMs provided by each patch, + # indexed by patch_id. + # + self.contents = {} + + # + # The content_versions dict provides a simple list of packages and their + # versions for each patch, used by the patch controller in determining + # patch states. + # content_versions[patch_id][pkgname] = "%s-%s" % (pkgver.version, pkgver.release) + # + self.content_versions = {} + + # + # The package_versions dict provides a mapping of packages to the patch_id, + # including the package arch. 
+ # [ patch_sw_version ] + # [ pkgname ] + # [ arch ] + # [ pkgver ] + # -> patch_id + self.package_versions = {} + + def add_patch(self, patch_id, new_patch): + # We can just use "update" on these dicts because they are indexed by patch_id + self.metadata.update(new_patch.metadata) + self.contents.update(new_patch.contents) + self.content_versions.update(new_patch.content_versions) + + # Need to recursively update package_version and keys dicts + for patch_sw_version in new_patch.package_versions.keys(): + if patch_sw_version not in self.package_versions: + self.package_versions[patch_sw_version] = {} + for pkgname in new_patch.package_versions[patch_sw_version].keys(): + if pkgname not in self.package_versions[patch_sw_version]: + self.package_versions[patch_sw_version][pkgname] = {} + for arch in new_patch.package_versions[patch_sw_version][pkgname].keys(): + if arch not in self.package_versions[patch_sw_version][pkgname]: + self.package_versions[patch_sw_version][pkgname][arch] = {} + for pkgver in new_patch.package_versions[patch_sw_version][pkgname][arch].keys(): + self.package_versions[patch_sw_version][pkgname][arch][pkgver] = patch_id + + for patch_sw_version in new_patch.groups.keys(): + if patch_sw_version not in self.groups: + self.groups[patch_sw_version] = {} + for ptype in new_patch.groups[patch_sw_version].keys(): + if ptype not in self.groups[patch_sw_version]: + self.groups[patch_sw_version][ptype] = {} + for patch_id in new_patch.groups[patch_sw_version][ptype].keys(): + if patch_id not in self.groups[patch_sw_version][ptype]: + self.groups[patch_sw_version][ptype][patch_id] = {} + self.groups[patch_sw_version][ptype][patch_id].update( + new_patch.groups[patch_sw_version][ptype][patch_id]) + + def update_patch(self, updated_patch): + for patch_id in updated_patch.metadata.keys(): + # Update all fields except repostate + cur_repostate = self.metadata[patch_id]['repostate'] + self.metadata[patch_id].update(updated_patch.metadata[patch_id]) + self.metadata[patch_id]['repostate'] = cur_repostate + + def delete_patch(self, patch_id): + for patch_sw_version in self.package_versions.keys(): + for pkgname in self.package_versions[patch_sw_version].keys(): + for arch in self.package_versions[patch_sw_version][pkgname].keys(): + for pkgver in self.package_versions[patch_sw_version][pkgname][arch].keys(): + if self.package_versions[patch_sw_version][pkgname][arch][pkgver] == patch_id: + del self.package_versions[patch_sw_version][pkgname][arch][pkgver] + if len(self.package_versions[patch_sw_version][pkgname][arch]) == 0: + del self.package_versions[patch_sw_version][pkgname][arch] + if len(self.package_versions[patch_sw_version][pkgname]) == 0: + del self.package_versions[patch_sw_version][pkgname] + if len(self.package_versions[patch_sw_version]) == 0: + del self.package_versions[patch_sw_version] + + for patch_sw_version in self.groups.keys(): + for ptype in self.groups[patch_sw_version].keys(): + if patch_id in self.groups[patch_sw_version][ptype]: + del self.groups[patch_sw_version][ptype][patch_id] + + del self.content_versions[patch_id] + del self.contents[patch_id] + del self.metadata[patch_id] + + @staticmethod + def modify_metadata_text(filename, + key, + value): + """ + Open an XML file, find the first element matching 'key' and replace the text with 'value' + """ + new_filename = "%s.new" % filename + tree = ElementTree.parse(filename) + + # Prevent a proliferation of carriage returns when we write this XML back out to file.
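+        # (ElementTree keeps the source document's indentation as text/tail
+        # nodes; if we left them in place, toprettyxml() below would add its
+        # own indentation on top and each rewrite would gain blank lines.)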
+ for e in tree.getiterator(): + if e.text is not None: + e.text = e.text.rstrip() + if e.tail is not None: + e.tail = e.tail.rstrip() + + root = tree.getroot() + + # Make the substitution + e = root.find(key) + if e is None: + msg = "modify_metadata_text: failed to find tag '%s'" % key + LOG.error(msg) + raise PatchValidationFailure(msg) + e.text = value + + # write the modified file + outfile = open(new_filename, 'w') + rough_xml = ElementTree.tostring(root, 'utf-8') + if platform.python_version() == "2.7.2": + # The 2.7.2 toprettyxml() function unnecessarily indents + # childless tags, adding whitespace. In the case of the + # yum comps.xml file, it makes the file unusable, so just + # write the rough xml + outfile.write(rough_xml) + else: + outfile.write(minidom.parseString(rough_xml).toprettyxml(indent=" ")) + outfile.close() + os.rename(new_filename, filename) + + def parse_metadata(self, + filename, + repostate=None): + """ + Parse an individual patch metadata XML file + :param filename: XML file + :param repostate: Indicates Applied, Available, or Committed + :return: Patch ID + """ + tree = ElementTree.parse(filename) + root = tree.getroot() + + """ + + PATCH_0001 + Brief description + Longer description + + + Dev + + + + pkgA + pkgB + + + pkgB + + + """ + patch_id = root.findtext("id") + if patch_id is None: + LOG.error("Patch metadata contains no id tag") + return None + + self.metadata[patch_id] = {} + + self.metadata[patch_id]["repostate"] = repostate + + # Patch state is unknown at this point + self.metadata[patch_id]["patchstate"] = "n/a" + + self.metadata[patch_id]["sw_version"] = "unknown" + + for key in ["status", + "unremovable", + "sw_version", + "summary", + "description", + "install_instructions", + "warnings"]: + value = root.findtext(key) + if value is not None: + self.metadata[patch_id][key] = value + + # Default reboot_required to Y + rr_value = root.findtext("reboot_required") + if rr_value is None or rr_value != "N": + self.metadata[patch_id]["reboot_required"] = "Y" + else: + self.metadata[patch_id]["reboot_required"] = "N" + + patch_sw_version = self.metadata[patch_id]["sw_version"] + global package_dir + if patch_sw_version not in package_dir: + package_dir[patch_sw_version] = "%s/%s" % (root_package_dir, patch_sw_version) + repo_dir[patch_sw_version] = "%s/rel-%s" % (repo_root_dir, patch_sw_version) + + # Specifying personality for given packages is optional, + # intended to allow a patch to include a new package. + # For each configured personality type, create a software group. 
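+        # A (hypothetical) metadata fragment such as
+        #   <personality type="controller"><package>pkgA</package></personality>
+        # results in groups[sw_version]["controller"][patch_id]["pkgA"] = True below.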
+ for personality in root.findall("personality"): + ptype = personality.attrib["type"] + tag = "personality-%s" % ptype + self.metadata[patch_id][tag] = list() + for pkg in personality.findall("package"): + self.metadata[patch_id][tag].append(pkg.text) + if patch_sw_version not in self.groups: + self.groups[patch_sw_version] = {} + if ptype not in self.groups[patch_sw_version]: + self.groups[patch_sw_version][ptype] = {} + if patch_id not in self.groups[patch_sw_version][ptype]: + self.groups[patch_sw_version][ptype][patch_id] = {} + self.groups[patch_sw_version][ptype][patch_id][pkg.text] = True + + self.metadata[patch_id]["requires"] = [] + for req in root.findall("requires"): + for req_patch in req.findall("req_patch_id"): + self.metadata[patch_id]["requires"].append(req_patch.text) + + self.contents[patch_id] = list() + self.content_versions[patch_id] = {} + + for content in root.findall("contents"): + for rpmname in content.findall("rpm"): + try: + pkgname, arch, pkgver = parse_rpm_filename(rpmname.text) + except ValueError as e: + LOG.exception(e) + return None + + self.contents[patch_id].append(rpmname.text) + self.content_versions[patch_id][pkgname] = "%s-%s" % (pkgver.version, pkgver.release) + + if patch_sw_version not in self.package_versions: + self.package_versions[patch_sw_version] = {} + if pkgname not in self.package_versions[patch_sw_version]: + self.package_versions[patch_sw_version][pkgname] = {} + if arch not in self.package_versions[patch_sw_version][pkgname]: + self.package_versions[patch_sw_version][pkgname][arch] = {} + + self.package_versions[patch_sw_version][pkgname][arch][pkgver] = patch_id + + return patch_id + + def find_patch_with_pkgver(self, sw_ver, pkgname, arch, pkgver): + if sw_ver not in self.package_versions or \ + pkgname not in self.package_versions[sw_ver] or \ + arch not in self.package_versions[sw_ver][pkgname] or \ + pkgver not in self.package_versions[sw_ver][pkgname][arch]: + return None + + return self.package_versions[sw_ver][pkgname][arch][pkgver] + + def load_all_metadata(self, + loaddir=os.getcwd(), + repostate=None): + """ + Parse all metadata files in the specified dir + :return: + """ + for fname in glob.glob("%s/*.xml" % loaddir): + self.parse_metadata(fname, repostate) + + def load_all(self): + # Reset the data + self.__init__() + self.load_all_metadata(applied_dir, repostate=constants.APPLIED) + self.load_all_metadata(avail_dir, repostate=constants.AVAILABLE) + self.load_all_metadata(committed_dir, repostate=constants.COMMITTED) + + def gen_release_groups_xml(self, sw_version, dir=None): + """ + Generate the groups configuration file for the patching repo + """ + if dir is None: + dir = repo_dir[sw_version] + + if not os.path.exists(dir): + os.makedirs(dir) + + fname = "%s/comps.xml" % dir + top = ElementTree.Element('comps') + if sw_version in self.groups: + for groupname in sorted(self.groups[sw_version].keys()): + if self.groups[sw_version][groupname]: + group = ElementTree.SubElement(top, 'group') + + add_text_tag_to_xml(group, 'id', + "updates-%s" % groupname) + add_text_tag_to_xml(group, 'default', + "false") + add_text_tag_to_xml(group, 'uservisible', + "true") + add_text_tag_to_xml(group, 'display_order', + "1024") + add_text_tag_to_xml(group, 'name', + "updates-%s" % groupname) + add_text_tag_to_xml(group, 'description', + "Patches for %s" % groupname) + + package_element = ElementTree.SubElement(group, + 'packagelist') + + for patch_id in sorted(self.groups[sw_version][groupname]): + if self.metadata[patch_id]["repostate"] 
== constants.APPLIED \ + or self.metadata[patch_id]["repostate"] == constants.COMMITTED: + for pkg in sorted(self.groups[sw_version][groupname][patch_id]): + tag = ElementTree.SubElement(package_element, + 'packagereq', + type="mandatory") + tag.text = pkg + + write_xml_file(top, fname) + + def gen_groups_xml(self): + for ver, rdir in repo_dir.iteritems(): + self.gen_release_groups_xml(ver) + + def query_line(self, + patch_id, + index): + if index is None: + return None + + if index == "contents": + return self.contents[patch_id] + + if index not in self.metadata[patch_id]: + return None + + value = self.metadata[patch_id][index] + return value + + +class PatchMetadata: + """ + Creating metadata for a single patch + """ + def __init__(self): + self.id = None + self.sw_version = None + self.summary = None + self.description = None + self.install_instructions = None + self.warnings = None + self.status = None + self.unremovable = None + self.reboot_required = None + self.requires = [] + self.groups = {} + self.contents = {} + + def add_package(self, + groupname, + pkg): + """ + Add a package to a particular group + :param groupname: Yum software group, e.g. "controller" + :param pkg: Name of the package + :return: + """ + if groupname not in self.groups: + self.groups[groupname] = {} + + self.groups[groupname][pkg] = True + + def add_rpm(self, + fname): + """ + Add an RPM to the patch + :param fname: RPM filename + :return: + """ + rpmname = os.path.basename(fname) + self.contents[rpmname] = True + + def gen_xml(self, + fname="metadata.xml"): + """ + Generate patch metadata XML file + :param fname: Path to output file + :return: + """ + top = ElementTree.Element('patch') + + add_text_tag_to_xml(top, 'id', + self.id) + add_text_tag_to_xml(top, 'sw_version', + self.sw_version) + add_text_tag_to_xml(top, 'summary', + self.summary) + add_text_tag_to_xml(top, 'description', + self.description) + add_text_tag_to_xml(top, 'install_instructions', + self.install_instructions) + add_text_tag_to_xml(top, 'warnings', + self.warnings) + add_text_tag_to_xml(top, 'status', + self.status) + add_text_tag_to_xml(top, 'unremovable', + self.unremovable) + add_text_tag_to_xml(top, 'reboot_required', + self.reboot_required) + + for groupname in sorted(self.groups.keys()): + if self.groups[groupname]: + group = ElementTree.SubElement(top, + 'personality', + type=groupname) + + for pkg in sorted(self.groups[groupname]): + add_text_tag_to_xml(group, 'package', pkg) + + content = ElementTree.SubElement(top, 'contents') + for rpmname in sorted(self.contents.keys()): + add_text_tag_to_xml(content, 'rpm', rpmname) + + req = ElementTree.SubElement(top, 'requires') + for req_patch in sorted(self.requires): + add_text_tag_to_xml(req, 'req_patch_id', req_patch) + + write_xml_file(top, fname) + + +class PatchFile: + """ + Patch file + """ + def __init__(self): + self.meta = PatchMetadata() + self.rpmlist = {} + + def add_rpm(self, + fname, + personality=None): + """ + Add an RPM to the patch + :param fname: Path to RPM + :param personality: Optional: Node type to which + the package belongs. Can be a + string or a list of strings.
+ :return: + """ + # Add the RPM to the metadata + self.meta.add_rpm(fname) + + # Add the RPM to the patch + self.rpmlist[os.path.abspath(fname)] = True + + if personality is not None: + # Get the package name from the RPM itself, + # and add it to the appropriate group(s) + pkgname = subprocess.check_output(["rpm", + "-qp", + "--queryformat", + "%{NAME}", + "--nosignature", + fname]) + if isinstance(personality, list): + for p in personality: + self.meta.add_package(p, pkgname) + elif isinstance(personality, str): + self.meta.add_package(personality, pkgname) + + def gen_patch(self, + outdir=os.getcwd()): + """ + Generate the patch file, named PATCHID.patch + :param outdir: Output directory for the patch + :return: + """ + if self.meta.sw_version is None or self.meta.sw_version == '': + raise MetadataFail("The release version must be specified in the sw_version field") + + patchfile = "%s/%s.patch" % (outdir, self.meta.id) + + # Create a temporary working directory + tmpdir = tempfile.mkdtemp(prefix="patch_") + + # Save the current directory, so we can chdir back after + orig_wd = os.getcwd() + + # Change to the tmpdir + os.chdir(tmpdir) + + # Copy RPM files to tmpdir + for rpmfile in self.rpmlist.keys(): + shutil.copy(rpmfile, tmpdir) + + # add file signatures to RPMs + try: + subprocess.check_call(["sign-rpms", "-d", tmpdir]) + except subprocess.CalledProcessError as e: + print "Failed to add file signatures to RPMs. Call to sign-rpms process returned non-zero exit status %i" % e.returncode + os.chdir(orig_wd) + shutil.rmtree(tmpdir) + raise SystemExit(e.returncode) + + # generate tar file + tar = tarfile.open("software.tar", "w") + for rpmfile in self.rpmlist.keys(): + tar.add(os.path.basename(rpmfile)) + tar.close() + + # Generate the metadata xml file + self.meta.gen_xml("metadata.xml") + + # assemble the patch + PatchFile.write_patch(patchfile) + + # Change back to original working dir + os.chdir(orig_wd) + + shutil.rmtree(tmpdir) + + print "Patch is %s" % patchfile + + @staticmethod + def write_patch(patchfile): + # SAL: Write the patch file.
Assumes we are in a directory containing metadata.tar and software.tar + + # Generate the metadata tarfile + tar = tarfile.open("metadata.tar", "w") + tar.add("metadata.xml") + tar.close() + + # Generate the signature file + md_md5 = get_md5("metadata.tar") + sw_md5 = get_md5("software.tar") + ff = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + sig = (md_md5 ^ sw_md5) ^ ff + sigfile = open("signature", "w") + sigfile.write("%x" % sig) + sigfile.close() + + # Generate the detached signature + sign_files(['metadata.tar', 'software.tar'], + detached_signature_file) + + # Create the patch + tar = tarfile.open(patchfile, "w:gz") + tar.add("metadata.tar") + tar.add("software.tar") + tar.add("signature") + tar.add(detached_signature_file) + tar.close() + + @staticmethod + def read_patch(path, metadata_only=False): + # We want to enable signature checking by default + verify_signature = True + + # SAL: Open the patch file and extract the contents to the current dir + tar = tarfile.open(path, "r:gz") + tar.extract("metadata.tar") + tar.extract("software.tar") + tar.extract("signature") + try: + tar.extract(detached_signature_file) + except KeyError: + msg = "Patch has not been signed" + LOG.warning(msg) + + # Verify the data integrity signature first + sigfile = open("signature", "r") + sig = int(sigfile.read(), 16) + sigfile.close() + + md_md5 = get_md5("metadata.tar") + sw_md5 = get_md5("software.tar") + ff = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + expected_sig = (md_md5 ^ sw_md5) ^ ff + + if sig != expected_sig: + msg = "Patch failed verification" + LOG.error(msg) + raise PatchValidationFailure(msg) + + # Lookahead into metadata to get patch sw_version -- if older than + # 18.03, don't do the check + tar = tarfile.open("metadata.tar") + tar.extractall() + xml_root = ElementTree.parse('metadata.xml').getroot() + sw_version = xml_root.findtext('sw_version') + if sw_version == '17.06': + verify_signature = False + + # Clean up lookahead + os.unlink('metadata.xml') + + if verify_signature: + # If there should be a detached signature, verify it + if os.path.exists(detached_signature_file): + filenames = ["metadata.tar", "software.tar"] + sig_valid = verify_files( + filenames, + detached_signature_file) + if sig_valid is True: + msg = "Signature verified, patch has been signed" + LOG.info(msg) + else: + msg = "Signature check failed" + LOG.error(msg) + raise PatchValidationFailure(msg) + else: + msg = "Patch has not been signed" + LOG.error(msg) + raise PatchValidationFailure(msg) + + tar = tarfile.open("metadata.tar") + tar.extractall() + + if not metadata_only: + tar = tarfile.open("software.tar") + tar.extractall() + + @staticmethod + def query_patch(patch, field=None): + + abs_patch = os.path.abspath(patch) + + # Create a temporary working directory + tmpdir = tempfile.mkdtemp(prefix="patch_") + + # Save the current directory, so we can chdir back after + orig_wd = os.getcwd() + + # Change to the tmpdir + os.chdir(tmpdir) + + r = {} + + try: + PatchFile.read_patch(abs_patch, metadata_only=True) + thispatch = PatchData() + patch_id = thispatch.parse_metadata("metadata.xml") + r["id"] = patch_id + if field is None: + for f in ["status", "unremovable", "summary", + "description", "install_instructions", + "warnings", "reboot_required"]: + r[f] = thispatch.query_line(patch_id, f) + else: + r[field] = thispatch.query_line(patch_id, field) + + except PatchValidationFailure as e: + msg = "Patch validation failed during extraction" + LOG.exception(msg) + raise e + except PatchMismatchFailure as e: + msg = "Patch
Mismatch during extraction" + LOG.exception(msg) + raise e + except tarfile.TarError: + msg = "Failed during patch extraction" + LOG.exception(msg) + raise PatchValidationFailure(msg) + finally: + # Change back to original working dir + os.chdir(orig_wd) + shutil.rmtree(tmpdir) + return r + + @staticmethod + def modify_patch(patch, + key, + value): + + abs_patch = os.path.abspath(patch) + new_abs_patch = "%s.new" % abs_patch + + # Create a temporary working directory + tmpdir = tempfile.mkdtemp(prefix="patch_") + + # Save the current directory, so we can chdir back after + orig_wd = os.getcwd() + + # Change to the tmpdir + os.chdir(tmpdir) + + try: + PatchFile.read_patch(abs_patch, metadata_only=True) + PatchData.modify_metadata_text("metadata.xml", key, value) + PatchFile.write_patch(new_abs_patch) + os.rename(new_abs_patch, abs_patch) + + except PatchValidationFailure as e: + raise e + except PatchMismatchFailure as e: + raise e + except tarfile.TarError: + msg = "Failed during patch extraction" + LOG.exception(msg) + raise PatchValidationFailure(msg) + finally: + # Change back to original working dir + os.chdir(orig_wd) + shutil.rmtree(tmpdir) + return + + @staticmethod + def extract_patch(patch, + metadata_dir=avail_dir, + metadata_only=False, + existing_content=None, + allpatches=None, + base_pkgdata=None): + """ + Extract the metadata and patch contents + :param patch: Patch file + :param metadata_dir: Directory to store the metadata XML file + :return: + """ + thispatch = None + + abs_patch = os.path.abspath(patch) + abs_metadata_dir = os.path.abspath(metadata_dir) + + # Create a temporary working directory + tmpdir = tempfile.mkdtemp(prefix="patch_") + + # Save the current directory, so we can chdir back after + orig_wd = os.getcwd() + + # Change to the tmpdir + os.chdir(tmpdir) + + try: + # Open the patch file and extract the contents to the tmpdir + PatchFile.read_patch(abs_patch, metadata_only) + + thispatch = PatchData() + patch_id = thispatch.parse_metadata("metadata.xml") + + if patch_id is None: + print "Failed to import patch" + # Change back to original working dir + os.chdir(orig_wd) + shutil.rmtree(tmpdir) + return None + + if not metadata_only and base_pkgdata is not None: + # Run version validation tests first + patch_sw_version = thispatch.metadata[patch_id]["sw_version"] + if not base_pkgdata.check_release(patch_sw_version): + msg = "Patch %s software release (%s) is not installed" % (patch_id, patch_sw_version) + LOG.exception(msg) + raise PatchValidationFailure(msg) + + for rpmname in thispatch.contents[patch_id]: + pkgname, arch, pkgver = parse_rpm_filename(rpmname) + base_pkgver = base_pkgdata.find_version(patch_sw_version, pkgname, arch) + if base_pkgver is not None: + # Compare the patch RPM's version against the base + if pkgver <= base_pkgver: + msg = "RPM %s in patch %s must be a higher version than the original (%s)" % \ + (rpmname, patch_id, base_pkgver) + LOG.exception(msg) + raise PatchValidationFailure(msg) + + if allpatches is not None: + # Compare the patch RPM's version against other patches + other = allpatches.find_patch_with_pkgver(patch_sw_version, pkgname, arch, pkgver) + if other is not None: + msg = "Patch %s contains rpm %s, which is already provided by patch %s" % \ + (patch_id, rpmname, other) + LOG.exception(msg) + raise PatchValidationFailure(msg) + + if metadata_only: + # This is a re-import.
Ensure the content lines up + if existing_content is None \ + or len(existing_content) != len(thispatch.contents[patch_id]): + msg = "Contents of re-imported patch do not match" + LOG.exception(msg) + raise PatchMismatchFailure(msg) + for rpmname in existing_content: + if rpmname not in thispatch.contents[patch_id]: + msg = "Contents of re-imported patch do not match" + LOG.exception(msg) + raise PatchMismatchFailure(msg) + + shutil.move("metadata.xml", + "%s/%s-metadata.xml" % (abs_metadata_dir, patch_id)) + + if not metadata_only: + for rpmname in thispatch.contents[patch_id]: + patch_sw_version = thispatch.metadata[patch_id]["sw_version"] + rpm_dir = package_dir[patch_sw_version] + if not os.path.exists(rpm_dir): + os.makedirs(rpm_dir) + shutil.move(rpmname, "%s/" % rpm_dir) + except PatchValidationFailure as e: + raise e + except PatchMismatchFailure as e: + raise e + except tarfile.TarError: + msg = "Failed during patch extraction" + LOG.exception(msg) + raise PatchValidationFailure(msg) + except KeyError: + msg = "Failed during patch extraction" + LOG.exception(msg) + raise PatchValidationFailure(msg) + except OSError: + msg = "Failed during patch extraction" + LOG.exception(msg) + raise PatchFail(msg) + except IOError: + msg = "Failed during patch extraction" + LOG.exception(msg) + raise PatchFail(msg) + finally: + # Change back to original working dir + os.chdir(orig_wd) + shutil.rmtree(tmpdir) + + return thispatch + + +def patch_build(): + configure_logging(logtofile=False) + + try: + opts, remainder = getopt.getopt(sys.argv[1:], + '', + ['id=', + 'release=', + 'summary=', + 'status=', + 'unremovable', + 'reboot-required=', + 'desc=', + 'warn=', + 'inst=', + 'req=', + 'controller=', + 'controller-compute=', + 'controller-compute-lowlatency=', + 'compute=', + 'compute-lowlatency=', + 'storage=', + 'all-nodes=']) + except getopt.GetoptError: + print "Usage: %s [ ] ... " \ + % os.path.basename(sys.argv[0]) + print "Options:" + print "\t--id Patch ID" + print "\t--release Platform release version" + print "\t--status Patch Status Code (i.e. O, R, V)" + print "\t--unremovable Marks patch as unremovable" + print "\t--reboot-required Marks patch as reboot-required (default=Y)" + print "\t--summary Patch Summary" + print "\t--desc Patch Description" + print "\t--warn Patch Warnings" + print "\t--inst Patch Install Instructions" + print "\t--req Required Patch" + print "\t--controller New package for controller" + print "\t--compute New package for compute node" + print "\t--compute-lowlatency New package for compute-lowlatency node" + print "\t--storage New package for storage node" + print "\t--controller-compute New package for combined node" + print "\t--controller-compute-lowlatency New package for lowlatency combined node" + print "\t--all-nodes New package for all node types" + exit(1) + + pf = PatchFile() + + # Default the release + pf.meta.sw_version = os.environ['PLATFORM_RELEASE'] + + for opt, arg in opts: + if opt == "--id": + pf.meta.id = arg + elif opt == "--release": + pf.meta.sw_version = arg + elif opt == "--summary": + pf.meta.summary = arg + elif opt == "--status": + pf.meta.status = arg + elif opt == "--unremovable": + pf.meta.unremovable = "Y" + elif opt == "--reboot-required": + if arg != "Y" and arg != "N": + print "The --reboot-required option requires either Y or N as argument."
+ exit(1) + pf.meta.reboot_required = arg + elif opt == "--desc": + pf.meta.description = arg + elif opt == "--warn": + pf.meta.warnings = arg + elif opt == "--inst": + pf.meta.install_instructions = arg + elif opt == "--req": + pf.meta.requires.append(arg) + elif opt == "--all-nodes": + for p in ("controller", + "compute", + "compute-lowlatency", + "storage", + "controller-compute", + "controller-compute-lowlatency"): + pf.add_rpm(arg, personality=p) + elif opt in ("--controller", + "--compute", + "--compute-lowlatency", + "--storage", + "--controller-compute", + "--controller-compute-lowlatency"): + pf.add_rpm(arg, personality=opt[2:]) + + if pf.meta.id is None: + print "The --id argument is mandatory." + exit(1) + + for rpmfile in remainder: + pf.add_rpm(rpmfile) + + pf.gen_patch() diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/patch_signing.py b/cgcs-patch/cgcs-patch/cgcs_patch/patch_signing.py new file mode 100644 index 00000000..8334767d --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/patch_signing.py @@ -0,0 +1,66 @@ +""" +Copyright (c) 2017 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +import os +from Crypto.Signature import PKCS1_PSS +from Crypto.Hash import SHA256 +from Crypto.PublicKey import RSA +from Crypto.Util.asn1 import DerSequence +from binascii import a2b_base64 +from cgcs_patch.patch_verify import read_RSA_key + +# To save memory, read and hash 1M of files at a time +default_blocksize=1*1024*1024 + +# When we sign patches, look for private keys in the following paths +# +# The (currently hardcoded) path on the signing server will be replaced +# by the capability to specify filename from calling function. +private_key_files=['/signing/keys/formal-private-key.pem', + os.path.expandvars('$MY_REPO/build-tools/signing/dev-private-key.pem') + ] + + +def sign_files(filenames, signature_file, private_key=None): + """ + Utility function for signing data in files. + :param filenames: A list of files containing the data to be signed + :param signature_file: The name of the file to which the signature will be + stored + :param private_key: If specified, sign with this private key. Otherwise, + the files in private_key_files will be searched for + and used, if found. + """ + + # Hash the data across all files + blocksize=default_blocksize + data_hash = SHA256.new() + for filename in filenames: + with open(filename, 'rb') as infile: + data=infile.read(blocksize) + while len(data) > 0: + data_hash.update(data) + data=infile.read(blocksize) + + # Find a private key to use, if not already provided + if private_key is None: + for filename in private_key_files: + # print 'Checking to see if ' + filename + ' exists\n' + if os.path.exists(filename): + # print 'Getting private key from ' + filename + '\n' + private_key = read_RSA_key(open(filename, 'rb').read()) + + assert (private_key is not None),"Could not find private signing key" + + # Encrypt the hash (sign the data) with the key we find + signer = PKCS1_PSS.new(private_key) + signature = signer.sign(data_hash) + + # Save it + with open(signature_file, 'wb') as outfile: + outfile.write(signature) + diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/patch_verify.py b/cgcs-patch/cgcs-patch/cgcs_patch/patch_verify.py new file mode 100644 index 00000000..9c4939ed --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/patch_verify.py @@ -0,0 +1,147 @@ +""" +Copyright (c) 2017 Wind River Systems, Inc. 
+ +SPDX-License-Identifier: Apache-2.0 + +""" + +import os +import logging + +from Crypto.Signature import PKCS1_v1_5 +from Crypto.Signature import PKCS1_PSS +from Crypto.Hash import SHA256 +from Crypto.PublicKey import RSA +from Crypto.Util.asn1 import DerSequence +from binascii import a2b_base64 + +from cgcs_patch.certificates import dev_certificate, formal_certificate + +# To save memory, read and hash 1M of files at a time +default_blocksize=1*1024*1024 + +dev_certificate_marker='/etc/pki/wrs/dev_certificate_enable.bin' +LOG = logging.getLogger('main_logger') + + +def verify_hash(data_hash, signature_bytes, certificate_list): + """ + Checks that a hash's signature can be validated against an approved + certificate + :param data_hash: A hash of the data to be validated + :param signature_bytes: A pre-generated signature (typically, the hash + encrypted with a private key) + :param certificate_list: A list of approved certificates or public keys + which the signature is validated against + :return: True if the signature was validated against a certificate + """ + verified = False + for cert in certificate_list: + if verified: + break + pub_key = read_RSA_key(cert) + + # PSS is the recommended signature scheme, but some tools (like OpenSSL) + # use the older v1_5 scheme. We try to validate against both. + # + # We use PSS for patch validation, but use v1_5 for ISO validation + # since we want to generate detached sigs that a customer can validate + # with OpenSSL + verifier = PKCS1_PSS.new(pub_key) + verified = verifier.verify(data_hash, signature_bytes) + if not verified: + verifier = PKCS1_v1_5.new(pub_key) + verified = verifier.verify(data_hash, signature_bytes) + return verified + + +def get_public_certificates(): + """ + Builds a list of accepted certificates which can be used to validate + further things. This list may contain multiple certificates depending on + the configuration of the system (for instance, whether we include the + developer certificate in the list). + :return: A list of certificates in PEM format + """ + cert_list = [formal_certificate] + + # We enable the dev certificate based on the presence of a file. This file + # contains a hash of an arbitrary string ('Titanium patching') which has been + # encrypted with our formal private key. If the file is present (and valid) + # then we add the developer key to the approved certificates list + if os.path.exists(dev_certificate_marker): + with open(dev_certificate_marker) as infile: + signature = infile.read() + data_hash = SHA256.new() + data_hash.update('Titanium patching') + if verify_hash(data_hash, signature, cert_list): + cert_list.append(dev_certificate) + else: + msg = "Invalid data found in " + dev_certificate_marker + LOG.error(msg) + + return cert_list + + +def read_RSA_key(key_data): + """ + Utility function for reading an RSA key half from encoded data + :param key_data: PEM data containing raw key or X.509 certificate + :return: An RSA key object + """ + try: + # Handle data that is just a raw key + key = RSA.importKey(key_data) + except ValueError: + # The RSA.importKey function cannot read X.509 certificates directly + # (depending on the version of the Crypto library).
Instead, we + # may need to extract the key from the certificate before building + # the key object + # + # We need to strip the BEGIN and END lines from PEM first + x509lines = key_data.replace(' ','').split() + x509text = ''.join(x509lines[1:-1]) + x509data = DerSequence() + x509data.decode(a2b_base64(x509text)) + + # X.509 contains a few parts. The first part (index 0) is the + # certificate itself, (TBS or "to be signed" cert) and the 7th field + # of that cert is subjectPublicKeyInfo, which can be imported. + # RFC3280 + tbsCert = DerSequence() + tbsCert.decode(x509data[0]) + + # Initialize RSA key from the subjectPublicKeyInfo field + key = RSA.importKey(tbsCert[6]) + return key + + +def verify_files(filenames, signature_file): + """ + Verify data files against a detached signature. + :param filenames: A list of files containing the data which was signed + :param public_key_file: A file containing the public key or certificate + corresponding to the key which signed the data + :param signature_file: The name of the file containing the signature + :return: True if the signature was verified, False otherwise + """ + + # Hash the data across all files + blocksize=default_blocksize + data_hash = SHA256.new() + for filename in filenames: + with open(filename, 'rb') as infile: + data=infile.read(blocksize) + while len(data) > 0: + data_hash.update(data) + data=infile.read(blocksize) + + # Get the signature + with open(signature_file, 'rb') as sig_file: + signature_bytes = sig_file.read() + + # Verify the signature + certificate_list = get_public_certificates() + return verify_hash(data_hash, signature_bytes, certificate_list) + diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/templates/query.html b/cgcs-patch/cgcs-patch/cgcs_patch/templates/query.html new file mode 100644 index 00000000..e002287e --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/templates/query.html @@ -0,0 +1,92 @@ + +
+% if not pd is UNDEFINED and len(pd) > 0: + + + + + + + % for patch_id in sorted(pd.keys()): + ${patchrow(patch_id)} + % endfor +
Patch IDPatch Data
+% endif + +% if not info is UNDEFINED and len(info) > 0: +

${info}

+% endif + +% if not warning is UNDEFINED and len(warning) > 0: +

Warning:
${warning}

+% endif + +% if not error is UNDEFINED and len(error) > 0: +

Error:
${error}

+% endif + +

+Show all
+Show applied
+Show available
+Query Hosts
+ +

+
+ + +
+ +<%def name="patchrow(patch_id)"> + <% + p = pd[patch_id] + %> + + ${patch_id} + + + % if p["repostate"] != "": + + % endif + % if p["patchstate"] != "": + + % endif + % if p["status"] != "": + + % endif + % if p["unremovable" != ""]: + + % endif + % if p["reboot_required" != ""]: + + % endif + % if p["summary"] != "": + + % endif + % if p["description"] != "": + + % endif + % if p["install_instructions"] != "": + + % endif + % if p["warnings"] != "": + + % endif + % if p["repostate"] == "Applied": + + + + + % endif + % if p["repostate"] == "Available": + + + + + % endif +
Repo State:${p["repostate"]}
Patch State:${p["patchstate"]}
Status:${p["status"]}
Unremovable:${p["unremovable"]}
Reboot-Required:${p["reboot_required"]}
Summary:${p["summary"]}
Description:${p["description"]}
Install Instructions:${p["install_instructions"]}
Warnings:${p["warnings"]}
Actions:Remove
Actions:Apply
+ Delete
+ + + + diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/templates/query.xml b/cgcs-patch/cgcs-patch/cgcs_patch/templates/query.xml new file mode 100755 index 00000000..257d723d --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/templates/query.xml @@ -0,0 +1,95 @@ +% if not pd is UNDEFINED: + + % if len(pd) > 0: + % for patch_id in sorted(pd.keys()): +${patchelem(patch_id)} + % endfor + % endif + +% endif +% if not info is UNDEFINED or not warning is UNDEFINED or not error is UNDEFINED: + +% if not info is UNDEFINED and len(info) > 0: +${info} +% endif + + +% if not warning is UNDEFINED and len(warning) > 0: +${warning} +% endif + + +% if not error is UNDEFINED and len(error) > 0: +${error} +% endif + +% endif +<%def name="patchelem(patch_id)">\ +<%p = pd[patch_id] %>\ + + + ${patch_id} + + + % if p["status"] != "": + ${p["status"]} + % endif + + + % if p["sw_version"] != "": + ${p["sw_version"]} + % endif + + + % if p["repostate"] != "": + ${p["repostate"]} + % endif + + + % if p["patchstate"] != "": + ${p["patchstate"]} + % endif + + + % if p["status"] != "": + ${p["status"]} + % endif + + + % if p["unremovable"] != "": + ${p["unremovable"]} + % endif + + + % if p["reboot_required"] != "": + ${p["reboot_required"]} + % endif + + + % if p["summary"] != "": + ${p["summary"]} + % endif + + + % if p["description"] != "": + ${p["description"]} + % endif + + + % if p["install_instructions"] != "": + ${p["install_instructions"]} + % endif + + + % if p["warnings"] != "": + ${p["warnings"]} + % endif + + + % if "requires" in p and len(p["requires"]) > 0: + % for req in sorted(p["requires"]): + ${req} + % endfor + % endif + + diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/templates/query_agents.html b/cgcs-patch/cgcs-patch/cgcs_patch/templates/query_agents.html new file mode 100644 index 00000000..25526d97 --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/templates/query_agents.html @@ -0,0 +1,32 @@ + +
+ + + + + + + + + + % for agent in data: + ${agentrow(agent)} + % endfor +
HostnameIPPatch Current?Requires RebootTime since last ack
+ +

+Show all
+Show applied
+Show available
+ + +<%def name="agentrow(agent)"> + + ${agent["ip"]} + ${agent["hostname"]} + ${agent["patch_current"]} + ${agent["requires_reboot"]} + ${agent["secs_since_ack"]} + + + diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/templates/query_hosts.xml b/cgcs-patch/cgcs-patch/cgcs_patch/templates/query_hosts.xml new file mode 100755 index 00000000..a8957e77 --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/templates/query_hosts.xml @@ -0,0 +1,75 @@ +% if not data is UNDEFINED and len(data) > 0: + + % for host in data: +${hostelem(host)} + % endfor + +% endif +<%def name="hostelem(host)">\ +<%h = host %>\ + + + % if h["hostname"] != "": + ${h["hostname"]} + % endif + + + % if h["requires_reboot"] != "": + ${h["requires_reboot"]} + % endif + + + % if h["nodetype"] != "": + ${h["nodetype"]} + % endif + + + % if h["ip"] != "": + ${h["ip"]} + % endif + + + % if "missing_pkgs" in h and len(h["missing_pkgs"]) > 0: + % for pkg in sorted(h["missing_pkgs"]): + ${pkg} + % endfor + % endif + + + % if "installed" in h and len(h["installed"]) > 0: + % for pkg in sorted(h["installed"]): + + ${pkg} + ${h["installed"][pkg]} + + % endfor + % endif + + + % if "to_remove" in h and len(h["to_remove"]) > 0: + % for pkg in sorted(h["to_remove"]): + ${pkg} + % endfor + % endif + + + % if h["secs_since_ack"] != "": + ${h["secs_since_ack"]} + % endif + + + % if h["patch_failed"] != "": + ${h["patch_failed"]} + % endif + + + % if h["stale_details"] != "": + ${h["stale_details"]} + % endif + + + % if h["patch_current"] != "": + ${h["patch_current"]} + % endif + + \ No newline at end of file diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/templates/show.html b/cgcs-patch/cgcs-patch/cgcs_patch/templates/show.html new file mode 100644 index 00000000..431c96f9 --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/templates/show.html @@ -0,0 +1,83 @@ + +
+% if not metadata is UNDEFINED and len(metadata) > 0: + % for patch_id in sorted(metadata.keys()): + ${showpatch(patch_id)} + % endfor +% endif + +% if not info is UNDEFINED and len(info) > 0: +

${info}

+% endif + +% if not warning is UNDEFINED and len(warning) > 0: +

Warning:
${warning}

+% endif + +% if not error is UNDEFINED and len(error) > 0: +

Error:
${error}

+% endif + +

+Show all
+Show applied
+Show available
+Query Hosts
+ +

+
+ + +
+ +<%def name="showpatch(patch_id)"> + <% + p = metadata[patch_id] + %> +

${patch_id}

+ + % if p["repostate"] != "": + + % endif + % if p["patchstate"] != "": + + % endif + % if p["status"] != "": + + % endif + % if p["unremovable"] != "": + + % endif + % if p["reboot_required"] != "": + + % endif + % if p["summary"] != "": + + % endif + % if p["description"] != "": + + % endif + % if p["install_instructions"] != "": + + % endif + % if p["warnings"] != "": + + % endif + % if "requires" in p and len(p["requires"]) > 0: + + % endif + % if not contents is UNDEFINED and patch_id in contents: + + % endif + +
Repo State:${p["repostate"]}
Patch State:${p["patchstate"]}
Status:${p["status"]}
Unremovable:${p["unremovable"]}
Reboot-Required:${p["reboot_required"]}
Summary:${p["summary"]}
Description:${p["description"]}
Install Instructions:${p["install_instructions"]}
Warnings:${p["warnings"]}
Requires: + % for req in sorted(p["requires"]): + ${req}
+ % endfor +
Contents: + % for pkg in sorted(contents[patch_id]): + ${pkg}
+ % endfor +
+ + diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/templates/show.xml b/cgcs-patch/cgcs-patch/cgcs_patch/templates/show.xml new file mode 100755 index 00000000..f2b58212 --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/templates/show.xml @@ -0,0 +1,92 @@ + +% if not contents is UNDEFINED and len(contents) > 0: +% for patch_id in sorted(contents.keys()): + + % for pkg in sorted(contents[patch_id]): + ${pkg} + % endfor + +% endfor +% endif + + +% if not error is UNDEFINED and len(error) > 0: +${error} +% endif + + +% if not metadata is UNDEFINED and len(metadata) > 0: + % for patch_id in sorted(metadata.keys()): +${showpatch(patch_id)} + % endfor +% endif + +<%def name="showpatch(patch_id)">\ +<% p = metadata[patch_id] %>\ + + + ${patch_id} + + + % if p["status"] != "": + ${p["status"]} + % endif + + + % if p["unremovable"] != "": + ${p["unremovable"]} + % endif + + + % if p["reboot_required"] != "": + ${p["reboot_required"]} + % endif + + + % if p["sw_version"] != "": + ${p["sw_version"]} + % endif + + + % if p["repostate"] != "": + ${p["repostate"]} + % endif + + + % if p["patchstate"] != "": + ${p["patchstate"]} + % endif + + + % if p["status"] != "": + ${p["status"]} + % endif + + + % if p["summary"] != "": + ${p["summary"]} + % endif + + + % if p["description"] != "": + ${p["description"]} + % endif + + + % if p["install_instructions"] != "": + ${p["install_instructions"]} + % endif + + + % if p["warnings"] != "": + ${p["warnings"]} + % endif + + + % if "requires" in p and len(p["requires"]) > 0: + % for req in sorted(p["requires"]): + ${req} + % endfor + % endif + + diff --git a/cgcs-patch/cgcs-patch/cgcs_patch/utils.py b/cgcs-patch/cgcs-patch/cgcs_patch/utils.py new file mode 100644 index 00000000..0b2d73ff --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch/utils.py @@ -0,0 +1,74 @@ +""" +Copyright (c) 2016-2017 Wind River Systems, Inc. 
+ +SPDX-License-Identifier: Apache-2.0 + +""" + +from netaddr import IPAddress +import cgcs_patch.constants as constants +import socket + +import ctypes +import ctypes.util + +libc = ctypes.CDLL(ctypes.util.find_library('c')) + + +def if_nametoindex(name): + return libc.if_nametoindex(name) + + +def gethostbyname(hostname): + """ gethostbyname with IPv6 support """ + try: + return socket.getaddrinfo(hostname, None)[0][4][0] + except: + return None + + +def get_management_version(): + """ Determine whether management is IPv4 or IPv6 """ + controller_ip_string = gethostbyname(constants.CONTROLLER_FLOATING_HOSTNAME) + if controller_ip_string: + controller_ip_address = IPAddress(controller_ip_string) + return controller_ip_address.version + else: + return constants.ADDRESS_VERSION_IPV4 + + +def get_management_family(): + ip_version = get_management_version() + if ip_version == constants.ADDRESS_VERSION_IPV6: + return socket.AF_INET6 + else: + return socket.AF_INET + + +def get_versioned_address_all(): + ip_version = get_management_version() + if ip_version == constants.ADDRESS_VERSION_IPV6: + return "::" + else: + return "0.0.0.0" + + +def ip_to_url(ip_address_string): + """ Add brackets if an IPv6 address """ + try: + ip_address = IPAddress(ip_address_string) + if ip_address.version == constants.ADDRESS_VERSION_IPV6: + return "[%s]" % ip_address_string + else: + return ip_address_string + except: + return ip_address_string + + +def ip_to_versioned_localhost(ip_address_string): + """ Add brackets if an IPv6 address """ + ip_address = IPAddress(ip_address_string) + if ip_address.version == constants.ADDRESS_VERSION_IPV6: + return "::1" + else: + return "localhost" diff --git a/cgcs-patch/cgcs-patch/cgcs_patch_id/README.txt b/cgcs-patch/cgcs-patch/cgcs_patch_id/README.txt new file mode 100644 index 00000000..44e31cc9 --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch_id/README.txt @@ -0,0 +1,34 @@ +Intended to run on a single build server. Currently yow-cgts2-lx + +# On other build servers + mkdir -p /localdisk/designer/jenkins/bin + cp patch_id_allocator_client.py /localdisk/designer/jenkins/bin + + +# On the intended server: e.g. yow-cgts2-lx + mkdir -p /localdisk/designer/jenkins/bin + cp *py /localdisk/designer/jenkins/bin/ + mkdir -p /localdisk/designer/jenkins/patch_ids + sudo cp patch_id_allocator_server.conf /etc/init + sudo initctl reload-configuration + sudo start script + +# Change to a different server + edit patch_id_allocator_client.py + change the line ... + server = 'yow-cgts2-lx.wrs.com' + +# TODO: + Need to back up the /localdisk/designer/jenkins/patch_ids directory + +# Quick test + Point your browser at this url: + http://yow-cgts2-lx:8888/get_patch_id + + expected result is: + CGCS_None_PATCH_0000 + + on each reload of the page, the number increments: + CGCS_None_PATCH_0001 + CGCS_None_PATCH_0002 + .... diff --git a/cgcs-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator.py b/cgcs-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator.py new file mode 100755 index 00000000..8bb2e25d --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator.py @@ -0,0 +1,49 @@ +#!/usr/bin/python +# +# Copyright (c) 2013-2014 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +import posixfile +import string +import time + +directory="/localdisk/designer/jenkins/patch_ids" + +def get_unique_id(filename, digits=4): + counter = 1 + path = "%s/%s" % (directory, filename) + try: + # open for update + file = posixfile.open(path, "r+") + file.lock("w|", digits) + counter = int(file.read(digits)) + 1 + except IOError: + # create it + try: + file = posixfile.open(path, "w") + file.lock("w|", digits) + except IOError: + print "creation of file '%s' failed" % path + return -1 + + file.seek(0) # rewind + format = "%%0%dd" % digits + file.write(format % counter) + + # Note: close releases lock + file.close() + + return counter + +def get_patch_id(version, prefix="CGCS", digits=4): + filename = "%s_%s_patchid" % (prefix, version) + id = get_unique_id(filename) + if id < 0: + return None + patch_id_format = "%%s_%%s_PATCH_%%0%dd" % digits + patch_id = patch_id_format % (prefix, version, id) + return patch_id + diff --git a/cgcs-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator_client.py b/cgcs-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator_client.py new file mode 100755 index 00000000..8a4aa2cf --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator_client.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python +# +# Copyright (c) 2013-2014 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +import urllib +import urllib2 +import getopt +import sys + + +opts = ['sw_version=', 'prefix=' ] + +server = 'yow-cgts2-lx.wrs.com' +port = 8888 + +def request_patch_id(sw_version="1.01", prefix="CGCS"): + raw_parms = {} + raw_parms['sw_version'] = sw_version + raw_parms['prefix'] = prefix + print "raw_parms = %s" % str(raw_parms) + + url = "http://%s:%d/get_patch_id" % (server, port) + params = urllib.urlencode(raw_parms) + response = urllib2.urlopen(url, params).read() + return response + +def main(): + optlist, remainder = getopt.getopt(sys.argv[1:], '', opts) + + sw_version = None + prefix = None + raw_parms = {} + + print "optlist = %s" % str(optlist) + print "remainder = %s" % str(remainder) + for key, val in optlist: + print "key = %s, val = %s" % (key, val) + if key == '--sw_version': + sw_version = val + print "sw_version = %s" % sw_version + raw_parms['sw_version'] = sw_version + + if key == '--prefix': + prefix = val + print "prefix = %s" % prefix + raw_parms['prefix'] = prefix + + # response = request_patch_id(sw_version=sw_version, prefix=prefix) + response = request_patch_id(**raw_parms) + print "response = %s" % str(response) + + +if __name__ == "__main__": + main() diff --git a/cgcs-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator_server.conf b/cgcs-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator_server.conf new file mode 100644 index 00000000..831e6cab --- /dev/null +++ b/cgcs-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator_server.conf @@ -0,0 +1,16 @@ +# upstart script for patch_id_allocator_server +# +# Installation +# sudo cp patch_id_allocator_server.conf /etc/init +# sudo initctl reload-configuration +# sudo start script + +description "patch_id service" +author "Scott Little " + +start on runlevel [234] +stop on runlevel [0156] + +chdir /tmp +exec /localdisk/designer/jenkins/bin/patch_id_allocator_server.py +respawn diff --git a/cgcs-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator_server.py b/cgcs-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator_server.py new file mode 100755 index 00000000..88499bf4 --- /dev/null +++ 
diff --git a/cgcs-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator_client.py b/cgcs-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator_client.py
new file mode 100755
index 00000000..8a4aa2cf
--- /dev/null
+++ b/cgcs-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator_client.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2013-2014 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+import urllib
+import urllib2
+import getopt
+import sys
+
+
+opts = ['sw_version=', 'prefix=']
+
+server = 'yow-cgts2-lx.wrs.com'
+port = 8888
+
+def request_patch_id(sw_version="1.01", prefix="CGCS"):
+    raw_parms = {}
+    raw_parms['sw_version'] = sw_version
+    raw_parms['prefix'] = prefix
+    print "raw_parms = %s" % str(raw_parms)
+
+    url = "http://%s:%d/get_patch_id" % (server, port)
+    params = urllib.urlencode(raw_parms)
+    response = urllib2.urlopen(url, params).read()
+    return response
+
+def main():
+    optlist, remainder = getopt.getopt(sys.argv[1:], '', opts)
+
+    sw_version = None
+    prefix = None
+    raw_parms = {}
+
+    print "optlist = %s" % str(optlist)
+    print "remainder = %s" % str(remainder)
+    for key, val in optlist:
+        print "key = %s, val = %s" % (key, val)
+        if key == '--sw_version':
+            sw_version = val
+            print "sw_version = %s" % sw_version
+            raw_parms['sw_version'] = sw_version
+
+        if key == '--prefix':
+            prefix = val
+            print "prefix = %s" % prefix
+            raw_parms['prefix'] = prefix
+
+    # response = request_patch_id(sw_version=sw_version, prefix=prefix)
+    response = request_patch_id(**raw_parms)
+    print "response = %s" % str(response)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/cgcs-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator_server.conf b/cgcs-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator_server.conf
new file mode 100644
index 00000000..831e6cab
--- /dev/null
+++ b/cgcs-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator_server.conf
@@ -0,0 +1,16 @@
+# upstart script for patch_id_allocator_server
+#
+# Installation
+#    sudo cp patch_id_allocator_server.conf /etc/init
+#    sudo initctl reload-configuration
+#    sudo start script
+
+description "patch_id service"
+author "Scott Little "
+
+start on runlevel [234]
+stop on runlevel [0156]
+
+chdir /tmp
+exec /localdisk/designer/jenkins/bin/patch_id_allocator_server.py
+respawn
diff --git a/cgcs-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator_server.py b/cgcs-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator_server.py
new file mode 100755
index 00000000..88499bf4
--- /dev/null
+++ b/cgcs-patch/cgcs-patch/cgcs_patch_id/patch_id_allocator_server.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2013-2014 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+import os
+import sys
+import web
+import patch_id_allocator as pida
+
+
+
+port = 8888
+
+urls = (
+    '/get_patch_id', 'get_patch_id',
+)
+
+class get_patch_id:
+    def GET(self):
+        data = web.input(sw_version=None, prefix="CGCS")
+        output = pida.get_patch_id(data.sw_version, data.prefix)
+        return output
+
+    def POST(self):
+        data = web.input(sw_version=None, prefix="CGCS")
+        output = pida.get_patch_id(data.sw_version, data.prefix)
+        return output
+
+class MyApplication(web.application):
+    def run(self, port=8080, *middleware):
+        func = self.wsgifunc(*middleware)
+        return web.httpserver.runsimple(func, ('0.0.0.0', port))
+
+def main():
+    app = MyApplication(urls, globals())
+    app.run(port=port)
+
+if __name__ == "__main__":
+    main()
diff --git a/cgcs-patch/cgcs-patch/setup.py b/cgcs-patch/cgcs-patch/setup.py
new file mode 100644
index 00000000..7926d599
--- /dev/null
+++ b/cgcs-patch/cgcs-patch/setup.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2013-2015 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+
+import setuptools
+
+setuptools.setup(name='cgcs_patch',
+                 version='1.0',
+                 description='CGCS Patch',
+                 packages=setuptools.find_packages(),
+                 package_data = {
+                     # Include templates
+                     '': ['templates/*'],
+                 }
+)
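The whole allocation round trip the README describes can be driven from a few
lines of Python 2, matching the tools above; this sketch assumes the server is
reachable locally on its default port:

    # Hypothetical quick test, equivalent to the browser check in the README.
    import urllib
    import urllib2

    params = urllib.urlencode({'sw_version': '1.01', 'prefix': 'CGCS'})
    patch_id = urllib2.urlopen('http://localhost:8888/get_patch_id', params).read()
    print patch_id        # e.g. CGCS_1.01_PATCH_0001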
diff --git a/cgcs-patch/restart-info.html b/cgcs-patch/restart-info.html
new file mode 100755
index 00000000..9254967e
--- /dev/null
+++ b/cgcs-patch/restart-info.html
@@ -0,0 +1,712 @@

Process restart information

For each process/service the table records: its function; whether it is
in-service patchable; what manages it; the restart command; the patch restart
command, where one exists; its restart dependency; the impact if it is
restarted while in operation; and whether special handling is required.

* ceilometer-polling - Daemon that polls OpenStack services and builds meters.
  Patchable: Y | Managed by: PMON | Restart dependency: N | Special handling: N
  Restart: /etc/init.d/openstack-ceilometer-polling restart
  Impact: As batch_polled_samples is set to True, some samples still in the
  pollsters' memory may be lost if the process is restarted exactly when they
  have just finished polling and are about to be published to RabbitMQ. This
  is about a 10 millisecond window for cpu_source and a 1 second window for
  meter-related sources.

* ceilometer-agent-notification - Daemon that listens for notifications on the
  message queue, converts them to Events and Samples, and applies pipeline
  actions.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: N
  Restart: sm-restart-safe service ceilometer-agent-notification (runs
  /bin/sh /usr/lib/ocf/resource.d/openstack/ceilometer-agent-notification stop, then start)
  Impact: May lose some samples/events if the process is restarted while they
  are being transformed or converted.

* ceilometer-collector - Daemon that gathers and records event and metering
  data created by the notification and polling agents.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: N
  Restart: sm-restart-safe service ceilometer-collector (runs
  /bin/sh /usr/lib/ocf/resource.d/openstack/ceilometer-collector stop, then start)
  Impact: May lose some samples/events if the process is restarted while they
  are being persisted in the Postgres DB. This is a tiny window, especially
  with recent optimization work (no message signature verification, a single
  call to the create_sample stored proc). Note: ensuring that child processes
  and their database connections are released when a parent process is stopped
  is part of collector functionality; it is not specific to in-service patching.

* ceilometer-api - Service to query and view data recorded by the collector.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: N
  Restart: sm-restart-safe service ceilometer-api (runs
  /bin/sh /usr/lib/ocf/resource.d/openstack/ceilometer-api stop, then start)
  Impact: While the service is restarted, Horizon or CLI ceilometer requests
  will fail. Horizon requests are re-established automatically on its next
  polling interval; CLI commands need to be re-issued.

* ceilometer-expirer-active - Cron job that purges expired samples and events
  as well as related meter and resource data.
  Patchable: Y | Managed by: CRON | Restart dependency: N | Special handling: N
  Restart: N/A. To run the expirer manually: /usr/bin/ceilometer-expirer-active
  Impact: There is no need to restart after a patch; the change takes effect
  the next time the expirer cron job runs. Unless there are new features
  specifically planned for the expirer, this code is very stable.

* haproxy - Proxy service responsible for forwarding external REST API
  requests to the OpenStack and Titanium Cloud services that listen on the
  internal interfaces.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: Y
  Restart: sm-restart-safe service haproxy (runs /bin/sh /etc/init.d/haproxy stop, then start)
  Patch restart: /usr/local/sbin/patch-restart-haproxy
  Impact: While the service is restarted, outstanding requests will fail and
  new requests will get a connection error until the service is re-enabled.

* sm - Service management daemon.
  Patchable: N | Managed by: PMON | Restart dependency: N | Special handling: N
  Restart: /etc/init.d/sm restart
  Impact: Will cause all services to be disabled on the active controller
  before the standby controller takes over control.

* sm-api - Daemon that provides the sm API.
  Patchable: N | Managed by: PMON | Restart dependency: N | Special handling: N

* sm-eru - Daemon that records sm eru data.
  Patchable: N | Restart dependency: N | Special handling: N

* sm-watchdog - Daemon that loads the NFS watchdog module to look for and
  handle stalled NFS threads.
  Patchable: N | Restart dependency: N | Special handling: N

* neutron-server - Service that manages network functions.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: N
  Restart: sm-restart-safe service neutron-server (runs
  /bin/sh /usr/lib/ocf/resource.d/openstack/neutron-server stop, then start)
  Patch restart: /bin/neutron-restart neutron-server, or /bin/neutron-restart --all
  Impact: Neutron services will be unavailable while restarting, which will
  prevent instances from being created while it is down. RPCs from computes
  may fail while it is restarting.

* neutron-dhcp-agent - Agent on compute node that manages DHCP servers for
  tenant networks.
  Patchable: Y | Managed by: PMON | Restart dependency: N | Special handling: Y
  Restart: /etc/init.d/neutron-dhcp-agent restart
  Patch restart: /bin/neutron-restart neutron-dhcp-agent, or /bin/neutron-restart --all
  Impact: Will prevent binding new DHCP servers while it is down. Requires
  special handling to kill metadata haproxy processes for networks.

* neutron-metadata-agent - Agent on compute node serving metadata to nodes.
  Patchable: Y | Managed by: PMON | Restart dependency: N | Special handling: N
  Restart: /etc/init.d/neutron-metadata-agent restart
  Patch restart: /bin/neutron-restart neutron-metadata-agent, or /bin/neutron-restart --all
  Impact: Nodes will not be able to receive metadata while it is down.

* neutron-sriov-nic-agent - Agent on compute node responsible for setting
  SR-IOV port information.
  Patchable: Y | Managed by: PMON | Restart dependency: N | Special handling: N
  Restart: /etc/init.d/neutron-sriov-nic-agent restart
  Patch restart: /bin/neutron-restart neutron-sriov-nic-agent, or /bin/neutron-restart --all
  Impact: Will not be able to set device parameters while restarting.

* neutron-bgp-dragent - BGP dynamic routing agent on controller node.
  Patchable: Y | Managed by: PMON | Restart dependency: N | Special handling: N
  Restart: /etc/init.d/neutron-bgp-dragent restart
  Patch restart: /bin/neutron-restart neutron-bgp-dragent, or /bin/neutron-restart --all
  Impact: Will not be able to set device parameters while restarting.

* keystone-all - Provides identity, token management, service catalog, and
  policy functionality.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: N
  Restart: sm-restart-safe service keystone (runs
  /bin/sh /usr/lib/ocf/resource.d/openstack/keystone stop, then start)
  Patch restart: /usr/local/sbin/patch-restart-processes keystone-all
  Impact: While the service is restarted, outstanding requests will fail and
  new requests will get a connection error until the service is re-enabled.

* aodh-api - Aodh service that handles API requests for OpenStack alarming.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: N
  Restart: sm-restart-safe service aodh-api (runs
  /bin/sh /usr/lib/ocf/resource.d/openstack/aodh-api stop, then start)
  Impact: While the service is restarted, outstanding requests will fail and
  new requests will get a connection error until the service is re-enabled.

* aodh-evaluator - Aodh service that performs threshold evaluation for
  OpenStack alarming.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: N
  Restart: sm-restart-safe service aodh-evaluator (runs
  /bin/sh /usr/lib/ocf/resource.d/openstack/aodh-evaluator stop, then start)
  Impact: No OpenStack alarm threshold evaluations will be executed until the
  service is re-enabled.

* aodh-listener - Aodh service that generates alarms based on events.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: N
  Restart: sm-restart-safe service aodh-listener (runs
  /bin/sh /usr/lib/ocf/resource.d/openstack/aodh-listener stop, then start)
  Impact: No OpenStack event-based alarms will be generated until the service
  is re-enabled.

* aodh-notifier - Aodh service that sends OpenStack alarm notifications.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: N
  Restart: sm-restart-safe service aodh-notifier (runs
  /bin/sh /usr/lib/ocf/resource.d/openstack/aodh-notifier stop, then start)
  Impact: No OpenStack alarm threshold notifications will be issued until the
  service is re-enabled.

* aodh-expirer-active - Cron job that purges expired OpenStack alarms.
  Patchable: Y | Managed by: CRON | Restart dependency: N | Special handling: N
  Restart: N/A. To run the expirer manually: /usr/bin/aodh-expirer-active
  Impact: There is no need to restart after a patch; the change takes effect
  the next time the expirer cron job runs. Unless there are new features
  specifically planned for the expirer, this code is very stable.

* heat-api - Heat service for API requests for OpenStack orchestration.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: N
  Restart: sm-restart-safe service heat-api (runs
  /bin/sh /usr/lib/ocf/resource.d/openstack/heat-api stop, then start)
  Impact: While the service is restarted, Horizon or CLI heat requests will
  fail. Horizon will re-establish automatically; CLI commands need to be
  re-issued. Heat stack updates in progress may fail.

* heat-api-cfn - Heat service for AWS CloudFormation API requests.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: N
  Restart: sm-restart-safe service heat-api-cfn (runs
  /bin/sh /usr/lib/ocf/resource.d/openstack/heat-api-cfn stop, then start)
  Impact: While the service is restarted, CloudFormation API requests such as
  autoscaling will not be processed.

* heat-api-cloudwatch - Heat service for AWS CloudWatch metric collection.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: N
  Restart: sm-restart-safe service heat-api-cloudwatch (runs
  /bin/sh /usr/lib/ocf/resource.d/openstack/heat-api-cloudwatch stop, then start)
  Impact: While the service is restarted, stats sent from VMs will not be
  processed.

* heat-engine - Heat orchestration engine.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: N
  Restart: sm-restart-safe service heat-engine (runs
  /bin/sh /usr/lib/ocf/resource.d/openstack/heat-engine stop, then start)
  Impact: While the service is restarted, OpenStack heat orchestration
  commands will not be processed. Stacks being created, deleted or updated
  will fail and need to be re-initiated.

* heat-purge-deleted-active - Cron job that purges deleted OpenStack heat
  stacks from the database.
  Patchable: Y | Managed by: CRON | Restart dependency: N | Special handling: N
  Restart: N/A. To run the job manually: /usr/bin/heat-purge-deleted-active
  Impact: There is no need to restart after a patch; the change takes effect
  the next time the cron job runs. Unless there are new features specifically
  planned, this code is very stable.

* Glance - Glance imaging service; a single script restarts both glance-api
  and glance-registry.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: N
  Restart: /usr/bin/restart-glance
  Impact: While the service is restarted, outstanding requests will continue
  and new requests will get a connection error until the service is
  re-enabled. If the graceful restart takes more than 30 secs, the process is
  killed. Timers are configurable from the restart script.

* Cinder - Cinder volume service; a single script restarts cinder-volume,
  cinder-scheduler and cinder-api.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: N
  Restart: /usr/bin/restart-cinder
  Impact: While the service is restarted, outstanding requests will continue
  and new requests will get a connection error until the service is
  re-enabled. Timers are configurable from the restart script.

* Horizon - OpenStack Dashboard GUI used to control OpenStack and Titanium
  Cloud operations.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: N
  Restart: sm-restart service horizon
  Patch restart: /usr/bin/horizon-patching-restart
  Impact: When Horizon is restarted via the patch restart command, all users
  are logged out; users who try to log back in before the server is up again
  will see an internal server error. It usually takes less than a minute for
  the service to restart.

* IO-Monitor - Daemon that monitors cinder devices and raises alarms for
  excessive storage IO load.
  Patchable: Y | Managed by: PMON | Restart dependency: N | Special handling: N
  Restart: pmon-restart io-monitor-manager
  Patch restart: /usr/local/sbin/patch-restart-processes io-monitor-manager
  Impact: Generally none. It is very unlikely that the system encounters an
  excessive storage IO load lasting only the couple of seconds it takes to
  restart the io-monitor process, such that the load would go undetected.

* vim - Virtual Infrastructure Manager.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: N
  Restart: sm-restart-safe service vim
  Impact: While the service is restarting, requests through the VIM API or
  through the Nova API Proxy will fail. Any instance actions normally
  triggered by instance state changes (from nova) will not occur until the
  process starts up again and audits the instance states.

* vim-api - Virtual Infrastructure Manager API.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: N
  Restart: sm-restart-safe service vim-api
  Impact: Requests through the external VIM API will fail while the service
  is restarting.

* vim-webserver - Virtual Infrastructure Manager Web Server.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: N
  Restart: sm-restart-safe service vim-webserver
  Impact: None; this service is for design use only.

* nova-api - Nova API Service.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: N
  Restart: sm-restart-safe service nova-api
  Patch restart: /bin/nova-restart
  Impact: While the service is restarted, outstanding requests will fail and
  new requests will get a connection error until the service is re-enabled.

* nova-placement-api - Nova Placement API Service.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: N
  Restart: sm-restart-safe service nova-placement-api
  Patch restart: /bin/nova-restart
  Impact: Same as nova-api.

* nova-conductor - Nova Conductor Service.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: N
  Restart: sm-restart-safe service nova-conductor
  Patch restart: /bin/nova-restart
  Impact: Same as nova-api.

* nova-scheduler - Nova Scheduler Service.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: N
  Restart: sm-restart-safe service nova-scheduler
  Patch restart: /bin/nova-restart
  Impact: Same as nova-api.

* nova-console-auth - Nova Console Auth Service.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: N
  Restart: sm-restart-safe service nova-console-auth
  Patch restart: /bin/nova-restart
  Impact: Same as nova-api.

* nova-novnc - Nova VNC Service.
  Patchable: Y | Managed by: SM | Restart dependency: N | Special handling: N
  Restart: sm-restart-safe service nova-novnc
  Patch restart: /bin/nova-restart
  Impact: Same as nova-api.

* nova-compute - Nova Compute Service.
  Patchable: Y | Managed by: PMON | Restart dependency: N | Special handling: N
  Restart: /usr/local/sbin/pmon-restart nova-compute
  Patch restart: /bin/nova-restart
  Impact: Same as nova-api.

* ceph-osd & ceph-mon - Ceph OSD and Monitor processes.
  Patchable: Y | Managed by: PMON | Restart dependency: N
  Restart: /etc/ceph/ceph_pmon_wrapper.sh restart
  Patch restart: /etc/ceph/ceph_pmon_wrapper.sh restart
  Impact: Ceph processes on the node will restart (ceph-mon and ceph-osd).
  The restart takes at most 30s and functionality should not be affected.
  Note that this command should not be executed at the same time on storage-0
  and any of the controller nodes, as restarting two of the three ceph-mon
  instances at the same time is not supported. Restarting it on controller-0,
  controller-1 and storage-0 at the same time as glance, cinder, nova,
  ceph-rest-api, sysinv or ceph-manager on the active controller should also
  be avoided, due to a ~30 sec delay to Ceph APIs; the delay happens when any
  ceph-mon changes state and may cause timeouts when dependent services restart.
  Special handling: Recommendations: (1) on the active controller, restart
  Ceph before the other service; (2) avoid updating controller-0, controller-1
  and storage-0 at the same time.
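Most rows above reduce to the same pattern: an in-service patch script invokes
the process-specific restart helper and checks its exit status. A minimal
sketch of that pattern (the helper paths come from the table; the wrapper
itself is illustrative, not part of this patch):

    # Hypothetical wrapper around the table's patch-restart helpers.
    import subprocess

    def patch_restart(command):
        # e.g. ["/usr/local/sbin/patch-restart-processes", "keystone-all"]
        ret = subprocess.call(command)
        if ret != 0:
            raise RuntimeError("in-service restart failed: %s" % " ".join(command))

    patch_restart(["/usr/local/sbin/patch-restart-haproxy"])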
+ + diff --git a/mwa-chilon.map b/mwa-chilon.map new file mode 100644 index 00000000..03cc52b3 --- /dev/null +++ b/mwa-chilon.map @@ -0,0 +1,10 @@ +cgcs/middleware/sysinv/recipes-common|sysinv +cgcs/middleware/patching/recipes-common/cgcs-patch|cgcs-patch +cgcs/middleware/patching/recipes-common/patch-alarm|patch-alarm +cgcs/middleware/patching/recipes-common/patch-boot-args|patch-boot-args +cgcs/middleware/patching/recipes-common/patch-scripts|patch-scripts +cgcs/middleware/patching/recipes-common/requests-toolbelt|requests-toolbelt +cgcs/middleware/patching/recipes-common/smart-helper|smart-helper +cgcs/middleware/config/recipes-compute/computeconfig/computeconfig|computeconfig +cgcs/middleware/config/recipes-control/controllerconfig/controllerconfig|controllerconfig +cgcs/middleware/config/recipes-common/tsconfig|tsconfig diff --git a/patch-alarm/LICENSE b/patch-alarm/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/patch-alarm/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/patch-alarm/PKG-INFO b/patch-alarm/PKG-INFO new file mode 100644 index 00000000..308f416e --- /dev/null +++ b/patch-alarm/PKG-INFO @@ -0,0 +1,13 @@ +Metadata-Version: 1.1 +Name: patch-alarm +Version: 1.0 +Summary: Patch alarm management +Home-page: +Author: Windriver +Author-email: info@windriver.com +License: Apache-2.0 + +Description: Patch alarm management + + +Platform: UNKNOWN diff --git a/patch-alarm/centos/build_srpm b/patch-alarm/centos/build_srpm new file mode 100755 index 00000000..b45b2e29 --- /dev/null +++ b/patch-alarm/centos/build_srpm @@ -0,0 +1,95 @@ +source "$SRC_BASE/build-tools/spec-utils" + +if [ "x$DATA" == "x" ]; then + echo "ERROR: Environment variable 'DATA' not defined." + exit 1 +fi + +if [ ! -f "$DATA" ]; then + echo "ERROR: Couldn't find '$PWD/$DATA'" + exit 1 +fi + +unset TIS_PATCH_VER # Ensure there's nothing in the env already + +source $DATA + +if [ -z "$TIS_PATCH_VER" ]; then + echo "ERROR: TIS_PATCH_VER must be defined" + exit 1 +fi + +SRC_DIR="patch-alarm" +EXTRA_DIR="scripts" + +VERSION=$(grep '^Version:' PKG-INFO | awk -F ': ' '{print $2}' | sed -e 's/^[[:space:]]*//') +TAR_NAME=$(grep '^Name:' PKG-INFO | awk -F ': ' '{print $2}' | sed -e 's/^[[:space:]]*//') +CUR_DIR=`pwd` +BUILD_DIR="$RPMBUILD_BASE" + +mkdir -p $BUILD_DIR/SRPMS + +TAR="$TAR_NAME-$VERSION.tar.gz" +TAR_PATH="$BUILD_DIR/SOURCES/$TAR" + +TAR_NEEDED=0 +if [ -f $TAR_PATH ]; then + n=`find . -cnewer $TAR_PATH -and ! -path './.git*' \ + -and ! -path './build/*' \ + -and ! -path './.pc/*' \ + -and ! -path './patches/*' \ + -and ! -path "./$DISTRO/*" \ + -and ! -path './pbr-*.egg/*' \ + | wc -l` + if [ $n -gt 0 ]; then + TAR_NEEDED=1 + fi +else + TAR_NEEDED=1 +fi + +if [ $TAR_NEEDED -gt 0 ]; then + tar czvf $TAR_PATH $SRC_DIR $EXTRA_DIR \ + --exclude='.git*' \ + --exclude='build' \ + --exclude='.pc' \ + --exclude='patches' \ + --exclude="$DISTRO" \ + --exclude='pbr-*.egg' \ + --transform "s,^$SRC_DIR,$TAR_NAME-$VERSION," +fi + + +for SPEC in `ls $BUILD_DIR/SPECS`; do + SPEC_PATH="$BUILD_DIR/SPECS/$SPEC" + RELEASE=`spec_find_tag Release "$SPEC_PATH" 2>> /dev/null` + if [ $? -ne 0 ]; then + echo "ERROR: 'Release' not found in '$SPEC_PATH'" + fi + NAME=`spec_find_tag Name "$SPEC_PATH" 2>> /dev/null` + if [ $? 
-ne 0 ]; then + echo "ERROR: 'Name' not found in '$SPEC_PATH'" + fi + SRPM="$NAME-$VERSION-$RELEASE.src.rpm" + SRPM_PATH="$BUILD_DIR/SRPMS/$SRPM" + + BUILD_NEEDED=0 + if [ -f $SRPM_PATH ]; then + n=`find . -cnewer $SRPM_PATH | wc -l` + if [ $n -gt 0 ]; then + BUILD_NEEDED=1 + fi + else + BUILD_NEEDED=1 + fi + + if [ $BUILD_NEEDED -gt 0 ]; then + echo "SPEC file: $SPEC_PATH" + echo "SRPM build directory: $BUILD_DIR" + echo "TIS_PATCH_VER: $TIS_PATCH_VER" + + sed -i -e "1 i%define tis_patch_ver $TIS_PATCH_VER" $SPEC_PATH + rpmbuild -bs $SPEC_PATH --define="%_topdir $BUILD_DIR" --define="_tis_dist .tis" + fi +done + diff --git a/patch-alarm/centos/build_srpm.data b/patch-alarm/centos/build_srpm.data new file mode 100644 index 00000000..70b4b5dc --- /dev/null +++ b/patch-alarm/centos/build_srpm.data @@ -0,0 +1 @@ +TIS_PATCH_VER=2 diff --git a/patch-alarm/centos/patch-alarm.spec b/patch-alarm/centos/patch-alarm.spec new file mode 100644 index 00000000..1e31d10b --- /dev/null +++ b/patch-alarm/centos/patch-alarm.spec @@ -0,0 +1,55 @@ +Summary: Patch alarm management +Name: patch-alarm +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +URL: unknown +Source0: %{name}-%{version}.tar.gz + +%define debug_package %{nil} + +BuildRequires: python-setuptools +Requires: python-devel +Requires: /bin/bash + +%description +TIS Platform Patching + +%define pythonroot /usr/lib64/python2.7/site-packages + +%prep +%setup + +%build +%{__python} setup.py build + +%install +%{__python} setup.py install --root=$RPM_BUILD_ROOT \ + --install-lib=%{pythonroot} \ + --prefix=/usr \ + --install-data=/usr/share \ + --single-version-externally-managed + + install -m 755 -d %{buildroot}%{_bindir} + install -m 755 -d %{buildroot}%{_sysconfdir}/init.d + + install -m 700 ${RPM_BUILD_DIR}/scripts/bin/patch-alarm-manager \ + %{buildroot}%{_bindir}/patch-alarm-manager + + install -m 700 ${RPM_BUILD_DIR}/scripts/init.d/patch-alarm-manager \ + %{buildroot}%{_sysconfdir}/init.d/patch-alarm-manager + +%clean +rm -rf $RPM_BUILD_ROOT + + +%files +%defattr(-,root,root,-) +%doc LICENSE +%{pythonroot}/patch_alarm +%{pythonroot}/patch_alarm-*.egg-info +"%{_bindir}/patch-alarm-manager" +"%{_sysconfdir}/init.d/patch-alarm-manager" + diff --git a/patch-alarm/patch-alarm/LICENSE b/patch-alarm/patch-alarm/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/patch-alarm/patch-alarm/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/patch-alarm/patch-alarm/patch_alarm/__init__.py b/patch-alarm/patch-alarm/patch_alarm/__init__.py new file mode 100644 index 00000000..0da84c8c --- /dev/null +++ b/patch-alarm/patch-alarm/patch_alarm/__init__.py @@ -0,0 +1,6 @@ +""" +Copyright (c) 2014 Wind River Systems, Inc. 
+
+SPDX-License-Identifier: Apache-2.0
+
+"""
diff --git a/patch-alarm/patch-alarm/patch_alarm/patch_alarm_manager.py b/patch-alarm/patch-alarm/patch_alarm/patch_alarm_manager.py
new file mode 100644
index 00000000..3c18c556
--- /dev/null
+++ b/patch-alarm/patch-alarm/patch_alarm/patch_alarm_manager.py
@@ -0,0 +1,223 @@
+"""
+Copyright (c) 2014 Wind River Systems, Inc.
+
+SPDX-License-Identifier: Apache-2.0
+
+"""
+
+###################
+# IMPORTS
+###################
+import logging
+import time
+import requests
+import json
+import os
+
+from daemon import runner
+from fm_api import fm_api
+from fm_api import constants as fm_constants
+
+import cgcs_patch.config as cfg
+from cgcs_patch.patch_functions import configure_logging
+from cgcs_patch.constants import ENABLE_DEV_CERTIFICATE_PATCH_IDENTIFIER
+
+###################
+# CONSTANTS
+###################
+LOG_FILE = '/var/log/patch-alarms.log'
+PID_FILE = '/var/run/patch-alarm-manager.pid'
+
+#logging.basicConfig(filename=LOG_FILE, level=logging.DEBUG)
+
+
+###################
+# METHODS
+###################
+def start_polling():
+    cfg.read_config()
+    patch_alarm_daemon = PatchAlarmDaemon()
+    alarm_runner = runner.DaemonRunner(patch_alarm_daemon)
+    alarm_runner.daemon_context.umask = 0o022
+    alarm_runner.do_action()
+
+
+###################
+# CLASSES
+###################
+class PatchAlarmDaemon():
+    """ Daemon process representation of
+        the patch monitoring program
+    """
+    def __init__(self):
+        # Daemon-specific init
+        self.stdin_path = '/dev/null'
+        self.stdout_path = '/dev/null'
+        self.stderr_path = '/dev/null'
+        self.pidfile_path = PID_FILE
+        self.pidfile_timeout = 5
+
+        self.api_addr = "127.0.0.1:%d" % cfg.api_port
+
+        self.fm_api = fm_api.FaultAPIs()
+
+    def run(self):
+        configure_logging()
+
+        requests_logger = logging.getLogger('requests')
+        requests_logger.setLevel(logging.CRITICAL)
+
+        while True:
+            # start monitoring patch status
+            self.check_patch_alarms()
+
+            # run/poll every 1 min
+            time.sleep(60)
+
+    def check_patch_alarms(self):
+        self._handle_patch_alarms()
+        self._get_handle_failed_hosts()
+
+    def _handle_patch_alarms(self):
+        url = "http://%s/patch/query" % self.api_addr
+
+        try:
+            req = requests.get(url)
+        except requests.exceptions.RequestException:
+            return
+
+        entity_instance_id = "%s=%s" % (fm_constants.FM_ENTITY_TYPE_HOST, "controller")
+
+        raise_pip_alarm = False
+        raise_obs_alarm = False
+        raise_cert_alarm = False
+        if req.status_code == 200:
+            data = json.loads(req.text)
+
+            if 'pd' in data:
+                for patch_id, metadata in data['pd'].iteritems():
+                    if 'patchstate' in metadata and \
+                            (metadata['patchstate'] == 'Partial-Apply' or metadata['patchstate'] == 'Partial-Remove'):
+                        raise_pip_alarm = True
+                    if 'status' in metadata and \
+                            (metadata['status'] == 'OBS' or metadata['status'] == 'Obsolete'):
+                        raise_obs_alarm = True
+                    # If there is a patch in the system (in any state) that is
+                    # named some variation of "enable-dev-certificate", raise
+                    # the 'developer certificate could allow for untrusted
+                    # patches' alarm
+                    if ENABLE_DEV_CERTIFICATE_PATCH_IDENTIFIER in patch_id:
+                        raise_cert_alarm = True
+
+        pip_alarm = self.fm_api.get_fault(fm_constants.FM_ALARM_ID_PATCH_IN_PROGRESS,
+                                          entity_instance_id)
+        if raise_pip_alarm and pip_alarm is None:
+            logging.info("Raising patch-in-progress alarm")
+            fault = fm_api.Fault(alarm_id = fm_constants.FM_ALARM_ID_PATCH_IN_PROGRESS,
+                                 alarm_type = fm_constants.FM_ALARM_TYPE_5,
+                                 alarm_state = fm_constants.FM_ALARM_STATE_SET,
+                                 entity_type_id = fm_constants.FM_ENTITY_TYPE_HOST,
+                                 entity_instance_id = entity_instance_id,
+                                 severity = fm_constants.FM_ALARM_SEVERITY_MINOR,
+                                 reason_text = 'Patching operation in progress',
+                                 probable_cause = fm_constants.ALARM_PROBABLE_CAUSE_65,
+                                 proposed_repair_action = 'Complete reboots of affected hosts',
+                                 service_affecting = False)
+
+            self.fm_api.set_fault(fault)
+        elif not raise_pip_alarm and pip_alarm is not None:
+            logging.info("Clearing patch-in-progress alarm")
+            self.fm_api.clear_fault(fm_constants.FM_ALARM_ID_PATCH_IN_PROGRESS,
+                                    entity_instance_id)
+
+        obs_alarm = self.fm_api.get_fault(fm_constants.FM_ALARM_ID_PATCH_OBS_IN_SYSTEM,
+                                          entity_instance_id)
+        if raise_obs_alarm and obs_alarm is None:
+            logging.info("Raising obsolete-patch-in-system alarm")
+            fault = fm_api.Fault(alarm_id = fm_constants.FM_ALARM_ID_PATCH_OBS_IN_SYSTEM,
+                                 alarm_type = fm_constants.FM_ALARM_TYPE_5,
+                                 alarm_state = fm_constants.FM_ALARM_STATE_SET,
+                                 entity_type_id = fm_constants.FM_ENTITY_TYPE_HOST,
+                                 entity_instance_id = entity_instance_id,
+                                 severity = fm_constants.FM_ALARM_SEVERITY_WARNING,
+                                 reason_text = 'Obsolete patch in system',
+                                 probable_cause = fm_constants.ALARM_PROBABLE_CAUSE_65,
+                                 proposed_repair_action = 'Remove and delete obsolete patches',
+                                 service_affecting = False)
+
+            self.fm_api.set_fault(fault)
+        elif not raise_obs_alarm and obs_alarm is not None:
+            logging.info("Clearing obsolete-patch-in-system alarm")
+            self.fm_api.clear_fault(fm_constants.FM_ALARM_ID_PATCH_OBS_IN_SYSTEM,
+                                    entity_instance_id)
+
+        cert_alarm = self.fm_api.get_fault(fm_constants.FM_ALARM_ID_NONSTANDARD_CERT_PATCH,
+                                           entity_instance_id)
+        if raise_cert_alarm and cert_alarm is None:
+            logging.info("Raising developer-certificate-enabled alarm")
+            fault = fm_api.Fault(alarm_id = fm_constants.FM_ALARM_ID_NONSTANDARD_CERT_PATCH,
+                                 alarm_type = fm_constants.FM_ALARM_TYPE_9,
+                                 alarm_state = fm_constants.FM_ALARM_STATE_SET,
+                                 entity_type_id = fm_constants.FM_ENTITY_TYPE_HOST,
+                                 entity_instance_id = entity_instance_id,
+                                 severity = fm_constants.FM_ALARM_SEVERITY_CRITICAL,
+                                 reason_text = 'Developer patch certificate is enabled',
+                                 probable_cause = fm_constants.ALARM_PROBABLE_CAUSE_65,
+                                 proposed_repair_action = 'Reinstall system to disable certificate and remove untrusted patches',
+                                 suppression = False,
+                                 service_affecting = False)
+
+            self.fm_api.set_fault(fault)
+
+    def _get_handle_failed_hosts(self):
+        url = "http://%s/patch/query_hosts" % self.api_addr
+
+        try:
+            req = requests.get(url)
+        except requests.exceptions.RequestException:
+            return
+
+        entity_instance_id = "%s=%s" % (fm_constants.FM_ENTITY_TYPE_HOST, "controller")
+
+        failed_hosts = []
+        if req.status_code == 200:
+            data = json.loads(req.text)
+
+            if 'data' in data:
+                for host in data['data']:
+                    if 'hostname' in host and 'patch_failed' in host and host['patch_failed']:
+                        failed_hosts.append(host['hostname'])
+
+        # Query existing alarms
+        patch_failed_alarm = self.fm_api.get_fault(fm_constants.FM_ALARM_ID_PATCH_HOST_INSTALL_FAILED,
+                                                   entity_instance_id)
+
+        if len(failed_hosts) > 0:
+            reason_text = "Patch installation failed on the following hosts: %s" % ", ".join(sorted(failed_hosts))
+
+            if patch_failed_alarm is None or reason_text != patch_failed_alarm.reason_text:
+                if patch_failed_alarm is None:
+                    logging.info("Raising patch-host-install-failure alarm")
+                else:
+                    logging.info("Updating patch-host-install-failure alarm")
+
+                fault = fm_api.Fault(alarm_id = fm_constants.FM_ALARM_ID_PATCH_HOST_INSTALL_FAILED,
+                                     alarm_type = fm_constants.FM_ALARM_TYPE_5,
+                                     alarm_state = fm_constants.FM_ALARM_STATE_SET,
+                                     entity_type_id = fm_constants.FM_ENTITY_TYPE_HOST,
+                                     entity_instance_id = entity_instance_id,
+                                     severity = fm_constants.FM_ALARM_SEVERITY_MAJOR,
+                                     reason_text = reason_text,
+                                     probable_cause = fm_constants.ALARM_PROBABLE_CAUSE_65,
+                                     proposed_repair_action = 'Undo patching operation',
+                                     service_affecting = False)
+                self.fm_api.set_fault(fault)
+
+        elif patch_failed_alarm is not None:
+            logging.info("Clearing patch-host-install-failure alarm")
+            self.fm_api.clear_fault(fm_constants.FM_ALARM_ID_PATCH_HOST_INSTALL_FAILED,
+                                    entity_instance_id)
+
+        return False
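The manager's alarm handling is deliberately idempotent: query the current
fault, raise only when the condition is newly true, clear only when it is
newly false. The same skeleton, reduced to its control flow (a condensed
sketch of the pattern used throughout the module above, not new API):

    # reconcile_alarm is hypothetical; fm is an fm_api.FaultAPIs instance.
    def reconcile_alarm(fm, alarm_id, instance_id, condition, make_fault):
        existing = fm.get_fault(alarm_id, instance_id)
        if condition and existing is None:
            fm.set_fault(make_fault())             # raise on the rising edge
        elif not condition and existing is not None:
            fm.clear_fault(alarm_id, instance_id)  # clear on the falling edge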
diff --git a/patch-alarm/patch-alarm/setup.py b/patch-alarm/patch-alarm/setup.py
new file mode 100644
index 00000000..4f1cdd03
--- /dev/null
+++ b/patch-alarm/patch-alarm/setup.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+
+"""
+Copyright (c) 2014 Wind River Systems, Inc.
+
+SPDX-License-Identifier: Apache-2.0
+
+"""
+
+import setuptools
+
+setuptools.setup(name='patch_alarm',
+                 version='1.0.0',
+                 description='Patch alarm',
+                 license='Apache-2.0',
+                 packages=['patch_alarm'],
+                 entry_points={
+                 }
+)
diff --git a/patch-alarm/scripts/bin/patch-alarm-manager b/patch-alarm/scripts/bin/patch-alarm-manager
new file mode 100644
index 00000000..f4579b87
--- /dev/null
+++ b/patch-alarm/scripts/bin/patch-alarm-manager
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+"""
+Copyright (c) 2014 Wind River Systems, Inc.
+
+SPDX-License-Identifier: Apache-2.0
+
+"""
+
+import sys
+
+try:
+    from patch_alarm import patch_alarm_manager
+except EnvironmentError as e:
+    print >> sys.stderr, "Error importing patch_alarm_manager: ", str(e)
+    sys.exit(1)
+
+patch_alarm_manager.start_polling()
diff --git a/patch-alarm/scripts/init.d/patch-alarm-manager b/patch-alarm/scripts/init.d/patch-alarm-manager
new file mode 100644
index 00000000..0e3294a3
--- /dev/null
+++ b/patch-alarm/scripts/init.d/patch-alarm-manager
@@ -0,0 +1,98 @@
+#!/bin/sh
+#
+# Copyright (c) 2014 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+### BEGIN INIT INFO
+# Provides:          patch-alarm-manager
+# Required-Start:    $patch
+# Required-Stop:     $patch
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: Daemon for polling patch status
+# Description:       Daemon for polling patch status
+### END INIT INFO
+
+DESC="patch-alarm-manager"
+DAEMON="/usr/bin/patch-alarm-manager"
+RUNDIR="/var/run"
+PIDFILE=$RUNDIR/$DESC.pid
+
+start()
+{
+    if [ -e $PIDFILE ]; then
+        PIDDIR=/proc/$(cat $PIDFILE)
+        if [ -d ${PIDDIR} ]; then
+            echo "$DESC already running."
+            exit 0
+        else
+            echo "Removing stale PID file $PIDFILE"
+            rm -f $PIDFILE
+        fi
+    fi
+
+    echo -n "Starting $DESC..."
+    mkdir -p $RUNDIR
+    start-stop-daemon --start --quiet \
+        --pidfile ${PIDFILE} --exec ${DAEMON} start
+
+    #--make-pidfile
+
+    if [ $? -eq 0 ]; then
+        echo "done."
+    else
+        echo "failed."
+        exit 1
+    fi
+}
+
+stop()
+{
+    echo -n "Stopping $DESC..."
+    start-stop-daemon --stop --quiet --pidfile $PIDFILE
+    if [ $? -eq 0 ]; then
+        echo "done."
+    else
+        echo "failed."
+    fi
+    rm -f $PIDFILE
+}
+
+status()
+{
+    pid=`cat $PIDFILE 2>/dev/null`
+    if [ -n "$pid" ]; then
+        if ps -p $pid &>/dev/null ; then
+            echo "$DESC is running"
+            exit 0
+        else
+            echo "$DESC is not running but has pid file"
+            exit 1
+        fi
+    fi
+    echo "$DESC is not running"
+    exit 3
+}
+
+case "$1" in
+    start)
+        start
+        ;;
+    stop)
+        stop
+        ;;
+    restart|force-reload|reload)
+        stop
+        start
+        ;;
+    status)
+        status
+        ;;
+    *)
+        echo "Usage: $0 {start|stop|force-reload|restart|reload|status}"
+        exit 1
+        ;;
+esac
+
+exit 0
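start() treats a pidfile as live only while /proc/<pid> still exists; anything
else is a stale pidfile that gets removed before a fresh start. The same check,
sketched in Python for clarity (pidfile_is_live is illustrative, not part of
this patch):

    # Equivalent of the init script's stale-pidfile test.
    import os

    def pidfile_is_live(pidfile):
        try:
            with open(pidfile) as f:
                pid = int(f.read().strip())
        except (IOError, ValueError):
            return False                        # missing or unreadable pidfile
        return os.path.isdir("/proc/%d" % pid)  # directory exists => process alive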
diff --git a/patch-boot-args/LICENSE b/patch-boot-args/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/patch-boot-args/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/patch-scripts/EXAMPLE_0001/centos/EXAMPLE_0001.spec b/patch-scripts/EXAMPLE_0001/centos/EXAMPLE_0001.spec new file mode 100644 index 00000000..9dbd7dae --- /dev/null +++ b/patch-scripts/EXAMPLE_0001/centos/EXAMPLE_0001.spec @@ -0,0 +1,27 @@ +Name: EXAMPLE_0001 +Summary: TIS In-Service Patch Scripts Example +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +Source0: example-restart + +%install + install -Dp -m 700 %{S:0} %{buildroot}%{_patch_scripts}/%{name} + +%description +%{summary} + +%files +%defattr(-,root,root,-) +%{_patch_scripts}/* + +%post +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + +%preun +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + diff --git a/patch-scripts/EXAMPLE_0001/centos/build_srpm.data b/patch-scripts/EXAMPLE_0001/centos/build_srpm.data new file mode 100644 index 00000000..ff906984 --- /dev/null +++ b/patch-scripts/EXAMPLE_0001/centos/build_srpm.data @@ -0,0 +1,2 @@ +COPY_LIST="scripts/*" +TIS_PATCH_VER=0 diff --git a/patch-scripts/EXAMPLE_0001/scripts/example-restart b/patch-scripts/EXAMPLE_0001/scripts/example-restart new file mode 100644 index 00000000..35e5310f --- /dev/null +++ b/patch-scripts/EXAMPLE_0001/scripts/example-restart @@ -0,0 +1,128 @@ +#!/bin/bash +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# This script provides an example in-service patching restart +# + +# +# The patching subsystem provides a patch-functions bash source file +# with useful function and variable definitions. +# +. /etc/patching/patch-functions + +# +# We can now check to see what type of node we're on, if it's locked, etc, +# and act accordingly +# + +# +# Declare an overall script return code +# +declare -i GLOBAL_RC=$PATCH_STATUS_OK + +# NOTE: The following restart example code could be implemented in scripts +# owned by the various domains, with a single high-level call in the patch-script. +# This would be the preferred method, in fact, to ensure the patch-scripts +# themselves are simple and clean. +# + +# +# First up, we'll handle restarting the sysinv-agent, which runs on all nodes +# +if [ ! 
-f $PATCH_FLAGDIR/sysinv-agent.restarted ] +then + # The sysinv-agent has not yet been restarted in this patch operation + systemctl status sysinv-agent.service + if [ $? -eq 0 ] + then + # The daemon is running, so restart it + loginfo "$0: Restarting sysinv-agent" + pmon-restart sysinv-agent + touch $PATCH_FLAGDIR/sysinv-agent.restarted + + # Wait up to 15 seconds for service to recover + let UNTIL=$SECONDS+15 + while [ $UNTIL -ge $SECONDS ] + do + # Check to make sure it's running + systemctl status sysinv-agent.service + if [ $? -eq 0 ] + then + break + fi + + # Not running... Let's wait a couple of seconds and check again + sleep 2 + done + + systemctl status sysinv-agent.service + if [ $? -ne 0 ] + then + # Still not running! Clear the flag and mark the RC as failed + rm -f $PATCH_FLAGDIR/sysinv-agent.restarted + GLOBAL_RC=$PATCH_STATUS_FAILED + loginfo "$0: Failed to restart sysinv-agent" + fi + fi +fi + +# +# Next, handle restarting horizon. +# TODO: There will be some SM enhancements coming to provide +# utilities we can use to facilitate in-service patching. +# For now, we'll do this in a slightly uglier fashion +# +if is_controller +then + # Horizon only runs on the controller + + if [ ! -f $PATCH_FLAGDIR/horizon.restarted ] + then + # Check SM to see if Horizon is running + sm-query service horizon | grep -q 'enabled-active' + if [ $? -eq 0 ] + then + loginfo "$0: Restarting horizon" + + # Ask SM to restart Horizon + sm-restart service horizon + touch $PATCH_FLAGDIR/horizon.restarted + + # Wait up to 30 seconds for service to recover + let UNTIL=$SECONDS+30 + while [ $UNTIL -ge $SECONDS ] + do + # Check to see if it's running + sm-query service horizon | grep -q 'enabled-active' + if [ $? -eq 0 ] + then + break + fi + + # Still not running? Let's wait 5 seconds and check again + sleep 5 + done + + sm-query service horizon | grep -q 'enabled-active' + if [ $? -ne 0 ] + then + # Still not running!
Clear the flag and mark the RC as failed + loginfo "$0: Failed to restart horizon" + rm -f $PATCH_FLAGDIR/horizon.restarted + GLOBAL_RC=$PATCH_STATUS_FAILED + sm-query service horizon + fi + fi + fi +fi + +# +# Exit the script with the overall return code +# +exit $GLOBAL_RC + diff --git a/patch-scripts/EXAMPLE_0002/centos/EXAMPLE_0002.spec b/patch-scripts/EXAMPLE_0002/centos/EXAMPLE_0002.spec new file mode 100644 index 00000000..94edab8b --- /dev/null +++ b/patch-scripts/EXAMPLE_0002/centos/EXAMPLE_0002.spec @@ -0,0 +1,27 @@ +Name: EXAMPLE_0002 +Summary: TIS In-Service Patch Scripts Example, Patching Daemons +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +Source0: example-cgcs-patch-restart + +%install + install -Dp -m 700 %{S:0} %{buildroot}%{_patch_scripts}/%{name} + +%description +%{summary} + +%files +%defattr(-,root,root,-) +%{_patch_scripts}/* + +%post +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + +%preun +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + diff --git a/patch-scripts/EXAMPLE_0002/centos/build_srpm.data b/patch-scripts/EXAMPLE_0002/centos/build_srpm.data new file mode 100644 index 00000000..ff906984 --- /dev/null +++ b/patch-scripts/EXAMPLE_0002/centos/build_srpm.data @@ -0,0 +1,2 @@ +COPY_LIST="scripts/*" +TIS_PATCH_VER=0 diff --git a/patch-scripts/EXAMPLE_0002/scripts/example-cgcs-patch-restart b/patch-scripts/EXAMPLE_0002/scripts/example-cgcs-patch-restart new file mode 100644 index 00000000..f128a3ba --- /dev/null +++ b/patch-scripts/EXAMPLE_0002/scripts/example-cgcs-patch-restart @@ -0,0 +1,48 @@ +#!/bin/bash +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# This script provides an example in-service patching restart, +# triggering a restart of the patching daemons themselves +# + +# +# The patching subsystem provides a patch-functions bash source file +# with useful function and variable definitions. +# +. /etc/patching/patch-functions + +# +# We can now check to see what type of node we're on, if it's locked, etc, +# and act accordingly +# + +# +# Declare an overall script return code +# +declare -i GLOBAL_RC=$PATCH_STATUS_OK + +# +# First up, we'll handle restarting the patch-agent, which runs on all nodes +# Since the patch-agent is a delayed restart, there's no error to return +# +/usr/sbin/sw-patch-agent-restart + +# +# Next, handle restarting the patch-controller. 
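+# As with the patch-agent restart above, this helper defers the actual restart, so there is no error status to check here.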
+# +if is_controller +then + /usr/sbin/sw-patch-controller-daemon-restart +fi + + +# +# Exit the script with the overall return code +# +exit $GLOBAL_RC + diff --git a/patch-scripts/EXAMPLE_0003/centos/EXAMPLE_0003.spec b/patch-scripts/EXAMPLE_0003/centos/EXAMPLE_0003.spec new file mode 100644 index 00000000..68215670 --- /dev/null +++ b/patch-scripts/EXAMPLE_0003/centos/EXAMPLE_0003.spec @@ -0,0 +1,27 @@ +Name: EXAMPLE_0003 +Summary: TIS In-Service Patch Scripts Example, using process-restart +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +Source0: example-process-restart + +%install + install -Dp -m 700 %{S:0} %{buildroot}%{_patch_scripts}/%{name} + +%description +%{summary} + +%files +%defattr(-,root,root,-) +%{_patch_scripts}/* + +%post +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + +%preun +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + diff --git a/patch-scripts/EXAMPLE_0003/centos/build_srpm.data b/patch-scripts/EXAMPLE_0003/centos/build_srpm.data new file mode 100644 index 00000000..ff906984 --- /dev/null +++ b/patch-scripts/EXAMPLE_0003/centos/build_srpm.data @@ -0,0 +1,2 @@ +COPY_LIST="scripts/*" +TIS_PATCH_VER=0 diff --git a/patch-scripts/EXAMPLE_0003/scripts/example-process-restart b/patch-scripts/EXAMPLE_0003/scripts/example-process-restart new file mode 100644 index 00000000..1af34283 --- /dev/null +++ b/patch-scripts/EXAMPLE_0003/scripts/example-process-restart @@ -0,0 +1,57 @@ +#!/bin/bash +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# This script provides an example in-service patching restart, +# using the patch-restart-processes utility +# + +# +# The patching subsystem provides a patch-functions bash source file +# with useful function and variable definitions. +# +. /etc/patching/patch-functions + +# +# We can now check to see what type of node we're on, if it's locked, etc, +# and act accordingly +# + +# +# Declare an overall script return code +# +declare -i GLOBAL_RC=$PATCH_STATUS_OK + +# processes that run on all nodes +processes_to_restart="sysinv-agent ceilometer-polling" +/usr/local/sbin/patch-restart-processes ${processes_to_restart} +if [ $? != 0 ] ; then + loginfo "patching restart failed" + loginfo "... process-restart ${processes_to_restart}" + exit ${PATCH_STATUS_FAILED} +fi + +# +# Next, handle restarting the controller-only services. +# +if is_controller +then + processes_to_restart="ceilometer-api ceilometer-agent-notification ceilometer-collector" + /usr/local/sbin/patch-restart-processes ${processes_to_restart} + if [ $? != 0 ] ; then + loginfo "patching restart failed" + loginfo "...
process-restart ${processes_to_restart}" + exit ${PATCH_STATUS_FAILED} + fi +fi + + +# +# Exit the script with the overall return code +# +exit $GLOBAL_RC + diff --git a/patch-scripts/EXAMPLE_AODH/centos/EXAMPLE_AODH.spec b/patch-scripts/EXAMPLE_AODH/centos/EXAMPLE_AODH.spec new file mode 100644 index 00000000..505ca046 --- /dev/null +++ b/patch-scripts/EXAMPLE_AODH/centos/EXAMPLE_AODH.spec @@ -0,0 +1,27 @@ +Name: EXAMPLE_AODH +Summary: TIS In-Service Aodh Patch Script Example +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +Source0: aodh-restart-example + +%install + install -Dp -m 700 %{S:0} %{buildroot}%{_patch_scripts}/%{name} + +%description +%{summary} + +%files +%defattr(-,root,root,-) +%{_patch_scripts}/* + +%post +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + +%preun +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + diff --git a/patch-scripts/EXAMPLE_AODH/centos/build_srpm.data b/patch-scripts/EXAMPLE_AODH/centos/build_srpm.data new file mode 100644 index 00000000..ff906984 --- /dev/null +++ b/patch-scripts/EXAMPLE_AODH/centos/build_srpm.data @@ -0,0 +1,2 @@ +COPY_LIST="scripts/*" +TIS_PATCH_VER=0 diff --git a/patch-scripts/EXAMPLE_AODH/scripts/aodh-restart-example b/patch-scripts/EXAMPLE_AODH/scripts/aodh-restart-example new file mode 100644 index 00000000..a4534f07 --- /dev/null +++ b/patch-scripts/EXAMPLE_AODH/scripts/aodh-restart-example @@ -0,0 +1,48 @@ +#!/bin/bash +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# This script provides an example in-service patching restart, +# triggering a restart of the AODH processes +# + +# +# The patching subsystem provides a patch-functions bash source file +# with useful function and variable definitions. +# +. /etc/patching/patch-functions + +# +# We can now check to see what type of node we're on, if it's locked, etc, +# and act accordingly +# + +# +# Declare an overall script return code +# +declare -i GLOBAL_RC=$PATCH_STATUS_OK + + +# AODH only runs on the controller + +if is_controller +then + processes_to_restart="aodh-api aodh-evaluator aodh-listener aodh-notifier" + /usr/local/sbin/patch-restart-processes ${processes_to_restart} + if [ $? != 0 ] ; then + loginfo "patching restart failed" + loginfo "...
process-restart ${processes_to_restart}" + exit ${PATCH_STATUS_FAILED} + fi +fi + + +# +# Exit the script with the overall return code +# +exit $GLOBAL_RC + diff --git a/patch-scripts/EXAMPLE_HEAT/centos/EXAMPLE_HEAT.spec b/patch-scripts/EXAMPLE_HEAT/centos/EXAMPLE_HEAT.spec new file mode 100644 index 00000000..6a05d76f --- /dev/null +++ b/patch-scripts/EXAMPLE_HEAT/centos/EXAMPLE_HEAT.spec @@ -0,0 +1,27 @@ +Name: EXAMPLE_HEAT +Summary: TIS In-Service Heat Patch Script Example +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +Source0: heat-restart-example + +%install + install -Dp -m 700 %{S:0} %{buildroot}%{_patch_scripts}/%{name} + +%description +%{summary} + +%files +%defattr(-,root,root,-) +%{_patch_scripts}/* + +%post +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + +%preun +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + diff --git a/patch-scripts/EXAMPLE_HEAT/centos/build_srpm.data b/patch-scripts/EXAMPLE_HEAT/centos/build_srpm.data new file mode 100644 index 00000000..ff906984 --- /dev/null +++ b/patch-scripts/EXAMPLE_HEAT/centos/build_srpm.data @@ -0,0 +1,2 @@ +COPY_LIST="scripts/*" +TIS_PATCH_VER=0 diff --git a/patch-scripts/EXAMPLE_HEAT/scripts/heat-restart-example b/patch-scripts/EXAMPLE_HEAT/scripts/heat-restart-example new file mode 100644 index 00000000..e982eaec --- /dev/null +++ b/patch-scripts/EXAMPLE_HEAT/scripts/heat-restart-example @@ -0,0 +1,48 @@ +#!/bin/bash +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# This script provides an example in-service patching restart, +# triggering a restart of the Heat processes +# + +# +# The patching subsystem provides a patch-functions bash source file +# with useful function and variable definitions. +# +. /etc/patching/patch-functions + +# +# We can now check to see what type of node we're on, if it's locked, etc, +# and act accordingly +# + +# +# Declare an overall script return code +# +declare -i GLOBAL_RC=$PATCH_STATUS_OK + + +# HEAT only runs on the controller + +if is_controller +then + processes_to_restart="heat-api heat-engine heat-api-cloudwatch heat-api-cfn" + /usr/local/sbin/patch-restart-processes ${processes_to_restart} + if [ $? != 0 ] ; then + loginfo "patching restart failed" + loginfo "...
process-restart ${processes_to_restart}" + exit ${PATCH_STATUS_FAILED} + fi +fi + + +# +# Exit the script with the overall return code +# +exit $GLOBAL_RC + diff --git a/patch-scripts/EXAMPLE_MTCE/centos/EXAMPLE_MTCE.spec b/patch-scripts/EXAMPLE_MTCE/centos/EXAMPLE_MTCE.spec new file mode 100644 index 00000000..6702a2c5 --- /dev/null +++ b/patch-scripts/EXAMPLE_MTCE/centos/EXAMPLE_MTCE.spec @@ -0,0 +1,27 @@ +Name: EXAMPLE_MTCE +Summary: TIS In-Service Maintenance Patch Script Example +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +Source0: mtce-restart-example + +%install + install -Dp -m 700 %{S:0} %{buildroot}%{_patch_scripts}/%{name} + +%description +%{summary} + +%files +%defattr(-,root,root,-) +%{_patch_scripts}/* + +%post +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + +%preun +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + diff --git a/patch-scripts/EXAMPLE_MTCE/centos/build_srpm.data b/patch-scripts/EXAMPLE_MTCE/centos/build_srpm.data new file mode 100644 index 00000000..6c396e7c --- /dev/null +++ b/patch-scripts/EXAMPLE_MTCE/centos/build_srpm.data @@ -0,0 +1,2 @@ +COPY_LIST="scripts/*" +TIS_PATCH_VER=1 diff --git a/patch-scripts/EXAMPLE_MTCE/scripts/mtce-restart-example b/patch-scripts/EXAMPLE_MTCE/scripts/mtce-restart-example new file mode 100644 index 00000000..19db6990 --- /dev/null +++ b/patch-scripts/EXAMPLE_MTCE/scripts/mtce-restart-example @@ -0,0 +1,46 @@ +#!/bin/bash +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# This script provides an example in-service patching restart for all maintenance processes +# + +# +# The patching subsystem provides a patch-functions bash source file +# with useful function and variable definitions. +# +. /etc/patching/patch-functions + +# +# We can now check to see what type of node we're on, if it's locked, etc, +# and act accordingly +# + +# +# Declare an overall script return code +# +declare -i GLOBAL_RC=$PATCH_STATUS_OK + +# NOTE: The following restart example code could be implemented in scripts +# owned by the various domains, with a single high-level call in the patch-script. +# This would be the preferred method, in fact, to ensure the patch-scripts +# themselves are simple and clean. +# +/usr/local/sbin/patch-restart-mtce \ + mtcalarmd mtclogd \ + hbsAgent hbsClient \ + mtcAgent mtcClient \ + pmond rmond fsmond hwmond hostwd \ + guestServer guestAgent + +if [ $? 
-ne 0 ] ; then + GLOBAL_RC=$PATCH_STATUS_FAILED +fi +# Exit the script with the overall return code +# +exit $GLOBAL_RC + diff --git a/patch-scripts/EXAMPLE_NEUTRON/centos/EXAMPLE_NEUTRON.spec b/patch-scripts/EXAMPLE_NEUTRON/centos/EXAMPLE_NEUTRON.spec new file mode 100644 index 00000000..38f32fac --- /dev/null +++ b/patch-scripts/EXAMPLE_NEUTRON/centos/EXAMPLE_NEUTRON.spec @@ -0,0 +1,27 @@ +Name: EXAMPLE_NEUTRON +Summary: TIS In-Service Neutron Patch Script Example +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +Source0: neutron-restart-example + +%install + install -Dp -m 700 %{S:0} %{buildroot}%{_patch_scripts}/%{name} + +%description +%{summary} + +%files +%defattr(-,root,root,-) +%{_patch_scripts}/* + +%post +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + +%preun +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + diff --git a/patch-scripts/EXAMPLE_NEUTRON/centos/build_srpm.data b/patch-scripts/EXAMPLE_NEUTRON/centos/build_srpm.data new file mode 100644 index 00000000..ff906984 --- /dev/null +++ b/patch-scripts/EXAMPLE_NEUTRON/centos/build_srpm.data @@ -0,0 +1,2 @@ +COPY_LIST="scripts/*" +TIS_PATCH_VER=0 diff --git a/patch-scripts/EXAMPLE_NEUTRON/scripts/neutron-restart-example b/patch-scripts/EXAMPLE_NEUTRON/scripts/neutron-restart-example new file mode 100644 index 00000000..2fa39363 --- /dev/null +++ b/patch-scripts/EXAMPLE_NEUTRON/scripts/neutron-restart-example @@ -0,0 +1,40 @@ +#!/bin/bash +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# This script provides an example in-service patching restart for all neutron processes +# + +# +# The patching subsystem provides a patch-functions bash source file +# with useful function and variable definitions. +# +. /etc/patching/patch-functions + +# +# We can now check to see what type of node we're on, if it's locked, etc, +# and act accordingly +# + +# +# Declare an overall script return code +# +declare -i GLOBAL_RC=$PATCH_STATUS_OK + +# NOTE: The following restart example code could be implemented in scripts +# owned by the various domains, with a single high-level call in the patch-script. +# This would be the preferred method, in fact, to ensure the patch-scripts +# themselves are simple and clean. +# + +/bin/neutron-restart --all +if [ $? 
-ne 0 ] +then + GLOBAL_RC=$PATCH_STATUS_FAILED +fi +exit $GLOBAL_RC + diff --git a/patch-scripts/EXAMPLE_NOVA/centos/EXAMPLE_NOVA.spec b/patch-scripts/EXAMPLE_NOVA/centos/EXAMPLE_NOVA.spec new file mode 100644 index 00000000..47ce4ea0 --- /dev/null +++ b/patch-scripts/EXAMPLE_NOVA/centos/EXAMPLE_NOVA.spec @@ -0,0 +1,27 @@ +Name: EXAMPLE_NOVA +Summary: TIS In-Service Nova Patch Script Example +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +Source0: nova-restart-example + +%install + install -Dp -m 700 %{S:0} %{buildroot}%{_patch_scripts}/%{name} + +%description +%{summary} + +%files +%defattr(-,root,root,-) +%{_patch_scripts}/* + +%post +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + +%preun +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + diff --git a/patch-scripts/EXAMPLE_NOVA/centos/build_srpm.data b/patch-scripts/EXAMPLE_NOVA/centos/build_srpm.data new file mode 100644 index 00000000..ff906984 --- /dev/null +++ b/patch-scripts/EXAMPLE_NOVA/centos/build_srpm.data @@ -0,0 +1,2 @@ +COPY_LIST="scripts/*" +TIS_PATCH_VER=0 diff --git a/patch-scripts/EXAMPLE_NOVA/scripts/nova-restart-example b/patch-scripts/EXAMPLE_NOVA/scripts/nova-restart-example new file mode 100644 index 00000000..56a9387d --- /dev/null +++ b/patch-scripts/EXAMPLE_NOVA/scripts/nova-restart-example @@ -0,0 +1,39 @@ +#!/bin/bash +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# This script provides an example in-service patching restart for all nova processes +# + +# +# The patching subsystem provides a patch-functions bash source file +# with useful function and variable definitions. +# +. /etc/patching/patch-functions + +# +# We can now check to see what type of node we're on, if it's locked, etc, +# and act accordingly +# + +# +# Declare an overall script return code +# +declare -i GLOBAL_RC=$PATCH_STATUS_OK + +# NOTE: The following restart example code could be implemented in scripts +# owned by the various domains, with a single high-level call in the patch-script. +# This would be the preferred method, in fact, to ensure the patch-scripts +# themselves are simple and clean. +# + +/bin/nova-restart +if [ $?
-ne 0 ] +then + GLOBAL_RC=$PATCH_STATUS_FAILED +fi +exit $GLOBAL_RC diff --git a/patch-scripts/EXAMPLE_RR/centos/EXAMPLE_RR.spec b/patch-scripts/EXAMPLE_RR/centos/EXAMPLE_RR.spec new file mode 100644 index 00000000..e4d4afd0 --- /dev/null +++ b/patch-scripts/EXAMPLE_RR/centos/EXAMPLE_RR.spec @@ -0,0 +1,21 @@ +Name: EXAMPLE_RR +Summary: TIS Reboot-Required Patch RPM Example +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River + +%description +%{summary} + +%files + +%post +touch /var/run/node_is_patched_rr +exit 0 + +%preun +touch /var/run/node_is_patched_rr +exit 0 + diff --git a/patch-scripts/EXAMPLE_RR/centos/build_srpm.data b/patch-scripts/EXAMPLE_RR/centos/build_srpm.data new file mode 100644 index 00000000..b12438cc --- /dev/null +++ b/patch-scripts/EXAMPLE_RR/centos/build_srpm.data @@ -0,0 +1,2 @@ +ALLOW_EMPTY_RPM=true +TIS_PATCH_VER=0 diff --git a/patch-scripts/EXAMPLE_SYSINV/centos/EXAMPLE_SYSINV.spec b/patch-scripts/EXAMPLE_SYSINV/centos/EXAMPLE_SYSINV.spec new file mode 100644 index 00000000..1e8909ae --- /dev/null +++ b/patch-scripts/EXAMPLE_SYSINV/centos/EXAMPLE_SYSINV.spec @@ -0,0 +1,26 @@ +Name: EXAMPLE_SYSINV +Summary: TIS In-Service SysInv Patch Script Example +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +Source0: sysinv-restart-example + +%install + install -Dp -m 700 %{S:0} %{buildroot}%{_patch_scripts}/%{name} + +%description +%{summary} + +%files +%defattr(-,root,root,-) +%{_patch_scripts}/* + +%post +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + +%preun +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 diff --git a/patch-scripts/EXAMPLE_SYSINV/centos/build_srpm.data b/patch-scripts/EXAMPLE_SYSINV/centos/build_srpm.data new file mode 100644 index 00000000..0a4d919c --- /dev/null +++ b/patch-scripts/EXAMPLE_SYSINV/centos/build_srpm.data @@ -0,0 +1,3 @@ +COPY_LIST="scripts/*" +TIS_PATCH_VER=1 + diff --git a/patch-scripts/EXAMPLE_SYSINV/scripts/sysinv-restart-example b/patch-scripts/EXAMPLE_SYSINV/scripts/sysinv-restart-example new file mode 100644 index 00000000..ff7aee0b --- /dev/null +++ b/patch-scripts/EXAMPLE_SYSINV/scripts/sysinv-restart-example @@ -0,0 +1,52 @@ +#!/bin/bash +# +# Copyright (c) 2017 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# This script provides an example in-service patching restart, +# triggering a restart of the sysinv processes +# + +# +# The patching subsystem provides a patch-functions bash source file +# with useful function and variable definitions. +# +. /etc/patching/patch-functions + +# +# We can now check to see what type of node we're on, if it's locked, etc, +# and act accordingly +# + +# +# Declare an overall script return code +# +declare -i GLOBAL_RC=$PATCH_STATUS_OK + + +if is_controller +then + processes_to_restart="sysinv-conductor sysinv-api" + /usr/local/sbin/patch-restart-processes ${processes_to_restart} + if [ $? != 0 ] ; then + loginfo "patching restart failed" + loginfo "... process-restart ${processes_to_restart}" + exit ${PATCH_STATUS_FAILED} + fi +fi + +processes_to_restart="sysinv-agent" +/usr/local/sbin/patch-restart-processes ${processes_to_restart} +if [ $? != 0 ] ; then + loginfo "patching restart failed" + loginfo "...
process-restart ${processes_to_restart}" + exit ${PATCH_STATUS_FAILED} +fi + +# +# Exit the script with the overall return code +# +exit $GLOBAL_RC diff --git a/patch-scripts/EXAMPLE_VIM/centos/EXAMPLE_VIM.spec b/patch-scripts/EXAMPLE_VIM/centos/EXAMPLE_VIM.spec new file mode 100644 index 00000000..c8922120 --- /dev/null +++ b/patch-scripts/EXAMPLE_VIM/centos/EXAMPLE_VIM.spec @@ -0,0 +1,26 @@ +Name: EXAMPLE_VIM +Summary: TIS In-Service Vim Patch Script Example +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +Source0: vim-restart-example + +%install + install -Dp -m 700 %{S:0} %{buildroot}%{_patch_scripts}/%{name} + +%description +%{summary} + +%files +%defattr(-,root,root,-) +%{_patch_scripts}/* + +%post +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + +%preun +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 diff --git a/patch-scripts/EXAMPLE_VIM/centos/build_srpm.data b/patch-scripts/EXAMPLE_VIM/centos/build_srpm.data new file mode 100644 index 00000000..ff836a9a --- /dev/null +++ b/patch-scripts/EXAMPLE_VIM/centos/build_srpm.data @@ -0,0 +1,2 @@ +COPY_LIST="scripts/*" +TIS_PATCH_VER=3 diff --git a/patch-scripts/EXAMPLE_VIM/scripts/vim-restart-example b/patch-scripts/EXAMPLE_VIM/scripts/vim-restart-example new file mode 100644 index 00000000..80dfc15e --- /dev/null +++ b/patch-scripts/EXAMPLE_VIM/scripts/vim-restart-example @@ -0,0 +1,48 @@ +#!/bin/bash +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# This script provides an example in-service patching restart, +# triggering a restart of the VIM processes +# + +# +# The patching subsystem provides a patch-functions bash source file +# with useful function and variable definitions. +# +. /etc/patching/patch-functions + +# +# We can now check to see what type of node we're on, if it's locked, etc, +# and act accordingly +# + +# +# Declare an overall script return code +# +declare -i GLOBAL_RC=$PATCH_STATUS_OK + + +# Vim only runs on the controller + +if is_controller +then + processes_to_restart="nfv-vim nfv-vim-api nfv-vim-webserver" + /usr/local/sbin/patch-restart-processes ${processes_to_restart} + if [ $? != 0 ] ; then + loginfo "patching restart failed" + loginfo "...
process-restart ${processes_to_restart}" + GLOBAL_RC=$PATCH_STATUS_FAILED + fi +fi + + +# +# Exit the script with the overall return code +# +exit $GLOBAL_RC + diff --git a/patch-scripts/test-patches-suite-b/SUITE_B_KERNEL/centos/SUITE_B_KERNEL.spec b/patch-scripts/test-patches-suite-b/SUITE_B_KERNEL/centos/SUITE_B_KERNEL.spec new file mode 100644 index 00000000..c8e7331e --- /dev/null +++ b/patch-scripts/test-patches-suite-b/SUITE_B_KERNEL/centos/SUITE_B_KERNEL.spec @@ -0,0 +1,21 @@ +Name: SUITE_B_KERNEL +Summary: TIS Reboot-Required Patch RPM Example +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River + +%description +%{summary} + +%files + +%post +touch /var/run/node_is_patched_rr +exit 0 + +%preun +touch /var/run/node_is_patched_rr +exit 0 + diff --git a/patch-scripts/test-patches-suite-b/SUITE_B_KERNEL/centos/build_srpm.data b/patch-scripts/test-patches-suite-b/SUITE_B_KERNEL/centos/build_srpm.data new file mode 100644 index 00000000..23dfbf95 --- /dev/null +++ b/patch-scripts/test-patches-suite-b/SUITE_B_KERNEL/centos/build_srpm.data @@ -0,0 +1,2 @@ +ALLOW_EMPTY_RPM=true +TIS_PATCH_VER=19 diff --git a/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_A/centos/SUITE_B_PATCH_A.spec b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_A/centos/SUITE_B_PATCH_A.spec new file mode 100644 index 00000000..46990507 --- /dev/null +++ b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_A/centos/SUITE_B_PATCH_A.spec @@ -0,0 +1,27 @@ +Name: SUITE_B_PATCH_A +Summary: TIS In-Service Patch Scripts +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +Source0: restart-script + +%install + install -Dp -m 700 %{S:0} %{buildroot}%{_patch_scripts}/%{name} + +%description +%{summary} + +%files +%defattr(-,root,root,-) +%{_patch_scripts}/* + +%post +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + +%preun +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + diff --git a/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_A/centos/build_srpm.data b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_A/centos/build_srpm.data new file mode 100644 index 00000000..ff836a9a --- /dev/null +++ b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_A/centos/build_srpm.data @@ -0,0 +1,2 @@ +COPY_LIST="scripts/*" +TIS_PATCH_VER=3 diff --git a/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_A/scripts/restart-script b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_A/scripts/restart-script new file mode 100644 index 00000000..2e922f48 --- /dev/null +++ b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_A/scripts/restart-script @@ -0,0 +1,56 @@ +#!/bin/bash +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# This script provides an example in-service patching restart for the maintenance and sysinv processes +# + +# +# The patching subsystem provides a patch-functions bash source file +# with useful function and variable definitions. +# +. /etc/patching/patch-functions + +# +# We can now check to see what type of node we're on, if it's locked, etc, +# and act accordingly +# + +# +# Declare an overall script return code +# +declare -i GLOBAL_RC=$PATCH_STATUS_OK + +# NOTE: The following restart example code could be implemented in scripts +# owned by the various domains, with a single high-level call in the patch-script. +# This would be the preferred method, in fact, to ensure the patch-scripts +# themselves are simple and clean.
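+# For example, a hypothetical domain-owned wrapper (not shipped by this patch)
+# could expose one restart entry point, reducing each section below to a call like:
+#     /usr/local/sbin/patch-restart-sysinv || GLOBAL_RC=$PATCH_STATUS_FAILED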
+# + +# MTCE +bash -x /usr/local/sbin/patch-restart-mtce \ + mtcalarmd mtclogd \ + hbsAgent hbsClient \ + mtcAgent mtcClient \ + pmond rmond fsmond hwmond hostwd \ + guestServer guestAgent +if [ $? -ne 0 ] ; then + loginfo "Mtce patching restart failed" + GLOBAL_RC=$PATCH_STATUS_FAILED +fi + +# SYSINV +processes_to_restart="sysinv-conductor sysinv-api sysinv-agent" +bash -x /usr/local/sbin/patch-restart-processes ${processes_to_restart} +if [ $? != 0 ] ; then + loginfo "patching restart failed" + loginfo "... process-restart ${processes_to_restart}" + GLOBAL_RC=$PATCH_STATUS_FAILED +fi + + +exit $GLOBAL_RC diff --git a/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_B/centos/SUITE_B_PATCH_B.spec b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_B/centos/SUITE_B_PATCH_B.spec new file mode 100644 index 00000000..b048c562 --- /dev/null +++ b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_B/centos/SUITE_B_PATCH_B.spec @@ -0,0 +1,27 @@ +Name: SUITE_B_PATCH_B +Summary: TIS In-Service Patch Scripts +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +Source0: restart-script + +%install + install -Dp -m 700 %{S:0} %{buildroot}%{_patch_scripts}/%{name} + +%description +%{summary} + +%files +%defattr(-,root,root,-) +%{_patch_scripts}/* + +%post +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + +%preun +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + diff --git a/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_B/centos/build_srpm.data b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_B/centos/build_srpm.data new file mode 100644 index 00000000..ff836a9a --- /dev/null +++ b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_B/centos/build_srpm.data @@ -0,0 +1,2 @@ +COPY_LIST="scripts/*" +TIS_PATCH_VER=3 diff --git a/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_B/scripts/restart-script b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_B/scripts/restart-script new file mode 100644 index 00000000..c233dac8 --- /dev/null +++ b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_B/scripts/restart-script @@ -0,0 +1,56 @@ +#!/bin/bash +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# This script provides an example in-service patching restart for the NFV-VIM and sysinv processes +# + +# +# The patching subsystem provides a patch-functions bash source file +# with useful function and variable definitions. +# +. /etc/patching/patch-functions + +# +# We can now check to see what type of node we're on, if it's locked, etc, +# and act accordingly +# + +# +# Declare an overall script return code +# +declare -i GLOBAL_RC=$PATCH_STATUS_OK + +# NOTE: The following restart example code could be implemented in scripts +# owned by the various domains, with a single high-level call in the patch-script. +# This would be the preferred method, in fact, to ensure the patch-scripts +# themselves are simple and clean. +# + +# NFV +if is_controller +then + processes_to_restart="nfv-vim nfv-vim-api nfv-vim-webserver" + bash -x /usr/local/sbin/patch-restart-processes ${processes_to_restart} + if [ $? != 0 ] ; then + loginfo "patching restart failed" + loginfo "... process-restart ${processes_to_restart}" + GLOBAL_RC=$PATCH_STATUS_FAILED + fi +fi + +# SYSINV +processes_to_restart="sysinv-conductor sysinv-api sysinv-agent" +bash -x /usr/local/sbin/patch-restart-processes ${processes_to_restart} +if [ $? != 0 ] ; then + loginfo "patching restart failed" + loginfo "...
process-restart ${processes_to_restart}" + GLOBAL_RC=$PATCH_STATUS_FAILED +fi + + +exit $GLOBAL_RC diff --git a/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_C/centos/SUITE_B_PATCH_C.spec b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_C/centos/SUITE_B_PATCH_C.spec new file mode 100644 index 00000000..bf2287c1 --- /dev/null +++ b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_C/centos/SUITE_B_PATCH_C.spec @@ -0,0 +1,27 @@ +Name: SUITE_B_PATCH_C +Summary: TIS In-Service Patch Scripts +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +Source0: restart-script + +%install + install -Dp -m 700 %{S:0} %{buildroot}%{_patch_scripts}/%{name} + +%description +%{summary} + +%files +%defattr(-,root,root,-) +%{_patch_scripts}/* + +%post +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + +%preun +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + diff --git a/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_C/centos/build_srpm.data b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_C/centos/build_srpm.data new file mode 100644 index 00000000..ff836a9a --- /dev/null +++ b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_C/centos/build_srpm.data @@ -0,0 +1,2 @@ +COPY_LIST="scripts/*" +TIS_PATCH_VER=3 diff --git a/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_C/scripts/restart-script b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_C/scripts/restart-script new file mode 100644 index 00000000..81d1f5c7 --- /dev/null +++ b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_C/scripts/restart-script @@ -0,0 +1,55 @@ +#!/bin/bash +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# This script provides an example in-service patching restart for the nova and maintenance processes +# + +# +# The patching subsystem provides a patch-functions bash source file +# with useful function and variable definitions. +# +. /etc/patching/patch-functions + +# +# We can now check to see what type of node we're on, if it's locked, etc, +# and act accordingly +# + +# +# Declare an overall script return code +# +declare -i GLOBAL_RC=$PATCH_STATUS_OK + +# NOTE: The following restart example code could be implemented in scripts +# owned by the various domains, with a single high-level call in the patch-script. +# This would be the preferred method, in fact, to ensure the patch-scripts +# themselves are simple and clean. +# + +# NOVA +bash -x /bin/nova-restart +if [ $? -ne 0 ] +then + loginfo "Nova patching restart failed" + GLOBAL_RC=$PATCH_STATUS_FAILED +fi + +# MTCE +bash -x /usr/local/sbin/patch-restart-mtce \ + mtcalarmd mtclogd \ + hbsAgent hbsClient \ + mtcAgent mtcClient \ + pmond rmond fsmond hwmond hostwd \ + guestServer guestAgent +if [ $?
-ne 0 ] ; then + loginfo "Mtce patching restart failed" + GLOBAL_RC=$PATCH_STATUS_FAILED +fi + + +exit $GLOBAL_RC diff --git a/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_D/centos/SUITE_B_PATCH_D.spec b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_D/centos/SUITE_B_PATCH_D.spec new file mode 100644 index 00000000..8b6681b2 --- /dev/null +++ b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_D/centos/SUITE_B_PATCH_D.spec @@ -0,0 +1,27 @@ +Name: SUITE_B_PATCH_D +Summary: TIS In-Service Patch Scripts +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +Source0: restart-script + +%install + install -Dp -m 700 %{S:0} %{buildroot}%{_patch_scripts}/%{name} + +%description +%{summary} + +%files +%defattr(-,root,root,-) +%{_patch_scripts}/* + +%post +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + +%preun +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + diff --git a/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_D/centos/build_srpm.data b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_D/centos/build_srpm.data new file mode 100644 index 00000000..ff836a9a --- /dev/null +++ b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_D/centos/build_srpm.data @@ -0,0 +1,2 @@ +COPY_LIST="scripts/*" +TIS_PATCH_VER=3 diff --git a/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_D/scripts/restart-script b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_D/scripts/restart-script new file mode 100644 index 00000000..3198b6b9 --- /dev/null +++ b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_D/scripts/restart-script @@ -0,0 +1,62 @@ +#!/bin/bash +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# This script provides an example in-service patching restart for the nova, horizon, and sysinv processes +# + +# +# The patching subsystem provides a patch-functions bash source file +# with useful function and variable definitions. +# +. /etc/patching/patch-functions + +# +# We can now check to see what type of node we're on, if it's locked, etc, +# and act accordingly +# + +# +# Declare an overall script return code +# +declare -i GLOBAL_RC=$PATCH_STATUS_OK + +# NOTE: The following restart example code could be implemented in scripts +# owned by the various domains, with a single high-level call in the patch-script. +# This would be the preferred method, in fact, to ensure the patch-scripts +# themselves are simple and clean. +# + +# NOVA +bash -x /bin/nova-restart +if [ $? -ne 0 ] +then + loginfo "Nova patching restart failed" + GLOBAL_RC=$PATCH_STATUS_FAILED +fi + +# HORIZON +if is_controller +then + bash -x /usr/bin/horizon-patching-restart + if [ $? != 0 ] ; then + loginfo "Horizon patching restart failed" + GLOBAL_RC=$PATCH_STATUS_FAILED + fi +fi + +# SYSINV +processes_to_restart="sysinv-conductor sysinv-api sysinv-agent" +bash -x /usr/local/sbin/patch-restart-processes ${processes_to_restart} +if [ $? != 0 ] ; then + loginfo "patching restart failed" + loginfo "...
process-restart ${processes_to_restart}" + GLOBAL_RC=$PATCH_STATUS_FAILED +fi + + +exit $GLOBAL_RC diff --git a/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_E/centos/SUITE_B_PATCH_E.spec b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_E/centos/SUITE_B_PATCH_E.spec new file mode 100644 index 00000000..ed3272fb --- /dev/null +++ b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_E/centos/SUITE_B_PATCH_E.spec @@ -0,0 +1,27 @@ +Name: SUITE_B_PATCH_E +Summary: TIS In-Service Patch Scripts +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +Source0: restart-script + +%install + install -Dp -m 700 %{S:0} %{buildroot}%{_patch_scripts}/%{name} + +%description +%{summary} + +%files +%defattr(-,root,root,-) +%{_patch_scripts}/* + +%post +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + +%preun +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + diff --git a/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_E/centos/build_srpm.data b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_E/centos/build_srpm.data new file mode 100644 index 00000000..ff836a9a --- /dev/null +++ b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_E/centos/build_srpm.data @@ -0,0 +1,2 @@ +COPY_LIST="scripts/*" +TIS_PATCH_VER=3 diff --git a/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_E/scripts/restart-script b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_E/scripts/restart-script new file mode 100644 index 00000000..9beac8c5 --- /dev/null +++ b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_E/scripts/restart-script @@ -0,0 +1,46 @@ +#!/bin/bash +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# This script provides an example in-service patching restart for the maintenance processes +# + +# +# The patching subsystem provides a patch-functions bash source file +# with useful function and variable definitions. +# +. /etc/patching/patch-functions + +# +# We can now check to see what type of node we're on, if it's locked, etc, +# and act accordingly +# + +# +# Declare an overall script return code +# +declare -i GLOBAL_RC=$PATCH_STATUS_OK + +# NOTE: The following restart example code could be implemented in scripts +# owned by the various domains, with a single high-level call in the patch-script. +# This would be the preferred method, in fact, to ensure the patch-scripts +# themselves are simple and clean. +# + +# MTCE +bash -x /usr/local/sbin/patch-restart-mtce \ + mtcalarmd mtclogd \ + hbsAgent hbsClient \ + mtcAgent mtcClient \ + pmond rmond fsmond hwmond hostwd \ + guestServer guestAgent +if [ $?
-ne 0 ] ; then + loginfo "Mtce patching restart failed" + GLOBAL_RC=$PATCH_STATUS_FAILED +fi + +exit $GLOBAL_RC diff --git a/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_F/centos/SUITE_B_PATCH_F.spec b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_F/centos/SUITE_B_PATCH_F.spec new file mode 100644 index 00000000..60765c95 --- /dev/null +++ b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_F/centos/SUITE_B_PATCH_F.spec @@ -0,0 +1,27 @@ +Name: SUITE_B_PATCH_F +Summary: TIS In-Service Patch Scripts +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +Source0: restart-script + +%install + install -Dp -m 700 %{S:0} %{buildroot}%{_patch_scripts}/%{name} + +%description +%{summary} + +%files +%defattr(-,root,root,-) +%{_patch_scripts}/* + +%post +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + +%preun +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + diff --git a/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_F/centos/build_srpm.data b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_F/centos/build_srpm.data new file mode 100644 index 00000000..ff836a9a --- /dev/null +++ b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_F/centos/build_srpm.data @@ -0,0 +1,2 @@ +COPY_LIST="scripts/*" +TIS_PATCH_VER=3 diff --git a/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_F/scripts/restart-script b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_F/scripts/restart-script new file mode 100644 index 00000000..586fe67e --- /dev/null +++ b/patch-scripts/test-patches-suite-b/SUITE_B_PATCH_F/scripts/restart-script @@ -0,0 +1,86 @@ +#!/bin/bash +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# This script provides an example in-service patching restart for the nova, maintenance, NFV-VIM, horizon, and sysinv processes +# + +# +# The patching subsystem provides a patch-functions bash source file +# with useful function and variable definitions. +# +. /etc/patching/patch-functions + +# +# We can now check to see what type of node we're on, if it's locked, etc, +# and act accordingly +# + +# +# Declare an overall script return code +# +declare -i GLOBAL_RC=$PATCH_STATUS_OK + +# NOTE: The following restart example code could be implemented in scripts +# owned by the various domains, with a single high-level call in the patch-script. +# This would be the preferred method, in fact, to ensure the patch-scripts +# themselves are simple and clean. +# + +# NOVA +bash -x /bin/nova-restart +if [ $? -ne 0 ] +then + loginfo "Nova patching restart failed" + GLOBAL_RC=$PATCH_STATUS_FAILED +fi + +# MTCE +bash -x /usr/local/sbin/patch-restart-mtce \ + mtcalarmd mtclogd \ + hbsAgent hbsClient \ + mtcAgent mtcClient \ + pmond rmond fsmond hwmond hostwd \ + guestServer guestAgent +if [ $? -ne 0 ] ; then + loginfo "Mtce patching restart failed" + GLOBAL_RC=$PATCH_STATUS_FAILED +fi + +# NFV +if is_controller +then + processes_to_restart="nfv-vim nfv-vim-api nfv-vim-webserver" + bash -x /usr/local/sbin/patch-restart-processes ${processes_to_restart} + if [ $? != 0 ] ; then + loginfo "patching restart failed" + loginfo "... process-restart ${processes_to_restart}" + GLOBAL_RC=$PATCH_STATUS_FAILED + fi +fi + +# HORIZON +if is_controller +then + bash -x /usr/bin/horizon-patching-restart + if [ $?
!= 0 ] ; then + loginfo "Horizon patching restart failed" + GLOBAL_RC=$PATCH_STATUS_FAILED + fi +fi + +# SYSINV +processes_to_restart="sysinv-conductor sysinv-api sysinv-agent" +bash -x /usr/local/sbin/patch-restart-processes ${processes_to_restart} +if [ $? != 0 ] ; then + loginfo "patching restart failed" + loginfo "... process-restart ${processes_to_restart}" + GLOBAL_RC=$PATCH_STATUS_FAILED +fi + + +exit $GLOBAL_RC diff --git a/patch-scripts/test-patches/A/centos/A.spec b/patch-scripts/test-patches/A/centos/A.spec new file mode 100644 index 00000000..647354d2 --- /dev/null +++ b/patch-scripts/test-patches/A/centos/A.spec @@ -0,0 +1,21 @@ +Name: A +Summary: TIS Reboot-Required Patch RPM Example +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River + +%description +%{summary} + +%files + +%post +touch /var/run/node_is_patched_rr +exit 0 + +%preun +touch /var/run/node_is_patched_rr +exit 0 + diff --git a/patch-scripts/test-patches/A/centos/build_srpm.data b/patch-scripts/test-patches/A/centos/build_srpm.data new file mode 100644 index 00000000..b12438cc --- /dev/null +++ b/patch-scripts/test-patches/A/centos/build_srpm.data @@ -0,0 +1,2 @@ +ALLOW_EMPTY_RPM=true +TIS_PATCH_VER=0 diff --git a/patch-scripts/test-patches/B/centos/B.spec b/patch-scripts/test-patches/B/centos/B.spec new file mode 100644 index 00000000..f0401933 --- /dev/null +++ b/patch-scripts/test-patches/B/centos/B.spec @@ -0,0 +1,21 @@ +Name: B +Summary: TIS Reboot-Required Patch RPM Example +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River + +%description +%{summary} + +%files + +%post +touch /var/run/node_is_patched_rr +exit 0 + +%preun +touch /var/run/node_is_patched_rr +exit 0 + diff --git a/patch-scripts/test-patches/B/centos/build_srpm.data b/patch-scripts/test-patches/B/centos/build_srpm.data new file mode 100644 index 00000000..b12438cc --- /dev/null +++ b/patch-scripts/test-patches/B/centos/build_srpm.data @@ -0,0 +1,2 @@ +ALLOW_EMPTY_RPM=true +TIS_PATCH_VER=0 diff --git a/patch-scripts/test-patches/C/centos/C.spec b/patch-scripts/test-patches/C/centos/C.spec new file mode 100644 index 00000000..3a472237 --- /dev/null +++ b/patch-scripts/test-patches/C/centos/C.spec @@ -0,0 +1,21 @@ +Name: C +Summary: TIS Reboot-Required Patch RPM Example +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River + +%description +%{summary} + +%files + +%post +touch /var/run/node_is_patched_rr +exit 0 + +%preun +touch /var/run/node_is_patched_rr +exit 0 + diff --git a/patch-scripts/test-patches/C/centos/build_srpm.data b/patch-scripts/test-patches/C/centos/build_srpm.data new file mode 100644 index 00000000..b12438cc --- /dev/null +++ b/patch-scripts/test-patches/C/centos/build_srpm.data @@ -0,0 +1,2 @@ +ALLOW_EMPTY_RPM=true +TIS_PATCH_VER=0 diff --git a/patch-scripts/test-patches/INSVC_ALLNODES/centos/INSVC_ALLNODES.spec b/patch-scripts/test-patches/INSVC_ALLNODES/centos/INSVC_ALLNODES.spec new file mode 100644 index 00000000..40ae07b8 --- /dev/null +++ b/patch-scripts/test-patches/INSVC_ALLNODES/centos/INSVC_ALLNODES.spec @@ -0,0 +1,27 @@ +Name: INSVC_ALLNODES +Summary: TIS In-Service All Nodes Patch +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +Source0: allnodes-restart + +%install + install -Dp -m 700 %{S:0} %{buildroot}%{_patch_scripts}/%{name} + +%description +%{summary} + +%files 
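+# %files packages the restart script installed into %{_patch_scripts} by %install above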
+%defattr(-,root,root,-) +%{_patch_scripts}/* + +%post +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + +%preun +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + diff --git a/patch-scripts/test-patches/INSVC_ALLNODES/centos/build_srpm.data b/patch-scripts/test-patches/INSVC_ALLNODES/centos/build_srpm.data new file mode 100644 index 00000000..ff906984 --- /dev/null +++ b/patch-scripts/test-patches/INSVC_ALLNODES/centos/build_srpm.data @@ -0,0 +1,2 @@ +COPY_LIST="scripts/*" +TIS_PATCH_VER=0 diff --git a/patch-scripts/test-patches/INSVC_ALLNODES/scripts/allnodes-restart b/patch-scripts/test-patches/INSVC_ALLNODES/scripts/allnodes-restart new file mode 100644 index 00000000..3ca612b5 --- /dev/null +++ b/patch-scripts/test-patches/INSVC_ALLNODES/scripts/allnodes-restart @@ -0,0 +1,36 @@ +#!/bin/bash +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# The patching subsystem provides a patch-functions bash source file +# with useful function and variable definitions. +# +. /etc/patching/patch-functions + +# +# We can now check to see what type of node we're on, if it's locked, etc, +# and act accordingly +# + +# +# Declare an overall script return code +# +declare -i GLOBAL_RC=$PATCH_STATUS_OK + +processes_to_restart="sysinv-agent" +/usr/local/sbin/patch-restart-processes ${processes_to_restart} +if [ $? != 0 ] ; then + loginfo "patching restart failed" + loginfo "... process-restart ${processes_to_restart}" + GLOBAL_RC=$PATCH_STATUS_FAILED +fi + +# +# Exit the script with the overall return code +# +exit $GLOBAL_RC + diff --git a/patch-scripts/test-patches/INSVC_COMPUTE/centos/INSVC_COMPUTE.spec b/patch-scripts/test-patches/INSVC_COMPUTE/centos/INSVC_COMPUTE.spec new file mode 100644 index 00000000..6e443cf5 --- /dev/null +++ b/patch-scripts/test-patches/INSVC_COMPUTE/centos/INSVC_COMPUTE.spec @@ -0,0 +1,27 @@ +Name: INSVC_COMPUTE +Summary: TIS In-Service Compute Patch +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +Source0: compute-restart + +%install + install -Dp -m 700 %{S:0} %{buildroot}%{_patch_scripts}/%{name} + +%description +%{summary} + +%files +%defattr(-,root,root,-) +%{_patch_scripts}/* + +%post +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + +%preun +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + diff --git a/patch-scripts/test-patches/INSVC_COMPUTE/centos/build_srpm.data b/patch-scripts/test-patches/INSVC_COMPUTE/centos/build_srpm.data new file mode 100644 index 00000000..ff906984 --- /dev/null +++ b/patch-scripts/test-patches/INSVC_COMPUTE/centos/build_srpm.data @@ -0,0 +1,2 @@ +COPY_LIST="scripts/*" +TIS_PATCH_VER=0 diff --git a/patch-scripts/test-patches/INSVC_COMPUTE/scripts/compute-restart b/patch-scripts/test-patches/INSVC_COMPUTE/scripts/compute-restart new file mode 100644 index 00000000..7c1b6275 --- /dev/null +++ b/patch-scripts/test-patches/INSVC_COMPUTE/scripts/compute-restart @@ -0,0 +1,34 @@ +#!/bin/bash +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# This script provides an example in-service patching restart +# + +# +# The patching subsystem provides a patch-functions bash source file +# with useful function and variable definitions. +# +. 
/etc/patching/patch-functions
+
+#
+# We can now check to see what type of node we're on, if it's locked, etc,
+# and act accordingly
+#
+
+#
+# Declare an overall script return code
+#
+declare -i GLOBAL_RC=$PATCH_STATUS_OK
+
+# TODO: Add restart of nova-compute?
+
+#
+# Exit the script with the overall return code
+#
+exit $GLOBAL_RC
+
diff --git a/patch-scripts/test-patches/INSVC_CONTROLLER/centos/INSVC_CONTROLLER.spec b/patch-scripts/test-patches/INSVC_CONTROLLER/centos/INSVC_CONTROLLER.spec
new file mode 100644
index 00000000..b747b553
--- /dev/null
+++ b/patch-scripts/test-patches/INSVC_CONTROLLER/centos/INSVC_CONTROLLER.spec
@@ -0,0 +1,26 @@
+Name: INSVC_CONTROLLER
+Summary: TIS In-Service Controller Patch
+Version: 1.0
+Release: %{tis_patch_ver}%{?_tis_dist}
+License: Apache-2.0
+Group: base
+Packager: Wind River
+Source0: controller-restart
+
+%install
+    install -Dp -m 700 %{S:0} %{buildroot}%{_patch_scripts}/%{name}
+
+%description
+%{summary}
+
+%files
+%defattr(-,root,root,-)
+%{_patch_scripts}/*
+
+%post
+cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/
+exit 0
+
+%preun
+cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/
+exit 0
diff --git a/patch-scripts/test-patches/INSVC_CONTROLLER/centos/build_srpm.data b/patch-scripts/test-patches/INSVC_CONTROLLER/centos/build_srpm.data
new file mode 100644
index 00000000..ff906984
--- /dev/null
+++ b/patch-scripts/test-patches/INSVC_CONTROLLER/centos/build_srpm.data
@@ -0,0 +1,2 @@
+COPY_LIST="scripts/*"
+TIS_PATCH_VER=0
diff --git a/patch-scripts/test-patches/INSVC_CONTROLLER/scripts/controller-restart b/patch-scripts/test-patches/INSVC_CONTROLLER/scripts/controller-restart
new file mode 100644
index 00000000..80dfc15e
--- /dev/null
+++ b/patch-scripts/test-patches/INSVC_CONTROLLER/scripts/controller-restart
@@ -0,0 +1,48 @@
+#!/bin/bash
+#
+# Copyright (c) 2016 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+#
+# This script provides an example in-service patching restart,
+# triggering a restart of the VIM processes on the controller
+#
+
+#
+# The patching subsystem provides a patch-functions bash source file
+# with useful function and variable definitions.
+#
+. /etc/patching/patch-functions
+
+#
+# We can now check to see what type of node we're on, if it's locked, etc,
+# and act accordingly
+#
+
+#
+# Declare an overall script return code
+#
+declare -i GLOBAL_RC=$PATCH_STATUS_OK
+
+
+# Vim only runs on the controller
+
+if is_controller
+then
+    processes_to_restart="nfv-vim nfv-vim-api nfv-vim-webserver"
+    /usr/local/sbin/patch-restart-processes ${processes_to_restart}
+    if [ $? != 0 ] ; then
+        loginfo "patching restart failed"
+        loginfo "...
process-restart ${processes_to_restart}" + GLOBAL_RC=$PATCH_STATUS_FAILED + fi +fi + + +# +# Exit the script with the overall return code +# +exit $GLOBAL_RC + diff --git a/patch-scripts/test-patches/INSVC_NOVA/centos/INSVC_NOVA.spec b/patch-scripts/test-patches/INSVC_NOVA/centos/INSVC_NOVA.spec new file mode 100644 index 00000000..071ca4d8 --- /dev/null +++ b/patch-scripts/test-patches/INSVC_NOVA/centos/INSVC_NOVA.spec @@ -0,0 +1,27 @@ +Name: INSVC_NOVA +Summary: In-Service Nova Patch Script +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +Source0: nova-restart + +%install + install -Dp -m 700 %{S:0} %{buildroot}%{_patch_scripts}/%{name} + +%description +%{summary} + +%files +%defattr(-,root,root,-) +%{_patch_scripts}/* + +%post +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + +%preun +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + diff --git a/patch-scripts/test-patches/INSVC_NOVA/centos/build_srpm.data b/patch-scripts/test-patches/INSVC_NOVA/centos/build_srpm.data new file mode 100644 index 00000000..ff906984 --- /dev/null +++ b/patch-scripts/test-patches/INSVC_NOVA/centos/build_srpm.data @@ -0,0 +1,2 @@ +COPY_LIST="scripts/*" +TIS_PATCH_VER=0 diff --git a/patch-scripts/test-patches/INSVC_NOVA/scripts/nova-restart b/patch-scripts/test-patches/INSVC_NOVA/scripts/nova-restart new file mode 100644 index 00000000..1591d0be --- /dev/null +++ b/patch-scripts/test-patches/INSVC_NOVA/scripts/nova-restart @@ -0,0 +1,39 @@ +#!/bin/bash +# +# Copyright (c) 2017 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# This script provides an example in-service patching restart for all nova processes +# + +# +# The patching subsystem provides a patch-functions bash source file +# with useful function and variable definitions. +# +. /etc/patching/patch-functions + +# +# We can now check to see what type of node we're on, if it's locked, etc, +# and act accordingly +# + +# +# Declare an overall script return code +# +declare -i GLOBAL_RC=$PATCH_STATUS_OK + +# NOTE: The following restart example code could be implemented in scripts +# owned by the various domains, with a single high-level call in the patch-script. +# This would be the preferred method, in fact, to ensure the patch-scripts +# themselves are simple and clean. +# + +/bin/nova-restart +if [ $? 
-ne 0 ] +then + GLOBAL_RC=$PATCH_STATUS_FAILED +fi +exit $GLOBAL_RC diff --git a/patch-scripts/test-patches/INSVC_RESTART_FAILURE/centos/INSVC_RESTART_FAILURE.spec b/patch-scripts/test-patches/INSVC_RESTART_FAILURE/centos/INSVC_RESTART_FAILURE.spec new file mode 100644 index 00000000..a3f417ba --- /dev/null +++ b/patch-scripts/test-patches/INSVC_RESTART_FAILURE/centos/INSVC_RESTART_FAILURE.spec @@ -0,0 +1,27 @@ +Name: INSVC_RESTART_FAILURE +Summary: TIS In-Service Restart Failure Patch +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +Source0: restart-failure + +%install + install -Dp -m 700 %{S:0} %{buildroot}%{_patch_scripts}/%{name} + +%description +%{summary} + +%files +%defattr(-,root,root,-) +%{_patch_scripts}/* + +%post +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + +%preun +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + diff --git a/patch-scripts/test-patches/INSVC_RESTART_FAILURE/centos/build_srpm.data b/patch-scripts/test-patches/INSVC_RESTART_FAILURE/centos/build_srpm.data new file mode 100644 index 00000000..ff906984 --- /dev/null +++ b/patch-scripts/test-patches/INSVC_RESTART_FAILURE/centos/build_srpm.data @@ -0,0 +1,2 @@ +COPY_LIST="scripts/*" +TIS_PATCH_VER=0 diff --git a/patch-scripts/test-patches/INSVC_RESTART_FAILURE/scripts/restart-failure b/patch-scripts/test-patches/INSVC_RESTART_FAILURE/scripts/restart-failure new file mode 100644 index 00000000..7da1fda0 --- /dev/null +++ b/patch-scripts/test-patches/INSVC_RESTART_FAILURE/scripts/restart-failure @@ -0,0 +1,22 @@ +#!/bin/bash +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# This script provides an example in-service patching restart +# + +# +# The patching subsystem provides a patch-functions bash source file +# with useful function and variable definitions. +# +. /etc/patching/patch-functions + +# +# Exit with a failure +# +exit $PATCH_STATUS_FAILED + diff --git a/patch-scripts/test-patches/INSVC_STORAGE/centos/INSVC_STORAGE.spec b/patch-scripts/test-patches/INSVC_STORAGE/centos/INSVC_STORAGE.spec new file mode 100644 index 00000000..e8179bbd --- /dev/null +++ b/patch-scripts/test-patches/INSVC_STORAGE/centos/INSVC_STORAGE.spec @@ -0,0 +1,27 @@ +Name: INSVC_STORAGE +Summary: TIS In-Service Storage Patch +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +Source0: storage-restart + +%install + install -Dp -m 700 %{S:0} %{buildroot}%{_patch_scripts}/%{name} + +%description +%{summary} + +%files +%defattr(-,root,root,-) +%{_patch_scripts}/* + +%post +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + +%preun +cp -f %{_patch_scripts}/%{name} %{_runtime_patch_scripts}/ +exit 0 + diff --git a/patch-scripts/test-patches/INSVC_STORAGE/centos/build_srpm.data b/patch-scripts/test-patches/INSVC_STORAGE/centos/build_srpm.data new file mode 100644 index 00000000..ff906984 --- /dev/null +++ b/patch-scripts/test-patches/INSVC_STORAGE/centos/build_srpm.data @@ -0,0 +1,2 @@ +COPY_LIST="scripts/*" +TIS_PATCH_VER=0 diff --git a/patch-scripts/test-patches/INSVC_STORAGE/scripts/storage-restart b/patch-scripts/test-patches/INSVC_STORAGE/scripts/storage-restart new file mode 100644 index 00000000..ad7a8279 --- /dev/null +++ b/patch-scripts/test-patches/INSVC_STORAGE/scripts/storage-restart @@ -0,0 +1,24 @@ +#!/bin/bash +# +# Copyright (c) 2016 Wind River Systems, Inc. 
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+#
+# This script provides an example in-service patching restart
+#
+
+#
+# The patching subsystem provides a patch-functions bash source file
+# with useful function and variable definitions.
+#
+. /etc/patching/patch-functions
+
+# Declare an overall script return code
+declare -i GLOBAL_RC=$PATCH_STATUS_OK
+
+# TODO: Add restart of something only on storage node?
+
+#
+# Exit the script with the overall return code
+#
+exit $GLOBAL_RC
+
diff --git a/patch-scripts/test-patches/LARGE/centos/LARGE.spec b/patch-scripts/test-patches/LARGE/centos/LARGE.spec
new file mode 100644
index 00000000..d4b76cc3
--- /dev/null
+++ b/patch-scripts/test-patches/LARGE/centos/LARGE.spec
@@ -0,0 +1,21 @@
+Name: LARGE
+Summary: TIS Reboot-Required Patch RPM Example
+Version: 1.0
+Release: %{tis_patch_ver}%{?_tis_dist}
+License: Apache-2.0
+Group: base
+Packager: Wind River
+
+%description
+%{summary}
+
+%files
+
+%post
+touch /var/run/node_is_patched_rr
+exit 0
+
+%preun
+touch /var/run/node_is_patched_rr
+exit 0
+
diff --git a/patch-scripts/test-patches/LARGE/centos/build_srpm.data b/patch-scripts/test-patches/LARGE/centos/build_srpm.data
new file mode 100644
index 00000000..b12438cc
--- /dev/null
+++ b/patch-scripts/test-patches/LARGE/centos/build_srpm.data
@@ -0,0 +1,2 @@
+ALLOW_EMPTY_RPM=true
+TIS_PATCH_VER=0
diff --git a/patch-scripts/test-patches/RR_ALLNODES/centos/RR_ALLNODES.spec b/patch-scripts/test-patches/RR_ALLNODES/centos/RR_ALLNODES.spec
new file mode 100644
index 00000000..f9bfd6be
--- /dev/null
+++ b/patch-scripts/test-patches/RR_ALLNODES/centos/RR_ALLNODES.spec
@@ -0,0 +1,21 @@
+Name: RR_ALLNODES
+Summary: TIS Reboot-Required Patch RPM Example
+Version: 1.0
+Release: %{tis_patch_ver}%{?_tis_dist}
+License: Apache-2.0
+Group: base
+Packager: Wind River
+
+%description
+%{summary}
+
+%files
+
+%post
+touch /var/run/node_is_patched_rr
+exit 0
+
+%preun
+touch /var/run/node_is_patched_rr
+exit 0
+
diff --git a/patch-scripts/test-patches/RR_ALLNODES/centos/build_srpm.data b/patch-scripts/test-patches/RR_ALLNODES/centos/build_srpm.data
new file mode 100644
index 00000000..b12438cc
--- /dev/null
+++ b/patch-scripts/test-patches/RR_ALLNODES/centos/build_srpm.data
@@ -0,0 +1,2 @@
+ALLOW_EMPTY_RPM=true
+TIS_PATCH_VER=0
diff --git a/patch-scripts/test-patches/RR_COMPUTE/centos/RR_COMPUTE.spec b/patch-scripts/test-patches/RR_COMPUTE/centos/RR_COMPUTE.spec
new file mode 100644
index 00000000..2601c7c3
--- /dev/null
+++ b/patch-scripts/test-patches/RR_COMPUTE/centos/RR_COMPUTE.spec
@@ -0,0 +1,21 @@
+Name: RR_COMPUTE
+Summary: TIS Reboot-Required Patch RPM Example
+Version: 1.0
+Release: %{tis_patch_ver}%{?_tis_dist}
+License: Apache-2.0
+Group: base
+Packager: Wind River
+
+%description
+%{summary}
+
+%files
+
+%post
+touch /var/run/node_is_patched_rr
+exit 0
+
+%preun
+touch /var/run/node_is_patched_rr
+exit 0
+
diff --git a/patch-scripts/test-patches/RR_COMPUTE/centos/build_srpm.data b/patch-scripts/test-patches/RR_COMPUTE/centos/build_srpm.data
new file mode 100644
index 00000000..b12438cc
--- /dev/null
+++ b/patch-scripts/test-patches/RR_COMPUTE/centos/build_srpm.data
@@ -0,0 +1,2 @@
+ALLOW_EMPTY_RPM=true
+TIS_PATCH_VER=0
diff --git a/patch-scripts/test-patches/RR_CONTROLLER/centos/RR_CONTROLLER.spec b/patch-scripts/test-patches/RR_CONTROLLER/centos/RR_CONTROLLER.spec
new file mode 100644
index 00000000..ae9d74ad
--- /dev/null
+++ b/patch-scripts/test-patches/RR_CONTROLLER/centos/RR_CONTROLLER.spec
@@ -0,0 +1,21 @@
+Name: RR_CONTROLLER
+Summary: TIS Reboot-Required Patch RPM Example
+Version: 1.0
+Release: %{tis_patch_ver}%{?_tis_dist}
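+# Note: like the other RR_* test patches, this package carries no file
+# payload (ALLOW_EMPTY_RPM=true in its build_srpm.data); the %post/%preun
+# scriptlets below simply touch /var/run/node_is_patched_rr, presumably
+# the flag file the patching agent checks to report that a reboot is
+# required for the patch to take effect.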
+License: Apache-2.0 +Group: base +Packager: Wind River + +%description +%{summary} + +%files + +%post +touch /var/run/node_is_patched_rr +exit 0 + +%preun +touch /var/run/node_is_patched_rr +exit 0 + diff --git a/patch-scripts/test-patches/RR_CONTROLLER/centos/build_srpm.data b/patch-scripts/test-patches/RR_CONTROLLER/centos/build_srpm.data new file mode 100644 index 00000000..b12438cc --- /dev/null +++ b/patch-scripts/test-patches/RR_CONTROLLER/centos/build_srpm.data @@ -0,0 +1,2 @@ +ALLOW_EMPTY_RPM=true +TIS_PATCH_VER=0 diff --git a/patch-scripts/test-patches/RR_NOVA/centos/RR_NOVA.spec b/patch-scripts/test-patches/RR_NOVA/centos/RR_NOVA.spec new file mode 100644 index 00000000..7493fd72 --- /dev/null +++ b/patch-scripts/test-patches/RR_NOVA/centos/RR_NOVA.spec @@ -0,0 +1,21 @@ +Name: RR_NOVA +Summary: Reboot-Required Nova Patch +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River + +%description +%{summary} + +%files + +%post +touch /var/run/node_is_patched_rr +exit 0 + +%preun +touch /var/run/node_is_patched_rr +exit 0 + diff --git a/patch-scripts/test-patches/RR_NOVA/centos/build_srpm.data b/patch-scripts/test-patches/RR_NOVA/centos/build_srpm.data new file mode 100644 index 00000000..b12438cc --- /dev/null +++ b/patch-scripts/test-patches/RR_NOVA/centos/build_srpm.data @@ -0,0 +1,2 @@ +ALLOW_EMPTY_RPM=true +TIS_PATCH_VER=0 diff --git a/patch-scripts/test-patches/RR_STORAGE/centos/RR_STORAGE.spec b/patch-scripts/test-patches/RR_STORAGE/centos/RR_STORAGE.spec new file mode 100644 index 00000000..39d7bad1 --- /dev/null +++ b/patch-scripts/test-patches/RR_STORAGE/centos/RR_STORAGE.spec @@ -0,0 +1,21 @@ +Name: RR_STORAGE +Summary: TIS Reboot-Required Patch RPM Example +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River + +%description +%{summary} + +%files + +%post +touch /var/run/node_is_patched_rr +exit 0 + +%preun +touch /var/run/node_is_patched_rr +exit 0 + diff --git a/patch-scripts/test-patches/RR_STORAGE/centos/build_srpm.data b/patch-scripts/test-patches/RR_STORAGE/centos/build_srpm.data new file mode 100644 index 00000000..b12438cc --- /dev/null +++ b/patch-scripts/test-patches/RR_STORAGE/centos/build_srpm.data @@ -0,0 +1,2 @@ +ALLOW_EMPTY_RPM=true +TIS_PATCH_VER=0 diff --git a/requests-toolbelt/PKG-INFO b/requests-toolbelt/PKG-INFO new file mode 100644 index 00000000..9160a043 --- /dev/null +++ b/requests-toolbelt/PKG-INFO @@ -0,0 +1,12 @@ +Metadata-Version: 1.1 +Name: requests-toolbelt +Version: 0.5.1 +Summary: A utility belt for advanced users of python-requests +Home-page: https://toolbelt.readthedocs.org/ +Author: Ian Cordasco, Cory Benfield +License: Apache-2.0 + +Description: A utility belt for advanced users of python-requests + + +Platform: UNKNOWN diff --git a/requests-toolbelt/centos/build_srpm.data b/requests-toolbelt/centos/build_srpm.data new file mode 100644 index 00000000..00dfd30d --- /dev/null +++ b/requests-toolbelt/centos/build_srpm.data @@ -0,0 +1,7 @@ +VERSION=0.5.1 +TAR_NAME=requests-toolbelt +TAR="$TAR_NAME-$VERSION.tar.gz" +# SRC_DIR="$TAR_NAME-$VERSION" +COPY_LIST="${CGCS_BASE}/downloads/$TAR" + +TIS_PATCH_VER=1 diff --git a/requests-toolbelt/centos/requests-toolbelt.spec b/requests-toolbelt/centos/requests-toolbelt.spec new file mode 100644 index 00000000..bcfd895a --- /dev/null +++ b/requests-toolbelt/centos/requests-toolbelt.spec @@ -0,0 +1,43 @@ +Summary: A utility belt for advanced users of python-requests +Name: requests-toolbelt 
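+# Note: this spec repackages the upstream requests-toolbelt 0.5.1 tarball
+# (staged from ${CGCS_BASE}/downloads via COPY_LIST in build_srpm.data)
+# into the python2.7 site-packages path defined by %{pythonroot} below.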
+Version: 0.5.1 +Release: 0%{?_tis_dist}.%{tis_patch_ver} +License: Apache-2.0 +Group: base +Packager: Wind River +URL: https://toolbelt.readthedocs.org/ +Source0: %{name}-%{version}.tar.gz + +%define debug_package %{nil} + +BuildRequires: python-setuptools +Requires: python-devel +Requires: /bin/bash + +%description +A utility belt for advanced users of python-requests + +%define pythonroot /usr/lib64/python2.7/site-packages + +%prep +%setup + +%build +%{__python} setup.py build + +%install +%{__python} setup.py install --root=$RPM_BUILD_ROOT \ + --install-lib=%{pythonroot} \ + --prefix=/usr \ + --install-data=/usr/share \ + --single-version-externally-managed + +%clean +rm -rf $RPM_BUILD_ROOT + +%files +%defattr(-,root,root,-) +%doc LICENSE +%{pythonroot}/requests_toolbelt +%{pythonroot}/requests_toolbelt-*.egg-info + diff --git a/smart-helper/LICENSE b/smart-helper/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/smart-helper/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/smart-helper/files/etc.rpm.platform b/smart-helper/files/etc.rpm.platform new file mode 100644 index 00000000..5a6282c9 --- /dev/null +++ b/smart-helper/files/etc.rpm.platform @@ -0,0 +1,11 @@ +x86_64-wrs-linux +intel_x86_64-.*-linux +x86_64-.*-linux +noarch-.*-linux.* +any-.*-linux.* +all-.*-linux.* +lib32_intel_x86_64-.*-linux +lib32_x86-.*-linux +noarch-.*-linux.* +any-.*-linux.* +all-.*-linux.* diff --git a/smart-helper/files/etc.rpm.sysinfo.Dirnames b/smart-helper/files/etc.rpm.sysinfo.Dirnames new file mode 100644 index 00000000..b498fd49 --- /dev/null +++ b/smart-helper/files/etc.rpm.sysinfo.Dirnames @@ -0,0 +1 @@ +/ diff --git a/tsconfig/.gitignore b/tsconfig/.gitignore new file mode 100644 index 00000000..352964d5 --- /dev/null +++ b/tsconfig/.gitignore @@ -0,0 +1,6 @@ +!.distro +.distro/centos7/rpmbuild/RPMS +.distro/centos7/rpmbuild/SRPMS +.distro/centos7/rpmbuild/BUILD +.distro/centos7/rpmbuild/BUILDROOT +.distro/centos7/rpmbuild/SOURCES/tsconfig*tar.gz diff --git a/tsconfig/LICENSE b/tsconfig/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/tsconfig/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/tsconfig/PKG-INFO b/tsconfig/PKG-INFO new file mode 100644 index 00000000..4332a941 --- /dev/null +++ b/tsconfig/PKG-INFO @@ -0,0 +1,13 @@ +Metadata-Version: 1.1 +Name: tsconfig +Version: 1.0 +Summary: Titanium Cloud Config Info +Home-page: +Author: Windriver +Author-email: info@windriver.com +License: Apache-2.0 + +Description: Titanium Cloud Config Info + + +Platform: UNKNOWN diff --git a/tsconfig/centos/build_srpm.data b/tsconfig/centos/build_srpm.data new file mode 100644 index 00000000..9a176b72 --- /dev/null +++ b/tsconfig/centos/build_srpm.data @@ -0,0 +1,3 @@ +SRC_DIR="tsconfig" +COPY_LIST_TO_TAR="scripts" +TIS_PATCH_VER=8 diff --git a/tsconfig/centos/tsconfig.spec b/tsconfig/centos/tsconfig.spec new file mode 100644 index 00000000..a3801e79 --- /dev/null +++ b/tsconfig/centos/tsconfig.spec @@ -0,0 +1,48 @@ +Summary: Titanium Cloud Config Info +Name: tsconfig +Version: 1.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +URL: unknown +Source0: %{name}-%{version}.tar.gz + +%define debug_package %{nil} + +BuildRequires: python-setuptools + +%description +Titanium Cloud Config Info + +%define local_dir /usr/ +%define local_bindir %{local_dir}/bin/ +%define pythonroot /usr/lib64/python2.7/site-packages + +%prep +%setup + +%build +%{__python} setup.py build + +%install +%{__python} setup.py install --root=$RPM_BUILD_ROOT \ + --install-lib=%{pythonroot} \ + --prefix=/usr \ + --install-data=/usr/share \ + --single-version-externally-managed + +install -d -m 755 %{buildroot}%{local_bindir} +install -p -D -m 700 scripts/tsconfig %{buildroot}%{local_bindir}/tsconfig + +%clean +rm -rf $RPM_BUILD_ROOT + +%files +%defattr(-,root,root,-) +%doc LICENSE +%{local_bindir}/* +%dir %{pythonroot}/%{name} +%{pythonroot}/%{name}/* +%dir %{pythonroot}/%{name}-%{version}.0-py2.7.egg-info +%{pythonroot}/%{name}-%{version}.0-py2.7.egg-info/* diff --git a/tsconfig/scripts/tsconfig b/tsconfig/scripts/tsconfig new file mode 100644 index 00000000..c19d2167 --- /dev/null +++ b/tsconfig/scripts/tsconfig @@ -0,0 +1,48 @@ +. /etc/build.info + +# +# Keep the following path and flag declarations in sync with tsconfig.py +# + +# Platform configuration paths and files + +PLATFORM_CONF_PATH=/etc/platform +PLATFORM_CONF_FILE=${PLATFORM_CONF_PATH}/platform.conf +PLATFORM_SIMPLEX_FLAG=${PLATFORM_CONF_PATH}/simplex + +VOLATILE_PATH=/var/run +PLATFORM_PATH=/opt/platform +CONFIG_PATH=${PLATFORM_PATH}/config/${SW_VERSION} +# TODO(mpeters) remove the PACKSTACK_PATH +PACKSTACK_PATH=${PLATFORM_PATH}/packstack/${SW_VERSION} +PUPPET_PATH=${PLATFORM_PATH}/puppet/${SW_VERSION} +CGCS_PATH=/opt/cgcs + +# Compute configuration flags + +# Set after the first application of compute manifests +INITIAL_COMPUTE_CONFIG_COMPLETE=${PLATFORM_CONF_PATH}/.initial_compute_config_complete +# Set after each application of compute manifests +VOLATILE_COMPUTE_CONFIG_COMPLETE=${VOLATILE_PATH}/.compute_config_complete +# Set to prevent starting compute services (used in combined node upgrade) +VOLATILE_DISABLE_COMPUTE_SERVICES=${VOLATILE_PATH}/.disable_compute_services + +# Upgrade flags + +# Set on controller-0 to force controller-1 to do an upgrade after install. +CONTROLLER_UPGRADE_FLAG=${PLATFORM_CONF_PATH}/.upgrade_controller_1 +# Set on controller-0 (by controller-1) to indicate a completed upgrade. 
+CONTROLLER_UPGRADE_COMPLETE_FLAG=${PLATFORM_CONF_PATH}/.upgrade_controller_1_complete +# Set on controller-0 (by controller-1) to indicate a failed upgrade data migration +CONTROLLER_UPGRADE_FAIL_FLAG=${PLATFORM_CONF_PATH}/.upgrade_controller_1_fail +# Set on controller-1 to indicate we are rolling back the upgrade +UPGRADE_ROLLBACK_FLAG=${PLATFORM_CONF_PATH}/.upgrade_rollback +# Set on controller-1 to indicate we are aborting the upgrade +UPGRADE_ABORT_FILE=.upgrade_abort +UPGRADE_ABORT_FLAG=${CONFIG_PATH}/${UPGRADE_ABORT_FILE} +# Set on controller-0 (by controller-1) to indicate that data migration has started +CONTROLLER_UPGRADE_STARTED_FLAG=${PLATFORM_CONF_PATH}/.upgrade_controller_1_started + +# Backup / Restore flags +BACKUP_IN_PROGRESS_FLAG=${PLATFORM_CONF_PATH}/.backup_in_progress +RESTORE_IN_PROGRESS_FLAG=${PLATFORM_CONF_PATH}/.restore_in_progress diff --git a/tsconfig/tsconfig/LICENSE b/tsconfig/tsconfig/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/tsconfig/tsconfig/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/tsconfig/tsconfig/setup.py b/tsconfig/tsconfig/setup.py new file mode 100644 index 00000000..fdf11975 --- /dev/null +++ b/tsconfig/tsconfig/setup.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python + +""" +Copyright (c) 2014 Wind River Systems, Inc. + +SPDX-License-Identifier: Apache-2.0 + +""" + +import setuptools + +setuptools.setup(name='tsconfig', + version='1.0.0', + description='tsconfig', + license='Apache-2.0', + packages=['tsconfig'], + entry_points={ + } +) diff --git a/tsconfig/tsconfig/tsconfig/__init__.py b/tsconfig/tsconfig/tsconfig/__init__.py new file mode 100644 index 00000000..0da84c8c --- /dev/null +++ b/tsconfig/tsconfig/tsconfig/__init__.py @@ -0,0 +1,6 @@ +""" +Copyright (c) 2014 Wind River Systems, Inc. 
diff --git a/tsconfig/tsconfig/tsconfig/tsconfig.py b/tsconfig/tsconfig/tsconfig/tsconfig.py
new file mode 100644
index 00000000..7a68eeba
--- /dev/null
+++ b/tsconfig/tsconfig/tsconfig/tsconfig.py
@@ -0,0 +1,208 @@
+"""
+Copyright (c) 2014-2016 Wind River Systems, Inc.
+
+SPDX-License-Identifier: Apache-2.0
+
+"""
+
+import os
+import ConfigParser
+import StringIO
+import logging
+
+SW_VERSION = ""
+SW_VERSION_1610 = '16.10'
+SW_VERSION_1706 = '17.06'
+
+nodetype = None
+subfunctions = []
+region_config = "no"
+region_1_name = None
+region_2_name = None
+vswitch_type = None
+management_interface = None
+oam_interface = None
+infrastructure_interface = None
+sdn_enabled = "no"
+host_uuid = None
+install_uuid = None
+system_type = None
+system_mode = None
+security_profile = None
+distributed_cloud_role = None
+
+PLATFORM_CONF_PATH = '/etc/platform'
+PLATFORM_CONF_FILE = os.path.join(PLATFORM_CONF_PATH, 'platform.conf')
+PLATFORM_SIMPLEX_FLAG = os.path.join(PLATFORM_CONF_PATH, 'simplex')
+
+PUPPET_CONF_PATH = '/etc/puppet'
+
+
+def _load():
+    global SW_VERSION, nodetype, subfunctions
+    # Read the build.info file
+    build_info = '/etc/build.info'
+
+    if not os.path.isfile(build_info):
+        # Assume that we are in a test environment. Dirty, dirty, dirty...
+        SW_VERSION = '18.03'
+        nodetype = 'controller'
+        subfunctions = ['controller']
+        return
+
+    # The build.info file has no section headers, which causes problems
+    # for ConfigParser. So we'll fake it out.
+    ini_str = '[build_info]\n' + open(build_info, 'r').read()
+    ini_fp = StringIO.StringIO(ini_str)
+
+    config = ConfigParser.SafeConfigParser()
+    config.readfp(ini_fp)
+
+    try:
+        value = config.get('build_info', 'SW_VERSION')
+
+        SW_VERSION = value.strip('"')
+    except ConfigParser.Error:
+        logging.exception("Failed to read SW_VERSION from /etc/build.info")
+        return False
+
+    # Read the platform.conf file
+
+    # The platform.conf file has no section headers, which causes problems
+    # for ConfigParser. So we'll fake it out.
+    ini_str = '[platform_conf]\n' + open(PLATFORM_CONF_FILE, 'r').read()
+    ini_fp = StringIO.StringIO(ini_str)
+    config.readfp(ini_fp)
+
+    try:
+        value = config.get('platform_conf', 'nodetype')
+
+        nodetype = value
+
+        value = config.get('platform_conf', 'subfunction')
+
+        subfunctions = value.split(",")
+
+        global region_config
+        if config.has_option('platform_conf', 'region_config'):
+            region_config = config.get('platform_conf', 'region_config')
+
+        global region_1_name
+        if config.has_option('platform_conf', 'region_1_name'):
+            region_1_name = config.get('platform_conf', 'region_1_name')
+
+        global region_2_name
+        if config.has_option('platform_conf', 'region_2_name'):
+            region_2_name = config.get('platform_conf', 'region_2_name')
+
+        global vswitch_type
+        if config.has_option('platform_conf', 'vswitch_type'):
+            vswitch_type = config.get('platform_conf', 'vswitch_type')
+
+        global management_interface
+        if config.has_option('platform_conf', 'management_interface'):
+            management_interface = config.get('platform_conf',
+                                              'management_interface')
+
+        global oam_interface
+        if config.has_option('platform_conf', 'oam_interface'):
+            oam_interface = config.get('platform_conf', 'oam_interface')
+
+        global infrastructure_interface
+        if config.has_option('platform_conf', 'infrastructure_interface'):
+            infrastructure_interface = config.get('platform_conf',
+                                                  'infrastructure_interface')
+        global sdn_enabled
+        if config.has_option('platform_conf', 'sdn_enabled'):
+            sdn_enabled = config.get('platform_conf', 'sdn_enabled')
+
+        global host_uuid
+        if config.has_option('platform_conf', 'UUID'):
+            host_uuid = config.get('platform_conf', 'UUID')
+
+        global install_uuid
+        if config.has_option('platform_conf', 'INSTALL_UUID'):
+            install_uuid = config.get('platform_conf', 'INSTALL_UUID')
+
+        global system_type
+        if config.has_option('platform_conf', 'system_type'):
+            system_type = config.get('platform_conf', 'system_type')
+
+        global system_mode
+        if config.has_option('platform_conf', 'system_mode'):
+            system_mode = config.get('platform_conf', 'system_mode')
+
+        global security_profile
+        if config.has_option('platform_conf', 'security_profile'):
+            security_profile = config.get('platform_conf', 'security_profile')
+
+        global distributed_cloud_role
+        if config.has_option('platform_conf', 'distributed_cloud_role'):
+            distributed_cloud_role = config.get('platform_conf', 'distributed_cloud_role')
+
+    except ConfigParser.Error:
+        logging.exception("Failed to read platform.conf")
+        return False
+
+
+_load()
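A note on the "fake it out" workaround used twice in _load() above: ConfigParser cannot parse a plain KEY=value file such as /etc/build.info, so the code prepends a synthetic section header in memory before handing the text to the parser. A minimal standalone sketch of the same trick, written here with the Python 3 configparser and io modules rather than the Python 2 ConfigParser and StringIO the patch itself uses:

    import configparser
    import io

    def read_sectionless(path, section):
        # Prepend a synthetic [section] header so ConfigParser accepts
        # a header-less KEY=value file.
        with open(path, 'r') as f:
            ini_str = '[' + section + ']\n' + f.read()
        config = configparser.ConfigParser()
        config.read_file(io.StringIO(ini_str))
        return config

    # Hypothetical usage mirroring _load(); values come back as raw
    # strings, which is why SW_VERSION above is stripped of its quotes.
    # cfg = read_sectionless('/etc/build.info', 'build_info')
    # sw_version = cfg.get('build_info', 'SW_VERSION').strip('"')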
+
+''' Keep the following path and flag declarations in sync with the tsconfig
+    bash script.
+'''
+
+# Platform configuration paths and files
+
+VOLATILE_PATH = "/var/run"
+PLATFORM_PATH = "/opt/platform"
+CONFIG_PATH = PLATFORM_PATH + "/config/" + SW_VERSION + "/"
+# TODO(mpeters) remove the PACKSTACK_PATH
+PACKSTACK_PATH = PLATFORM_PATH + "/packstack/" + SW_VERSION + "/"
+PUPPET_PATH = PLATFORM_PATH + "/puppet/" + SW_VERSION + "/"
+CGCS_PATH = "/opt/cgcs"
+KEYRING_PATH = PLATFORM_PATH + "/.keyring/" + SW_VERSION
+
+# Compute configuration flags
+
+# Set after initial application of node manifest
+INITIAL_CONFIG_COMPLETE_FLAG = os.path.join(
+    PLATFORM_CONF_PATH, ".initial_config_complete")
+# Set after the first application of compute manifests
+INITIAL_COMPUTE_CONFIG_COMPLETE = os.path.join(
+    PLATFORM_CONF_PATH, ".initial_compute_config_complete")
+# Set after each application of compute manifests
+VOLATILE_COMPUTE_CONFIG_COMPLETE = os.path.join(
+    VOLATILE_PATH, ".compute_config_complete")
+# Set to prevent starting compute services (used in combined node upgrade)
+VOLATILE_DISABLE_COMPUTE_SERVICES = os.path.join(
+    VOLATILE_PATH, ".disable_compute_services")
+
+# Upgrade flags
+
+# Set on controller-0 to force controller-1 to do an upgrade after install.
+CONTROLLER_UPGRADE_FLAG = os.path.join(
+    PLATFORM_CONF_PATH, '.upgrade_controller_1')
+# Set on controller-0 (by controller-1) to indicate a completed upgrade.
+CONTROLLER_UPGRADE_COMPLETE_FLAG = os.path.join(
+    PLATFORM_CONF_PATH, '.upgrade_controller_1_complete')
+# Set on controller-0 (by controller-1) to indicate a failed upgrade.
+CONTROLLER_UPGRADE_FAIL_FLAG = os.path.join(
+    PLATFORM_CONF_PATH, '.upgrade_controller_1_fail')
+# Set on controller-1 to indicate we are rolling back the upgrade
+UPGRADE_ROLLBACK_FLAG = os.path.join(
+    PLATFORM_CONF_PATH, '.upgrade_rollback')
+# Set on controller-1 to indicate we are aborting the upgrade
+UPGRADE_ABORT_FILE = '.upgrade_abort'
+UPGRADE_ABORT_FLAG = os.path.join(
+    CONFIG_PATH, UPGRADE_ABORT_FILE)
+
+# Set on controller-0 (by controller-1) to indicate that data migration has
+# started
+CONTROLLER_UPGRADE_STARTED_FLAG = os.path.join(
+    PLATFORM_CONF_PATH, '.upgrade_controller_1_started')
+
+# Backup / Restore flags
+BACKUP_IN_PROGRESS_FLAG = os.path.join(
+    PLATFORM_CONF_PATH, '.backup_in_progress')
+RESTORE_IN_PROGRESS_FLAG = os.path.join(
+    PLATFORM_CONF_PATH, '.restore_in_progress')
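For reviewers tracing how this module is consumed: importing it runs _load() once at import time, so callers simply read the module-level names and test the flag files for existence. A minimal sketch of hypothetical consumer code (Python 2, matching the module; the import path assumes the package layout added by this patch):

    import os.path

    from tsconfig import tsconfig

    # Populated by _load() at import time, or by the test-environment
    # fallback when /etc/build.info is absent.
    print(tsconfig.SW_VERSION)
    print(tsconfig.nodetype)

    # The flags above are plain marker files; existence is the signal.
    if os.path.exists(tsconfig.PLATFORM_SIMPLEX_FLAG):
        print("simplex system")
    if os.path.exists(tsconfig.CONTROLLER_UPGRADE_FLAG):
        print("controller-1 will be forced to upgrade after install")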