From 02fca75f5cd6fda48c04131022b862bfd98ef8b2 Mon Sep 17 00:00:00 2001 From: Reynaldo P Gomes Date: Fri, 30 Jun 2023 11:33:51 -0300 Subject: [PATCH] Kubernetes Power Manager as StarlingX application This commit creates the StarlingX application for Kubernetes Power Manager. TEST PLAN: PASS: AIO-SX, STD: check if the application can be successfully installed. PASS: ALL: check if the label "app.starlingx.io/component" is applied to the "intel-power" namespace on install. PASS: AIO-SX, STD: check if the application can be successfully uninstalled. PASS: ALL: check if installed CRDs are removed on uninstall. PASS: ALL: check if daemonsets (agents) started by the controller are removed on uninstall. PASS: ALL: check if the namespace is deleted on uninstall. Author: Thiago Antonio Miranda Co-author: Reynaldo P Gomes Co-author: Eduardo Alberti Depends-on: https://review.opendev.org/c/starlingx/config/+/887958 Story: 2010773 Task: 48320 Change-Id: I340b4960d429ee38ea86a2e919a95484e5bef70a Signed-off-by: Reynaldo P Gomes --- .zuul.yaml | 72 +++- bindep.txt | 10 + debian_build_layer.cfg | 1 + debian_iso_image.inc | 1 + debian_pkg_dirs | 2 + debian_stable_docker_images.inc | 2 + github_sync.trigger | 3 + .../debian/build.sh | 54 +++ .../power-node-agent.stable_docker_image | 9 + .../debian/power-operator.stable_docker_image | 9 + ...1-Enable-the-usage-of-Device-Plugins.patch | 323 +++++++++++++++++ ...and-container-deletion-from-workload.patch | 223 ++++++++++++ .../debian/deb_folder/changelog | 5 + .../debian/deb_folder/control | 26 ++ .../debian/deb_folder/copyright | 41 +++ ...pp-kubernetes-power-manager-wheels.install | 1 + ...n3-k8sapp-kubernetes-power-manager.install | 1 + .../debian/deb_folder/rules | 33 ++ .../debian/deb_folder/source/format | 2 + .../debian/meta_data.yaml | 9 + .../.gitignore | 35 ++ .../.stestr.conf | 4 + .../k8sapp_kubernetes_power_manager/LICENSE | 202 +++++++++++ .../README.rst | 5 + .../__init__.py | 0 .../common/__init__.py | 0 .../common/constants.py | 47 +++ .../common/utils.py | 39 ++ .../helm/__init__.py | 0 .../helm/kubernetes_power_manager.py | 210 +++++++++++ .../lifecycle/__init__.py | 0 .../lifecycle_kubernetes_power_manager.py | 240 +++++++++++++ .../tests/__init__.py | 0 .../tests/test_kubernetes_power_manager.py | 30 ++ .../tests/test_plugins.py | 37 ++ .../k8sapp_kubernetes_power_manager/pylint.rc | 337 ++++++++++++++++++ .../requirements.txt | 2 + .../k8sapp_kubernetes_power_manager/setup.cfg | 36 ++ .../k8sapp_kubernetes_power_manager/setup.py | 12 + .../test-requirements.txt | 21 ++ .../k8sapp_kubernetes_power_manager/tox.ini | 103 ++++++ .../upper-constraints.txt | 1 + .../debian/deb_folder/changelog | 5 + .../debian/deb_folder/control | 17 + .../debian/deb_folder/copyright | 41 +++ .../debian/deb_folder/rules | 48 +++ .../debian/deb_folder/source/format | 1 + .../stx-kubernetes-power-manager-helm.install | 1 + .../debian/meta_data.yaml | 13 + .../stx-kubernetes-power-manager-helm/README | 14 + .../files/metadata.yaml | 23 ++ .../fluxcd-manifests/base/helmrepository.yaml | 13 + .../fluxcd-manifests/base/kustomization.yaml | 10 + .../kubernetes-power-manager/helmrelease.yaml | 42 +++ ...rnetes-power-manager-static-overrides.yaml | 5 + ...rnetes-power-manager-system-overrides.yaml | 5 + .../kustomization.yaml | 21 ++ .../fluxcd-manifests/kustomization.yaml | 13 + .../helm-charts/Makefile | 43 +++ .../kubernetes-power-manager/Chart.yaml | 11 + .../kubernetes-power-manager/README.md | 39 ++ .../crds/cstates.yaml | 67 ++++
.../crds/powerconfigs.yaml | 73 ++++ .../crds/powernodes.yaml | 157 ++++++++ .../crds/powerpods.yaml | 50 +++ .../crds/powerprofiles.yaml | 77 ++++ .../crds/powerworkloads.yaml | 116 ++++++ .../crds/timeofdaycronjobs.yaml | 220 ++++++++++++ .../crds/timeofdays.yaml | 160 +++++++++ .../crds/uncores.yaml | 71 ++++ .../templates/agent-ds-configmap.yaml | 74 ++++ .../templates/cstate.yaml | 11 + .../templates/manager.yaml | 90 +++++ .../templates/powerconfig.yaml | 21 ++ .../templates/powerworkload.yaml | 19 + .../templates/rbac.yaml | 266 ++++++++++++++ .../templates/sharedprofile.yaml | 26 ++ .../kubernetes-power-manager/values.yaml | 26 ++ tox.ini | 46 +-- 79 files changed, 4101 insertions(+), 22 deletions(-) create mode 100644 bindep.txt create mode 100644 debian_build_layer.cfg create mode 100644 debian_iso_image.inc create mode 100644 debian_pkg_dirs create mode 100644 debian_stable_docker_images.inc create mode 100644 github_sync.trigger create mode 100644 kubernetes-power-manager-images/debian/build.sh create mode 100644 kubernetes-power-manager-images/debian/power-node-agent.stable_docker_image create mode 100644 kubernetes-power-manager-images/debian/power-operator.stable_docker_image create mode 100644 kubernetes-power-manager-images/files/0001-Enable-the-usage-of-Device-Plugins.patch create mode 100644 kubernetes-power-manager-images/files/0002-Corrects-cpu-accounting-and-container-deletion-from-workload.patch create mode 100644 python3-k8sapp-kubernetes-power-manager/debian/deb_folder/changelog create mode 100644 python3-k8sapp-kubernetes-power-manager/debian/deb_folder/control create mode 100644 python3-k8sapp-kubernetes-power-manager/debian/deb_folder/copyright create mode 100644 python3-k8sapp-kubernetes-power-manager/debian/deb_folder/python3-k8sapp-kubernetes-power-manager-wheels.install create mode 100644 python3-k8sapp-kubernetes-power-manager/debian/deb_folder/python3-k8sapp-kubernetes-power-manager.install create mode 100644 python3-k8sapp-kubernetes-power-manager/debian/deb_folder/rules create mode 100644 python3-k8sapp-kubernetes-power-manager/debian/deb_folder/source/format create mode 100644 python3-k8sapp-kubernetes-power-manager/debian/meta_data.yaml create mode 100644 python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/.gitignore create mode 100644 python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/.stestr.conf create mode 100644 python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/LICENSE create mode 100644 python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/README.rst create mode 100644 python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/__init__.py create mode 100644 python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/common/__init__.py create mode 100644 python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/common/constants.py create mode 100644 python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/common/utils.py create mode 100644 python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/helm/__init__.py create mode 100644 python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/helm/kubernetes_power_manager.py create mode 100644 
python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/lifecycle/__init__.py create mode 100644 python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/lifecycle/lifecycle_kubernetes_power_manager.py create mode 100644 python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/tests/__init__.py create mode 100644 python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/tests/test_kubernetes_power_manager.py create mode 100644 python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/tests/test_plugins.py create mode 100644 python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/pylint.rc create mode 100644 python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/requirements.txt create mode 100644 python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/setup.cfg create mode 100644 python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/setup.py create mode 100644 python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/test-requirements.txt create mode 100644 python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/tox.ini create mode 100644 python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/upper-constraints.txt create mode 100644 stx-kubernetes-power-manager-helm/debian/deb_folder/changelog create mode 100644 stx-kubernetes-power-manager-helm/debian/deb_folder/control create mode 100644 stx-kubernetes-power-manager-helm/debian/deb_folder/copyright create mode 100644 stx-kubernetes-power-manager-helm/debian/deb_folder/rules create mode 100644 stx-kubernetes-power-manager-helm/debian/deb_folder/source/format create mode 100644 stx-kubernetes-power-manager-helm/debian/deb_folder/stx-kubernetes-power-manager-helm.install create mode 100644 stx-kubernetes-power-manager-helm/debian/meta_data.yaml create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/README create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/files/metadata.yaml create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/base/helmrepository.yaml create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/base/kustomization.yaml create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/kubernetes-power-manager/helmrelease.yaml create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/kubernetes-power-manager/kubernetes-power-manager-static-overrides.yaml create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/kubernetes-power-manager/kubernetes-power-manager-system-overrides.yaml create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/kubernetes-power-manager/kustomization.yaml create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/kustomization.yaml create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/Makefile create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/Chart.yaml create mode 
100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/README.md create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/cstates.yaml create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/powerconfigs.yaml create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/powernodes.yaml create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/powerpods.yaml create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/powerprofiles.yaml create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/powerworkloads.yaml create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/timeofdaycronjobs.yaml create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/timeofdays.yaml create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/uncores.yaml create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/agent-ds-configmap.yaml create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/cstate.yaml create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/manager.yaml create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/powerconfig.yaml create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/powerworkload.yaml create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/rbac.yaml create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/sharedprofile.yaml create mode 100644 stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/values.yaml diff --git a/.zuul.yaml b/.zuul.yaml index 5235c0f..848d788 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -1,17 +1,85 @@ --- - project: - vars: - ensure_tox_version: '<4' check: jobs: - openstack-tox-linters + - k8sapp-kubernetes-power-manager-tox-py39 + - k8sapp-kubernetes-power-manager-tox-flake8 + - k8sapp-kubernetes-power-manager-tox-pylint + - k8sapp-kubernetes-power-manager-tox-bandit gate: jobs: - openstack-tox-linters + - k8sapp-kubernetes-power-manager-tox-py39 + - k8sapp-kubernetes-power-manager-tox-flake8 + - k8sapp-kubernetes-power-manager-tox-pylint + - k8sapp-kubernetes-power-manager-tox-bandit post: jobs: - stx-app-kubernetes-power-manager-upload-git-mirror +- job: + name: k8sapp-kubernetes-power-manager-tox-py39 + parent: tox-py39 + description: | + Run py39 for kubernetes-power-manager + nodeset: debian-bullseye + required-projects: + - starlingx/config + - starlingx/fault + - starlingx/update + - 
starlingx/utilities + - starlingx/root + files: + - python3-k8sapp-kubernetes-power-manager/* + vars: + tox_envlist: py39 + tox_extra_args: -c python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/tox.ini + tox_constraints_file: '{{ ansible_user_dir }}/src/opendev.org/starlingx/root/build-tools/requirements/debian/upper-constraints.txt' + +- job: + name: k8sapp-kubernetes-power-manager-tox-flake8 + parent: tox + description: | + Run flake8 for kubernetes-power-manager + nodeset: debian-bullseye + files: + - python3-k8sapp-kubernetes-power-manager/* + vars: + tox_envlist: flake8 + tox_extra_args: -c python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/tox.ini + +- job: + name: k8sapp-kubernetes-power-manager-tox-pylint + parent: tox + description: | + Run pylint test for kubernetes-power-manager + nodeset: debian-bullseye + required-projects: + - starlingx/config + - starlingx/fault + - starlingx/update + - starlingx/utilities + - starlingx/root + files: + - python3-k8sapp-kubernetes-power-manager/* + vars: + tox_envlist: pylint + tox_extra_args: -c python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/tox.ini + tox_constraints_file: '{{ ansible_user_dir }}/src/opendev.org/starlingx/root/build-tools/requirements/debian/upper-constraints.txt' + +- job: + name: k8sapp-kubernetes-power-manager-tox-bandit + parent: tox + description: | + Run bandit test for kubernetes-power-manager + nodeset: debian-bullseye + files: + - python3-k8sapp-kubernetes-power-manager/* + vars: + tox_envlist: bandit + tox_extra_args: -c python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/tox.ini + - job: name: stx-app-kubernetes-power-manager-upload-git-mirror parent: upload-git-mirror diff --git a/bindep.txt b/bindep.txt new file mode 100644 index 0000000..3ffe69f --- /dev/null +++ b/bindep.txt @@ -0,0 +1,10 @@ +# This is a cross-platform list tracking distribution packages needed for install and tests; +# see https://docs.openstack.org/infra/bindep/ for additional information. 
+ +libffi-dev [platform:dpkg] +libldap2-dev [platform:dpkg] +libxml2-dev [platform:dpkg] +libxslt1-dev [platform:dpkg] +libsasl2-dev [platform:dpkg] +libffi-devel [platform:rpm] +python3-all-dev [platform:dpkg] diff --git a/debian_build_layer.cfg b/debian_build_layer.cfg new file mode 100644 index 0000000..c581999 --- /dev/null +++ b/debian_build_layer.cfg @@ -0,0 +1 @@ +flock diff --git a/debian_iso_image.inc b/debian_iso_image.inc new file mode 100644 index 0000000..20d29f8 --- /dev/null +++ b/debian_iso_image.inc @@ -0,0 +1 @@ +stx-kubernetes-power-manager-helm diff --git a/debian_pkg_dirs b/debian_pkg_dirs new file mode 100644 index 0000000..f9b8e4c --- /dev/null +++ b/debian_pkg_dirs @@ -0,0 +1,2 @@ +python3-k8sapp-kubernetes-power-manager +stx-kubernetes-power-manager-helm diff --git a/debian_stable_docker_images.inc b/debian_stable_docker_images.inc new file mode 100644 index 0000000..2315f03 --- /dev/null +++ b/debian_stable_docker_images.inc @@ -0,0 +1,2 @@ +kubernetes-power-manager-images + diff --git a/github_sync.trigger b/github_sync.trigger new file mode 100644 index 0000000..e87118a --- /dev/null +++ b/github_sync.trigger @@ -0,0 +1,3 @@ +# to trigger the upload job to sync to GitHub +1 + diff --git a/kubernetes-power-manager-images/debian/build.sh b/kubernetes-power-manager-images/debian/build.sh new file mode 100644 index 0000000..1d83c25 --- /dev/null +++ b/kubernetes-power-manager-images/debian/build.sh @@ -0,0 +1,54 @@ +#!/bin/sh +# +# Copyright (c) 2023 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +IMAGE=$1 +IMAGE_TAG=$2 + +echo "=============== build script ================" +echo image: "${IMAGE}" +echo image_tag: "${IMAGE_TAG}" +pwd + +if [ -z "${IMAGE_TAG}" ]; then + echo "Image tag must be specified. build ${IMAGE} Aborting..." >&2 + exit 1 +fi + +build_power_operator_image() { + export POWER_OPERATOR_IMAGE=$1 + echo "power_operator_image: ${POWER_OPERATOR_IMAGE}" + pwd + docker build -t "${POWER_OPERATOR_IMAGE}" -f build/Dockerfile . + echo "power-operator image build done" + return 0 +} + +build_power_node_agent_image() { + export POWER_NODE_AGENT_IMAGE=$1 + echo "power_node_agent_image: ${POWER_NODE_AGENT_IMAGE}" + pwd + docker build -t "${POWER_NODE_AGENT_IMAGE}" -f build/Dockerfile.nodeagent . 
+ echo "power-node-agent image build done" + return 0 +} + +case ${IMAGE} in + power_operator) + echo "Build image: power-operator" + build_power_operator_image "${IMAGE_TAG}" + ;; + power_node_agent) + echo "build image: power-node-agent" + build_power_node_agent_image "${IMAGE_TAG}" + ;; + *) + echo "Unsupported ARGS in ${0}: ${IMAGE}" >&2 + exit 1 + ;; +esac + +exit 0 diff --git a/kubernetes-power-manager-images/debian/power-node-agent.stable_docker_image b/kubernetes-power-manager-images/debian/power-node-agent.stable_docker_image new file mode 100644 index 0000000..0571b16 --- /dev/null +++ b/kubernetes-power-manager-images/debian/power-node-agent.stable_docker_image @@ -0,0 +1,9 @@ +BUILDER=script +LABEL=power-node-agent +SOURCE_REPO=https://github.com/intel/kubernetes-power-manager.git +SOURCE_REF=v2.3.0 +SOURCE_PATCHES="../files/0001-Enable-the-usage-of-Device-Plugins.patch ../files/0002-Corrects-cpu-accounting-and-container-deletion-from-workload.patch" +COMMAND=bash +SCRIPT=build.sh +ARGS=power_node_agent + diff --git a/kubernetes-power-manager-images/debian/power-operator.stable_docker_image b/kubernetes-power-manager-images/debian/power-operator.stable_docker_image new file mode 100644 index 0000000..3a4e4ef --- /dev/null +++ b/kubernetes-power-manager-images/debian/power-operator.stable_docker_image @@ -0,0 +1,9 @@ +BUILDER=script +LABEL=power-operator +SOURCE_REPO=https://github.com/intel/kubernetes-power-manager.git +SOURCE_REF=v2.3.0 +SOURCE_PATCHES="../files/0001-Enable-the-usage-of-Device-Plugins.patch ../files/0002-Corrects-cpu-accounting-and-container-deletion-from-workload.patch" +COMMAND=bash +SCRIPT=build.sh +ARGS=power_operator + diff --git a/kubernetes-power-manager-images/files/0001-Enable-the-usage-of-Device-Plugins.patch b/kubernetes-power-manager-images/files/0001-Enable-the-usage-of-Device-Plugins.patch new file mode 100644 index 0000000..e1fcc79 --- /dev/null +++ b/kubernetes-power-manager-images/files/0001-Enable-the-usage-of-Device-Plugins.patch @@ -0,0 +1,323 @@ +From e6a18836cadf16b4d2058dbbdd1b97847aff94b8 Mon Sep 17 00:00:00 2001 +From: Eduardo Alberti +Date: Thu, 10 Aug 2023 20:14:04 +0000 +Subject: [PATCH] Enable the usage of Device Plugins + +In certain situations, processing resources are made available +through Device Plugins, either because they are structures to +be isolated, or because they require some special treatment. +In these cases, Intel Power Manager is not able to recognize +device plugins and treat them as CPU resources. + +This modification aims to enable the use of device plugins as +processing resources, whenever this indication is convenient. +To do so, the user must declare, during the application of +PowerConfig, which "customDevices" must be considered during +the admission of the pod. + +This modification also exempts the user from using the "CPU" +directive in resource requests whenever an entry of custom +Device is indicated. 
+ +Pull request: +https://github.com/intel/kubernetes-power-manager/pull/38 + +Signed-off-by: Eduardo Alberti +--- + api/v1/powerconfig_types.go | 3 + + api/v1/powernode_types.go | 3 + + .../bases/power.intel.com_powerconfigs.yaml | 5 ++ + .../crd/bases/power.intel.com_powernodes.yaml | 5 ++ + controllers/powerconfig_controller.go | 18 +++- + controllers/powernode_controller.go | 11 +++ + controllers/powerpod_controller.go | 84 +++++++++++++++---- + 7 files changed, 110 insertions(+), 19 deletions(-) + +diff --git a/api/v1/powerconfig_types.go b/api/v1/powerconfig_types.go +index 905d2d7..c74c3a4 100644 +--- a/api/v1/powerconfig_types.go ++++ b/api/v1/powerconfig_types.go +@@ -33,6 +33,9 @@ type PowerConfigSpec struct { + + // The PowerProfiles that will be created by the Operator + PowerProfiles []string `json:"powerProfiles,omitempty"` ++ ++ // The CustomDevices include alternative devices that represents CPU resources ++ CustomDevices []string `json:"customDevices,omitempty"` + } + + // PowerConfigStatus defines the observed state of PowerConfig +diff --git a/api/v1/powernode_types.go b/api/v1/powernode_types.go +index 7ee0bd6..9d95de8 100644 +--- a/api/v1/powernode_types.go ++++ b/api/v1/powernode_types.go +@@ -42,6 +42,9 @@ type PowerNodeSpec struct { + // Information about the containers in the cluster utilizing some PowerWorkload + PowerContainers []Container `json:"powerContainers,omitempty"` + ++ // The CustomDevices include alternative devices that represents CPU resources ++ CustomDevices []string `json:"customDevices,omitempty"` ++ + // The PowerProfiles in the cluster that are currently being used by Pods + //ActiveProfiles map[string]bool `json:"activeProfiles,omitempty"` + +diff --git a/config/crd/bases/power.intel.com_powerconfigs.yaml b/config/crd/bases/power.intel.com_powerconfigs.yaml +index 0d259af..05db64b 100644 +--- a/config/crd/bases/power.intel.com_powerconfigs.yaml ++++ b/config/crd/bases/power.intel.com_powerconfigs.yaml +@@ -46,6 +46,11 @@ spec: + items: + type: string + type: array ++ customDevices: ++ description: Custom Devices define other CPU Resources to be considered in Pod's spec ++ items: ++ type: string ++ type: array + type: object + status: + description: PowerConfigStatus defines the observed state of PowerConfig +diff --git a/config/crd/bases/power.intel.com_powernodes.yaml b/config/crd/bases/power.intel.com_powernodes.yaml +index 3db1edd..ef6165b 100644 +--- a/config/crd/bases/power.intel.com_powernodes.yaml ++++ b/config/crd/bases/power.intel.com_powernodes.yaml +@@ -77,6 +77,11 @@ spec: + type: string + unaffectedCores: + type: string ++ customDevices: ++ description: Custom Devices define other CPU Resources to be considered in Pod's spec ++ items: ++ type: string ++ type: array + type: object + status: + description: PowerNodeStatus defines the observed state of PowerNode +diff --git a/controllers/powerconfig_controller.go b/controllers/powerconfig_controller.go +index 6adf96e..a354ffe 100644 +--- a/controllers/powerconfig_controller.go ++++ b/controllers/powerconfig_controller.go +@@ -188,6 +188,13 @@ func (r *PowerConfigReconciler) Reconcile(c context.Context, req ctrl.Request) ( + labelledNodeList := &corev1.NodeList{} + listOption := config.Spec.PowerNodeSelector + ++ // Searching for Custom Devices in PowerConfig ++ CustomDevices := config.Spec.CustomDevices ++ if len(CustomDevices) > 0 { ++ logger.V(5).Info("The behaviour of Power Node Agent will be affected by the following devices.", ++ "Custom Devices", CustomDevices) ++ } ++ + 
logger.V(5).Info("confirming desired nodes match the power node selector") + err = r.Client.List(context.TODO(), labelledNodeList, client.MatchingLabels(listOption)) + if err != nil { +@@ -216,7 +223,8 @@ func (r *PowerConfigReconciler) Reconcile(c context.Context, req ctrl.Request) ( + } + + powerNodeSpec := &powerv1.PowerNodeSpec{ +- NodeName: node.Name, ++ NodeName: node.Name, ++ CustomDevices: CustomDevices, + } + + powerNode.Spec = *powerNodeSpec +@@ -229,9 +237,17 @@ func (r *PowerConfigReconciler) Reconcile(c context.Context, req ctrl.Request) ( + return ctrl.Result{}, err + } + } ++ ++ powerNode.Spec.CustomDevices = CustomDevices ++ err := r.Client.Update(context.TODO(), powerNode) ++ if err != nil { ++ logger.Error(err, "Failed to update PowerNode with custom Devices.") ++ return ctrl.Result{}, err ++ } + } + + config.Status.Nodes = r.State.PowerNodeList ++ config.Spec.CustomDevices = CustomDevices + logger.V(5).Info("configured power node added to the power node list") + err = r.Client.Status().Update(context.TODO(), config) + if err != nil { +diff --git a/controllers/powernode_controller.go b/controllers/powernode_controller.go +index ee8fba9..9c26a11 100644 +--- a/controllers/powernode_controller.go ++++ b/controllers/powernode_controller.go +@@ -78,6 +78,12 @@ func (r *PowerNodeReconciler) Reconcile(c context.Context, req ctrl.Request) (ct + } + return ctrl.Result{RequeueAfter: queuetime}, err + } ++ ++ CustomDevices := powerNode.Spec.CustomDevices ++ if len(CustomDevices) > 0 { ++ logger.V(5).Info("The PowerNode contains the following custom devices.", "Custom Devices", powerNode.Spec.CustomDevices) ++ } ++ + powerProfiles := &powerv1.PowerProfileList{} + logger.V(5).Info("retrieving the power profile list") + err = r.Client.List(context.TODO(), powerProfiles) +@@ -171,6 +177,11 @@ func (r *PowerNodeReconciler) Reconcile(c context.Context, req ctrl.Request) (ct + cores := prettifyCoreList(reservedSystemCpus) + powerNode.Spec.UneffectedCores = cores + } ++ ++ if len(CustomDevices) > 0 { ++ powerNode.Spec.CustomDevices = CustomDevices ++ } ++ + err = r.Client.Update(context.TODO(), powerNode) + if err != nil { + return ctrl.Result{RequeueAfter: queuetime}, err +diff --git a/controllers/powerpod_controller.go b/controllers/powerpod_controller.go +index 64334de..c348a42 100644 +--- a/controllers/powerpod_controller.go ++++ b/controllers/powerpod_controller.go +@@ -151,11 +151,26 @@ func (r *PowerPodReconciler) Reconcile(c context.Context, req ctrl.Request) (ctr + return ctrl.Result{}, podNotRunningErr + } + ++ // Get customDevices that need to be considered in the pod ++ logger.V(5).Info("Retrivieng custom resources from PowerNode") ++ powernode := &powerv1.PowerNode{} ++ err = r.Get(context.TODO(), client.ObjectKey{ ++ Namespace: IntelPowerNamespace, ++ Name: nodeName, ++ }, powernode) ++ if err != nil { ++ if errors.IsNotFound(err) { ++ return ctrl.Result{}, nil ++ } ++ logger.Error(err, "error while trying to retrieve the PowerNode") ++ return ctrl.Result{}, err ++ } ++ + // Get the containers of the pod that are requesting exclusive CPUs +- logger.V(5).Info("retrieving the containers requested for the exclusive CPUs") +- containersRequestingExclusiveCPUs := getContainersRequestingExclusiveCPUs(pod, &logger) +- if len(containersRequestingExclusiveCPUs) == 0 { +- logger.Info("no containers are requesting exclusive CPUs") ++ logger.V(5).Info("Retrieving the containers requested for the exclusive CPUs or this/these Custom Devices", "Custom Devices", powernode.Spec.CustomDevices) 
++ admissibleContainers := getAdmissibleContainers(pod, &logger, powernode.Spec.CustomDevices) ++ if len(admissibleContainers) == 0 { ++ logger.Info("No containers are requesting exclusive CPUs or Custom Resources") + return ctrl.Result{}, nil + } + podUID := pod.GetUID() +@@ -171,7 +186,7 @@ func (r *PowerPodReconciler) Reconcile(c context.Context, req ctrl.Request) (ctr + logger.Error(err, "error retrieving the power profiles from the cluster") + return ctrl.Result{}, err + } +- powerProfilesFromContainers, powerContainers, err := r.getPowerProfileRequestsFromContainers(containersRequestingExclusiveCPUs, powerProfileCRs.Items, pod, &logger) ++ powerProfilesFromContainers, powerContainers, err := r.getPowerProfileRequestsFromContainers(admissibleContainers, powerProfileCRs.Items, pod, &logger, powernode.Spec.CustomDevices) + logger.V(5).Info("retrieving the power profiles and cores from the pod requests") + if err != nil { + logger.Error(err, "error retrieving the power profile from the pod requests") +@@ -248,7 +263,7 @@ func (r *PowerPodReconciler) Reconcile(c context.Context, req ctrl.Request) (ctr + return ctrl.Result{}, nil + } + +-func (r *PowerPodReconciler) getPowerProfileRequestsFromContainers(containers []corev1.Container, profileCRs []powerv1.PowerProfile, pod *corev1.Pod, logger *logr.Logger) (map[string][]uint, []powerv1.Container, error) { ++func (r *PowerPodReconciler) getPowerProfileRequestsFromContainers(containers []corev1.Container, profileCRs []powerv1.PowerProfile, pod *corev1.Pod, logger *logr.Logger, CustomDevices []string) (map[string][]uint, []powerv1.Container, error) { + + logger.V(5).Info("get the power profiles from the containers") + _ = context.Background() +@@ -257,7 +272,7 @@ func (r *PowerPodReconciler) getPowerProfileRequestsFromContainers(containers [] + powerContainers := make([]powerv1.Container, 0) + for _, container := range containers { + logger.V(5).Info("retrieving the requested power profile from the container spec") +- profile, err := getContainerProfileFromRequests(container, logger) ++ profile, err := getContainerProfileFromRequests(container, logger, CustomDevices) + if err != nil { + return map[string][]uint{}, []powerv1.Container{}, err + } +@@ -366,7 +381,7 @@ func isContainerInList(name string, containers []powerv1.Container, logger *logr + return false + } + +-func getContainerProfileFromRequests(container corev1.Container, logger *logr.Logger) (string, error) { ++func getContainerProfileFromRequests(container corev1.Container, logger *logr.Logger, CustomDevices []string) (string, error) { + profileName := "" + moreThanOneProfileError := errors.NewServiceUnavailable("cannot have more than one power profile per container") + resourceRequestsMismatchError := errors.NewServiceUnavailable("mismatch between CPU requests and the power profile requests") +@@ -388,9 +403,31 @@ func getContainerProfileFromRequests(container corev1.Container, logger *logr.Lo + powerProfileResourceName := corev1.ResourceName(fmt.Sprintf("%s%s", ResourcePrefix, profileName)) + numRequestsPowerProfile := container.Resources.Requests[powerProfileResourceName] + numLimitsPowerProfile := container.Resources.Limits[powerProfileResourceName] +- numRequestsCPU := container.Resources.Requests[CPUResource] +- numLimistCPU := container.Resources.Limits[CPUResource] +- if numRequestsCPU != numRequestsPowerProfile || numLimistCPU != numLimitsPowerProfile { ++ ++ // Selecting resources to search ++ numRequestsCPU := 0 ++ numLimitsCPU := 0 ++ ++ // If the Isolcpu resource is 
requested, change the CPU request to ++ // allow the check ++ for _, deviceName := range CustomDevices { ++ numRequestsDevice := container.Resources.Requests[corev1.ResourceName(deviceName)] ++ numRequestsCPU += int(numRequestsDevice.Value()) ++ ++ numLimitsDevice := container.Resources.Limits[corev1.ResourceName(deviceName)] ++ numLimitsCPU += int(numLimitsDevice.Value()) ++ } ++ ++ if numRequestsCPU == 0 { ++ numResquestsDevice := container.Resources.Requests[CPUResource] ++ numRequestsCPU = int(numResquestsDevice.Value()) ++ ++ numLimitsDevice := container.Resources.Limits[CPUResource] ++ numLimitsCPU = int(numLimitsDevice.Value()) ++ } ++ ++ if numRequestsCPU != int(numRequestsPowerProfile.Value()) || ++ numLimitsCPU != int(numLimitsPowerProfile.Value()) { + return "", resourceRequestsMismatchError + } + } +@@ -398,18 +435,18 @@ func getContainerProfileFromRequests(container corev1.Container, logger *logr.Lo + return profileName, nil + } + +-func getContainersRequestingExclusiveCPUs(pod *corev1.Pod, logger *logr.Logger) []corev1.Container { ++func getAdmissibleContainers(pod *corev1.Pod, logger *logr.Logger, CustomDevices []string) []corev1.Container { + +- logger.V(5).Info("receiving containers requesting exclusive CPUs") +- containersRequestingExclusiveCPUs := make([]corev1.Container, 0) ++ logger.V(5).Info("Receiving Containers requesting Exclusive CPUs or Custom Devices") ++ admissibleContainers := make([]corev1.Container, 0) + containerList := append(pod.Spec.InitContainers, pod.Spec.Containers...) + for _, container := range containerList { +- if doesContainerRequireExclusiveCPUs(pod, &container) { +- containersRequestingExclusiveCPUs = append(containersRequestingExclusiveCPUs, container) ++ if doesContainerRequireExclusiveCPUs(pod, &container) || validateCustomDevices(pod, &container, CustomDevices) { ++ admissibleContainers = append(admissibleContainers, container) + } + } +- logger.V(5).Info("the containers requesting exclusive CPUs are: ", containersRequestingExclusiveCPUs) +- return containersRequestingExclusiveCPUs ++ logger.V(5).Info("Containers requesting Exclusive CPUs or Curstom Devices are: ", "Containers", admissibleContainers) ++ return admissibleContainers + + } + +@@ -422,6 +459,17 @@ func doesContainerRequireExclusiveCPUs(pod *corev1.Pod, container *corev1.Contai + return cpuQuantity.Value()*1000 == cpuQuantity.MilliValue() + } + ++func validateCustomDevices(pod *corev1.Pod, container *corev1.Container, CustomDevices []string) bool { ++ presence := false ++ for _, devicePlugin := range CustomDevices { ++ numResources := container.Resources.Requests[corev1.ResourceName(devicePlugin)] ++ if numResources.Value() > 0 { ++ presence = numResources.Value()*1000 == numResources.MilliValue() ++ } ++ } ++ return presence ++} ++ + func getContainerID(pod *corev1.Pod, containerName string) string { + for _, containerStatus := range append(pod.Status.InitContainerStatuses, pod.Status.ContainerStatuses...) 
{ + if containerStatus.Name == containerName { +-- +2.34.1 + diff --git a/kubernetes-power-manager-images/files/0002-Corrects-cpu-accounting-and-container-deletion-from-workload.patch b/kubernetes-power-manager-images/files/0002-Corrects-cpu-accounting-and-container-deletion-from-workload.patch new file mode 100644 index 0000000..c250df1 --- /dev/null +++ b/kubernetes-power-manager-images/files/0002-Corrects-cpu-accounting-and-container-deletion-from-workload.patch @@ -0,0 +1,223 @@ +From 114bd1192e2538ddd9e2a7fe051ce6cdc6a97767 Mon Sep 17 00:00:00 2001 +From: Eduardo Alberti +Date: Fri, 11 Aug 2023 14:18:51 +0000 +Subject: [PATCH] Corrects cpu accounting and container deletion from + workload + +CPU accounting should consider the total available CPUs, not +only the ones allocated to the pod. +Additionally while deleting pods from workload, we should +match the precise Pod to avoid issues with same Pod Name +in a different namespace. + +This modification uses the sytem CPU for accounting instead +of runtime NumCPUs. + +This modification also handles the pod considering the Pod ID +and not only pod name. + +Pull request +https://github.com/intel/kubernetes-power-manager/pull/57 +https://github.com/intel/kubernetes-power-manager/pull/56 + +Signed-off-by: Eduardo Alberti +--- + controllers/powerpod_controller.go | 18 ++++++---- + controllers/powerprofile_controller.go | 48 ++++++++++++++++++++++++-- + pkg/podstate/podstate.go | 20 +++++++++++ + 3 files changed, 77 insertions(+), 9 deletions(-) + +diff --git a/controllers/powerpod_controller.go b/controllers/powerpod_controller.go +index c348a42..6691006 100644 +--- a/controllers/powerpod_controller.go ++++ b/controllers/powerpod_controller.go +@@ -92,10 +92,10 @@ func (r *PowerPodReconciler) Reconcile(c context.Context, req ctrl.Request) (ctr + if !pod.ObjectMeta.DeletionTimestamp.IsZero() || pod.Status.Phase == corev1.PodSucceeded { + // If the pod's deletion timestamp is not zero, then the pod has been deleted + +- powerPodState := r.State.GetPodFromState(pod.GetName(), pod.GetNamespace()) ++ powerPodState := r.State.GetPodFromStateUID(string(pod.GetUID())) + +- logger.V(5).Info("removing the pod from the internal state") +- if err = r.State.DeletePodFromState(pod.GetName(), pod.GetNamespace()); err != nil { ++ logger.V(5).Info("Removing Pod from internal state", "Pod Name", pod.GetName(), "UID", pod.GetUID()) ++ if err = r.State.DeletePodFromStateUID(string(pod.GetUID())); err != nil { + logger.Error(err, "error removing the pod from the internal state") + return ctrl.Result{}, err + } +@@ -105,6 +105,7 @@ func (r *PowerPodReconciler) Reconcile(c context.Context, req ctrl.Request) (ctr + for _, container := range powerPodState.Containers { + workload := container.Workload + cpus := container.ExclusiveCPUs ++ logger.V(5).Info("Removing", "Workload", workload, "CPUs", cpus) + if _, exists := workloadToCPUsRemoved[workload]; exists { + workloadToCPUsRemoved[workload] = append(workloadToCPUsRemoved[workload], cpus...) 
+ } else { +@@ -112,7 +113,7 @@ func (r *PowerPodReconciler) Reconcile(c context.Context, req ctrl.Request) (ctr + } + } + for workloadName, cpus := range workloadToCPUsRemoved { +- logger.V(5).Info("retrieving the workload instance %s", workloadName) ++ logger.V(5).Info("retrieving the workload instance", "Workload Name", workloadName) + workload := &powerv1.PowerWorkload{} + err = r.Get(context.TODO(), client.ObjectKey{ + Namespace: IntelPowerNamespace, +@@ -224,6 +225,7 @@ func (r *PowerPodReconciler) Reconcile(c context.Context, req ctrl.Request) (ctr + + workloadContainer := container + workloadContainer.Pod = pod.Name ++ workloadContainer.Workload = workloadName + containerList = append(containerList, workloadContainer) + } + for i, newContainer := range containerList { +@@ -295,9 +297,11 @@ func (r *PowerPodReconciler) getPowerProfileRequestsFromContainers(containers [] + containerID := getContainerID(pod, container.Name) + coreIDs, err := r.PodResourcesClient.GetContainerCPUs(pod.GetName(), container.Name) + if err != nil { ++ logger.V(5).Info("Error getting CoreIDs.", "ContainerID", containerID) + return map[string][]uint{}, []powerv1.Container{}, err + } + cleanCoreList := getCleanCoreList(coreIDs) ++ logger.V(5).Info("Reserving cores to container.", "ContainerID", containerID, "Cores", cleanCoreList) + + logger.V(5).Info("creating the power container") + powerContainer := &powerv1.Container{} +@@ -362,7 +366,7 @@ func getNewWorkloadContainerList(nodeContainers []powerv1.Container, podStateCon + + logger.V(5).Info("checking if there are new containers for the workload") + for _, container := range nodeContainers { +- if !isContainerInList(container.Name, podStateContainers, logger) { ++ if !isContainerInList(container.Name, container.Id, podStateContainers, logger) { + newNodeContainers = append(newNodeContainers, container) + } + } +@@ -371,9 +375,9 @@ func getNewWorkloadContainerList(nodeContainers []powerv1.Container, podStateCon + } + + // Helper function - if container is in a list of containers +-func isContainerInList(name string, containers []powerv1.Container, logger *logr.Logger) bool { ++func isContainerInList(name string, uid string, containers []powerv1.Container, logger *logr.Logger) bool { + for _, container := range containers { +- if container.Name == name { ++ if container.Name == name && container.Id == uid { + return true + } + } +diff --git a/controllers/powerprofile_controller.go b/controllers/powerprofile_controller.go +index c47591c..32afa9e 100644 +--- a/controllers/powerprofile_controller.go ++++ b/controllers/powerprofile_controller.go +@@ -20,6 +20,7 @@ import ( + "context" + "fmt" + "os" ++ "path" + rt "runtime" + "strconv" + "strings" +@@ -41,6 +42,7 @@ import ( + const ( + MaxFrequencyFile = "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq" + MinFrequencyFile = "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_min_freq" ++ basePath = "/sys/devices/system/cpu" + ) + + // performance ===> priority level 0 +@@ -331,8 +333,8 @@ func (r *PowerProfileReconciler) createExtendedResources(nodeName string, profil + return err + } + +- numCPUsOnNode := float64(rt.NumCPU()) +- logger.V(5).Info("configuring based on the percentage associated to the specific power profile") ++ numCPUsOnNode := float64(getNumberOfCpus(logger)) ++ logger.V(5).Info("Configuring based on the percentage associated to the specific power profile", "CPUS", numCPUsOnNode) + numExtendedResources := int64(numCPUsOnNode * profilePercentages[eppValue]["resource"]) + profilesAvailable := 
resource.NewQuantity(numExtendedResources, resource.DecimalSI) + extendedResourceName := corev1.ResourceName(fmt.Sprintf("%s%s", ExtendedResourcePrefix, profileName)) +@@ -419,3 +421,45 @@ func checkGovs(profileGovernor string) bool { + } + return false + } ++ ++func getNumberOfCpus(logger *logr.Logger) uint { ++ // First, try to get CPUs from sysfs. If the sysfs isn't available ++ // return Number of CPUs from runtime ++ cpusAvailable, err := readStringFromFile(path.Join(basePath, "online")) ++ if err != nil { ++ logger.V(3).Info("NUMCPU: Error during file reading", "Runtime CPUS", rt.NumCPU()) ++ return uint(rt.NumCPU()) ++ } ++ ++ // Delete \n character and split the string to get ++ // first and last element ++ cpusAvailable = strings.Replace(cpusAvailable, "\n", "", -1) ++ cpuSlice := strings.Split(cpusAvailable, "-") ++ if len(cpuSlice) < 2 { ++ logger.V(3).Info("NUMCPU: Error during CPU slicing", "Runtime CPUS", rt.NumCPU()) ++ return uint(rt.NumCPU()) ++ } ++ ++ // Calculate number of CPUs, if an error occurs ++ // return the number of CPUs from runtime ++ firstElement, err := strconv.Atoi(cpuSlice[0]) ++ if err != nil { ++ logger.V(3).Info("NUMCPU: Error during first element convertion", "Runtime CPUS", rt.NumCPU()) ++ return uint(rt.NumCPU()) ++ } ++ secondElement, err := strconv.Atoi(cpuSlice[1]) ++ if err != nil { ++ logger.V(3).Info("NUMCPU: Error during second element convertion", "Runtime CPUS", rt.NumCPU()) ++ return uint(rt.NumCPU()) ++ } ++ logger.V(3).Info("NUMCPU: success", "Online CPUS", uint((secondElement-firstElement)+1)) ++ return uint((secondElement - firstElement) + 1) ++} ++ ++func readStringFromFile(filePath string) (string, error) { ++ valueByte, err := os.ReadFile(filePath) ++ if err != nil { ++ return "", err ++ } ++ return string(valueByte), nil ++} +diff --git a/pkg/podstate/podstate.go b/pkg/podstate/podstate.go +index 2cbd093..0b66c22 100644 +--- a/pkg/podstate/podstate.go ++++ b/pkg/podstate/podstate.go +@@ -38,6 +38,16 @@ func (s *State) GetPodFromState(podName string, podNamespace string) powerv1.Gua + return powerv1.GuaranteedPod{} + } + ++func (s *State) GetPodFromStateUID(podUID string) powerv1.GuaranteedPod { ++ for _, existingPod := range s.GuaranteedPods { ++ if existingPod.UID == podUID { ++ return existingPod ++ } ++ } ++ ++ return powerv1.GuaranteedPod{} ++} ++ + func (s *State) GetCPUsFromPodState(podState powerv1.GuaranteedPod) []uint { + cpus := make([]uint, 0) + for _, container := range podState.Containers { +@@ -47,6 +57,16 @@ func (s *State) GetCPUsFromPodState(podState powerv1.GuaranteedPod) []uint { + return cpus + } + ++func (s *State) DeletePodFromStateUID(podUID string) error { ++ for i, pod := range s.GuaranteedPods { ++ if pod.UID == podUID { ++ s.GuaranteedPods = append(s.GuaranteedPods[:i], s.GuaranteedPods[i+1:]...) 
++ } ++ } ++ ++ return nil ++} ++ + func (s *State) DeletePodFromState(podName string, podNamespace string) error { + for i, pod := range s.GuaranteedPods { + if pod.Name == podName && pod.Namespace == podNamespace { +-- +2.34.1 + diff --git a/python3-k8sapp-kubernetes-power-manager/debian/deb_folder/changelog b/python3-k8sapp-kubernetes-power-manager/debian/deb_folder/changelog new file mode 100644 index 0000000..8e0250e --- /dev/null +++ b/python3-k8sapp-kubernetes-power-manager/debian/deb_folder/changelog @@ -0,0 +1,5 @@ +python3-k8sapp-kubernetes-power-manager (1.0-0) unstable; urgency=medium + + * Initial Release + + -- Thiago Miranda Mon, 1 Aug 2022 08:00:00 +0000 diff --git a/python3-k8sapp-kubernetes-power-manager/debian/deb_folder/control b/python3-k8sapp-kubernetes-power-manager/debian/deb_folder/control new file mode 100644 index 0000000..e380ab4 --- /dev/null +++ b/python3-k8sapp-kubernetes-power-manager/debian/deb_folder/control @@ -0,0 +1,26 @@ +Source: python3-k8sapp-kubernetes-power-manager +Section: libs +Priority: optional +Maintainer: StarlingX Developers +Build-Depends: debhelper-compat (= 13), + dh-python, + python3-all, + python3-pbr, + python3-setuptools, + python3-wheel +Standards-Version: 4.5.1 +Homepage: https://www.starlingx.io + +Package: python3-k8sapp-kubernetes-power-manager +Section: libs +Architecture: any +Depends: ${misc:Depends}, ${python3:Depends} +Description: StarlingX Sysinv Kubernetes Power Manager Extensions + This package contains sysinv plugins for the Kubernetes Power Manager K8S app. + +Package: python3-k8sapp-kubernetes-power-manager-wheels +Section: libs +Architecture: any +Depends: ${misc:Depends}, ${python3:Depends}, python3-wheel +Description: StarlingX Sysinv Kubernetes Power Manager Extension Wheels + This package contains python wheels for the Kubernetes Power Manager K8S app plugins. diff --git a/python3-k8sapp-kubernetes-power-manager/debian/deb_folder/copyright b/python3-k8sapp-kubernetes-power-manager/debian/deb_folder/copyright new file mode 100644 index 0000000..524fb26 --- /dev/null +++ b/python3-k8sapp-kubernetes-power-manager/debian/deb_folder/copyright @@ -0,0 +1,41 @@ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: python3-k8sapp-kubernetes-power-manager +Source: https://opendev.org/starlingx/app-kubernetes-power-manager/ + +Files: * +Copyright: (c) 2023 Wind River Systems, Inc +License: Apache-2 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + https://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + . + On Debian-based systems the full text of the Apache version 2.0 license + can be found in `/usr/share/common-licenses/Apache-2.0'. + +# If you want to use GPL v2 or later for the /debian/* files use +# the following clauses, or change it to suit. Delete these two lines +Files: debian/* +Copyright: 2023 Wind River Systems, Inc +License: Apache-2 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . 
+ https://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + . + On Debian-based systems the full text of the Apache version 2.0 license + can be found in `/usr/share/common-licenses/Apache-2.0'. diff --git a/python3-k8sapp-kubernetes-power-manager/debian/deb_folder/python3-k8sapp-kubernetes-power-manager-wheels.install b/python3-k8sapp-kubernetes-power-manager/debian/deb_folder/python3-k8sapp-kubernetes-power-manager-wheels.install new file mode 100644 index 0000000..19a9e4c --- /dev/null +++ b/python3-k8sapp-kubernetes-power-manager/debian/deb_folder/python3-k8sapp-kubernetes-power-manager-wheels.install @@ -0,0 +1 @@ +plugins/*.whl diff --git a/python3-k8sapp-kubernetes-power-manager/debian/deb_folder/python3-k8sapp-kubernetes-power-manager.install b/python3-k8sapp-kubernetes-power-manager/debian/deb_folder/python3-k8sapp-kubernetes-power-manager.install new file mode 100644 index 0000000..91d1d9d --- /dev/null +++ b/python3-k8sapp-kubernetes-power-manager/debian/deb_folder/python3-k8sapp-kubernetes-power-manager.install @@ -0,0 +1 @@ +usr/lib/python3/dist-packages/k8sapp_* diff --git a/python3-k8sapp-kubernetes-power-manager/debian/deb_folder/rules b/python3-k8sapp-kubernetes-power-manager/debian/deb_folder/rules new file mode 100644 index 0000000..c542e00 --- /dev/null +++ b/python3-k8sapp-kubernetes-power-manager/debian/deb_folder/rules @@ -0,0 +1,33 @@ +#!/usr/bin/make -f +# export DH_VERBOSE = 1 + +export APP_NAME = kubernetes-power-manager +export PYBUILD_NAME = k8sapp-kubernetes-power-manager + +export DEB_VERSION = $(shell dpkg-parsechangelog | egrep '^Version:' | cut -f 2 -d ' ') +export MAJOR = $(shell echo $(DEB_VERSION) | cut -f 1 -d '-') +export MINOR = $(shell echo $(DEB_VERSION) | cut -f 4 -d '.') +export PBR_VERSION = $(MAJOR).$(MINOR) + +export ROOT = $(CURDIR)/debian/tmp +export SKIP_PIP_INSTALL = 1 + +%: + dh $@ --with=python3 --buildsystem=pybuild + +override_dh_auto_install: + env | sort + + python3 setup.py install \ + --install-layout=deb \ + --root $(ROOT) + + python3 setup.py bdist_wheel \ + --universal \ + -d $(ROOT)/plugins + +override_dh_python3: + dh_python3 --shebang=/usr/bin/python3 + +override_dh_auto_test: + PYTHONDIR=$(CURDIR) stestr run diff --git a/python3-k8sapp-kubernetes-power-manager/debian/deb_folder/source/format b/python3-k8sapp-kubernetes-power-manager/debian/deb_folder/source/format new file mode 100644 index 0000000..c3d9f24 --- /dev/null +++ b/python3-k8sapp-kubernetes-power-manager/debian/deb_folder/source/format @@ -0,0 +1,2 @@ +3.0 (quilt) + diff --git a/python3-k8sapp-kubernetes-power-manager/debian/meta_data.yaml b/python3-k8sapp-kubernetes-power-manager/debian/meta_data.yaml new file mode 100644 index 0000000..7306827 --- /dev/null +++ b/python3-k8sapp-kubernetes-power-manager/debian/meta_data.yaml @@ -0,0 +1,9 @@ +--- +debname: python3-k8sapp-kubernetes-power-manager +debver: 1.0-0 +src_path: k8sapp_kubernetes_power_manager +revision: + dist: $STX_DIST + GITREVCOUNT: + SRC_DIR: ${MY_REPO}/stx/app-kubernetes-power-manager + BASE_SRCREV: 789f333dec616b91ddb871cf21fdb07e6bf8d750 diff --git a/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/.gitignore 
b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/.gitignore new file mode 100644 index 0000000..78c457c --- /dev/null +++ b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/.gitignore @@ -0,0 +1,35 @@ +# Compiled files +*.py[co] +*.a +*.o +*.so + +# Sphinx +_build +doc/source/api/ + +# Packages/installer info +*.egg +*.egg-info +dist +build +eggs +parts +var +sdist +develop-eggs +.installed.cfg + +# Other +*.DS_Store +.stestr +.testrepository +.tox +.venv +.*.swp +.coverage +bandit.xml +cover +AUTHORS +ChangeLog +*.sqlite diff --git a/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/.stestr.conf b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/.stestr.conf new file mode 100644 index 0000000..d841e3e --- /dev/null +++ b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/.stestr.conf @@ -0,0 +1,4 @@ +[DEFAULT] +test_path=./k8sapp_kubernetes_power_manager/tests +top_dir=./k8sapp_kubernetes_power_manager +#parallel_class=True \ No newline at end of file diff --git a/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/LICENSE b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/LICENSE new file mode 100644 index 0000000..d6e2801 --- /dev/null +++ b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2019 Wind River Systems, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/README.rst b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/README.rst new file mode 100644 index 0000000..9047f30 --- /dev/null +++ b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/README.rst @@ -0,0 +1,5 @@ +k8sapp-kubernetes-power-manager +=============================== + +This project contains StarlingX Kubernetes application specific python plugins +for the Kubernetes Power Manager. 
diff --git a/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/__init__.py b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/common/__init__.py b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/common/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/common/constants.py b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/common/constants.py new file mode 100644 index 0000000..a5f2596 --- /dev/null +++ b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/common/constants.py @@ -0,0 +1,47 @@ +# +# Copyright (c) 2023 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +HELM_APP_KUBERNETES_POWER_MANAGER = 'kubernetes-power-manager' +HELM_RELEASE_KUBERNETES_POWER_MANAGER = 'kubernetes-power-manager' +HELM_CHART_KUBERNETES_POWER_MANAGER = 'kubernetes-power-manager' +HELM_NS_KUBERNETES_POWER_MANAGER = 'intel-power' +HELM_APP_KUBERNETES_POWER_MANAGER_AGENT = 'power-node-agent' + +HELM_APP_KUBERNETES_POWER_MANAGER_CRD_CSTATES = 'cstates.power.intel.com' +HELM_APP_KUBERNETES_POWER_MANAGER_CRD_POWERCONFIGS = 'powerconfigs.power.intel.com' +HELM_APP_KUBERNETES_POWER_MANAGER_CRD_POWERNODES = 'powernodes.power.intel.com' +HELM_APP_KUBERNETES_POWER_MANAGER_CRD_POWERPODS = 'powerpods.power.intel.com' +HELM_APP_KUBERNETES_POWER_MANAGER_CRD_POWERPROFILES = 'powerprofiles.power.intel.com' +HELM_APP_KUBERNETES_POWER_MANAGER_CRD_POWERWORKLOADS = 'powerworkloads.power.intel.com' +HELM_APP_KUBERNETES_POWER_MANAGER_CRD_TIMEOFDAYCRONJOBS = 'timeofdaycronjobs.power.intel.com' +HELM_APP_KUBERNETES_POWER_MANAGER_CRD_TIMEOFDAYS = 'timeofdays.power.intel.com' +HELM_APP_KUBERNETES_POWER_MANAGER_CRD_UNCORES = 'uncores.power.intel.com' + +HELM_APP_KUBERNETES_POWER_MANAGER_CRDS = [ + HELM_APP_KUBERNETES_POWER_MANAGER_CRD_CSTATES, + HELM_APP_KUBERNETES_POWER_MANAGER_CRD_POWERCONFIGS, + HELM_APP_KUBERNETES_POWER_MANAGER_CRD_POWERNODES, + HELM_APP_KUBERNETES_POWER_MANAGER_CRD_POWERPODS, + HELM_APP_KUBERNETES_POWER_MANAGER_CRD_POWERPROFILES, + HELM_APP_KUBERNETES_POWER_MANAGER_CRD_POWERWORKLOADS, + HELM_APP_KUBERNETES_POWER_MANAGER_CRD_TIMEOFDAYCRONJOBS, + HELM_APP_KUBERNETES_POWER_MANAGER_CRD_TIMEOFDAYS, + HELM_APP_KUBERNETES_POWER_MANAGER_CRD_UNCORES +] + +HELM_COMPONENT_LABEL = 'app.starlingx.io/component' +HELM_NFD_REQUIRED_PARAM = 'nfd-required' + +# These parameters refer to the Node Feature Discovery (NFD) application. +# They should be kept in sync. +# https://opendev.org/starlingx/app-node-feature-discovery. 
+HELM_APP_NFD = 'node-feature-discovery'
+HELM_NS_NFD = 'node-feature-discovery'
+HELM_CHART_NFD = 'node-feature-discovery'
+
+APPLICATION_CSTATE = "C1"
+CSTATE_C0 = "POLL"
+PLATFORM_CSTATE = "C6"
diff --git a/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/common/utils.py b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/common/utils.py
new file mode 100644
index 0000000..5b75962
--- /dev/null
+++ b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/common/utils.py
@@ -0,0 +1,37 @@
+#
+# Copyright (c) 2023 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+
+def merge_dict(source_dict, overrides_dict):
+    """Recursively merge two nested dictionaries.
+
+    The 'overrides_dict' is merged into 'source_dict'.
+    """
+
+    for k, v in overrides_dict.items():
+        if isinstance(v, dict):
+            source_dict[k] = merge_dict(source_dict.get(k, {}), v)
+        else:
+            source_dict[k] = v
+
+    return source_dict
+
+
+def get_value_from_nested_dict(nested_dict, composite_key, default_value=None,
+                               key_separator='.'):
+    """Search a composite key in a multidimensional dictionary.
+
+    :param nested_dict: multidimensional dict
+    :param composite_key: key to search
+    :param default_value: default value to return
+    :param key_separator: character used to split the key
+    :return key value or the default value if not found
+    """
+
+    for key in composite_key.split(key_separator):
+        nested_dict = nested_dict.get(key, {})
+
+    return nested_dict or default_value
diff --git a/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/helm/__init__.py b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/helm/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/helm/kubernetes_power_manager.py b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/helm/kubernetes_power_manager.py
new file mode 100644
index 0000000..b5428fd
--- /dev/null
+++ b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/helm/kubernetes_power_manager.py
@@ -0,0 +1,210 @@
+#
+# Copyright (c) 2023 Wind River Systems, Inc.
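The two helpers in common/utils.py above are easiest to read with a concrete input. The following sketch is illustrative only and is not part of the patch; the dictionaries are hypothetical and the helpers are assumed to be called as plain module-level functions.

    from k8sapp_kubernetes_power_manager.common import utils

    source = {'conf': {'cpu': {'min': 800}}, 'debug': False}
    overrides = {'conf': {'cpu': {'min': 1200, 'governor': 'performance'}}}

    merged = utils.merge_dict(source, overrides)
    # merged == {'conf': {'cpu': {'min': 1200, 'governor': 'performance'}},
    #            'debug': False}

    utils.get_value_from_nested_dict(merged, 'conf.cpu.governor')
    # -> 'performance'
    utils.get_value_from_nested_dict(merged, 'conf.cpu.turbo', default_value=False)
    # -> False, since missing keys fall back to default_value

Note that get_value_from_nested_dict returns 'nested_dict or default_value', so falsy stored values (False, 0, '') also fall back to the default.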
+# +# SPDX-License-Identifier: Apache-2.0 +# + +from k8sapp_kubernetes_power_manager.common import constants as app_constants + +from oslo_log import log as logging +from sysinv.common import constants +from sysinv.common import exception +from sysinv.common import kubernetes +from sysinv.db import api as sys_dbapi +from sysinv.helm import base + + +LOG = logging.getLogger(__name__) + + +class KubernetesPowerManagerHelm(base.FluxCDBaseHelm): + + CHART = app_constants.HELM_CHART_KUBERNETES_POWER_MANAGER + HELM_RELEASE = app_constants.HELM_RELEASE_KUBERNETES_POWER_MANAGER + SERVICE_NAME = 'kubernetes-power-manager' + + SUPPORTED_NAMESPACES = base.BaseHelm.SUPPORTED_NAMESPACES + \ + [app_constants.HELM_NS_KUBERNETES_POWER_MANAGER] + + SUPPORTED_APP_NAMESPACES = { + app_constants.HELM_APP_KUBERNETES_POWER_MANAGER: + (base.BaseHelm.SUPPORTED_NAMESPACES + + [app_constants.HELM_NS_KUBERNETES_POWER_MANAGER]), + } + + def get_namespaces(self): + return self.SUPPORTED_NAMESPACES + + def get_overrides(self, namespace=None): + ihosts = self._get_admissible_ihosts() + + overrides = { + app_constants.HELM_NS_KUBERNETES_POWER_MANAGER: { + 'sharedProfile': self._get_shared_profile_override(ihosts), + 'cstatesProfile': self._get_cstates_override(ihosts) + } + } + + if namespace in self.SUPPORTED_NAMESPACES: + return overrides[namespace] + + if namespace: + raise exception.InvalidHelmNamespace(chart=self.CHART, + namespace=namespace) + + return overrides + + def _get_platform_cpus_from_host(self, host_uuid): + """Return a list of platform cpus of a host + + Args: + host_uuid: The UUID of a host + + Returns: + list: A list of platform cpus + """ + dbapi = sys_dbapi.get_instance() + icpus = dbapi.icpu_get_by_ihost(host_uuid) + platform_cpus = [] + for icpu in icpus: + allocated_function = icpu.get('allocated_function') + if allocated_function == constants.PLATFORM_FUNCTION: + platform_cpus.append(int(icpu.cpu)) + return platform_cpus + + def _get_shared_profile_override(self, ihosts): + """Return a dictionary with Shared Profile information. + + Args: + ihosts: The list of ihosts of the cluster + + Returns: + dict: Dictionary with min, max, reserved_cpus, and governor + key/values + """ + override = {} + for ihost in ihosts: + override[ihost.hostname] = { + 'min': int(ihost.min_cpu_mhz_allowed), + 'max': int(ihost.max_cpu_mhz_allowed), + 'reservedCPUs': self._get_platform_cpus_from_host( + ihost.uuid + ), + 'governor': 'performance', + 'shared': True, + } + return override + + def _get_cstates_override(self, ihosts): + """Return a dictionary with CStates information, organized by pool type + + Args: + ihosts: The list of ihosts of the cluster + + Returns: + dict: Dictionary with sharedPoolCstates and individualCoreCStates + dictionaries + """ + override = {} + + for ihost in ihosts: + if ihost.cstates_available is None: + continue + + cstates_list = ihost.cstates_available.split(',') + + override[ihost.hostname] = { + "sharedPoolCStates": self._make_shared_object(cstates_list), + "individualCoreCStates": self._make_cpu_object( + cstates_list, + self._get_platform_cpus_from_host(ihost.uuid) + ) + } + + return override + + def _make_shared_object(self, cstates): + """Return the CPU object for CState override construction + + Args: + cstates (list): The list of CStates available on the host + + Returns: + dict: Dictionary that contains the CState set for Shared Pool. 
+ """ + # If CPU list is empty we need to prepare the Shared Pool block + new_cpu_dict = {} + target_cstate = None + + # Get the APPLICATION_CSTATE name, or the highest CState number + if [cstate for cstate in cstates if + app_constants.APPLICATION_CSTATE in cstate]: + target_cstate = [cstate for cstate in cstates if + app_constants.APPLICATION_CSTATE in cstate][0] + else: + target_cstate = (app_constants.CSTATE_C0 + if app_constants.CSTATE_C0 in cstates + else cstates[0]) + + target = True + for cstate in cstates: + if cstate == app_constants.CSTATE_C0: + new_cpu_dict[cstate] = True + continue + new_cpu_dict[cstate] = target + if cstate == target_cstate: + target = False + + return new_cpu_dict + + def _make_cpu_object(self, cstates, cpu_list): + """Return the CPU object for CState override construction + + Args: + cstates (list): The list of CStates available on the host + cpu_list (list): The list of platforma cpus + + Returns: + dict: Dictionary that contains the CState set for Individual Cores + C States + """ + # If CPU list is empty we need to prepare the Shared Pool block + new_cpu_dict = {} + target_cstate = None + + # Get the PLATFORM_CSTATE name, or the lowest CState number + if [cstate for cstate in cstates if + app_constants.PLATFORM_CSTATE in cstate]: + target_cstate = [cstate for cstate in cstates if + app_constants.PLATFORM_CSTATE in cstate][0] + else: + target_cstate = cstates[-2] + + cstate_dic = {} + target = True + for cstate in cstates: + if cstate == app_constants.CSTATE_C0: + cstate_dic[cstate] = True + continue + cstate_dic[cstate] = target + if cstate == target_cstate: + target = False + + for cpu_add in cpu_list: + new_cpu_dict[str(cpu_add)] = cstate_dic + + return new_cpu_dict + + def _get_admissible_ihosts(self): + """Return "power-management" labeled ihosts""" + kube = kubernetes.KubeOperator() + nodes = kube.kube_get_nodes() + dbapi = sys_dbapi.get_instance() + ihosts = [] + for node in nodes: + node_labels = node.metadata.labels + if constants.KUBE_POWER_MANAGER_LABEL in node_labels: + ihosts.append( + dbapi.ihost_get_by_hostname(node.metadata.name)) + + return ihosts diff --git a/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/lifecycle/__init__.py b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/lifecycle/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/lifecycle/lifecycle_kubernetes_power_manager.py b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/lifecycle/lifecycle_kubernetes_power_manager.py new file mode 100644 index 0000000..a57a69d --- /dev/null +++ b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/lifecycle/lifecycle_kubernetes_power_manager.py @@ -0,0 +1,240 @@ +# +# Copyright (c) 2023 Wind River Systems, Inc. 
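For orientation, the overrides computed by get_overrides() above for the 'intel-power' namespace take roughly the following shape. This is an illustrative sketch, not part of the patch: the host name, frequency limits, platform CPUs and available C-state list ('POLL,C1,C1E,C6') are hypothetical, and only hosts carrying the power-management label are considered.

    # Illustrative output of KubernetesPowerManagerHelm.get_overrides()
    {
        'sharedProfile': {
            'worker-0': {
                'min': 800,
                'max': 3600,
                'reservedCPUs': [0, 1],
                'governor': 'performance',
                'shared': True,
            },
        },
        'cstatesProfile': {
            'worker-0': {
                'sharedPoolCStates': {
                    'POLL': True, 'C1': True, 'C1E': False, 'C6': False},
                'individualCoreCStates': {
                    '0': {'POLL': True, 'C1': True, 'C1E': True, 'C6': True},
                    '1': {'POLL': True, 'C1': True, 'C1E': True, 'C6': True},
                },
            },
        },
    }

In this sketch the shared pool is limited to APPLICATION_CSTATE ('C1') and shallower, while the platform cores are allowed down to PLATFORM_CSTATE ('C6'), matching the target selection in _make_shared_object() and _make_cpu_object().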
+# +# SPDX-License-Identifier: Apache-2.0 +# + +""" System inventory App lifecycle operator.""" + +import yaml + +from k8sapp_kubernetes_power_manager.common import constants as app_constants + +from oslo_log import log as logging +from sysinv.common import constants as cst +from sysinv.common import exception +from sysinv.common import kubernetes +from sysinv.common import utils as cutils +from sysinv.helm import lifecycle_base as base + + +LOG = logging.getLogger(__name__) + + +class KubernetesPowerManagerAppLifecycleOperator(base.AppLifecycleOperator): + def app_lifecycle_actions(self, context, conductor_obj, app_op, + app, hook_info): + """Perform lifecycle actions for an operation + + :param context: request context, can be None + :param conductor_obj: conductor object, can be None + :param app_op: AppOperator object + :param app: AppOperator.Application object + :param hook_info: LifecycleHookInfo object + + """ + + # Semantic Check + if (hook_info.lifecycle_type == cst.APP_LIFECYCLE_TYPE_SEMANTIC_CHECK + and hook_info.operation == cst.APP_APPLY_OP + and hook_info.relative_timing == cst.APP_LIFECYCLE_TIMING_PRE): + return self._pre_semantic_check(app, app_op) + + # FluxCD Request + if (hook_info.lifecycle_type == cst.APP_LIFECYCLE_TYPE_FLUXCD_REQUEST + and hook_info.operation == cst.APP_APPLY_OP + and hook_info.relative_timing == cst.APP_LIFECYCLE_TIMING_PRE): + return self._pre_fluxcd_request(app, app_op) + + # Operation + if (hook_info.lifecycle_type == cst.APP_LIFECYCLE_TYPE_OPERATION + and hook_info.operation == cst.APP_REMOVE_OP + and hook_info.relative_timing == cst.APP_LIFECYCLE_TIMING_POST): + return self._post_remove(app, app_op) + + super(KubernetesPowerManagerAppLifecycleOperator, + self).app_lifecycle_actions( + context, conductor_obj, app_op, app, hook_info + ) + + def _pre_fluxcd_request(self, app, app_op): + LOG.debug(f"Executing pre_fluxcd_request for {app.name} app") + + # Applying label on namespace before running FluxCD manifests + # to ensure that all pods in this namespace run on correct cores + self._update_component_label(app, app_op) + + def _pre_semantic_check(self, app, app_op): + LOG.debug(f"Executing pre_semantic_check for {app.name} app") + + dbapi = app_op._dbapi + try: + nfd_kube_app = dbapi.kube_app_get(app_constants.HELM_APP_NFD) + LOG.info("Node Feature Discovery (NFD) Application found - " + f"Version: {nfd_kube_app.app_version} - " + "Status: {nfd_kube_app.status}") + + nfd_installed = nfd_kube_app.status == cst.APP_APPLY_SUCCESS + + except exception.KubeAppNotFound: + nfd_installed = False + + if not nfd_installed: + if self._is_nfd_required(dbapi): + raise exception.LifecycleSemanticCheckException( + "Node Feature Discovery (NFD) Application is required. " + "You can bypass this check by setting the " + f"{app_constants.HELM_NFD_REQUIRED_PARAM} parameter to " + "False using overrides.") + + LOG.info("Bypass flag for Node Feature Discovery (NFD) " + "Application found.") + + def _post_remove(self, app, app_op): + LOG.debug(f"Executing post_remove for {app.name} app") + + k8s_client_core = app_op._kube._get_kubernetesclient_core() + + # Remove all daemonsets (agents) started by the controller and any + # orphan pods in namespace + self._delete_pods(app_op, k8s_client_core) + + # Helm doesn't remove CRDs. 
To clean up after application-remove, + # we need to explicitly delete the CRDs + for crd in app_constants.HELM_APP_KUBERNETES_POWER_MANAGER_CRDS: + cmd = ['kubectl', '--kubeconfig', kubernetes.KUBERNETES_ADMIN_CONF, + 'delete', 'crd', crd] + stdout, stderr = cutils.trycmd(*cmd) + message = (f"{app.name} app: cmd={cmd} stdout={stdout} " + f"stderr={stderr}") + if stderr != '': + raise ValueError(f"An error occur during the CRDs removal." + f"{message}") + LOG.debug(message) + + # Remove the namespace + app_op._kube.kube_delete_namespace( + app_constants.HELM_NS_KUBERNETES_POWER_MANAGER) + + def _is_nfd_required(self, dbapi): + + """Checks the state of the parameter that controls whether the NFD + application is required + + :param dbapi: dbapi + :return True if enabled otherwise False + """ + val = True + try: + app = dbapi.kube_app_get( + app_constants.HELM_APP_KUBERNETES_POWER_MANAGER) + + user_overrides = self._get_user_overrides(app, dbapi) + if app_constants.HELM_NFD_REQUIRED_PARAM in user_overrides: + if isinstance( + user_overrides[app_constants.HELM_NFD_REQUIRED_PARAM], + bool): + val = user_overrides[app_constants.HELM_NFD_REQUIRED_PARAM] + + LOG.error(f"The value of parameter " + f"{app_constants.HELM_NFD_REQUIRED_PARAM} must be " + "true or false.") + + except exception.KubeAppNotFound as e: + LOG.error("Failed to access app info " + f"{app_constants.HELM_APP_KUBERNETES_POWER_MANAGER}: " + f"{e}") + + except exception.HelmOverrideNotFound as e: + LOG.error("Failed to access user overrides from chart " + f"{app_constants.HELM_CHART_KUBERNETES_POWER_MANAGER}: " + f"{e}") + + return val + + def _update_component_label(self, app, app_op): + """Create the StarlingX component label in namespace + + :param app_op: AppOperator object + :param app: AppOperator.Application object + """ + + user_overrides = self._get_user_overrides(app._kube_app, app_op._dbapi) + component_label = user_overrides.get( + app_constants.HELM_COMPONENT_LABEL, + 'platform') + + if component_label in ['application', 'platform']: + # Get namespace attributes + k8s_client_core = app_op._kube._get_kubernetesclient_core() + namespace = k8s_client_core.read_namespace( + app_constants.HELM_NS_KUBERNETES_POWER_MANAGER) + + # Previous value + previous_component_label = namespace.metadata.labels.get( + app_constants.HELM_COMPONENT_LABEL) + + # Set label in namespace + namespace.metadata.labels.update( + { + app_constants.HELM_COMPONENT_LABEL: + component_label + }) + app_op._kube.kube_patch_namespace( + app_constants.HELM_NS_KUBERNETES_POWER_MANAGER, + namespace) + + # Restarts all pods in namespace if label has changed + if (previous_component_label is not None and + previous_component_label != component_label): + self._delete_pods(app_op, k8s_client_core) + + else: + raise ValueError(f"Value {component_label} for label:namespace" + f"{app_constants.HELM_COMPONENT_LABEL}:" + f"{app_constants.HELM_NS_KUBERNETES_POWER_MANAGER}" + " is not supported") + + def _get_user_overrides(self, kube_app, dbapi): + """Get user overrides from db + + :param kube_app: Kubernetes Application instance + :param dbapi: dbapi + :return User overrides in dict format + """ + + user_overrides = {} + + overrides = dbapi.helm_override_get( + kube_app.id, + app_constants.HELM_CHART_KUBERNETES_POWER_MANAGER, + app_constants.HELM_NS_KUBERNETES_POWER_MANAGER) + + if overrides.user_overrides: + user_overrides = yaml.safe_load(overrides.user_overrides) + + return user_overrides + + def _delete_pods(self, app_op, k8s_client_core): + """Delete all pods 
within the application namespace + + :param app_op: AppOperator object + :param k8s_client_core: Kubernetes client object + """ + + try: + # pod list + pods = k8s_client_core.list_namespaced_pod( + app_constants.HELM_NS_KUBERNETES_POWER_MANAGER) + + # On namespace label change, delete pods to force restart + for pod in pods.items: + app_op._kube.kube_delete_pod( + name=pod.metadata.name, + namespace=app_constants.HELM_NS_KUBERNETES_POWER_MANAGER, + grace_periods_seconds=0 + ) + + except Exception: + LOG.error("Failed to delete pods in namespace %s", + app_constants.HELM_NS_KUBERNETES_POWER_MANAGER) diff --git a/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/tests/__init__.py b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/tests/test_kubernetes_power_manager.py b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/tests/test_kubernetes_power_manager.py new file mode 100644 index 0000000..ffe742c --- /dev/null +++ b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/tests/test_kubernetes_power_manager.py @@ -0,0 +1,30 @@ +# +# Copyright (c) 2023 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +from k8sapp_kubernetes_power_manager.tests import test_plugins + +from sysinv.db import api as dbapi +from sysinv.tests.db import base as dbbase +from sysinv.tests.db import utils as dbutils +from sysinv.tests.helm import base + + +class KubernetesPowerManagerTestCase( + test_plugins.K8SAppKubernetesPowerManagerAppMixin, + base.HelmTestCaseMixin): + + def setUp(self): + super(KubernetesPowerManagerTestCase, self).setUp() + self.app = dbutils.create_test_app(name='kubernetes-power-manager') + self.dbapi = dbapi.get_instance() + + +class KubernetesPowerManagerTestCaseDummy( + KubernetesPowerManagerTestCase, + dbbase.ProvisionedControllerHostTestCase): + # without a test zuul will fail + def test_dummy(self): + pass diff --git a/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/tests/test_plugins.py b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/tests/test_plugins.py new file mode 100644 index 0000000..4135de4 --- /dev/null +++ b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/k8sapp_kubernetes_power_manager/tests/test_plugins.py @@ -0,0 +1,37 @@ +# +# Copyright (c) 2023 Wind River Systems, Inc. 
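The semantic check and the namespace relabelling above are both driven by the chart's user overrides. As a sketch of the values they look for (the content below is hypothetical and would be supplied through the platform's regular helm override mechanism for the kubernetes-power-manager chart in the intel-power namespace):

    # Illustrative result of _get_user_overrides(), i.e. yaml.safe_load() of the
    # stored user overrides; not part of the patch.
    user_overrides = {
        # read by _is_nfd_required(): a boolean False bypasses the NFD check
        'nfd-required': False,
        # read by _update_component_label(): must be 'application' or 'platform'
        'app.starlingx.io/component': 'application',
    }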
+# +# SPDX-License-Identifier: Apache-2.0 +# + +from k8sapp_kubernetes_power_manager.common import constants as app_constants +from sysinv.tests.db import base as dbbase + + +class K8SAppKubernetesPowerManagerAppMixin(object): + app_name = app_constants.HELM_APP_KUBERNETES_POWER_MANAGER + path_name = app_name + '.tgz' + + def setUp(self): + super(K8SAppKubernetesPowerManagerAppMixin, self).setUp() + + +# Test Configuration: +# - Controller +# - IPv6 +# - Kubernetes Power Manager App +class K8sAppKubernetesPowerManagerControllerTestCase( + K8SAppKubernetesPowerManagerAppMixin, + dbbase.BaseIPv6Mixin, + dbbase.ControllerHostTestCase): + pass + + +# Test Configuration: +# - AIO +# - IPv4 +# - Kubernetes Power Manager App +class K8SAppKubernetesPowerManagerAIOTestCase( + K8SAppKubernetesPowerManagerAppMixin, + dbbase.AIOSimplexHostTestCase): + pass diff --git a/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/pylint.rc b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/pylint.rc new file mode 100644 index 0000000..bae3bbd --- /dev/null +++ b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/pylint.rc @@ -0,0 +1,337 @@ +[MASTER] +# Specify a configuration file. +rcfile=pylint.rc + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Add files or directories to the blacklist. Should be base names, not paths. +ignore= + +# Pickle collected data for later comparisons. +persistent=yes + +# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +load-plugins= + +# Use multiple processes to speed up Pylint. +jobs=4 + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code +extension-pkg-whitelist=lxml.etree,greenlet + + + +[MESSAGES CONTROL] +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). +# See "Messages Control" section of +# https://pylint.readthedocs.io/en/latest/user_guide +disable= + # C codes refer to Convention + C0103, # invalid-name + C0104, # disallowed-nameA + C0112, # empty-docstring + C0114, # missing-module-docstring + C0115, # missing-class-docstring + C0116, # missing-function-docstring + C0123, # unidiomatic-typecheck !!! + C0201, # consider-iterating-dictionary + C0202, # bad-classmethod-argument + C0206, # consider-using-dict-items + C0207, # use-maxsplit-arg + C0209, # consider-using-f-string + C0301, # line-too-long + C0302, # too-many-lines + C0325, # superfluous-parens + C0411, # wrong-import-order + C0412, # ungrouped-imports + C0413, # wrong-import-position + C0414, # useless-import-alias !!! + C0415, # import-outside-toplevel + C1802, # use-implicit-booleaness-not-len !!! + C2801, # unnecessary-dunder-call !!! + C3002, # unnecessary-direct-lambda-call !!! + # R codes refer to refactoring + R0022, # useless-option-value !!! 
+ R0205, # useless-object-inheritance + R0402, # consider-using-from-import + R0901, # too-many-ancestors + R0902, # too-many-instance-attributes + R0903, # too-few-public-methods + R0904, # too-many-public-methods + R0911, # too-many-return-statements + R0912, # too-many-branches + R0913, # too-many-arguments + R0914, # too-many-locals + R0915, # too-many-statements + R0916, # too-many-boolean-expressions + R1702, # too-many-nested-blocks + R1703, # simplifiable-if-statement + R1704, # redefined-argument-from-local !!! + R1705, # no-else-return + R1707, # trailing-comma-tuple !!! + R1708, # stop-iteration-return !!! + R1710, # inconsistent-return-statements + R1711, # useless-return + R1714, # consider-using-in + R1717, # consider-using-dict-comprehension !!! + R1718, # consider-using-set-comprehension + R1719, # simplifiable-if-expression + R1720, # no-else-raise + R1721, # unnecessary-comprehension + R1722, # consider-using-sys-exit !!! + R1723, # no-else-break + R1724, # no-else-continue + R1725, # super-with-arguments + R1726, # simplifiable-condition !!! + R1728, # consider-using-generator + R1729, # use-a-generator + R1730, # consider-using-min-builtin !!! + R1731, # consider-using-max-builtin !!! + R1732, # consider-using-with + R1733, # unnecessary-dict-index-lookup !! + R1734, # use-list-literal + R1735, # use-dict-literal + # W codes are warnings + W0101, # unreachable + W0105, # pointless-string-statement + W0106, # expression-not-assigned + W0107, # unnecessary-pass + W0108, # unnecessary-lambda + W0109, # duplicate-key !!! + W0123, # eval-used + W0125, # using-constant-test !!! + W0133, # pointless-exception-statement !!! + W0143, # comparison-with-callable !!! + W0150, # lost-exception + W0201, # attribute-defined-outside-init + W0211, # bad-staticmethod-argument + W0212, # protected-access + W0221, # arguments-differ + W0223, # abstract-method + W0231, # super-init-not-called + W0235, # useless-super-delegation + W0237, # arguments-renamed !!! + W0311, # bad-indentation + W0402, # deprecated-module + W0404, # reimported + W0511, # fixme + W0602, # global-variable-not-assigned !!! + W0603, # global-statement + W0612, # unused-variable + W0613, # unused-argument + W0621, # redefined-outer-name + W0622, # redefined-builtin + W0631, # undefined-loop-variable + W0703, # broad-except (pylint 2.16 renamed to broad-except-caught) + W0706, # try-except-raise + W0707, # raise-missing-from + W0719, # broad-exception-raised + W1113, # keyword-arg-before-vararg + W1310, # format-string-without-interpolation !!! + W1401, # anomalous-backslash-in-string + W1406, # redundant-u-string-prefix + W1505, # deprecated-method + W1514, # unspecified-encoding + W3101, # missing-timeout + E0601, # used-before-assignment !!! + E0605, # invalid-all-format !!! + E1101, # no-member + E1111, # assignment-from-no-return + E1121, # too-many-function-args !!! + E1123, # unexpected-keyword-arg !!! + E1136, # unsubscriptable-object !!! + +[REPORTS] +# Set the output format. Available formats are text, parseable, colorized, msvs +# (visual studio) and html +output-format=text + +# Tells whether to display a full report or only the messages +reports=no + +# Python expression which should return a note less than 10 (10 is the highest +# note). You have access to the variables errors warning, statement which +# respectively contain the number of errors / warnings messages and the total +# number of statements analyzed. This is used by the global evaluation report +# (RP0004). 
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + + +[SIMILARITIES] +# Minimum lines number of a similarity. +min-similarity-lines=4 + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + + +[FORMAT] +# Maximum number of characters on a single line. +max-line-length=85 + +# Maximum number of lines in a module +max-module-lines=1000 + +# String used as indentation unit. This is usually 4 spaces or "\t" (1 tab). +indent-string=' ' + + +[TYPECHECK] +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis +ignored-modules=distutils,eventlet.green.subprocess,six,six.moves + +# List of classes names for which member attributes should not be checked +# (useful for classes with attributes dynamically set). +# pylint is confused by sqlalchemy Table, as well as sqlalchemy Enum types +# ie: (unprovisioned, identity) +# LookupDict in requests library confuses pylint +ignored-classes=SQLObject, optparse.Values, thread._local, _thread._local, + Table, unprovisioned, identity, LookupDict + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E0201 when accessed. Python regular +# expressions are accepted. +generated-members=REQUEST,acl_users,aq_parent + + +[BASIC] +# Regular expression which should only match correct module names +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Regular expression which should only match correct module level names +const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ + +# Regular expression which should only match correct class names +class-rgx=[A-Z_][a-zA-Z0-9]+$ + +# Regular expression which should only match correct function names +function-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct method names +method-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct instance attribute names +attr-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct argument names +argument-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct variable names +variable-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct list comprehension / +# generator expression variable names +inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ + +# Good variable names which should always be accepted, separated by a comma +good-names=i,j,k,ex,Run,_ + +# Bad variable names which should always be refused, separated by a comma +bad-names=foo,bar,baz,toto,tutu,tata + +# Regular expression which should only match functions or classes name which do +# not require a docstring +no-docstring-rgx=__.*__ + + +[MISCELLANEOUS] +# List of note tags to take in consideration, separated by a comma. +notes=FIXME,XXX,TODO + + +[VARIABLES] +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# A regular expression matching the beginning of the name of dummy variables +# (i.e. not used). +dummy-variables-rgx=_|dummy + +# List of additional names supposed to be defined in builtins. 
Remember that +# you should avoid to define new builtins when possible. +additional-builtins= + + +[IMPORTS] +# Deprecated modules which should not be used, separated by a comma +deprecated-modules=regsub,string,TERMIOS,Bastion,rexec + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled) +import-graph= + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled) +ext-import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled) +int-import-graph= + + +[DESIGN] +# Maximum number of arguments for function / method +max-args=5 + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore +ignored-argument-names=_.* + +# Maximum number of locals for function / method body +max-locals=15 + +# Maximum number of return / yield for function / method body +max-returns=6 + +# Maximum number of branch for function / method body +max-branches=12 + +# Maximum number of statements in function / method body +max-statements=50 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + + +[CLASSES] +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__,__new__,setUp + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + + +[EXCEPTIONS] +# Exceptions that will emit a warning when caught. +overgeneral-exceptions=builtins.BaseException,builtins.Exception + diff --git a/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/requirements.txt b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/requirements.txt new file mode 100644 index 0000000..84f499a --- /dev/null +++ b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/requirements.txt @@ -0,0 +1,2 @@ +pbr>=5.1.0 +PyYAML>=3.13 diff --git a/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/setup.cfg b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/setup.cfg new file mode 100644 index 0000000..863abf5 --- /dev/null +++ b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/setup.cfg @@ -0,0 +1,36 @@ +[metadata] +name = k8sapp-kubernetes-power-manager +summary = StarlingX sysinv extensions for Kubernetes Power Manager +long_description = file: README.rst +long_description_content_type = text/x-rst +license = Apache 2.0 +author = StarlingX +author_email = starlingx-discuss@lists.starlingx.io +url = https://www.starlingx.io/ +classifiers = + License :: OSI Approved :: Apache Software License + Operating System :: POSIX :: Linux + Programming Language :: Python + Programming Language :: Python :: 3 + Programming Language :: Python :: 3.9 + +[files] +packages = + k8sapp_kubernetes_power_manager + +[global] +setup-hooks = + pbr.hooks.setup_hook + +[entry_points] +systemconfig.helm_applications = + kubernetes-power-manager = systemconfig.helm_plugins.kubernetes_power_manager + +systemconfig.app_lifecycle = + kubernetes-power-manager = k8sapp_kubernetes_power_manager.lifecycle.lifecycle_kubernetes_power_manager:KubernetesPowerManagerAppLifecycleOperator + 
+systemconfig.helm_plugins.kubernetes_power_manager = + 001_kubernetes-power-manager = k8sapp_kubernetes_power_manager.helm.kubernetes_power_manager:KubernetesPowerManagerHelm + +[bdist_wheel] +universal = 1 diff --git a/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/setup.py b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/setup.py new file mode 100644 index 0000000..66445af --- /dev/null +++ b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/setup.py @@ -0,0 +1,12 @@ +# +# copyright (c) 2023 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +import setuptools + + +setuptools.setup( + setup_requires=['pbr>=2.0.0', 'PyYAML>=3.13'], + pbr=True) diff --git a/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/test-requirements.txt b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/test-requirements.txt new file mode 100644 index 0000000..081c1ec --- /dev/null +++ b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/test-requirements.txt @@ -0,0 +1,21 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. +hacking>=1.1.0,<=2.0.0 # Apache-2.0 +astroid +bandit<1.7.2;python_version>="3.0" +coverage>=3.6 +fixtures>=3.0.0 # Apache-2.0/BSD +mock>=2.0.0 # BSD +python-subunit>=0.0.18 +requests-mock>=0.6.0 # Apache-2.0 +sphinx +oslosphinx +oslotest>=3.2.0 # Apache-2.0 +stestr>=1.0.0 # Apache-2.0 +testrepository>=0.0.18 +testtools!=1.2.0,>=0.9.36 +isort<5;python_version>="3.0" +pylint +pycryptodomex +flake8<3.8.0 diff --git a/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/tox.ini b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/tox.ini new file mode 100644 index 0000000..1d7a2b2 --- /dev/null +++ b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/tox.ini @@ -0,0 +1,103 @@ +[tox] +envlist = flake8,py39,pylint,bandit +minversion = 1.6 +skipsdist = True + +# tox does not work if the path to the workdir is too long, so move it to /tmp +toxworkdir = /tmp/{env:USER}_k8sapp_kubernetes_power_manager_tox +stxdir = {toxinidir}/../../.. +distshare={toxworkdir}/.tox/distshare + +[testenv] +sitepackages = True +basepython = python3.9 + +allowlist_externals = bash + find + +install_command = pip install -v -v -v \ + -c{toxinidir}/upper-constraints.txt \ + -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/starlingx/root/raw/branch/master/build-tools/requirements/debian/upper-constraints.txt} \ + {opts} {packages} + +commands = + find . 
-type f -name "*.pyc" -delete + +setenv = VIRTUAL_ENV={envdir} + PYTHONHASHSEED=0 + PYTHONDONTWRITEBYTECODE=1 + OS_TEST_PATH=./k8sapp_kubernetes_power_manager/tests + LANG=en_US.UTF-8 + LANGUAGE=en_US:en + LC_ALL=C + SYSINV_TEST_ENV=True + EVENTS_YAML=./k8sapp_kubernetes_power_manager/tests/events_for_testing.yaml + TOX_WORK_DIR={toxworkdir} + PYLINTHOME={toxworkdir} + +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + -e{[tox]stxdir}/config/sysinv/sysinv/sysinv + -e{[tox]stxdir}/config/tsconfig/tsconfig + -e{[tox]stxdir}/fault/fm-api/source + -e{[tox]stxdir}/fault/python-fmclient/fmclient + -e{[tox]stxdir}/utilities/ceph/python-cephclient/python-cephclient + -e{[tox]stxdir}/update/sw-patch/cgcs-patch + +[flake8] +# H series are hacking +# H403 multi line docstrings should end on a new line +# H404 multi line docstring should start without a leading new line +# H405 multi line docstring summary not separated with an empty line +ignore = H403,H404,H405,W503,W504 +# Ignoring +per-file-ignores = + k8sapp_kubernetes_power_manager/common/constants.py: E501 +exclude = build,dist,tools,.eggs +max-line-length=80 + +[testenv:flake8] +deps = -r{toxinidir}/test-requirements.txt +commands = + flake8 {posargs} . + +[testenv:py39] +commands = + {[testenv]commands} + stestr run {posargs} + stestr slowest + +[testenv:venv] +commands = {posargs} + +[bandit] + +[testenv:bandit] +deps = -r{toxinidir}/test-requirements.txt + bandit +commands = bandit --ini tox.ini -n 5 -r k8sapp_kubernetes_power_manager + +[testenv:pylint] +commands = + pylint {posargs} k8sapp_kubernetes_power_manager --rcfile=./pylint.rc + +[testenv:cover] +setenv = {[testenv]setenv} + PYTHON=coverage run --parallel-mode +commands = + {[testenv]commands} + coverage erase + stestr run {posargs} + coverage combine + coverage html -d cover + coverage xml -o cover/coverage.xml + coverage report + +[testenv:pip-missing-reqs] +# do not install test-requirements as that will pollute the virtualenv for +# determining missing packages +# this also means that pip-missing-reqs must be installed separately, outside +# of the requirements.txt files +deps = pip_missing_reqs + -rrequirements.txt +commands=pip-missing-reqs -d --ignore-file=/k8sapp_kubernetes_power_manager/tests k8sapp_kubernetes_power_manager diff --git a/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/upper-constraints.txt b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/upper-constraints.txt new file mode 100644 index 0000000..9c30188 --- /dev/null +++ b/python3-k8sapp-kubernetes-power-manager/k8sapp_kubernetes_power_manager/upper-constraints.txt @@ -0,0 +1 @@ +# Override upstream constraints based on StarlingX load diff --git a/stx-kubernetes-power-manager-helm/debian/deb_folder/changelog b/stx-kubernetes-power-manager-helm/debian/deb_folder/changelog new file mode 100644 index 0000000..9b308db --- /dev/null +++ b/stx-kubernetes-power-manager-helm/debian/deb_folder/changelog @@ -0,0 +1,5 @@ +stx-kubernetes-power-manager-helm (1.0-0) unstable; urgency=medium + + * Initial Release. 
+ + -- Thiago Miranda Mon, 1 Aug 2023 08:00:00 +0000 diff --git a/stx-kubernetes-power-manager-helm/debian/deb_folder/control b/stx-kubernetes-power-manager-helm/debian/deb_folder/control new file mode 100644 index 0000000..cd6f581 --- /dev/null +++ b/stx-kubernetes-power-manager-helm/debian/deb_folder/control @@ -0,0 +1,17 @@ +Source: stx-kubernetes-power-manager-helm +Section: libs +Priority: optional +Maintainer: StarlingX Developers +Build-Depends: debhelper-compat (= 13), + helm, + python3-k8sapp-kubernetes-power-manager, + python3-k8sapp-kubernetes-power-manager-wheels +Standards-Version: 4.5.1 +Homepage: https://www.starlingx.io + +Package: stx-kubernetes-power-manager-helm +Section: libs +Architecture: any +Depends: ${misc:Depends} +Description: StarlingX Intel Power Management Chart + This package contains helm chart for the Intel Power Management application. diff --git a/stx-kubernetes-power-manager-helm/debian/deb_folder/copyright b/stx-kubernetes-power-manager-helm/debian/deb_folder/copyright new file mode 100644 index 0000000..b5f3ddb --- /dev/null +++ b/stx-kubernetes-power-manager-helm/debian/deb_folder/copyright @@ -0,0 +1,41 @@ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: stx-kubernetes-power-manager-helm +Source: https://opendev.org/starlingx/app-kubernetes-power-manager/ + +Files: * +Copyright: (c) 2023 Wind River Systems, Inc +License: Apache-2 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + https://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + . + On Debian-based systems the full text of the Apache version 2.0 license + can be found in `/usr/share/common-licenses/Apache-2.0'. + +# If you want to use GPL v2 or later for the /debian/* files use +# the following clauses, or change it to suit. Delete these two lines +Files: debian/* +Copyright: 2023 Wind River Systems, Inc +License: Apache-2 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + https://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + . + On Debian-based systems the full text of the Apache version 2.0 license + can be found in `/usr/share/common-licenses/Apache-2.0'. 
diff --git a/stx-kubernetes-power-manager-helm/debian/deb_folder/rules b/stx-kubernetes-power-manager-helm/debian/deb_folder/rules new file mode 100644 index 0000000..495c6d4 --- /dev/null +++ b/stx-kubernetes-power-manager-helm/debian/deb_folder/rules @@ -0,0 +1,48 @@ +#!/usr/bin/make -f +# export DH_VERBOSE = 1 + +export ROOT = debian/tmp +export APP_FOLDER = $(ROOT)/usr/local/share/applications/helm + +export DEB_VERSION = $(shell dpkg-parsechangelog | egrep '^Version:' | cut -f 2 -d ' ') +export RELEASE = $(shell echo $(DEB_VERSION) | cut -f 1 -d '-') +export REVISION = $(shell echo $(DEB_VERSION) | cut -f 4 -d '.') + +export APP_NAME = kubernetes-power-manager +export APP_VERSION = $(RELEASE)-$(REVISION) +export APP_TARBALL = $(APP_NAME)-$(APP_VERSION).tgz +export HELM_REPO = stx-platform +export STAGING = staging + +%: + dh $@ + +override_dh_auto_build: + # Setup the staging directory. + cd helm-charts && make + mkdir -p $(STAGING) + cp files/metadata.yaml $(STAGING) + cp -R fluxcd-manifests $(STAGING) + mkdir -p $(STAGING)/charts + cp helm-charts/*.tgz $(STAGING)/charts + # Populate metadata + sed -i 's/@APP_NAME@/$(APP_NAME)/g' $(STAGING)/metadata.yaml + sed -i 's/@APP_VERSION@/$(APP_VERSION)/g' $(STAGING)/metadata.yaml + sed -i 's/@HELM_REPO@/$(HELM_REPO)/g' $(STAGING)/metadata.yaml + # Copy the plugins: installed in the buildroot + mkdir -p $(STAGING)/plugins + cp /plugins/*.whl $(STAGING)/plugins + # Create the app package. + cd $(STAGING) && find . -type f ! -name '*.md5' -print0 | xargs -0 md5sum > checksum.md5 + tar cfz $(APP_TARBALL) -C $(STAGING)/ . + # Cleanup staging + rm -rf $(STAGING) + +override_dh_auto_install: + # Install the app tar file. + install -d -m 755 $(APP_FOLDER) + install -p -D -m 755 $(APP_TARBALL) $(APP_FOLDER) + +override_dh_auto_test: + +override_dh_usrlocal: diff --git a/stx-kubernetes-power-manager-helm/debian/deb_folder/source/format b/stx-kubernetes-power-manager-helm/debian/deb_folder/source/format new file mode 100644 index 0000000..163aaf8 --- /dev/null +++ b/stx-kubernetes-power-manager-helm/debian/deb_folder/source/format @@ -0,0 +1 @@ +3.0 (quilt) diff --git a/stx-kubernetes-power-manager-helm/debian/deb_folder/stx-kubernetes-power-manager-helm.install b/stx-kubernetes-power-manager-helm/debian/deb_folder/stx-kubernetes-power-manager-helm.install new file mode 100644 index 0000000..1b47c6e --- /dev/null +++ b/stx-kubernetes-power-manager-helm/debian/deb_folder/stx-kubernetes-power-manager-helm.install @@ -0,0 +1 @@ +usr/local/share/applications/helm/* diff --git a/stx-kubernetes-power-manager-helm/debian/meta_data.yaml b/stx-kubernetes-power-manager-helm/debian/meta_data.yaml new file mode 100644 index 0000000..94b7695 --- /dev/null +++ b/stx-kubernetes-power-manager-helm/debian/meta_data.yaml @@ -0,0 +1,13 @@ +# Copyright (c) 2023 Wind River Systems, Inc. 
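As a rough guide to what the override_dh_auto_build target above produces, the staged application tarball ends up with a layout along these lines (names are illustrative; the actual release, revision, chart version and wheel file name come from the build):

    kubernetes-power-manager-<release>-<revision>.tgz
    |-- metadata.yaml
    |-- checksum.md5
    |-- charts/
    |   `-- kubernetes-power-manager-<chart-version>.tgz
    |-- fluxcd-manifests/
    |   |-- kustomization.yaml
    |   |-- base/
    |   `-- kubernetes-power-manager/
    `-- plugins/
        `-- k8sapp_kubernetes_power_manager-<version>-py3-none-any.whl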
+# +# SPDX-License-Identifier: Apache-2.0 +# +--- +debname: stx-kubernetes-power-manager-helm +debver: 1.0-0 +src_path: stx-kubernetes-power-manager-helm +revision: + dist: $STX_DIST + GITREVCOUNT: + SRC_DIR: ${MY_REPO}/stx/app-kubernetes-power-manager + BASE_SRCREV: 789f333dec616b91ddb871cf21fdb07e6bf8d750 diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/README b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/README new file mode 100644 index 0000000..2c4b25d --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/README @@ -0,0 +1,14 @@ +This directory contains all StarlingX charts that need to be built for this application. +The Helm Charts are derived from Kubernetes yaml files for Kubernetes Power Manager. + +Additional information can be found at https://github.com/intel/kubernetes-power-manager. + +As the Kubernetes Power Manager versions are updated, maintainers of this repo will need to update +the helm charts. + +======== Important note: StarlingX App Removal ============== +During application installation, some custom resources are installed. Such features are removed +during the application removal process. In case of failure during the application removal process, +(rare occurrence) such as in cases of unavailability of some resource or failure of Kubernetes, +it is necessary to assess, through sysinv.log, whether any of these resources remained installed. +Removal can be performed manually by the user via the command line. \ No newline at end of file diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/files/metadata.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/files/metadata.yaml new file mode 100644 index 0000000..02572c0 --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/files/metadata.yaml @@ -0,0 +1,23 @@ +# Copyright (c) 2023 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +--- +app_name: @APP_NAME@ +app_version: @APP_VERSION@ +helm_repo: @HELM_REPO@ + +maintain_user_overrides: true + +upgrades: + auto_update: true + +behavior: + platform_managed_app: yes + evaluate_reapply: + after: + - node-feature-discovery + + triggers: + - type: host-label-assign + - type: host-modify diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/base/helmrepository.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/base/helmrepository.yaml new file mode 100644 index 0000000..7a2f52e --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/base/helmrepository.yaml @@ -0,0 +1,13 @@ +# Copyright (c) 2023 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +--- +apiVersion: source.toolkit.fluxcd.io/v1beta1 +kind: HelmRepository + +metadata: + name: stx-platform +spec: + url: http://192.168.205.1:8080/helm_charts/stx-platform + interval: 1m diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/base/kustomization.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/base/kustomization.yaml new file mode 100644 index 0000000..f129201 --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/base/kustomization.yaml @@ -0,0 +1,10 @@ +# Copyright (c) 2023 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - helmrepository.yaml diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/kubernetes-power-manager/helmrelease.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/kubernetes-power-manager/helmrelease.yaml new file mode 100644 index 0000000..8e7268c --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/kubernetes-power-manager/helmrelease.yaml @@ -0,0 +1,42 @@ +# Copyright (c) 2023 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease + +metadata: + name: kubernetes-power-manager + labels: + chart_group: kubernetes-power-manager-charts + +spec: + releaseName: kubernetes-power-manager + chart: + spec: + chart: kubernetes-power-manager + version: 2.3.0 + sourceRef: + kind: HelmRepository + name: stx-platform + + interval: 1m + timeout: 30m + + test: + enable: false + + install: + disableHooks: false + + upgrade: + disableHooks: false + + valuesFrom: + - kind: Secret + name: kubernetes-power-manager-static-overrides + valuesKey: kubernetes-power-manager-static-overrides.yaml + - kind: Secret + name: kubernetes-power-manager-system-overrides + valuesKey: kubernetes-power-manager-system-overrides.yaml diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/kubernetes-power-manager/kubernetes-power-manager-static-overrides.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/kubernetes-power-manager/kubernetes-power-manager-static-overrides.yaml new file mode 100644 index 0000000..a1273cd --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/kubernetes-power-manager/kubernetes-power-manager-static-overrides.yaml @@ -0,0 +1,5 @@ +# Copyright (c) 2023 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +--- diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/kubernetes-power-manager/kubernetes-power-manager-system-overrides.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/kubernetes-power-manager/kubernetes-power-manager-system-overrides.yaml new file mode 100644 index 0000000..a1273cd --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/kubernetes-power-manager/kubernetes-power-manager-system-overrides.yaml @@ -0,0 +1,5 @@ +# Copyright (c) 2023 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +--- diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/kubernetes-power-manager/kustomization.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/kubernetes-power-manager/kustomization.yaml new file mode 100644 index 0000000..a67e458 --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/kubernetes-power-manager/kustomization.yaml @@ -0,0 +1,21 @@ +# Copyright (c) 2023 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - helmrelease.yaml + +secretGenerator: + - name: kubernetes-power-manager-system-overrides + files: + - kubernetes-power-manager-system-overrides.yaml + - name: kubernetes-power-manager-static-overrides + files: + - kubernetes-power-manager-static-overrides.yaml + +generatorOptions: + disableNameSuffixHash: true diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/kustomization.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/kustomization.yaml new file mode 100644 index 0000000..c8a25f1 --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/fluxcd-manifests/kustomization.yaml @@ -0,0 +1,13 @@ +# Copyright (c) 2023 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +namespace: intel-power + +resources: + - base + - kubernetes-power-manager diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/Makefile b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/Makefile new file mode 100644 index 0000000..e477717 --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/Makefile @@ -0,0 +1,43 @@ +# +# Copyright 2017 The Openstack-Helm Authors. +# +# Copyright (c) 2023 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +# It's necessary to set this because some environments don't link sh -> bash. + +SHELL := /bin/bash +TASK := build + +EXCLUDES := doc tests tools logs tmp +CHARTS := $(filter-out $(EXCLUDES), $(patsubst %/.,%,$(wildcard */.))) + +.PHONY: $(EXCLUDES) $(CHARTS) + +all: $(CHARTS) + +$(CHARTS): + @if [ -d $@ ]; then \ + echo; \ + echo "===== Processing [$@] chart ====="; \ + make $(TASK)-$@; \ + fi + +init-%: + if [ -f $*/Makefile ]; then make -C $*; fi + +lint-%: init-% + if [ -d $* ]; then helm lint $*; fi + +build-%: lint-% + if [ -d $* ]; then helm package $*; fi + +clean: + @echo "Clean all build artifacts" + rm -f */templates/_partials.tpl */templates/_globals.tpl + rm -f *tgz */charts/*tgz */requirements.lock + rm -rf */charts */tmpcharts + +%: + @: diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/Chart.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/Chart.yaml new file mode 100644 index 0000000..d74b853 --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/Chart.yaml @@ -0,0 +1,11 @@ +# Copyright (c) 2023 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# +--- +apiVersion: v2 +description: A Helm chart for Kubernetes Power Manager +name: kubernetes-power-manager +type: application +version: 2.3.0 +appVersion: 2.3.0 diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/README.md b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/README.md new file mode 100644 index 0000000..732088e --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/README.md @@ -0,0 +1,39 @@ +# README + +This directory contains the StarlingX chart that needs to be built for this +application. The Helm chart is derived from Kubernetes yaml files for +Kubernetes Power Manager. + +The original sources were retrieved from: + +https://github.com/intel/kubernetes-power-manager/archive/refs/tags/v2.3.0.tar.gz. + +Additional information can be found at https://github.com/intel/kubernetes-power-manager/. + +As the Kubernetes Power Manager versions are updated, maintainers of this repo +will need to update the helm chart. + +## Install +1. Install the helm chart +helm install /path/to/kubernetes-power-manager.tgz + +2. Verify the installation +helm list + +3. Customize Configuration (Optional) +helm install -f custom-values.yaml /path/to/kubernetes-power-manager.tgz +Use helm show values stx-platform/kubernetes-power-manager to list all configurable values. + +This helm chart will install Kubernetes Power Manager CRDs and other manifests +needed to run power-operator on the controller and power-node-agent on each +available node. + +## Test + +To run properly, Node Feature Discovery (NFD) is required. After the +installation process of the Kubernetes Power Manager, the system must be configured +to remove the Intel Max CState limitation from the GRUB command line. + +Use kubectl to check whether the cstates are applied + kubectl get cstates -A + kubectl get cstates controller-0 -n intel-power -o yaml diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/cstates.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/cstates.yaml new file mode 100644 index 0000000..e0d504a --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/cstates.yaml @@ -0,0 +1,67 @@ +# Copyright (c) 2023 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.0 + creationTimestamp: null + name: cstates.power.intel.com +spec: + group: power.intel.com + names: + kind: CStates + listKind: CStatesList + plural: cstates + singular: cstates + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: CStates is the Schema for the cstates API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents.
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CStatesSpec defines the desired state of CStates + properties: + exclusivePoolCStates: + additionalProperties: + additionalProperties: + type: boolean + type: object + type: object + individualCoreCStates: + additionalProperties: + additionalProperties: + type: boolean + type: object + type: object + sharedPoolCStates: + additionalProperties: + type: boolean + type: object + type: object + status: + description: CStatesStatus defines the observed state of CStates + type: object + type: object + served: true + storage: true + subresources: + status: {} + diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/powerconfigs.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/powerconfigs.yaml new file mode 100644 index 0000000..c5a8316 --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/powerconfigs.yaml @@ -0,0 +1,73 @@ +# Copyright (c) 2023 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.0 + creationTimestamp: null + name: powerconfigs.power.intel.com +spec: + group: power.intel.com + names: + kind: PowerConfig + listKind: PowerConfigList + plural: powerconfigs + singular: powerconfig + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: PowerConfig is the Schema for the powerconfigs API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: PowerConfigSpec defines the desired state of PowerConfig + properties: + powerNodeSelector: + additionalProperties: + type: string + description: The label on the Nodes you the Operator will look for + to deploy the Node Agent + type: object + powerProfiles: + description: The PowerProfiles that will be created by the Operator + items: + type: string + type: array + customDevices: + description: Custom Devices define other CPU Resources to be considered in Pod's spec + items: + type: string + type: array + type: object + status: + description: PowerConfigStatus defines the observed state of PowerConfig + properties: + nodes: + description: The Nodes that the Node Agent has been deployed to + items: + type: string + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} + diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/powernodes.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/powernodes.yaml new file mode 100644 index 0000000..a42728c --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/powernodes.yaml @@ -0,0 +1,157 @@ +# Copyright (c) 2023 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.0 + creationTimestamp: null + name: powernodes.power.intel.com +spec: + group: power.intel.com + names: + kind: PowerNode + listKind: PowerNodeList + plural: powernodes + singular: powernode + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: PowerNode is the Schema for the powernodes API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: PowerNodeSpec defines the desired state of PowerNode + properties: + nodeName: + description: The name of the node + type: string + powerContainers: + description: Information about the containers in the cluster utilizing + some PowerWorkload + items: + properties: + exclusiveCpus: + description: The exclusive CPUs given to this Container + items: + type: integer + type: array + id: + description: The ID of the Container + type: string + name: + description: The name of the Container + type: string + pod: + description: The name of the Pod the Container is running on + type: string + powerProfile: + description: The PowerProfile that the Container is utilizing + type: string + workload: + description: The PowerWorkload that the Container is utilizing + type: string + type: object + type: array + powerProfiles: + items: + type: string + type: array + powerWorkloads: + items: + type: string + type: array + sharedPool: + type: string + unaffectedCores: + type: string + customDevices: + description: Custom Devices define other CPU Resources to be considered in Pod's spec + items: + type: string + type: array + type: object + status: + description: PowerNodeStatus defines the observed state of PowerNode + properties: + powerNodeCPUState: + description: The state of the Guaranteed Pods and Shared Pool in a + cluster + properties: + guaranteedPods: + description: Pods that are requesting CPUs in the Guaranteed QoS + class + items: + properties: + containers: + description: The Containers that are running in the Pod + items: + properties: + exclusiveCpus: + description: The exclusive CPUs given to this Container + items: + type: integer + type: array + id: + description: The ID of the Container + type: string + name: + description: The name of the Container + type: string + pod: + description: The name of the Pod the Container is + running on + type: string + powerProfile: + description: The PowerProfile that the Container is + utilizing + type: string + workload: + description: The PowerWorkload that the Container + is utilizing + type: string + type: object + type: array + name: + description: The name of the Pod + type: string + namespace: + type: string + node: + description: The name of the Node the Pod is running on + type: string + uid: + description: The UID of the Pod + type: string + type: object + type: array + sharedPool: + description: The CPUs that are currently part of the Shared pool + on a Node + items: + type: integer + type: array + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} + diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/powerpods.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/powerpods.yaml new file mode 100644 index 0000000..10688bf --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/powerpods.yaml @@ -0,0 +1,50 @@ +# Copyright (c) 2023 Wind River Systems, Inc. 
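As a concrete illustration of the CStates CRD defined above (and of the kubectl checks suggested in the chart README), a CStates resource could look like the sketch below; the node name, pool names and C-state names are illustrative assumptions, not values shipped by this chart:

    apiVersion: power.intel.com/v1
    kind: CStates
    metadata:
      name: controller-0
      namespace: intel-power
    spec:
      sharedPoolCStates:
        C1: true
      exclusivePoolCStates:
        performance:
          C6: false
      individualCoreCStates:
        "3":
          C1E: true

Per the cstate.yaml template later in this patch, one such object is generated per node key under the cstatesProfile value and named after that node, which is why the README queries cstates per node.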
+# +# SPDX-License-Identifier: Apache-2.0 +# +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.0 + creationTimestamp: null + name: powerpods.power.intel.com +spec: + group: power.intel.com + names: + kind: PowerPod + listKind: PowerPodList + plural: powerpods + singular: powerpod + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: PowerPod is the Schema for the powerpods API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: PowerPodSpec defines the desired state of PowerPod + type: object + status: + description: PowerPodStatus defines the observed state of PowerPod + type: object + type: object + served: true + storage: true + subresources: + status: {} + diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/powerprofiles.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/powerprofiles.yaml new file mode 100644 index 0000000..467fc8d --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/powerprofiles.yaml @@ -0,0 +1,77 @@ +# Copyright (c) 2023 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.0 + creationTimestamp: null + name: powerprofiles.power.intel.com +spec: + group: power.intel.com + names: + kind: PowerProfile + listKind: PowerProfileList + plural: powerprofiles + singular: powerprofile + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: PowerProfile is the Schema for the powerprofiles API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: PowerProfileSpec defines the desired state of PowerProfile + properties: + epp: + description: The priority value associated with this Power Profile + type: string + governor: + default: powersave + description: Governor to be used + type: string + max: + description: Max frequency cores can run at + type: integer + min: + description: Min frequency cores can run at + type: integer + name: + description: The name of the PowerProfile + type: string + shared: + type: boolean + required: + - name + type: object + status: + description: PowerProfileStatus defines the observed state of PowerProfile + properties: + id: + description: The ID given to the power profile + type: integer + required: + - id + type: object + type: object + served: true + storage: true + subresources: + status: {} + diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/powerworkloads.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/powerworkloads.yaml new file mode 100644 index 0000000..0cc20ec --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/powerworkloads.yaml @@ -0,0 +1,116 @@ +# Copyright (c) 2023 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.0 + creationTimestamp: null + name: powerworkloads.power.intel.com +spec: + group: power.intel.com + names: + kind: PowerWorkload + listKind: PowerWorkloadList + plural: powerworkloads + singular: powerworkload + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: PowerWorkload is the Schema for the powerworkloads API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: PowerWorkloadSpec defines the desired state of PowerWorkload + properties: + allCores: + description: AllCores determines if the Workload is to be applied + to all cores (i.e. 
use the Default Workload) + type: boolean + name: + description: The name of the workload + type: string + powerNodeSelector: + additionalProperties: + type: string + description: The labels signifying the nodes the user wants to use + type: object + powerProfile: + description: PowerProfile is the Profile that this PowerWorkload is + based on + type: string + reservedCPUs: + description: Reserved CPUs are the CPUs that have been reserved by + Kubelet for use by the Kubernetes admin process This list must match + the list in the user's Kubelet configuration + items: + type: integer + type: array + workloadNodes: + properties: + containers: + items: + properties: + exclusiveCpus: + description: The exclusive CPUs given to this Container + items: + type: integer + type: array + id: + description: The ID of the Container + type: string + name: + description: The name of the Container + type: string + pod: + description: The name of the Pod the Container is running + on + type: string + powerProfile: + description: The PowerProfile that the Container is utilizing + type: string + workload: + description: The PowerWorkload that the Container is utilizing + type: string + type: object + type: array + cpuIds: + items: + type: integer + type: array + name: + type: string + type: object + required: + - name + type: object + status: + description: PowerWorkloadStatus defines the observed state of PowerWorkload + properties: + 'node:': + description: The Node that this Shared PowerWorkload is associated + with + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} + diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/timeofdaycronjobs.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/timeofdaycronjobs.yaml new file mode 100644 index 0000000..3b0c614 --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/timeofdaycronjobs.yaml @@ -0,0 +1,220 @@ +# Copyright (c) 2023 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.0 + creationTimestamp: null + name: timeofdaycronjobs.power.intel.com +spec: + group: power.intel.com + names: + kind: TimeOfDayCronJob + listKind: TimeOfDayCronJobList + plural: timeofdaycronjobs + singular: timeofdaycronjob + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: TimeOfDayCronJob is the Schema for the timeofdaycronjobs API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TimeOfDayCronJobSpec defines the desired state of TimeOfDayCronJob + properties: + cState: + description: CStatesSpec defines the desired state of CStates + properties: + exclusivePoolCStates: + additionalProperties: + additionalProperties: + type: boolean + type: object + type: object + individualCoreCStates: + additionalProperties: + additionalProperties: + type: boolean + type: object + type: object + sharedPoolCStates: + additionalProperties: + type: boolean + type: object + type: object + hour: + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file' + type: integer + minute: + type: integer + pods: + items: + properties: + labels: + description: A label selector is a label query over a set of + resources. The result of matchLabels and matchExpressions + are ANDed. An empty label selector matches all objects. A + null label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. This + array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + target: + type: string + required: + - labels + - target + type: object + type: array + profile: + type: string + reservedCPUs: + items: + type: integer + type: array + second: + type: integer + timeZone: + type: string + required: + - hour + - minute + - profile + - timeZone + type: object + status: + properties: + active: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file' + items: + description: "ObjectReference contains enough information to let + you inspect or modify the referred object. --- New uses of this + type are discouraged because of difficulty describing its usage + when embedded in APIs. 1. Ignored fields. It includes many fields + which are not generally honored. For instance, ResourceVersion + and FieldPath are both very rarely valid in actual usage. 2. Invalid + usage help. It is impossible to add specific help for individual + usage. In most embedded usages, there are particular restrictions + like, \"must refer only to types A and B\" or \"UID not honored\" + or \"name must be restricted\". 
Those cannot be well described + when embedded. 3. Inconsistent validation. Because the usages + are different, the validation rules are different by usage, which + makes it hard for users to predict what will happen. 4. The fields + are both imprecise and overly precise. Kind is not a precise + mapping to a URL. This can produce ambiguity during interpretation + and require a REST mapping. In most cases, the dependency is + on the group,resource tuple and the version of the actual struct + is irrelevant. 5. We cannot easily change it. Because this type + is embedded in many locations, updates to this type will affect + numerous schemas. Don't make new APIs embed an underspecified + API type they do not control. \n Instead of using this type, create + a locally provided and used type that is well-focused on your + reference. For example, ServiceReferences for admission registration: + https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + ." + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + type: array + lastScheduleTime: + format: date-time + type: string + lastSuccessfulTime: + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} + diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/timeofdays.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/timeofdays.yaml new file mode 100644 index 0000000..aa3213e --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/timeofdays.yaml @@ -0,0 +1,160 @@ +# Copyright (c) 2023 Wind River Systems, Inc. 
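To make the TimeOfDayCronJob schema above more tangible, a minimal resource satisfying its required fields (hour, minute, timeZone, profile) might look like the following; every value here is an illustrative assumption, and nothing in this chart creates such an object by default:

    apiVersion: power.intel.com/v1
    kind: TimeOfDayCronJob
    metadata:
      name: timeofday-example
      namespace: intel-power
    spec:
      hour: 22
      minute: 30
      timeZone: "America/Sao_Paulo"
      profile: balance-power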
+# +# SPDX-License-Identifier: Apache-2.0 +# +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.0 + creationTimestamp: null + name: timeofdays.power.intel.com +spec: + group: power.intel.com + names: + kind: TimeOfDay + listKind: TimeOfDayList + plural: timeofdays + singular: timeofday + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: TimeOfDay is the Schema for the timeofdays API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TimeOfDaySpec defines the desired state of TimeOfDay + properties: + reservedCPUs: + items: + type: integer + type: array + schedule: + description: Schedule for adjusting performance mode + items: + properties: + cState: + description: CStatesSpec defines the desired state of CStates + properties: + exclusivePoolCStates: + additionalProperties: + additionalProperties: + type: boolean + type: object + type: object + individualCoreCStates: + additionalProperties: + additionalProperties: + type: boolean + type: object + type: object + sharedPoolCStates: + additionalProperties: + type: boolean + type: object + type: object + pods: + items: + properties: + labels: + description: A label selector is a label query over a + set of resources. The result of matchLabels and matchExpressions + are ANDed. An empty label selector matches all objects. + A null label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. 
+ type: object + type: object + target: + type: string + required: + - labels + - target + type: object + type: array + powerProfile: + type: string + time: + type: string + required: + - time + type: object + type: array + timeZone: + description: Time Zone to use for scheduling + type: string + required: + - schedule + type: object + status: + description: TimeOfDayStatus defines the observed state of TimeOfDay + properties: + lastSchedule: + description: The time of the last update + type: string + nextSchedule: + description: The time of the next update + type: string + powerProfile: + description: PowerProfile associated with Time of Day + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} + diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/uncores.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/uncores.yaml new file mode 100644 index 0000000..07cc7a0 --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/crds/uncores.yaml @@ -0,0 +1,71 @@ +# Copyright (c) 2023 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.0 + creationTimestamp: null + name: uncores.power.intel.com +spec: + group: power.intel.com + names: + kind: Uncore + listKind: UncoreList + plural: uncores + singular: uncore + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Uncore is the Schema for the uncores API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: UncoreSpec defines the desired state of Uncore + properties: + dieSelector: + items: + properties: + die: + type: integer + max: + type: integer + min: + type: integer + package: + type: integer + required: + - max + - min + - package + type: object + type: array + sysMax: + type: integer + sysMin: + type: integer + type: object + status: + description: UncoreStatus defines the observed state of Uncore + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/agent-ds-configmap.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/agent-ds-configmap.yaml new file mode 100644 index 0000000..8770cb6 --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/agent-ds-configmap.yaml @@ -0,0 +1,74 @@ +# Copyright (c) 2023 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: power-node-agent-ds + namespace: intel-power +data: + power-node-agent-ds.yaml: | + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: power-node-agent + namespace: intel-power + spec: + selector: + matchLabels: + name: power-node-agent-pod + template: + metadata: + namespace: intel-power + labels: + name: power-node-agent-pod + spec: + serviceAccountName: intel-power-node-agent +{{- with .Values.agent.tolerations }} + tolerations: +{{ toYaml . | indent 12 }} +{{- end }} +{{- with .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml . | indent 12 }} +{{- end }} + containers: + - image: {{ .Values.agent.image.repository }}:{{ .Values.agent.image.tag }} + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + name: power-node-agent + args: [ "--zap-log-level","3" ] + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + resources: + limits: + cpu: 100m + memory: 64Mi + requests: + cpu: 0 + memory: 64Mi + volumeMounts: + - mountPath: /sys/devices/system/cpu + name: cpusetup + - mountPath: /sys/fs + name: cgroup + readOnly: true + - mountPath: /var/lib/kubelet/pod-resources/ + name: kubesock + readOnly: true + volumes: + - name: cpusetup + hostPath: + path: /sys/devices/system/cpu + - name: cgroup + hostPath: + path: /sys/fs + - name: kubesock + hostPath: + path: /var/lib/kubelet/pod-resources diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/cstate.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/cstate.yaml new file mode 100644 index 0000000..72aa3ed --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/cstate.yaml @@ -0,0 +1,11 @@ +{{- $top := . 
-}} +{{- range $node, $cstates := .Values.cstatesProfile }} +--- +apiVersion: power.intel.com/v1 +kind: CStates +metadata: + name: {{ $node }} + namespace: intel-power +spec: + {{- toYaml $cstates | nindent 2 }} +{{- end -}} diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/manager.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/manager.yaml new file mode 100644 index 0000000..68eb307 --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/manager.yaml @@ -0,0 +1,90 @@ +# Copyright (c) 2023 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: intel-power + labels: + control-plane: controller-manager +spec: + selector: + matchLabels: + control-plane: controller-manager + replicas: 1 + template: + metadata: + labels: + control-plane: controller-manager + spec: + serviceAccountName: intel-power-operator + terminationGracePeriodSeconds: 10 + + containers: + - name: manager + imagePullPolicy: IfNotPresent + image: {{ .Values.manager.image.repository }}:{{ .Values.manager.image.tag }} + + command: + - /manager + + securityContext: + allowPrivilegeEscalation: true + capabilities: + drop: ["ALL"] + + resources: + limits: + cpu: 100m + memory: 30Mi + requests: + cpu: 0 + memory: 20Mi + + volumeMounts: + - mountPath: /sys/fs + name: cgroup + mountPropagation: HostToContainer + readOnly: false + - mountPath: /power-manifests/power-node-agent-ds.yaml + name: power-node-agent-ds + subPath: power-node-agent-ds.yaml + readOnly: true + + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + preference: + matchExpressions: + - key: "node-role.kubernetes.io/master" + operator: Exists + - weight: 1 + preference: + matchExpressions: + - key: "node-role.kubernetes.io/control-plane" + operator: Exists + +{{- with .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml . | indent 6 }} +{{- end }} + + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + + volumes: + - name: cgroup + hostPath: + path: /sys/fs + - name: power-node-agent-ds + configMap: + name: power-node-agent-ds diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/powerconfig.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/powerconfig.yaml new file mode 100644 index 0000000..4f725b6 --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/powerconfig.yaml @@ -0,0 +1,21 @@ +# Copyright (c) 2023 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# +--- +apiVersion: power.intel.com/v1 +kind: PowerConfig + +metadata: + name: power-config + namespace: intel-power + +spec: + powerNodeSelector: + power-management: enabled + powerProfiles: + - "performance" + - "balance-performance" + - "balance-power" + customDevices: + - "windriver.com/isolcpus" diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/powerworkload.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/powerworkload.yaml new file mode 100644 index 0000000..7f630aa --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/powerworkload.yaml @@ -0,0 +1,19 @@ +{{- $top := . -}} +{{- range $node, $powerworkload := .Values.sharedProfile }} +--- +apiVersion: power.intel.com/v1 +kind: PowerWorkload +metadata: + name: shared-{{ $node }}-workload + namespace: intel-power +spec: + name: shared-{{ $node }}-workload + allCores: true + powerNodeSelector: + kubernetes.io/hostname: {{ $node }} + powerProfile: shared-{{ $node }} + reservedCPUs: + {{- range $powerworkload.reservedCPUs }} + - {{. | int }} + {{- end }} +{{- end -}} diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/rbac.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/rbac.yaml new file mode 100644 index 0000000..69b53da --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/rbac.yaml @@ -0,0 +1,266 @@ +# Copyright (c) 2023 Wind River Systems, Inc. 
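The cstate.yaml and powerworkload.yaml templates above, together with the sharedprofile.yaml template further down, range over the cstatesProfile and sharedProfile values, which are not defined in the chart's default values.yaml and are expected to arrive through overrides. A hypothetical per-node override could look like the sketch below; the node name, frequencies, EPP value and reserved CPU list are assumptions for illustration only:

    sharedProfile:
      controller-0:
        shared: true
        epp: power
        max: 1800
        min: 800
        governor: powersave
        reservedCPUs:
          - 0
          - 1
    cstatesProfile:
      controller-0:
        sharedPoolCStates:
          C1: true

With an override like this, the templates would render a shared-controller-0 PowerProfile, a shared-controller-0-workload PowerWorkload and a controller-0 CStates object in the intel-power namespace.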
+# +# SPDX-License-Identifier: Apache-2.0 +# +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: intel-power-operator + namespace: intel-power + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: intel-power-node-agent + namespace: intel-power + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: operator-custom-resource-definitions-role + namespace: intel-power +rules: + - apiGroups: [ "", "power.intel.com", "apps", "coordination.k8s.io" ] + resources: [ "powerconfigs", "powerconfigs/status", "powerprofiles", "powerprofiles/status", "events", "daemonsets", "configmaps", "configmaps/status", "leases","uncores" ] + verbs: [ "*" ] + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: operator-custom-resource-definitions-role-binding + namespace: intel-power +subjects: + - kind: ServiceAccount + name: intel-power-operator + namespace: intel-power +roleRef: + kind: Role + name: operator-custom-resource-definitions-role + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: operator-nodes +rules: + - apiGroups: [ "", "power.intel.com", "apps" ] + resources: [ "nodes", "nodes/status", "configmaps", "configmaps/status", "powerconfigs", "powerconfigs/status", "powerprofiles", "powerprofiles/status", "powerworkloads", "powerworkloads/status", "powernodes", "powernodes/status", "events", "daemonsets","uncores" ] + verbs: [ "*" ] + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: operator-nodes-binding +subjects: + - kind: ServiceAccount + name: intel-power-operator + namespace: intel-power +roleRef: + kind: ClusterRole + name: operator-nodes + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: node-agent-cluster-resources +rules: + - apiGroups: [ "", "batch", "power.intel.com" ] + resources: [ "nodes", "nodes/status", "pods", "pods/status", "cronjobs", "cronjobs/status", "powerprofiles", "powerprofiles/status", "powerworkloads", "powerworkloads/status", "powernodes", "powernodes/status", "cstates", "cstates/status", "timeofdays", "timeofdays/status", "timeofdaycronjobs", "timeofdaycronjobs/status","uncores" ] + verbs: [ "*" ] + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: node-agent-cluster-resources-binding +subjects: + - kind: ServiceAccount + name: intel-power-node-agent + namespace: intel-power +roleRef: + kind: ClusterRole + name: node-agent-cluster-resources + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: manager-role +rules: +- apiGroups: + - power.intel.com + resources: + - cstates + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - power.intel.com + resources: + - cstates/status + verbs: + - get + - patch + - update +- apiGroups: + - power.intel.com + resources: + - powerconfigs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - power.intel.com + resources: + - powerconfigs/status + verbs: + - get + - patch + - update +- apiGroups: + - power.intel.com + resources: + - powernodes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - power.intel.com + resources: + - powernodes/status + verbs: + - get + - patch + - update +- apiGroups: + - 
power.intel.com + resources: + - powerpods + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - power.intel.com + resources: + - powerpods/status + verbs: + - get + - patch + - update +- apiGroups: + - power.intel.com + resources: + - powerprofiles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - power.intel.com + resources: + - powerprofiles/status + verbs: + - get + - patch + - update +- apiGroups: + - power.intel.com + resources: + - powerworkloads + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - power.intel.com + resources: + - powerworkloads/status + verbs: + - get + - patch + - update +- apiGroups: + - power.intel.com + resources: + - timeofdays + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - power.intel.com + resources: + - timeofdays/status + verbs: + - get + - patch + - update +- apiGroups: + - power.intel.com + resources: + - uncores + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - power.intel.com + resources: + - uncores/finalizers + verbs: + - update +- apiGroups: + - power.intel.com + resources: + - uncores/status + verbs: + - get + - patch + - update diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/sharedprofile.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/sharedprofile.yaml new file mode 100644 index 0000000..a625a6b --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/templates/sharedprofile.yaml @@ -0,0 +1,26 @@ +{{- $top := . -}} +{{- range $node, $powerprofile := .Values.sharedProfile }} +--- +apiVersion: "power.intel.com/v1" +kind: PowerProfile +metadata: + name: shared-{{ $node }} + namespace: intel-power +spec: + name: shared-{{ $node }} +{{- if $powerprofile.shared }} + shared: {{ $powerprofile.shared }} +{{- end -}} +{{- if $powerprofile.epp }} + epp: {{ $powerprofile.epp }} +{{- end -}} +{{- if $powerprofile.max }} + max: {{ $powerprofile.max }} +{{- end -}} +{{- if $powerprofile.min }} + min: {{ $powerprofile.min }} +{{- end -}} +{{- if $powerprofile.governor }} + governor: {{ $powerprofile.governor }} +{{- end -}} +{{- end -}} diff --git a/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/values.yaml b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/values.yaml new file mode 100644 index 0000000..db6f040 --- /dev/null +++ b/stx-kubernetes-power-manager-helm/stx-kubernetes-power-manager-helm/helm-charts/kubernetes-power-manager/values.yaml @@ -0,0 +1,26 @@ +# Copyright (c) 2023 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# Default values for kubernetes-power-manager. 
+ +--- +manager: + image: + repository: docker.io/starlingx/power-operator + tag: stx.9.0-v2.3.0 +agent: + image: + repository: docker.io/starlingx/power-node-agent + tag: stx.9.0-v2.3.0 + tolerations: + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + +imagePullSecrets: + - name: default-registry-key diff --git a/tox.ini b/tox.ini index 1483c87..ace4fb4 100644 --- a/tox.ini +++ b/tox.ini @@ -1,50 +1,56 @@ [tox] envlist = linters -minversion = 2.3 +minversion = 2.9 skipsdist = True -sitepackages=False +sitepackages = False [testenv] -install_command = pip install -U {opts} {packages} +install_command = pip install -U \ + {opts} {packages} \ + -c{env:TOX_CONSTRAINTS_FILE:https://opendev.org/starlingx/root/raw/branch/master/build-tools/requirements/debian/upper-constraints.txt} setenv = - VIRTUAL_ENV={envdir} - OS_STDOUT_CAPTURE=1 - OS_STDERR_CAPTURE=1 - OS_DEBUG=1 - OS_LOG_CAPTURE=1 + VIRTUAL_ENV={envdir} + OS_STDOUT_CAPTURE=1 + OS_STDERR_CAPTURE=1 + OS_DEBUG=1 + OS_LOG_CAPTURE=1 deps = - -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt + -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt allowlist_externals = - bash - + bash passenv = XDG_CACHE_HOME + [testenv:bashate] # Treat all E* codes as Errors rather than warnings using: -e 'E*' commands = - bash -c "find {toxinidir} \ - -not \( -type d -name .?\* -prune \) \ - -type f \ - -not -name \*~ \ - -not -name \*.md \ - -name \*.sh \ - -print0 | xargs -r -n 1 -0 bashate -v \ - -e 'E*'" + bash -c "find {toxinidir} \ + -not \( -type d -name .?\* -prune \) \ + -type f \ + -not -name \*~ \ + -not -name \*.md \ + -name \*.sh \ + -print0 | xargs -r -n 1 -0 bashate -v \ + -e 'E*'" + [testenv:linters] commands = {[testenv:bashate]commands} + [testenv:flake8] basepython = python3 description = Dummy environment to allow flake8 to be run in subdir tox + [testenv:pylint] basepython = python3 description = Dummy environment to allow pylint to be run in subdir tox + [testenv:bandit] basepython = python3 description = Dummy environment to allow bandit to be run in subdir tox