From c47f0964d9c9d43cb7292c38fd2d10f655b643e6 Mon Sep 17 00:00:00 2001 From: Kaustubh Dhokte Date: Tue, 1 Mar 2022 07:40:16 +0000 Subject: [PATCH] Debian: Add kubernetes 1.23.1 package Test Plan:(On Debian) Kubernetes 1.23.1 package builds successfully All packages build successfully Image builds successfully Depends-On: https://review.opendev.org/c/starlingx/compile/+/825651 Story: 2009830 Task: 44638 Signed-off-by: Kaustubh Dhokte Change-Id: I57de1d998412e61bb928a9ce1930bc2a1c600282 --- debian_pkg_dirs | 1 + .../debian/deb_folder/changelog | 160 +++++ .../debian/deb_folder/control | 90 +++ .../debian/deb_folder/copyright | 477 +++++++++++++++ .../debian/deb_folder/kubeadm.conf | 18 + .../debian/deb_folder/kubelet-cgroup-setup.sh | 132 ++++ .../kubernetes-1.23.1-client.install | 3 + ...kubernetes-1.23.1-client.lintian-overrides | 9 + .../deb_folder/kubernetes-1.23.1-kubeadm.dirs | 1 + .../kubernetes-1.23.1-kubeadm.install | 2 + .../deb_folder/kubernetes-1.23.1-master.dirs | 5 + .../kubernetes-1.23.1-master.install | 8 + ...kubernetes-1.23.1-master.lintian-overrides | 7 + .../deb_folder/kubernetes-1.23.1-misc.install | 11 + .../deb_folder/kubernetes-1.23.1-node.install | 2 + .../kubernetes-1.23.1-node.lintian-overrides | 4 + .../kubernetes-1.23.1-unit-test.install | 2 + ...th-for-coredns-only-for-default-repo.patch | 113 ++++ ...rt-for-kubernetes-to-ignore-isolcpus.patch | 79 +++ ...latform-pods-with-zero-CPU-resources.patch | 108 ++++ ...er-disable-CFS-quota-throttling-for-.patch | 287 +++++++++ ...er-infrastructure-pods-use-system-re.patch | 149 +++++ ...er-introduce-concept-of-isolated-CPU.patch | 562 ++++++++++++++++++ ...er-keep-normal-containers-off-reserv.patch | 311 ++++++++++ ...isolcpus-allocation-when-SMT-enabled.patch | 50 ++ ...s-make-isolcpus-allocation-SMT-aware.patch | 151 +++++ .../debian/deb_folder/patches/series | 9 + .../kubernetes-1.23.1/debian/deb_folder/rules | 138 +++++ .../debian/deb_folder/source/format | 1 + .../kubernetes-1.23.1/debian/meta_data.yaml | 9 + 30 files changed, 2899 insertions(+) create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/changelog create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/control create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/copyright create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/kubeadm.conf create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/kubelet-cgroup-setup.sh create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-client.install create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-client.lintian-overrides create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-kubeadm.dirs create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-kubeadm.install create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-master.dirs create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-master.install create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-master.lintian-overrides create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-misc.install create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-node.install create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-node.lintian-overrides create mode 100644 
kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-unit-test.install create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/Revert-use-subpath-for-coredns-only-for-default-repo.patch create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/enable-support-for-kubernetes-to-ignore-isolcpus.patch create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubeadm-create-platform-pods-with-zero-CPU-resources.patch create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubelet-cpumanager-disable-CFS-quota-throttling-for-.patch create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubelet-cpumanager-infrastructure-pods-use-system-re.patch create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubelet-cpumanager-introduce-concept-of-isolated-CPU.patch create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubelet-cpumanager-keep-normal-containers-off-reserv.patch create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubelet-sort-isolcpus-allocation-when-SMT-enabled.patch create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubernetes-make-isolcpus-allocation-SMT-aware.patch create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/series create mode 100755 kubernetes/kubernetes-1.23.1/debian/deb_folder/rules create mode 100644 kubernetes/kubernetes-1.23.1/debian/deb_folder/source/format create mode 100644 kubernetes/kubernetes-1.23.1/debian/meta_data.yaml diff --git a/debian_pkg_dirs b/debian_pkg_dirs index 30b3f485d..ba4ea8fb4 100644 --- a/debian_pkg_dirs +++ b/debian_pkg_dirs @@ -55,6 +55,7 @@ kubernetes/etcd kubernetes/helm kubernetes/k8s-pod-recovery kubernetes/kubernetes-1.21.8 +kubernetes/kubernetes-1.23.1 kubernetes/kubernetes-unversioned kubernetes/plugins/isolcpus-device-plugin kubernetes/runc diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/changelog b/kubernetes/kubernetes-1.23.1/debian/deb_folder/changelog new file mode 100644 index 000000000..94defe96d --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/changelog @@ -0,0 +1,160 @@ +kubernetes-1.23.1 (1.23.1-1) unstable; urgency=medium + + * Updated for stx debian packaging + + -- Kaustubh Dhokte Tue, 22 Feb 2022 20:57:45 +0000 + + +kubernetes (1.21.8-1) unstable; urgency=medium + + * Updated for stx debian packaging + + -- Mihnea Saracin Fri, 29 Oct 2021 12:51:12 +0000 + + +kubernetes (1.12.10+dfsg-1) UNRELEASED; urgency=medium + + [ TODO ] [ FIXME ] + * copyright / vendor + * missing-sources + * rules: "make generated_files" + + * New upstream release. + * Patch away deprecated "docker/engine-api". + * rules: + + clean "zz_generated.deepcopy.go" files. + * New patch to fix selinux import. + + -- Dmitry Smirnov Mon, 18 Jun 2018 15:26:23 +1000 + +kubernetes (1.7.16+dfsg-1) unstable; urgency=medium + + [ Michael Stapelberg ] + * Switch to XS-Go-Import-Path + + [ Dmitry Smirnov ] + * Resurrected "mergo.patch" that has been mistakenly removed + (Closes: #878254). + * Re-enabled safeguard test for the above problem. + * New upstream release: + + CVE-2017-1002101 (Closes: #892801) + + CVE-2017-1002102 (Closes: #894051) + * Updated Vcs URLs for Salsa. 
+ * Standards-Version: 4.1.4 + * Build-Depends: + - golang-go + + golang-any + + golang-github-appc-cni-dev + + golang-github-armon-circbuf-dev + + golang-github-azure-azure-sdk-for-go-dev + + golang-github-dgrijalva-jwt-go-v3-dev + + golang-github-docker-distribution-dev + + golang-github-docker-docker-dev + + golang-github-emicklei-go-restful-swagger12-dev + + golang-github-gogo-protobuf-dev + + golang-github-gorilla-websocket-dev + + golang-github-grpc-ecosystem-go-grpc-prometheus-dev + + golang-github-karlseguin-ccache-dev + - golang-github-opencontainers-runc-dev + + golang-github-opencontainers-docker-runc-dev + + golang-github-pmezard-go-difflib-dev + + golang-golang-x-time-dev + + golang-golang-x-tools-dev + + golang-google-grpc-dev + + golang-gopkg-warnings.v0-dev + + golang-goprotobuf-dev + + -- Dmitry Smirnov Sun, 06 May 2018 16:20:21 +1000 + +kubernetes (1.7.7+dfsg-3) unstable; urgency=medium + + * kubernetes-master should depend on etcd (Closes: #855218). + + -- Andrew Shadura Sun, 22 Oct 2017 19:40:46 +0100 + +kubernetes (1.7.7+dfsg-2) unstable; urgency=medium + + * Use CURDIR, not PWD, unbreaks the build at buildds. + + -- Andrew Shadura Fri, 06 Oct 2017 19:25:45 +0200 + +kubernetes (1.7.7+dfsg-1) unstable; urgency=medium + + [ Tim Potter ] + * Open work for new release + * Remove unused Files-Excluded entries from d/copyright + * Remove Skydns B-D as no longer used + * Don't build on ppc64 or ppc64le architectures + + [ Andrew Shadura ] + * New upstream release. + * Refresh patches. + * Update build dependencies. + * Symlink vendor packages to the build directory. + + -- Andrew Shadura Fri, 06 Oct 2017 18:54:06 +0200 + +kubernetes (1.5.5+dfsg-2) unstable; urgency=medium + + * Team upload. + * Don't build on ppc64le due to Go linker problems. See GitHub issue + https://github.com/golang/go/issues/15823. + * Don't build on ppc64 as it's not supported by upstream at the + moment. (Closes: #860505) + + -- Tim Potter Sat, 03 Jun 2017 08:00:51 +1000 + +kubernetes (1.5.5+dfsg-1) unstable; urgency=low + + [ Dmitry Smirnov ] + * Switch to bundled "rkt". + * rules: remove "-p" option from build and test overrides. + * control: drop obsolete "golang-clockwork-dev" alternative. + * New patch to disable test failing on [armel]. + * Upload to unstable. + + [ Tim Potter ] + * New upstream version. [March 2017] + * Big updates to d/rules and d/copyright to update to upstream + changes made since the 1.2.x release. + * Refresh patches to bring up to date with upstream changes since + 1.2.x. + * control: add lsb-base as dependency for sysvinit scripts. + * Suppress spelling-error-in-binary Lintian messages. + + -- Tim Potter Thu, 13 Apr 2017 16:45:57 +1000 + +kubernetes (1.2.5+dfsg-1) experimental; urgency=medium + + * New upstream release [June 2016]. + * Switch to private "github.com/golang/glog" due to log noise. + * Disabled failing tests; no longer ignore failures in tests. + * Build/test using 2 cores only. + * New patch to update appc/cni name space (fixes FTBFS). + * Removed obsolete "spf13-cobra.patch". + + -- Dmitry Smirnov Sun, 03 Jul 2016 04:12:28 +1000 + +kubernetes (1.2.4+dfsg-2) experimental; urgency=medium + + * Added new patch to fix incompatibility with "imdario/mergo" v0.2.2 + (Closes: #825753). + Thanks, Florian Ernst. + * Enable tests but ignore failures for now. + + -- Dmitry Smirnov Fri, 17 Jun 2016 01:41:38 +1000 + +kubernetes (1.2.4+dfsg-1) experimental; urgency=medium + + * New upstream release [May 2016]. 
+ * New patch to print output of "uname -m" on unsupported architectures. + * New "docker.patch" to fix potential FTBFS. + + Build-Depends += "golang-github-docker-distribution-dev". + + -- Dmitry Smirnov Wed, 15 Jun 2016 21:03:01 +1000 + +kubernetes (1.2.3+dfsg-1) experimental; urgency=low + + * Initial release (Closes: #795652). + + -- Dmitry Smirnov Mon, 25 Apr 2016 22:40:12 +1000 diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/control b/kubernetes/kubernetes-1.23.1/debian/deb_folder/control new file mode 100644 index 000000000..4f46e6411 --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/control @@ -0,0 +1,90 @@ +Source: kubernetes-1.23.1 +Section: admin +Priority: optional +Maintainer: StarlingX Developers +Build-Depends: debhelper-compat (= 13), + build-essential, + bash-completion, + jq, + rsync, + go-bindata, + go-md2man, + golang-1.17 +Standards-Version: 4.4.1 +Homepage: http://kubernetes.io/ + +Package: kubernetes-1.23.1-client +Provides: kubernetes-utils +Architecture: amd64 +Depends: ${misc:Depends}, ${shlibs:Depends} +Recommends: ${misc:Recommends} +Built-Using: ${misc:Built-Using} +Description: Kubernetes Command Line Tool + The Kubernetes command line tool for interacting with the Kubernetes API. + +Package: kubernetes-1.23.1-master +Architecture: amd64 +Depends: ${misc:Depends}, ${shlibs:Depends}, + adduser, + lsb-base, + etcd +Recommends: ${misc:Recommends}, kubernetes-1.23.1-client +Built-Using: ${misc:Built-Using} +Description: Kubernetes services for master host + Container Cluster Manager from Google. Kubernetes is an open source system + for managing containerized applications across multiple hosts, providing + basic mechanisms for deployment, maintenance, and scaling of applications. + . + Linux kernel version 3.8 or above is required for proper operation of the + daemon process, and that any lower versions may have subtle and/or glaring + issues. + . + This package provides "kube-apiserver", "kube-controller-manager" and + "kube-scheduler" daemons. + +Package: kubernetes-1.23.1-node +Provides: cadvisor +Architecture: amd64 +Depends: ${misc:Depends}, ${shlibs:Depends}, + adduser, + conntrack, + conntrackd, + docker.io, + lsb-base, + socat, +Recommends: ${misc:Recommends}, kubernetes-1.23.1-client +Built-Using: ${misc:Built-Using} +Description: Kubernetes services for node host + Container Cluster Manager from Google. Kubernetes is an open source system + for managing containerized applications across multiple hosts, providing + basic mechanisms for deployment, maintenance, and scaling of applications. + . + Linux kernel version 3.8 or above is required for proper operation of the + daemon process, and that any lower versions may have subtle and/or glaring + issues. + +Package: kubernetes-1.23.1-kubeadm +Architecture: amd64 +Depends: ${misc:Depends}, containernetworking-plugins +Recommends: ${misc:Recommends}, kubernetes-1.23.1-client +Built-Using: ${misc:Built-Using} +Description: Kubernetes Cluster Bootstrapping Tool + The Kubernetes command line tool for bootstrapping a Kubernetes cluster. + +Package: kubernetes-1.23.1-misc +Architecture: amd64 +Depends: ${misc:Depends}, ${shlibs:Depends} +Recommends: ${misc:Recommends} +Built-Using: ${misc:Built-Using} +Description: dummy package + Kubernetes dummy package for misc stuff we don't want to install in production. 
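For local verification, a minimal sketch of building the split binary packages declared above with standard Debian tooling; staging deb_folder as debian/ inside an unpacked Kubernetes 1.23.1 source tree and the resulting .deb file name are assumptions for illustration, not something this control file dictates:

    # hypothetical local build of the packages defined in this control file
    sudo apt-get build-dep ./        # install the Build-Depends listed above
    dpkg-buildpackage -us -uc -b     # build unsigned binary packages
    dpkg-deb -c ../kubernetes-1.23.1-client_1.23.1-1_amd64.deb   # inspect the staged kubectl paths
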
+ +Package: kubernetes-1.23.1-unit-test +Architecture: amd64 +Depends: ${misc:Depends}, ${shlibs:Depends}, + hostname, + rsync, + etcd (>= 2.0.9), + network-manager, +Recommends: ${misc:Recommends} +Built-Using: ${misc:Built-Using} diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/copyright b/kubernetes/kubernetes-1.23.1/debian/deb_folder/copyright new file mode 100644 index 000000000..dee773fdc --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/copyright @@ -0,0 +1,477 @@ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: Kubernetes +Source: https://github.com/kubernetes/kubernetes + +Files: + debian/kubeadm.conf + debian/kubelet-cgroup-setup.sh +Copyright: + 2022 Wind River Systems, Inc. +License: Apache-2.0 + +Files: * +Copyright: + 2011-2016 Google Inc + 2014-2018 the Kubernetes Authors +License: Apache-2.0 +Comment: + examples/storage/mysql-galera/README.md + Copyrights in the above file are fragments of the sample output to console. + examples/storage/hazelcast/README.md + Copyrights in the above file are fragments of the sample output to console. + +Files: + vendor/github.com/coreos/rkt/api/v1alpha/* +Copyright: + 2015 The rkt Authors +License: Apache-2.0 + +Files: + third_party/forked/golang/* +Copyright: + 2009, 2012-2013 The Go Authors +License: BSD-3-Clause~Google + +Files: + third_party/protobuf/* +Copyright: 2008 Google Inc. +License: BSD-3-Clause~Google + +Files: + third_party/forked/shell2junit/* +Copyright: 2009 Manolo Carrasco (Manuel Carrasco Moñino) +License: Apache-2.0 + +Files: + third_party/htpasswd/* +Copyright: + 2008 Eli Carter + 2003-2013 Edgewall Software +License: BSD-3-Clause + +Files: + third_party/intemp/* +Copyright: + 2015 Karl Isenberg +License: Apache-2.0 + +Files: + third_party/swagger-ui/* +Copyright: + 2014 Reverb Technologies, Inc. +License: Apache-2.0 + +Files: + debian/missing-sources/backbone.js + third_party/swagger-ui/lib/backbone-min.js +Copyright: + 2010-2014 Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors +License: Expat + +Files: + debian/missing-sources/handlebars.runtime-v2.0.0.js + third_party/swagger-ui/lib/handlebars*.js +Copyright: + 2011-2014 Yehuda Katz +License: Expat + +Files: + debian/missing-sources/highlight.js + third_party/swagger-ui/lib/highlight*.js +Copyright: + 2006, Ivan Sagalaev +License: BSD-3-Clause + +Files: + debian/missing-sources/jquery.js + third_party/swagger-ui/lib/jquery-1.8.0.min.js +Copyright: + 2012 jQuery Foundation and other contributors +License: Expat + +Files: + debian/missing-sources/jquery.ba-bbq.js + third_party/swagger-ui/lib/jquery.ba-bbq.min.js +Copyright: + 2010 "Cowboy" Ben Alman +License: Expat +Comment: Alternatively can be GPL-licensed. 
+ +Files: + debian/missing-sources/jquery.slideto.js + third_party/swagger-ui/lib/jquery.slideto.min.js +Copyright: + 2015 Joakim Hedlund +License: Expat +Comment: https://github.com/Sleavely/jQuery-slideto/blob/master/LICENSE + +Files: + third_party/swagger-ui/lib/jquery.wiggle.min.js +Copyright: + WonderGroup, Jordan Thomas +License: Expat + +Files: + third_party/swagger-ui/lib/marked.js +Copyright: + 2011-2014 Christopher Jeffrey +License: Expat + +Files: + debian/missing-sources/underscore.js + third_party/swagger-ui/lib/underscore-min.js +Copyright: + 2009-2014 Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors +License: Expat + +Files: + third_party/swagger-ui/lib/shred.bundle.js + third_party/swagger-ui/lib/shred/content.js +Copyright: + 2012-2015 Panda Strike, LLC and Dan Yoder + Alexandru Marasteanu +License: ISC + +Files: + vendor/bitbucket.org/bertimus9/systemstat/* +Copyright: + 2013 Phillip Bond +License: Expat + +Files: + vendor/bitbucket.org/ww/goautoneg/* +Copyright: + 2011 Open Knowledge Foundation Ltd. +License: BSD-3-Clause~OKF + +Files: + vendor/github.com/Azure/azure-sdk-for-go/* +Copyright: + 2016 Microsoft Corporation +License: Apache-2.0 + +Files: + vendor/github.com/Azure/go-autorest/* +Copyright: + 2015 Microsoft Corporation +License: Apache-2.0 + +Files: + vendor/github.com/chai2010/gettext-go/* +Copyright: + 2013 ChaiShushan +License: BSD-3-Clause~Google + +Files: + vendor/github.com/clusterhq/flocker-go/* +Copyright: + 2014-2016 ClusterHQ +License: Apache-2.0 + +Files: + vendor/github.com/containernetworking/cni/* +Copyright: + 2015-2016 CNI authors +License: Apache-2.0 + +Files: + vendor/github.com/codedellemc/goscaleio/* +Copyright: Dell ??? +License: Apache-2.0 +Comment: https://github.com/thecodeteam/goscaleio/issues/32 + +Files: + vendor/github.com/fatih/camelcase/* +Copyright: 2015 Fatih Arslan +License: Expat + +Files: + vendor/github.com/heketi/heketi/* +Copyright: + 2015-2016 The heketi Authors +License: Apache-2.0 or LGPL-3+ or GPL-2 + +Files: + vendor/github.com/libopenstorage/openstorage/* +Copyright: 2015 Openstorage.org +License: Apache-2.0 + +Files: + vendor/github.com/rancher/go-rancher/* +Copyright: 2014-2015 Rancher Labs, Inc. +License: Apache-2.0 + +Files: + vendor/github.com/rubiojr/go-vhd/* +Copyright: 2015 Sergio Rubio +License: Expat + +Files: + vendor/github.com/satori/uuid/* +Copyright: 2013-2016 Maxim Bublis +License: Expat + +Files: + vendor/github.com/dgrijalva/jwt-go/* +Copyright: + 2012 Dave Grijalva +License: Expat + +Files: + vendor/github.com/docker/distribution/* +Copyright: + 2014-2016 Docker, Inc. +License: Apache-2.0 + +Files: + vendor/github.com/docker/docker/* +Copyright: + 2012-2016 Docker, Inc. +License: Apache-2.0 + +Files: + vendor/github.com/gogo/protobuf/* +Copyright: + 2008 Google Inc. + 2010-2012, The Go Authors. + 2013, Vastech SA (PTY) LTD. +License: BSD-3-Clause + +Files: + vendor/github.com/golang/protobuf/* +Copyright: + 2008-2016, The Go Authors. + 2008 Google Inc. +License: BSD-3-Clause + +Files: + vendor/github.com/square/go-jose/* +Copyright: + 2014-2016 Square Inc. 
+License: Apache-2.0 + +Files: + vendor/github.com/square/go-jose/json/* +Copyright: + 2012 The Go Authors +License: BSD-3-Clause +Comment: The code is derived from the `encoding/json` package from Go 1.6 + +Files: + vendor/github.com/storageos/go-api/* +Copyright: 2015-2017 StorageOS +License: Expat + +Files: + vendor/github.com/storageos/go-api/client_unix.go + vendor/github.com/storageos/go-api/client_windows.go +Copyright: + 2013-2017 go-dockerclient authors +License: BSD-2-Clause + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + . + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + . + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Files: + vendor/google.golang.org/grpc/* +Copyright: + 2014-2016 Google Inc. +License: BSD-3-Clause + +Files: + debian/* +Copyright: + 2015-2019 Dmitry Smirnov + 2017 Tim Potter +License: GPL-3+ + +Files: debian/patches/* +Copyright: 2015-2016 Dmitry Smirnov +License: GPL-3+ or Apache-2.0 +Comment: patches can be licensed under the same terms as upstream. + +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + . + On Debian systems, the complete text of the Apache version 2.0 license + can be found in "/usr/share/common-licenses/Apache-2.0". + +License: BSD-3-Clause~Google + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + . + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + . 
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +License: BSD-3-Clause + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + . + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +License: GPL-3+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + ․ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + ․ + The complete text of the GNU General Public License version 3 + can be found in "/usr/share/common-licenses/GPL-3". + +License: BSD-3-Clause~OKF + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + . + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + . + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + . + Neither the name of the Open Knowledge Foundation Ltd. 
nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + . + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +License: Expat + Permission is hereby granted, free of charge, to any person obtaining a copy of + this software and associated documentation files (the "Software"), to deal in + the Software without restriction, including without limitation the rights to + use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + the Software, and to permit persons to whom the Software is furnished to do so, + subject to the following conditions: + . + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + . + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +License: ISC + Permission to use, copy, modify, and/or distribute this software for + any purpose with or without fee is hereby granted, provided that the + above copyright notice and this permission notice appear in all + copies. + . + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + PERFORMANCE OF THIS SOFTWARE. + +License: LGPL-3+ + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + ․ + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + ․ + On Debian systems, the complete text of the GNU Lesser General Public + License Version 3 can be found in "/usr/share/common-licenses/LGPL-3". 
+ +License: GPL-2 + This program is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by the Free + Software Foundation, version 2 of the License. + ․ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + ․ + On Debian systems, the complete text of the GNU General Public + License Version 2 can be found in "/usr/share/common-licenses/GPL-2". diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubeadm.conf b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubeadm.conf new file mode 100644 index 000000000..d8b77e9dc --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubeadm.conf @@ -0,0 +1,18 @@ +# Note: This dropin only works with kubeadm and kubelet v1.11+ +[Service] +Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf" +Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml" +# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically +EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env +# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use +# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file. +EnvironmentFile=-/etc/default/kubelet +ExecStart= +ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS +ExecStartPre=-/usr/local/sbin/sanitize_kubelet_reserved_cpus.sh /etc/default/kubelet +ExecStartPre=-/usr/bin/kubelet-cgroup-setup.sh +ExecStartPost=/bin/bash -c 'echo $MAINPID > /var/run/kubelet.pid;' +ExecStopPost=/bin/rm -f /var/run/kubelet.pid +Restart=always +StartLimitInterval=0 +RestartSec=10 diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubelet-cgroup-setup.sh b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubelet-cgroup-setup.sh new file mode 100644 index 000000000..385314bfe --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubelet-cgroup-setup.sh @@ -0,0 +1,132 @@ +#!/bin/bash +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +# This script does minimal cgroup setup for kubelet. This creates k8s-infra +# cgroup for a minimal set of resource controllers, and configures cpuset +# attributes to span all online cpus and nodes. This will do nothing if +# the k8s-infra cgroup already exists (i.e., assume already configured). +# NOTE: The creation of directories under /sys/fs/cgroup is volatile, and +# does not persist reboots. The cpuset.mems and cpuset.cpus is later updated +# by puppet kubernetes.pp manifest. 
+# + +# Define minimal path +PATH=/bin:/usr/bin:/usr/local/bin + +# Log info message to /var/log/daemon.log +function LOG { + logger -p daemon.info "$0($$): $@" +} + +# Log error message to /var/log/daemon.log +function ERROR { + logger -s -p daemon.error "$0($$): ERROR: $@" +} + +# Create minimal cgroup directories and configure cpuset attributes if required +function create_cgroup { + local cg_name=$1 + local cg_nodeset=$2 + local cg_cpuset=$3 + + local CGROUP=/sys/fs/cgroup + local CONTROLLERS_AUTO_DELETED=("pids" "hugetlb") + local CONTROLLERS_PRESERVED=("cpuset" "memory" "cpu,cpuacct" "systemd") + local cnt='' + local CGDIR='' + local RC=0 + + # Ensure that these cgroups are created every time as they are auto deleted + for cnt in ${CONTROLLERS_AUTO_DELETED[@]}; do + CGDIR=${CGROUP}/${cnt}/${cg_name} + if [ -d ${CGDIR} ]; then + LOG "Nothing to do, already configured: ${CGDIR}." + continue + fi + LOG "Creating: ${CGDIR}" + mkdir -p ${CGDIR} + RC=$? + if [ ${RC} -ne 0 ]; then + ERROR "Creating: ${CGDIR}, rc=${RC}" + exit ${RC} + fi + done + + # These cgroups are preserved so if any of these are encountered additional + # cgroup setup is not required + for cnt in ${CONTROLLERS_PRESERVED[@]}; do + CGDIR=${CGROUP}/${cnt}/${cg_name} + if [ -d ${CGDIR} ]; then + LOG "Nothing to do, already configured: ${CGDIR}." + exit ${RC} + fi + LOG "Creating: ${CGDIR}" + mkdir -p ${CGDIR} + RC=$? + if [ ${RC} -ne 0 ]; then + ERROR "Creating: ${CGDIR}, rc=${RC}" + exit ${RC} + fi + done + + # Customize cpuset attributes + LOG "Configuring cgroup: ${cg_name}, nodeset: ${cg_nodeset}, cpuset: ${cg_cpuset}" + CGDIR=${CGROUP}/cpuset/${cg_name} + local CGMEMS=${CGDIR}/cpuset.mems + local CGCPUS=${CGDIR}/cpuset.cpus + local CGTASKS=${CGDIR}/tasks + + # Assign cgroup memory nodeset + LOG "Assign nodeset ${cg_nodeset} to ${CGMEMS}" + /bin/echo ${cg_nodeset} > ${CGMEMS} + RC=$? + if [ ${RC} -ne 0 ]; then + ERROR "Unable to write to: ${CGMEMS}, rc=${RC}" + exit ${RC} + fi + + # Assign cgroup cpus + LOG "Assign cpuset ${cg_cpuset} to ${CGCPUS}" + /bin/echo ${cg_cpuset} > ${CGCPUS} + RC=$? + if [ ${RC} -ne 0 ]; then + ERROR "Assigning: ${cg_cpuset} to ${CGCPUS}, rc=${RC}" + exit ${RC} + fi + + # Set file ownership + chown root:root ${CGMEMS} ${CGCPUS} ${CGTASKS} + RC=$? + if [ ${RC} -ne 0 ]; then + ERROR "Setting owner for: ${CGMEMS}, ${CGCPUS}, ${CGTASKS}, rc=${RC}" + exit ${RC} + fi + + # Set file mode permissions + chmod 644 ${CGMEMS} ${CGCPUS} ${CGTASKS} + RC=$? + if [ ${RC} -ne 0 ]; then + ERROR "Setting mode for: ${CGMEMS}, ${CGCPUS}, ${CGTASKS}, rc=${RC}" + exit ${RC} + fi + + return ${RC} +} + +if [ $UID -ne 0 ]; then + ERROR "Require sudo/root." + exit 1 +fi + +# Configure default kubepods cpuset to span all online cpus and nodes. +ONLINE_NODESET=$(/bin/cat /sys/devices/system/node/online) +ONLINE_CPUSET=$(/bin/cat /sys/devices/system/cpu/online) + +# Configure kubelet cgroup to match cgroupRoot. +create_cgroup 'k8s-infra' ${ONLINE_NODESET} ${ONLINE_CPUSET} + +exit $? 
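For reference, a minimal verification sketch for the cgroup layout created by the script above; it assumes the cgroup v1 hierarchies the script targets and is illustrative only:

    # confirm the k8s-infra cgroup exists under each controller the script manages
    for c in pids hugetlb cpuset memory cpu,cpuacct systemd; do
        test -d /sys/fs/cgroup/${c}/k8s-infra && echo "ok: ${c}/k8s-infra"
    done
    # the cpuset attributes should span the online cpus and memory nodes
    cat /sys/fs/cgroup/cpuset/k8s-infra/cpuset.cpus /sys/devices/system/cpu/online
    cat /sys/fs/cgroup/cpuset/k8s-infra/cpuset.mems /sys/devices/system/node/online
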
+ diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-client.install b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-client.install new file mode 100644 index 000000000..166fe37dc --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-client.install @@ -0,0 +1,3 @@ +usr/local/kubernetes/1.23.1/stage2/usr/bin/kubectl +usr/local/kubernetes/1.23.1/stage2/usr/share/bash-completion/completions/kubectl + diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-client.lintian-overrides b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-client.lintian-overrides new file mode 100644 index 000000000..160b6783b --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-client.lintian-overrides @@ -0,0 +1,9 @@ +## Generated man pages: TODO +manpage-has-bad-whatis-entry usr/share/man/* +manpage-has-errors-from-man usr/share/man/man1/* + +## Bash-completion script does not have to be executable: +script-not-executable usr/share/bash-completion/completions/kubectl + +## Override annoying/useless messages +kubernetes-client: spelling-error-in-binary diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-kubeadm.dirs b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-kubeadm.dirs new file mode 100644 index 000000000..6fee8d92a --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-kubeadm.dirs @@ -0,0 +1 @@ +usr/local/kubernetes/1.23.1/stage2/etc/systemd/system/kubelet.service.d/ diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-kubeadm.install b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-kubeadm.install new file mode 100644 index 000000000..2cc51eed2 --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-kubeadm.install @@ -0,0 +1,2 @@ +usr/local/kubernetes/1.23.1/stage1/usr/bin/kubeadm +usr/local/kubernetes/1.23.1/stage2/etc/systemd/system/kubelet.service.d/kubeadm.conf diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-master.dirs b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-master.dirs new file mode 100644 index 000000000..373ba66e7 --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-master.dirs @@ -0,0 +1,5 @@ +etc/kubernetes-1.23.1 +etc/kubernetes-1.23.1/addons +etc/kubernetes-1.23.1/addons/volumesnapshots +etc/kubernetes-1.23.1/addons/volumesnapshots/crd +etc/kubernetes-1.23.1/addons/volumesnapshots/volume-snapshot-controller diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-master.install b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-master.install new file mode 100644 index 000000000..cf338e67d --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-master.install @@ -0,0 +1,8 @@ +usr/bin/kube-apiserver +usr/bin/kube-controller-manager +usr/bin/kube-scheduler +etc/kubernetes-1.23.1/addons/volumesnapshots/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml +etc/kubernetes-1.23.1/addons/volumesnapshots/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml +etc/kubernetes-1.23.1/addons/volumesnapshots/crd/snapshot.storage.k8s.io_volumesnapshots.yaml +etc/kubernetes-1.23.1/addons/volumesnapshots/volume-snapshot-controller/volume-snapshot-controller-deployment.yaml 
+etc/kubernetes-1.23.1/addons/volumesnapshots/volume-snapshot-controller/rbac-volume-snapshot-controller.yaml diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-master.lintian-overrides b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-master.lintian-overrides new file mode 100644 index 000000000..f73c63ffd --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-master.lintian-overrides @@ -0,0 +1,7 @@ +## No manual page for hyperkube +kubernetes-master: binary-without-manpage usr/bin/hyperkube + +## Override annoying/useless messages +kubernetes-master: spelling-error-in-binary +kubernetes-master: manpage-has-errors-from-man usr/share/man/man1/* +kubernetes-master: manpage-has-bad-whatis-entry usr/share/man/man1/* diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-misc.install b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-misc.install new file mode 100644 index 000000000..8aa6d765e --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-misc.install @@ -0,0 +1,11 @@ +#TODO with genmanpages +#usr/share/man/man1/kube-apiserver.1* +#usr/share/man/man1/kube-controller-manager.1* +#usr/share/man/man1/kube-scheduler.1* +#usr/share/man/man1/kube-proxy.1* +usr/bin/kube-proxy +#usr/share/man/man1/kubelet.1* +#usr/share/man/man1/kubeadm.1* +#usr/share/man/man1/kubeadm-* +#usr/share/man/man1/kubectl.1* +#usr/share/man/man1/kubectl-* diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-node.install b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-node.install new file mode 100644 index 000000000..00fa7568f --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-node.install @@ -0,0 +1,2 @@ +usr/local/kubernetes/1.23.1/stage2/usr/bin/kubelet +usr/local/kubernetes/1.23.1/stage2/usr/bin/kubelet-cgroup-setup.sh diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-node.lintian-overrides b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-node.lintian-overrides new file mode 100644 index 000000000..99d470def --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-node.lintian-overrides @@ -0,0 +1,4 @@ +## Override annoying/useless messages +kubernetes-node: spelling-error-in-binary +kubernetes-node: manpage-has-errors-from-man usr/share/man/man1/* +kubernetes-node: manpage-has-bad-whatis-entry usr/share/man/man1/* diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-unit-test.install b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-unit-test.install new file mode 100644 index 000000000..da8847677 --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/kubernetes-1.23.1-unit-test.install @@ -0,0 +1,2 @@ +var/lib/kubernetes-unit-test/ + diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/Revert-use-subpath-for-coredns-only-for-default-repo.patch b/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/Revert-use-subpath-for-coredns-only-for-default-repo.patch new file mode 100644 index 000000000..f56c20ee0 --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/Revert-use-subpath-for-coredns-only-for-default-repo.patch @@ -0,0 +1,113 @@ +From f9dd597f4e8c8c66f08d661efcbd29479e4e069d Mon Sep 17 00:00:00 2001 +From: Gleb Aronsky +Date: Tue, 25 Jan 2022 13:56:30 -0500 +Subject: [PATCH] Revert "use subpath for coredns only for default 
repository" + +This reverts commit 38a41e1557649a7cc763bf737779db9aa03ec75e. + +Co-authored-by: Jim Gauld +Signed-off-by: Gleb Aronsky + +diff --git a/cmd/kubeadm/app/constants/constants.go b/cmd/kubeadm/app/constants/constants.go +index c2b8f6e64be..b00ccea315e 100644 +--- a/cmd/kubeadm/app/constants/constants.go ++++ b/cmd/kubeadm/app/constants/constants.go +@@ -337,7 +337,7 @@ const ( + CoreDNSDeploymentName = "coredns" + + // CoreDNSImageName specifies the name of the image for CoreDNS add-on +- CoreDNSImageName = "coredns" ++ CoreDNSImageName = "coredns/coredns" + + // CoreDNSVersion is the version of CoreDNS to be deployed if it is used + CoreDNSVersion = "v1.8.6" +diff --git a/cmd/kubeadm/app/images/images.go b/cmd/kubeadm/app/images/images.go +index ee55eb6c995..bdb61caa373 100644 +--- a/cmd/kubeadm/app/images/images.go ++++ b/cmd/kubeadm/app/images/images.go +@@ -22,7 +22,6 @@ import ( + "k8s.io/klog/v2" + + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" +- kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2" + "k8s.io/kubernetes/cmd/kubeadm/app/constants" + kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" + ) +@@ -48,10 +47,6 @@ func GetDNSImage(cfg *kubeadmapi.ClusterConfiguration) string { + if cfg.DNS.ImageRepository != "" { + dnsImageRepository = cfg.DNS.ImageRepository + } +- // Handle the renaming of the official image from "k8s.gcr.io/coredns" to "k8s.gcr.io/coredns/coredns +- if dnsImageRepository == kubeadmapiv1beta2.DefaultImageRepository { +- dnsImageRepository = fmt.Sprintf("%s/coredns", dnsImageRepository) +- } + // DNS uses an imageTag that corresponds to the DNS version matching the Kubernetes version + dnsImageTag := constants.CoreDNSVersion + +diff --git a/cmd/kubeadm/app/images/images_test.go b/cmd/kubeadm/app/images/images_test.go +index 2b8affce236..91cd4294351 100644 +--- a/cmd/kubeadm/app/images/images_test.go ++++ b/cmd/kubeadm/app/images/images_test.go +@@ -22,7 +22,6 @@ import ( + "testing" + + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" +- kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2" + "k8s.io/kubernetes/cmd/kubeadm/app/constants" + ) + +@@ -227,51 +226,4 @@ func TestGetAllImages(t *testing.T) { + } + } + +-func TestGetDNSImage(t *testing.T) { +- var tests = []struct { +- expected string +- cfg *kubeadmapi.ClusterConfiguration +- }{ +- { +- expected: "foo.io/coredns:v1.8.6", +- cfg: &kubeadmapi.ClusterConfiguration{ +- ImageRepository: "foo.io", +- DNS: kubeadmapi.DNS{ +- Type: kubeadmapi.CoreDNS, +- }, +- }, +- }, +- { +- expected: kubeadmapiv1beta2.DefaultImageRepository + "/coredns/coredns:v1.8.6", +- cfg: &kubeadmapi.ClusterConfiguration{ +- ImageRepository: kubeadmapiv1beta2.DefaultImageRepository, +- DNS: kubeadmapi.DNS{ +- Type: kubeadmapi.CoreDNS, +- }, +- }, +- }, +- { +- expected: "foo.io/coredns/coredns:v1.8.6", +- cfg: &kubeadmapi.ClusterConfiguration{ +- ImageRepository: "foo.io", +- DNS: kubeadmapi.DNS{ +- Type: kubeadmapi.CoreDNS, +- ImageMeta: kubeadmapi.ImageMeta{ +- ImageRepository: "foo.io/coredns", +- }, +- }, +- }, +- }, +- } +- +- for _, test := range tests { +- actual := GetDNSImage(test.cfg) +- if actual != test.expected { +- t.Errorf( +- "failed to GetDNSImage:\n\texpected: %s\n\t actual: %s", +- test.expected, +- actual, +- ) +- } +- } + } +-- +2.25.1 + diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/enable-support-for-kubernetes-to-ignore-isolcpus.patch 
b/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/enable-support-for-kubernetes-to-ignore-isolcpus.patch new file mode 100644 index 000000000..63cdfbdbb --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/enable-support-for-kubernetes-to-ignore-isolcpus.patch @@ -0,0 +1,79 @@ +From 087dcfa1a84ec38541fa9870937d76b80a707e2c Mon Sep 17 00:00:00 2001 +From: Chris Friesen +Date: Fri, 23 Oct 2020 17:46:10 -0600 +Subject: [PATCH 6/7] enable support for kubernetes to ignore isolcpus + +The normal mechanisms for allocating isolated CPUs do not allow +a mix of isolated and exclusive CPUs in the same container. In +order to allow this in *very* limited cases where the pod spec +is known in advance we will add the ability to disable the normal +isolcpus behaviour. + +If the file "/etc/kubernetes/ignore_isolcpus" exists, then kubelet +will basically forget everything it knows about isolcpus and just +treat them like regular CPUs. + +The admin user can then rely on the fact that CPU allocation is +deterministic to ensure that the isolcpus they configure end up being +allocated to the correct pods. + +Signed-off-by: Daniel Safta +--- + pkg/kubelet/cm/cpumanager/cpu_manager.go | 8 ++++++++ + pkg/kubelet/cm/cpumanager/policy_static.go | 7 +++++++ + 2 files changed, 15 insertions(+) + +diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager.go b/pkg/kubelet/cm/cpumanager/cpu_manager.go +index 2f5b06dc..d9ec63bb 100644 +--- a/pkg/kubelet/cm/cpumanager/cpu_manager.go ++++ b/pkg/kubelet/cm/cpumanager/cpu_manager.go +@@ -20,6 +20,7 @@ import ( + "fmt" + "io/ioutil" + "math" ++ "os" + "strings" + "sync" + "time" +@@ -55,6 +56,13 @@ const cpuManagerStateFileName = "cpu_manager_state" + + // get the system-level isolated CPUs + func getIsolcpus() cpuset.CPUSet { ++ // This is a gross hack to basically turn off awareness of isolcpus to enable ++ // isolated cpus to be allocated to pods the same way as non-isolated CPUs. ++ if _, err := os.Stat("/etc/kubernetes/ignore_isolcpus"); err == nil { ++ klog.Infof("[cpumanager] turning off isolcpus awareness") ++ return cpuset.NewCPUSet() ++ } ++ + dat, err := ioutil.ReadFile("/sys/devices/system/cpu/isolated") + if err != nil { + klog.Errorf("[cpumanager] unable to read sysfs isolcpus subdir") +diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go +index 72a99496..ee389a85 100644 +--- a/pkg/kubelet/cm/cpumanager/policy_static.go ++++ b/pkg/kubelet/cm/cpumanager/policy_static.go +@@ -18,6 +18,7 @@ package cpumanager + + import ( + "fmt" ++ "os" + "strconv" + + v1 "k8s.io/api/core/v1" +@@ -658,6 +659,12 @@ func isKubeInfra(pod *v1.Pod) bool { + + // get the isolated CPUs (if any) from the devices associated with a specific container + func (p *staticPolicy) podIsolCPUs(pod *v1.Pod, container *v1.Container) cpuset.CPUSet { ++ // This is a gross hack to basically turn off awareness of isolcpus to enable ++ // isolated cpus to be allocated to pods the same way as non-isolated CPUs. ++ if _, err := os.Stat("/etc/kubernetes/ignore_isolcpus"); err == nil { ++ return cpuset.NewCPUSet() ++ } ++ + // NOTE: This is required for TestStaticPolicyAdd() since makePod() does + // not create UID. We also need a way to properly stub devicemanager. 
+ if len(string(pod.UID)) == 0 { +-- +2.17.1 + diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubeadm-create-platform-pods-with-zero-CPU-resources.patch b/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubeadm-create-platform-pods-with-zero-CPU-resources.patch new file mode 100644 index 000000000..14e530c73 --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubeadm-create-platform-pods-with-zero-CPU-resources.patch @@ -0,0 +1,108 @@ +From de653bd0823b248d623a39c17a3872e85ce952b0 Mon Sep 17 00:00:00 2001 +From: Chris Friesen +Date: Fri, 3 Sep 2021 18:05:15 -0400 +Subject: [PATCH 5/7] kubeadm: create platform pods with zero CPU resources + +We want to specify zero CPU resources when creating the manifests +for the static platform pods, as a workaround for the lack of +separate resource tracking for platform resources. + +We also specify zero CPU resources for the coredns deployment. +manifests.go appears to be the main file for this, not sure if the +others are used but I changed them just in case. + +Signed-off-by: Daniel Safta +--- + cluster/addons/dns/coredns/coredns.yaml.base | 2 +- + cluster/addons/dns/coredns/coredns.yaml.in | 2 +- + cluster/addons/dns/coredns/coredns.yaml.sed | 2 +- + cmd/kubeadm/app/phases/addons/dns/manifests.go | 2 +- + cmd/kubeadm/app/phases/controlplane/manifests.go | 6 +++--- + 5 files changed, 7 insertions(+), 7 deletions(-) + +diff --git a/cluster/addons/dns/coredns/coredns.yaml.base b/cluster/addons/dns/coredns/coredns.yaml.base +index 4ee054f8ba5..d2b58f4af0e 100644 +--- a/cluster/addons/dns/coredns/coredns.yaml.base ++++ b/cluster/addons/dns/coredns/coredns.yaml.base +@@ -138,7 +138,7 @@ spec: + limits: + memory: __DNS__MEMORY__LIMIT__ + requests: +- cpu: 100m ++ cpu: 0 + memory: 70Mi + args: [ "-conf", "/etc/coredns/Corefile" ] + volumeMounts: +diff --git a/cluster/addons/dns/coredns/coredns.yaml.in b/cluster/addons/dns/coredns/coredns.yaml.in +index 1f791e447c9..ff03a801646 100644 +--- a/cluster/addons/dns/coredns/coredns.yaml.in ++++ b/cluster/addons/dns/coredns/coredns.yaml.in +@@ -138,7 +138,7 @@ spec: + limits: + memory: 'dns_memory_limit' + requests: +- cpu: 100m ++ cpu: 0 + memory: 70Mi + args: [ "-conf", "/etc/coredns/Corefile" ] + volumeMounts: +diff --git a/cluster/addons/dns/coredns/coredns.yaml.sed b/cluster/addons/dns/coredns/coredns.yaml.sed +index 4d64278aaa4..38fc9196b28 100644 +--- a/cluster/addons/dns/coredns/coredns.yaml.sed ++++ b/cluster/addons/dns/coredns/coredns.yaml.sed +@@ -138,7 +138,7 @@ spec: + limits: + memory: $DNS_MEMORY_LIMIT + requests: +- cpu: 100m ++ cpu: 0 + memory: 70Mi + args: [ "-conf", "/etc/coredns/Corefile" ] + volumeMounts: +diff --git a/cmd/kubeadm/app/phases/addons/dns/manifests.go b/cmd/kubeadm/app/phases/addons/dns/manifests.go +index 3ac6856bfc6..0763b4c63db 100644 +--- a/cmd/kubeadm/app/phases/addons/dns/manifests.go ++++ b/cmd/kubeadm/app/phases/addons/dns/manifests.go +@@ -95,7 +95,7 @@ spec: + limits: + memory: 170Mi + requests: +- cpu: 100m ++ cpu: 0 + memory: 70Mi + args: [ "-conf", "/etc/coredns/Corefile" ] + volumeMounts: +diff --git a/cmd/kubeadm/app/phases/controlplane/manifests.go b/cmd/kubeadm/app/phases/controlplane/manifests.go +index 8181bea63a4..4c4b4448dd4 100644 +--- a/cmd/kubeadm/app/phases/controlplane/manifests.go ++++ b/cmd/kubeadm/app/phases/controlplane/manifests.go +@@ -60,7 +60,7 @@ func GetStaticPodSpecs(cfg *kubeadmapi.ClusterConfiguration, endpoint *kubeadmap + LivenessProbe: 
staticpodutil.LivenessProbe(staticpodutil.GetAPIServerProbeAddress(endpoint), "/livez", int(endpoint.BindPort), v1.URISchemeHTTPS), + ReadinessProbe: staticpodutil.ReadinessProbe(staticpodutil.GetAPIServerProbeAddress(endpoint), "/readyz", int(endpoint.BindPort), v1.URISchemeHTTPS), + StartupProbe: staticpodutil.StartupProbe(staticpodutil.GetAPIServerProbeAddress(endpoint), "/livez", int(endpoint.BindPort), v1.URISchemeHTTPS, cfg.APIServer.TimeoutForControlPlane), +- Resources: staticpodutil.ComponentResources("250m"), ++ Resources: staticpodutil.ComponentResources("0"), + Env: kubeadmutil.GetProxyEnvVars(), + }, mounts.GetVolumes(kubeadmconstants.KubeAPIServer), + map[string]string{kubeadmconstants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey: endpoint.String()}), +@@ -72,7 +72,7 @@ func GetStaticPodSpecs(cfg *kubeadmapi.ClusterConfiguration, endpoint *kubeadmap + VolumeMounts: staticpodutil.VolumeMountMapToSlice(mounts.GetVolumeMounts(kubeadmconstants.KubeControllerManager)), + LivenessProbe: staticpodutil.LivenessProbe(staticpodutil.GetControllerManagerProbeAddress(cfg), "/healthz", kubeadmconstants.KubeControllerManagerPort, v1.URISchemeHTTPS), + StartupProbe: staticpodutil.StartupProbe(staticpodutil.GetControllerManagerProbeAddress(cfg), "/healthz", kubeadmconstants.KubeControllerManagerPort, v1.URISchemeHTTPS, cfg.APIServer.TimeoutForControlPlane), +- Resources: staticpodutil.ComponentResources("200m"), ++ Resources: staticpodutil.ComponentResources("0"), + Env: kubeadmutil.GetProxyEnvVars(), + }, mounts.GetVolumes(kubeadmconstants.KubeControllerManager), nil), + kubeadmconstants.KubeScheduler: staticpodutil.ComponentPod(v1.Container{ +@@ -83,7 +83,7 @@ func GetStaticPodSpecs(cfg *kubeadmapi.ClusterConfiguration, endpoint *kubeadmap + VolumeMounts: staticpodutil.VolumeMountMapToSlice(mounts.GetVolumeMounts(kubeadmconstants.KubeScheduler)), + LivenessProbe: staticpodutil.LivenessProbe(staticpodutil.GetSchedulerProbeAddress(cfg), "/healthz", kubeadmconstants.KubeSchedulerPort, v1.URISchemeHTTPS), + StartupProbe: staticpodutil.StartupProbe(staticpodutil.GetSchedulerProbeAddress(cfg), "/healthz", kubeadmconstants.KubeSchedulerPort, v1.URISchemeHTTPS, cfg.APIServer.TimeoutForControlPlane), +- Resources: staticpodutil.ComponentResources("100m"), ++ Resources: staticpodutil.ComponentResources("0"), + Env: kubeadmutil.GetProxyEnvVars(), + }, mounts.GetVolumes(kubeadmconstants.KubeScheduler), nil), + } +-- +2.17.1 + diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubelet-cpumanager-disable-CFS-quota-throttling-for-.patch b/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubelet-cpumanager-disable-CFS-quota-throttling-for-.patch new file mode 100644 index 000000000..fc115417d --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubelet-cpumanager-disable-CFS-quota-throttling-for-.patch @@ -0,0 +1,287 @@ +From cde296d121955a9ee4f148f775d73bb746a17310 Mon Sep 17 00:00:00 2001 +From: Gleb Aronsky +Date: Fri, 21 Jan 2022 17:03:57 -0500 +Subject: kubelet cpumanager disable CFS quota throttling for + Guaranteed pods + +This disables CFS CPU quota to avoid performance degradation due to +Linux kernel CFS quota implementation. Note that 4.18 kernel attempts +to solve the CFS throttling problem, but there are reports that it is +not completely effective. + +This disables CFS quota throttling for Guaranteed pods for both +parent and container cgroups by writing -1 to cgroup cpu.cfs_quota_us. 
+Disabling has a dramatic latency improvement for HTTP response times. + +This patch is refactored in 1.22.5 due to new internal_container_lifecycle +framework. We leverage the same mechanism to set Linux resources as: +cpu manager: specify the container CPU set during the creation + +Co-authored-by: Jim Gauld +Signed-off-by: Gleb Aronsky + +--- + pkg/kubelet/cm/cpumanager/cpu_manager.go | 7 +++ + pkg/kubelet/cm/cpumanager/fake_cpu_manager.go | 12 +++-- + pkg/kubelet/cm/helpers_linux.go | 12 ++++- + pkg/kubelet/cm/helpers_linux_test.go | 45 ++++++++++--------- + .../cm/internal_container_lifecycle_linux.go | 11 ++++- + 5 files changed, 61 insertions(+), 26 deletions(-) + +diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager.go b/pkg/kubelet/cm/cpumanager/cpu_manager.go +index a876d413870..6ad289336ea 100644 +--- a/pkg/kubelet/cm/cpumanager/cpu_manager.go ++++ b/pkg/kubelet/cm/cpumanager/cpu_manager.go +@@ -72,6 +72,9 @@ type Manager interface { + // State returns a read-only interface to the internal CPU manager state. + State() state.Reader + ++ // GetCPUPolicy returns the assigned CPU manager policy ++ GetCPUPolicy() string ++ + // GetTopologyHints implements the topologymanager.HintProvider Interface + // and is consulted to achieve NUMA aware resource alignment among this + // and other resource controllers. +@@ -314,6 +317,10 @@ func (m *manager) State() state.Reader { + return m.state + } + ++func (m *manager) GetCPUPolicy() string { ++ return m.policy.Name() ++} ++ + func (m *manager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint { + // The pod is during the admission phase. We need to save the pod to avoid it + // being cleaned before the admission ended +diff --git a/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go b/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go +index 93369705135..8082bbeebcb 100644 +--- a/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go ++++ b/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go +@@ -17,7 +17,7 @@ limitations under the License. 
+ package cpumanager + + import ( +- "k8s.io/api/core/v1" ++ v1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" + "k8s.io/kubernetes/pkg/kubelet/cm/containermap" + "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" +@@ -28,7 +28,8 @@ import ( + ) + + type fakeManager struct { +- state state.State ++ policy Policy ++ state state.State + } + + func (m *fakeManager) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService, initialContainers containermap.ContainerMap) error { +@@ -70,6 +71,10 @@ func (m *fakeManager) State() state.Reader { + return m.state + } + ++func (m *fakeManager) GetCPUPolicy() string { ++ return m.policy.Name() ++} ++ + func (m *fakeManager) GetExclusiveCPUs(podUID, containerName string) cpuset.CPUSet { + klog.InfoS("GetExclusiveCPUs", "podUID", podUID, "containerName", containerName) + return cpuset.CPUSet{} +@@ -88,6 +93,7 @@ func (m *fakeManager) GetCPUAffinity(podUID, containerName string) cpuset.CPUSet + // NewFakeManager creates empty/fake cpu manager + func NewFakeManager() Manager { + return &fakeManager{ +- state: state.NewMemoryState(), ++ policy: &nonePolicy{}, ++ state: state.NewMemoryState(), + } + } +diff --git a/pkg/kubelet/cm/helpers_linux.go b/pkg/kubelet/cm/helpers_linux.go +index 83c80501f5d..6e8f232e4d6 100644 +--- a/pkg/kubelet/cm/helpers_linux.go ++++ b/pkg/kubelet/cm/helpers_linux.go +@@ -25,7 +25,7 @@ import ( + + libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups" + +- "k8s.io/api/core/v1" ++ v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/api/v1/resource" +@@ -182,6 +182,16 @@ func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64, + // build the result + result := &ResourceConfig{} + if qosClass == v1.PodQOSGuaranteed { ++ // Disable CFS CPU quota to avoid performance degradation due to ++ // Linux kernel CFS throttle implementation. ++ // NOTE: 4.18 kernel attempts to solve CFS throttling problem, ++ // but there are reports that it is not completely effective. 
++ // This will configure cgroup CFS parameters at pod level:
++ // /sys/fs/cgroup/cpu/k8s-infra/kubepods//cpu.cfs_quota_us
++ // /sys/fs/cgroup/cpu/k8s-infra/kubepods//cpu.cfs_period_us
++ cpuQuota = int64(-1)
++ cpuPeriod = uint64(100000)
++
+ result.CpuShares = &cpuShares
+ result.CpuQuota = &cpuQuota
+ result.CpuPeriod = &cpuPeriod
+diff --git a/pkg/kubelet/cm/helpers_linux_test.go b/pkg/kubelet/cm/helpers_linux_test.go
+index 101b21e682a..91c5782e3b4 100644
+--- a/pkg/kubelet/cm/helpers_linux_test.go
++++ b/pkg/kubelet/cm/helpers_linux_test.go
+@@ -25,7 +25,7 @@ import (
+ "testing"
+ "time"
+
+- "k8s.io/api/core/v1"
++ v1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ utilfeature "k8s.io/apiserver/pkg/util/feature"
+ featuregatetesting "k8s.io/component-base/featuregate/testing"
+@@ -64,8 +64,9 @@ func TestResourceConfigForPod(t *testing.T) {
+ burstablePartialShares := MilliCPUToShares(200)
+ burstableQuota := MilliCPUToQuota(200, int64(defaultQuotaPeriod))
+ guaranteedShares := MilliCPUToShares(100)
+- guaranteedQuota := MilliCPUToQuota(100, int64(defaultQuotaPeriod))
+- guaranteedTunedQuota := MilliCPUToQuota(100, int64(tunedQuotaPeriod))
++ guaranteedQuotaPeriod := uint64(100000)
++ guaranteedQuota := int64(-1)
++ guaranteedTunedQuota := int64(-1)
+ memoryQuantity = resource.MustParse("100Mi")
+ cpuNoLimit := int64(-1)
+ guaranteedMemory := memoryQuantity.Value()
+@@ -204,8 +205,8 @@ func TestResourceConfigForPod(t *testing.T) {
+ },
+ },
+ enforceCPULimits: true,
+- quotaPeriod: defaultQuotaPeriod,
+- expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
++ quotaPeriod: guaranteedQuotaPeriod,
++ expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &guaranteedQuotaPeriod, Memory: &guaranteedMemory},
+ },
+ "guaranteed-no-cpu-enforcement": {
+ pod: &v1.Pod{
+@@ -218,8 +219,8 @@ func TestResourceConfigForPod(t *testing.T) {
+ },
+ },
+ enforceCPULimits: false,
+- quotaPeriod: defaultQuotaPeriod,
+- expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
++ quotaPeriod: guaranteedQuotaPeriod,
++ expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &guaranteedQuotaPeriod, Memory: &guaranteedMemory},
+ },
+ "guaranteed-with-tuned-quota": {
+ pod: &v1.Pod{
+@@ -232,8 +233,8 @@ func TestResourceConfigForPod(t *testing.T) {
+ },
+ },
+ enforceCPULimits: true,
+- quotaPeriod: tunedQuotaPeriod,
+- expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedTunedQuota, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
++ quotaPeriod: guaranteedQuotaPeriod,
++ expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedTunedQuota, CpuPeriod: &guaranteedQuotaPeriod, Memory: &guaranteedMemory},
+ },
+ "guaranteed-no-cpu-enforcement-with-tuned-quota": {
+ pod: &v1.Pod{
+@@ -246,8 +247,8 @@ func TestResourceConfigForPod(t *testing.T) {
+ },
+ },
+ enforceCPULimits: false,
+- quotaPeriod: tunedQuotaPeriod,
+- expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
++ quotaPeriod: guaranteedQuotaPeriod,
++ expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &guaranteedQuotaPeriod, Memory: &guaranteedMemory},
+ },
+ "burstable-partial-limits-with-init-containers": {
+ pod:
&v1.Pod{ +@@ -309,8 +310,10 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) { + burstablePartialShares := MilliCPUToShares(200) + burstableQuota := MilliCPUToQuota(200, int64(defaultQuotaPeriod)) + guaranteedShares := MilliCPUToShares(100) +- guaranteedQuota := MilliCPUToQuota(100, int64(defaultQuotaPeriod)) +- guaranteedTunedQuota := MilliCPUToQuota(100, int64(tunedQuotaPeriod)) ++ guaranteedQuotaPeriod := uint64(100000) ++ guaranteedQuota := int64(-1) ++ guaranteedTunedQuota := int64(-1) ++ + memoryQuantity = resource.MustParse("100Mi") + cpuNoLimit := int64(-1) + guaranteedMemory := memoryQuantity.Value() +@@ -449,8 +452,8 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) { + }, + }, + enforceCPULimits: true, +- quotaPeriod: defaultQuotaPeriod, +- expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory}, ++ quotaPeriod: guaranteedQuotaPeriod, ++ expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &guaranteedQuotaPeriod, Memory: &guaranteedMemory}, + }, + "guaranteed-no-cpu-enforcement": { + pod: &v1.Pod{ +@@ -463,8 +466,8 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) { + }, + }, + enforceCPULimits: false, +- quotaPeriod: defaultQuotaPeriod, +- expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory}, ++ quotaPeriod: guaranteedQuotaPeriod, ++ expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &guaranteedQuotaPeriod, Memory: &guaranteedMemory}, + }, + "guaranteed-with-tuned-quota": { + pod: &v1.Pod{ +@@ -477,8 +480,8 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) { + }, + }, + enforceCPULimits: true, +- quotaPeriod: tunedQuotaPeriod, +- expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedTunedQuota, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory}, ++ quotaPeriod: guaranteedQuotaPeriod, ++ expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedTunedQuota, CpuPeriod: &guaranteedQuotaPeriod, Memory: &guaranteedMemory}, + }, + "guaranteed-no-cpu-enforcement-with-tuned-quota": { + pod: &v1.Pod{ +@@ -491,8 +494,8 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) { + }, + }, + enforceCPULimits: false, +- quotaPeriod: tunedQuotaPeriod, +- expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory}, ++ quotaPeriod: guaranteedQuotaPeriod, ++ expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &guaranteedQuotaPeriod, Memory: &guaranteedMemory}, + }, + } + +diff --git a/pkg/kubelet/cm/internal_container_lifecycle_linux.go b/pkg/kubelet/cm/internal_container_lifecycle_linux.go +index cb7c0cfa543..956696a51e8 100644 +--- a/pkg/kubelet/cm/internal_container_lifecycle_linux.go ++++ b/pkg/kubelet/cm/internal_container_lifecycle_linux.go +@@ -23,8 +23,9 @@ import ( + "strconv" + "strings" + +- "k8s.io/api/core/v1" ++ v1 "k8s.io/api/core/v1" + runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" ++ v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" + ) + + func (i *internalContainerLifecycleImpl) PreCreateContainer(pod *v1.Pod, container *v1.Container, containerConfig *runtimeapi.ContainerConfig) error { +@@ -35,6 +36,14 @@ func (i *internalContainerLifecycleImpl) 
PreCreateContainer(pod *v1.Pod, contain
+ }
+ }
+
++ // Disable cgroup CFS throttle at the container level.
++ // /sys/fs/cgroup/cpu/k8s-infra/kubepods///cpu.cfs_quota_us
++ // /sys/fs/cgroup/cpu/k8s-infra/kubepods///cpu.cfs_period_us
++ if i.cpuManager.GetCPUPolicy() == "static" && v1qos.GetPodQOS(pod) == v1.PodQOSGuaranteed {
++ containerConfig.Linux.Resources.CpuPeriod = int64(100000)
++ containerConfig.Linux.Resources.CpuQuota = int64(-1)
++ }
++
+ if i.memoryManager != nil {
+ numaNodes := i.memoryManager.GetMemoryNUMANodes(pod, container)
+ if numaNodes.Len() > 0 {
+--
+2.25.1
+
diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubelet-cpumanager-infrastructure-pods-use-system-re.patch b/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubelet-cpumanager-infrastructure-pods-use-system-re.patch
new file mode 100644
index 000000000..623c2a6e5
--- /dev/null
+++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubelet-cpumanager-infrastructure-pods-use-system-re.patch
@@ -0,0 +1,149 @@
+From 2f16a9e2c8dc5995aad9a04636ff4f20cb7b51c7 Mon Sep 17 00:00:00 2001
+From: Gleb Aronsky
+Date: Tue, 25 Jan 2022 12:35:48 -0500
+Subject: [PATCH] kubelet cpumanager infrastructure pods use system reserved
+ CPUs
+
+This assigns system infrastructure pods to the "reserved" cpuset
+to isolate them from the shared pool of CPUs.
+
+Infrastructure pods include any pods that belong to the kube-system,
+armada, cert-manager, vault, platform-deployment-manager, portieris,
+notification or flux-helm namespaces.
+
+The implementation is a bit simplistic; it is assumed that the
+"reserved" cpuset is large enough to handle all infrastructure pods'
+CPU allocations.
+
+This also prevents infrastructure pods from using Guaranteed resources.
+
+Co-authored-by: Jim Gauld
+Signed-off-by: Gleb Aronsky
+---
+ pkg/kubelet/cm/cpumanager/policy_static.go | 47 +++++++++++++++++--
+ .../cm/cpumanager/policy_static_test.go | 19 +++++++-
+ 2 files changed, 61 insertions(+), 5 deletions(-)
+
+diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go
+index 9697f4d4bb0..aeac7fdc8cb 100644
+--- a/pkg/kubelet/cm/cpumanager/policy_static.go
++++ b/pkg/kubelet/cm/cpumanager/policy_static.go
+@@ -53,6 +53,11 @@ func (e SMTAlignmentError) Type() string {
+ return ErrorSMTAlignment
+ }
+
++ // Define namespaces used by platform infrastructure pods
++var infraNamespaces = [...]string{
++ "kube-system", "armada", "cert-manager", "platform-deployment-manager", "portieris", "vault", "notification", "flux-helm",
++}
++
+ // staticPolicy is a CPU manager policy that does not change CPU
+ // assignments for exclusively pinned guaranteed containers after the main
+ // container process starts.
+@@ -121,10 +126,11 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv + klog.InfoS("Static policy created with configuration", "options", opts) + + policy := &staticPolicy{ +- topology: topology, +- affinity: affinity, +- cpusToReuse: make(map[string]cpuset.CPUSet), +- options: opts, ++ topology: topology, ++ affinity: affinity, ++ excludeReserved: excludeReserved, ++ cpusToReuse: make(map[string]cpuset.CPUSet), ++ options: opts, + } + + allCPUs := topology.CPUDetails.CPUs() +@@ -263,6 +269,25 @@ func (p *staticPolicy) updateCPUsToReuse(pod *v1.Pod, container *v1.Container, c + } + + func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) error { ++ // Process infra pods before guaranteed pods ++ if isKubeInfra(pod) { ++ // Container belongs in reserved pool. ++ // We don't want to fall through to the p.guaranteedCPUs() clause below so return either nil or error. ++ if _, ok := s.GetCPUSet(string(pod.UID), container.Name); ok { ++ klog.Infof("[cpumanager] static policy: reserved container already present in state, skipping (namespace: %s, pod UID: %s, pod: %s, container: %s)", pod.Namespace, string(pod.UID), pod.Name, container.Name) ++ return nil ++ } ++ ++ cpuset := p.reserved ++ if cpuset.IsEmpty() { ++ // If this happens then someone messed up. ++ return fmt.Errorf("[cpumanager] static policy: reserved container unable to allocate cpus (namespace: %s, pod UID: %s, pod: %s, container: %s); cpuset=%v, reserved:%v", pod.Namespace, string(pod.UID), pod.Name, container.Name, cpuset, p.reserved) ++ } ++ s.SetCPUSet(string(pod.UID), container.Name, cpuset) ++ klog.Infof("[cpumanager] static policy: reserved: AddContainer (namespace: %s, pod UID: %s, pod: %s, container: %s); cpuset=%v", pod.Namespace, string(pod.UID), pod.Name, container.Name, cpuset) ++ return nil ++ } ++ + if numCPUs := p.guaranteedCPUs(pod, container); numCPUs != 0 { + klog.InfoS("Static policy: Allocate", "pod", klog.KObj(pod), "containerName", container.Name) + // container belongs in an exclusively allocated pool +@@ -367,6 +392,10 @@ func (p *staticPolicy) guaranteedCPUs(pod *v1.Pod, container *v1.Container) int + if cpuQuantity.Value()*1000 != cpuQuantity.MilliValue() { + return 0 + } ++ // Infrastructure pods use reserved CPUs even if they're in the Guaranteed QoS class ++ if isKubeInfra(pod) { ++ return 0 ++ } + // Safe downcast to do for all systems with < 2.1 billion CPUs. + // Per the language spec, `int` is guaranteed to be at least 32 bits wide. 
+ // https://golang.org/ref/spec#Numeric_types +@@ -580,3 +609,13 @@ func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, reu + + return hints + } ++ ++// check if a given pod is in a platform infrastructure namespace ++func isKubeInfra(pod *v1.Pod) bool { ++ for _, namespace := range infraNamespaces { ++ if namespace == pod.Namespace { ++ return true ++ } ++ } ++ return false ++} +diff --git a/pkg/kubelet/cm/cpumanager/policy_static_test.go b/pkg/kubelet/cm/cpumanager/policy_static_test.go +index 80bd04a1f92..34c5a23c553 100644 +--- a/pkg/kubelet/cm/cpumanager/policy_static_test.go ++++ b/pkg/kubelet/cm/cpumanager/policy_static_test.go +@@ -830,7 +830,8 @@ func TestStaticPolicyStartWithResvList(t *testing.T) { + } + + func TestStaticPolicyAddWithResvList(t *testing.T) { +- ++ infraPod := makePod("fakePod", "fakeContainer2", "200m", "200m") ++ infraPod.Namespace = "kube-system" + testCases := []staticPolicyTestWithResvList{ + { + description: "GuPodSingleCore, SingleSocketHT, ExpectError", +@@ -872,6 +873,22 @@ func TestStaticPolicyAddWithResvList(t *testing.T) { + expCPUAlloc: true, + expCSet: cpuset.NewCPUSet(4, 5), + }, ++ { ++ description: "InfraPod, SingleSocketHT, ExpectAllocReserved", ++ topo: topoSingleSocketHT, ++ numReservedCPUs: 2, ++ reserved: cpuset.NewCPUSet(0, 1), ++ stAssignments: state.ContainerCPUAssignments{ ++ "fakePod": map[string]cpuset.CPUSet{ ++ "fakeContainer100": cpuset.NewCPUSet(2, 3, 6, 7), ++ }, ++ }, ++ stDefaultCPUSet: cpuset.NewCPUSet(4, 5), ++ pod: infraPod, ++ expErr: nil, ++ expCPUAlloc: true, ++ expCSet: cpuset.NewCPUSet(0, 1), ++ }, + } + + testExcl := true +-- +2.25.1 + diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubelet-cpumanager-introduce-concept-of-isolated-CPU.patch b/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubelet-cpumanager-introduce-concept-of-isolated-CPU.patch new file mode 100644 index 000000000..d48d38cf9 --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubelet-cpumanager-introduce-concept-of-isolated-CPU.patch @@ -0,0 +1,562 @@ +From 5264e7af1d645a5eb27a6d204f73c08cc72afa70 Mon Sep 17 00:00:00 2001 +From: Gleb Aronsky +Date: Tue, 25 Jan 2022 13:27:25 -0500 +Subject: [PATCH] kubelet cpumanager introduce concept of isolated CPUs + +This introduces the concept of "isolated CPUs", which are CPUs that +have been isolated at the kernel level via the "isolcpus" kernel boot +parameter. + +When starting the kubelet process, two separate sets of reserved CPUs +may be specified. With this change CPUs reserved via +'--system-reserved=cpu' will be used for infrastructure pods while the +isolated CPUs should be reserved via '--kube-reserved=cpu' to cause +kubelet to skip over them for "normal" CPU resource tracking. The +kubelet code will double-check that the specified isolated CPUs match +what the kernel exposes in "/sys/devices/system/cpu/isolated". + +A plugin (outside the scope of this commit) will expose the isolated +CPUs to kubelet via the device plugin API. + +If a pod specifies some number of "isolcpus" resources, the device +manager will allocate them. In this code we check whether such +resources have been allocated, and if so we set the container cpuset to +the isolated CPUs. This does mean that it really only makes sense to +specify "isolcpus" resources for best-effort or burstable pods, not for +guaranteed ones since that would throw off the accounting code. 
In
+order to ensure the accounting still works as designed, if "isolcpus"
+are specified for guaranteed pods, the affinity will be set to the
+non-isolated CPUs.
+
+This patch was refactored in 1.21.3 due to upstream API change
+node: podresources: make GetDevices() consistent
+(commit ad68f9588c72d6477b5a290c548a9031063ac659).
+
+The routine podIsolCPUs() was refactored in 1.21.3 since the API
+p.deviceManager.GetDevices() is returning multiple devices with
+a device per cpu. The resultant cpuset needs to be the aggregate.
+
+The routine NewStaticPolicy was refactored in 1.22.5, adding a new argument
+in its signature: cpuPolicyOptions map[string]string. This change implies
+shifting the new arguments (deviceManager, excludeReserved) one position
+to the right.
+
+Co-authored-by: Jim Gauld
+Co-authored-by: Chris Friesen
+Signed-off-by: Gleb Aronsky
+---
+ pkg/kubelet/cm/container_manager_linux.go | 1 +
+ pkg/kubelet/cm/cpumanager/cpu_manager.go | 35 +++++++-
+ pkg/kubelet/cm/cpumanager/cpu_manager_test.go | 14 +++-
+ pkg/kubelet/cm/cpumanager/policy_static.go | 83 +++++++++++++++++--
+ .../cm/cpumanager/policy_static_test.go | 50 ++++++++---
+ 5 files changed, 164 insertions(+), 19 deletions(-)
+
+diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go
+index 3892bae081d..9a1e9c406eb 100644
+--- a/pkg/kubelet/cm/container_manager_linux.go
++++ b/pkg/kubelet/cm/container_manager_linux.go
+@@ -340,6 +340,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
+ cm.GetNodeAllocatableReservation(),
+ nodeConfig.KubeletRootDir,
+ cm.topologyManager,
++ cm.deviceManager,
+ )
+ if err != nil {
+ klog.ErrorS(err, "Failed to initialize cpu manager")
+diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager.go b/pkg/kubelet/cm/cpumanager/cpu_manager.go
+index 8336e7b0fd4..defa03cd180 100644
+--- a/pkg/kubelet/cm/cpumanager/cpu_manager.go
++++ b/pkg/kubelet/cm/cpumanager/cpu_manager.go
+@@ -18,7 +18,9 @@ package cpumanager
+
+ import (
+ "fmt"
++ "io/ioutil"
+ "math"
++ "strings"
+ "sync"
+ "time"
+
+@@ -32,6 +34,7 @@ import (
+ "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
+ "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
+ "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
++ "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
+ "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
+ "k8s.io/kubernetes/pkg/kubelet/config"
+ kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+@@ -50,6 +53,25 @@ type policyName string
+
+ // cpuManagerStateFileName is the file name where cpu manager stores its state
+ const cpuManagerStateFileName = "cpu_manager_state"
+
++// get the system-level isolated CPUs
++func getIsolcpus() cpuset.CPUSet {
++ dat, err := ioutil.ReadFile("/sys/devices/system/cpu/isolated")
++ if err != nil {
++ klog.Errorf("[cpumanager] unable to read sysfs isolcpus subdir")
++ return cpuset.NewCPUSet()
++ }
++
++ // The isolated cpus string ends in a newline
++ cpustring := strings.TrimSuffix(string(dat), "\n")
++ cset, err := cpuset.Parse(cpustring)
++ if err != nil {
++ klog.Errorf("[cpumanager] unable to parse sysfs isolcpus string to cpuset")
++ return cpuset.NewCPUSet()
++ }
++
++ return cset
++}
++
+ // Manager interface provides methods for Kubelet to manage pod cpus.
+ type Manager interface {
+ // Start is called during Kubelet initialization.
+@@ -153,7 +175,8 @@ func (s *sourcesReadyStub) AddSource(source string) {} + func (s *sourcesReadyStub) AllReady() bool { return true } + + // NewManager creates new cpu manager based on provided policy +-func NewManager(cpuPolicyName string, cpuPolicyOptions map[string]string, reconcilePeriod time.Duration, machineInfo *cadvisorapi.MachineInfo, specificCPUs cpuset.CPUSet, nodeAllocatableReservation v1.ResourceList, stateFileDirectory string, affinity topologymanager.Store) (Manager, error) { ++func NewManager(cpuPolicyName string, cpuPolicyOptions map[string]string, reconcilePeriod time.Duration, machineInfo *cadvisorapi.MachineInfo, specificCPUs cpuset.CPUSet, nodeAllocatableReservation v1.ResourceList, stateFileDirectory string, affinity topologymanager.Store, deviceManager devicemanager.Manager) (Manager, error) { ++ + var topo *topology.CPUTopology + var policy Policy + var err error +@@ -194,7 +217,15 @@ func NewManager(cpuPolicyName string, cpuPolicyOptions map[string]string, reconc + // NOTE: Set excludeReserved unconditionally to exclude reserved CPUs from default cpuset. + // This variable is primarily to make testing easier. + excludeReserved := true +- policy, err = NewStaticPolicy(topo, numReservedCPUs, specificCPUs, affinity, cpuPolicyOptions, excludeReserved) ++ ++ // isolCPUs is the set of kernel-isolated CPUs. They should be a subset of specificCPUs or ++ // of the CPUs that NewStaticPolicy() will pick if numReservedCPUs is set. It's only in the ++ // argument list here for ease of testing, it's really internal to the policy. ++ isolCPUs := getIsolcpus() ++ policy, err = NewStaticPolicy(topo, numReservedCPUs, specificCPUs, isolCPUs, affinity, cpuPolicyOptions, deviceManager, excludeReserved) ++ if err != nil { ++ return nil, fmt.Errorf("new static policy error: %v", err) ++ } + + if err != nil { + return nil, fmt.Errorf("new static policy error: %w", err) +diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go +index 2c8349662c4..31e4d0585fb 100644 +--- a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go ++++ b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go +@@ -37,6 +37,7 @@ import ( + "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" + "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" + "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" ++ "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager" + "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" + ) + +@@ -215,6 +216,7 @@ func makeMultiContainerPod(initCPUs, appCPUs []struct{ request, limit string }) + } + + func TestCPUManagerAdd(t *testing.T) { ++ testDM, _ := devicemanager.NewManagerStub() + testExcl := false + testPolicy, _ := NewStaticPolicy( + &topology.CPUTopology{ +@@ -230,8 +232,10 @@ func TestCPUManagerAdd(t *testing.T) { + }, + 0, + cpuset.NewCPUSet(), ++ cpuset.NewCPUSet(), + topologymanager.NewFakeManager(), + nil, ++ testDM, + testExcl) + testCases := []struct { + description string +@@ -482,8 +486,9 @@ func TestCPUManagerAddWithInitContainers(t *testing.T) { + } + + testExcl := false ++ testDM, _ := devicemanager.NewManagerStub() + for _, testCase := range testCases { +- policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testExcl) ++ policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testDM, testExcl) + + mockState := &mockState{ + assignments: testCase.stAssignments, +@@ -638,7 +643,9 @@ func 
TestCPUManagerGenerate(t *testing.T) { + } + defer os.RemoveAll(sDir) + +- mgr, err := NewManager(testCase.cpuPolicyName, nil, 5*time.Second, machineInfo, cpuset.NewCPUSet(), testCase.nodeAllocatableReservation, sDir, topologymanager.NewFakeManager()) ++ testDM, err := devicemanager.NewManagerStub() ++ mgr, err := NewManager(testCase.cpuPolicyName, nil, 5*time.Second, machineInfo, cpuset.NewCPUSet(), testCase.nodeAllocatableReservation, sDir, topologymanager.NewFakeManager(), testDM) ++ + if testCase.expectedError != nil { + if !strings.Contains(err.Error(), testCase.expectedError.Error()) { + t.Errorf("Unexpected error message. Have: %s wants %s", err.Error(), testCase.expectedError.Error()) +@@ -1232,6 +1239,7 @@ func TestReconcileState(t *testing.T) { + // the following tests are with --reserved-cpus configured + func TestCPUManagerAddWithResvList(t *testing.T) { + testExcl := false ++ testDM, _ := devicemanager.NewManagerStub() + testPolicy, _ := NewStaticPolicy( + &topology.CPUTopology{ + NumCPUs: 4, +@@ -1246,8 +1254,10 @@ func TestCPUManagerAddWithResvList(t *testing.T) { + }, + 1, + cpuset.NewCPUSet(0), ++ cpuset.NewCPUSet(), + topologymanager.NewFakeManager(), + nil, ++ testDM, + testExcl) + testCases := []struct { + description string +diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go +index aeac7fdc8cb..3799795963b 100644 +--- a/pkg/kubelet/cm/cpumanager/policy_static.go ++++ b/pkg/kubelet/cm/cpumanager/policy_static.go +@@ -18,6 +18,7 @@ package cpumanager + + import ( + "fmt" ++ "strconv" + + v1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" +@@ -25,6 +26,7 @@ import ( + "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" + "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" + "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" ++ "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager" + "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" + "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask" + ) +@@ -101,6 +103,10 @@ type staticPolicy struct { + topology *topology.CPUTopology + // set of CPUs that is not available for exclusive assignment + reserved cpuset.CPUSet ++ // subset of reserved CPUs with isolcpus attribute ++ isolcpus cpuset.CPUSet ++ // parent containerManager, used to get device list ++ deviceManager devicemanager.Manager + // If true, default CPUSet should exclude reserved CPUs + excludeReserved bool + // topology manager reference to get container Topology affinity +@@ -117,7 +123,8 @@ var _ Policy = &staticPolicy{} + // NewStaticPolicy returns a CPU manager policy that does not change CPU + // assignments for exclusively pinned guaranteed containers after the main + // container process starts. 
+-func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reservedCPUs cpuset.CPUSet, affinity topologymanager.Store, cpuPolicyOptions map[string]string, excludeReserved bool) (Policy, error) { ++func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reservedCPUs cpuset.CPUSet, isolCPUs cpuset.CPUSet, affinity topologymanager.Store, cpuPolicyOptions map[string]string, deviceManager devicemanager.Manager, excludeReserved bool) (Policy, error) { ++ + opts, err := NewStaticPolicyOptions(cpuPolicyOptions) + if err != nil { + return nil, err +@@ -128,6 +135,8 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv + policy := &staticPolicy{ + topology: topology, + affinity: affinity, ++ isolcpus: isolCPUs, ++ deviceManager: deviceManager, + excludeReserved: excludeReserved, + cpusToReuse: make(map[string]cpuset.CPUSet), + options: opts, +@@ -154,6 +163,12 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv + klog.InfoS("Reserved CPUs not available for exclusive assignment", "reservedSize", reserved.Size(), "reserved", reserved) + policy.reserved = reserved + ++ if !isolCPUs.IsSubsetOf(reserved) { ++ klog.Errorf("[cpumanager] isolCPUs %v is not a subset of reserved %v", isolCPUs, reserved) ++ reserved = reserved.Union(isolCPUs) ++ klog.Warningf("[cpumanager] mismatch isolCPUs %v, force reserved %v", isolCPUs, reserved) ++ } ++ + return policy, nil + } + +@@ -187,8 +202,9 @@ func (p *staticPolicy) validateState(s state.State) error { + } else { + s.SetDefaultCPUSet(allCPUs) + } +- klog.Infof("[cpumanager] static policy: CPUSet: allCPUs:%v, reserved:%v, default:%v\n", +- allCPUs, p.reserved, s.GetDefaultCPUSet()) ++ klog.Infof("[cpumanager] static policy: CPUSet: allCPUs:%v, reserved:%v, isolcpus:%v, default:%v\n", ++ allCPUs, p.reserved, p.isolcpus, s.GetDefaultCPUSet()) ++ + return nil + } + +@@ -278,10 +294,11 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai + return nil + } + +- cpuset := p.reserved ++ cpuset := p.reserved.Clone().Difference(p.isolcpus) + if cpuset.IsEmpty() { + // If this happens then someone messed up. 
+- return fmt.Errorf("[cpumanager] static policy: reserved container unable to allocate cpus (namespace: %s, pod UID: %s, pod: %s, container: %s); cpuset=%v, reserved:%v", pod.Namespace, string(pod.UID), pod.Name, container.Name, cpuset, p.reserved) ++ return fmt.Errorf("[cpumanager] static policy: reserved container unable to allocate cpus (namespace: %s, pod UID: %s, pod: %s, container: %s); cpuset=%v, reserved:%v, isolcpus:%v", pod.Namespace, string(pod.UID), pod.Name, container.Name, cpuset, p.reserved, p.isolcpus) ++ + } + s.SetCPUSet(string(pod.UID), container.Name, cpuset) + klog.Infof("[cpumanager] static policy: reserved: AddContainer (namespace: %s, pod UID: %s, pod: %s, container: %s); cpuset=%v", pod.Namespace, string(pod.UID), pod.Name, container.Name, cpuset) +@@ -325,8 +342,34 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai + } + s.SetCPUSet(string(pod.UID), container.Name, cpuset) + p.updateCPUsToReuse(pod, container, cpuset) ++ klog.Infof("[cpumanager] guaranteed: AddContainer "+ ++ "(namespace: %s, pod UID: %s, pod: %s, container: %s); numCPUS=%d, cpuset=%v", ++ pod.Namespace, string(pod.UID), pod.Name, container.Name, numCPUs, cpuset) ++ return nil ++ } + ++ if isolcpus := p.podIsolCPUs(pod, container); isolcpus.Size() > 0 { ++ // container has requested isolated CPUs ++ if set, ok := s.GetCPUSet(string(pod.UID), container.Name); ok { ++ if set.Equals(isolcpus) { ++ klog.Infof("[cpumanager] isolcpus container already present in state, skipping (namespace: %s, pod UID: %s, pod: %s, container: %s)", ++ pod.Namespace, string(pod.UID), pod.Name, container.Name) ++ return nil ++ } else { ++ klog.Infof("[cpumanager] isolcpus container state has cpus %v, should be %v (namespace: %s, pod UID: %s, pod: %s, container: %s)", ++ isolcpus, set, pod.Namespace, string(pod.UID), pod.Name, container.Name) ++ } ++ } ++ // Note that we do not do anything about init containers here. ++ // It looks like devices are allocated per-pod based on effective requests/limits ++ // and extra devices from initContainers are not freed up when the regular containers start. ++ // TODO: confirm this is still true for 1.20 ++ s.SetCPUSet(string(pod.UID), container.Name, isolcpus) ++ klog.Infof("[cpumanager] isolcpus: AddContainer (namespace: %s, pod UID: %s, pod: %s, container: %s); cpuset=%v", ++ pod.Namespace, string(pod.UID), pod.Name, container.Name, isolcpus) ++ return nil + } ++ + // container belongs in the shared pool (nothing to do; use default cpuset) + return nil + } +@@ -619,3 +662,33 @@ func isKubeInfra(pod *v1.Pod) bool { + } + return false + } ++ ++// get the isolated CPUs (if any) from the devices associated with a specific container ++func (p *staticPolicy) podIsolCPUs(pod *v1.Pod, container *v1.Container) cpuset.CPUSet { ++ // NOTE: This is required for TestStaticPolicyAdd() since makePod() does ++ // not create UID. We also need a way to properly stub devicemanager. 
++ if len(string(pod.UID)) == 0 { ++ return cpuset.NewCPUSet() ++ } ++ resContDevices := p.deviceManager.GetDevices(string(pod.UID), container.Name) ++ cpuSet := cpuset.NewCPUSet() ++ for resourceName, resourceDevs := range resContDevices { ++ // this resource name needs to match the isolcpus device plugin ++ if resourceName == "windriver.com/isolcpus" { ++ for devID, _ := range resourceDevs { ++ cpuStrList := []string{devID} ++ if len(cpuStrList) > 0 { ++ // loop over the list of strings, convert each one to int, add to cpuset ++ for _, cpuStr := range cpuStrList { ++ cpu, err := strconv.Atoi(cpuStr) ++ if err != nil { ++ panic(err) ++ } ++ cpuSet = cpuSet.Union(cpuset.NewCPUSet(cpu)) ++ } ++ } ++ } ++ } ++ } ++ return cpuSet ++} +diff --git a/pkg/kubelet/cm/cpumanager/policy_static_test.go b/pkg/kubelet/cm/cpumanager/policy_static_test.go +index 34c5a23c553..a0eb451b60e 100644 +--- a/pkg/kubelet/cm/cpumanager/policy_static_test.go ++++ b/pkg/kubelet/cm/cpumanager/policy_static_test.go +@@ -25,6 +25,7 @@ import ( + "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" + "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" + "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" ++ "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager" + "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" + "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask" + ) +@@ -65,8 +66,9 @@ func (spt staticPolicyTest) PseudoClone() staticPolicyTest { + } + + func TestStaticPolicyName(t *testing.T) { ++ testDM, _ := devicemanager.NewManagerStub() + testExcl := false +- policy, _ := NewStaticPolicy(topoSingleSocketHT, 1, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testExcl) ++ policy, _ := NewStaticPolicy(topoSingleSocketHT, 1, cpuset.NewCPUSet(), cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testDM, testExcl) + + policyName := policy.Name() + if policyName != "static" { +@@ -76,6 +78,7 @@ func TestStaticPolicyName(t *testing.T) { + } + + func TestStaticPolicyStart(t *testing.T) { ++ testDM, _ := devicemanager.NewManagerStub() + testCases := []staticPolicyTest{ + { + description: "non-corrupted state", +@@ -151,7 +154,7 @@ func TestStaticPolicyStart(t *testing.T) { + } + for _, testCase := range testCases { + t.Run(testCase.description, func(t *testing.T) { +- p, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testCase.excludeReserved) ++ p, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testDM, testCase.excludeReserved) + + policy := p.(*staticPolicy) + st := &mockState{ +@@ -199,7 +202,7 @@ func TestStaticPolicyAdd(t *testing.T) { + largeTopoCPUSet := largeTopoBuilder.Result() + largeTopoSock0CPUSet := largeTopoSock0Builder.Result() + largeTopoSock1CPUSet := largeTopoSock1Builder.Result() +- ++ testDM, _ := devicemanager.NewManagerStub() + // these are the cases which must behave the same regardless the policy options. + // So we will permutate the options to ensure this holds true. 
+ optionsInsensitiveTestCases := []staticPolicyTest{ +@@ -529,8 +532,9 @@ func TestStaticPolicyAdd(t *testing.T) { + } + + func runStaticPolicyTestCase(t *testing.T, testCase staticPolicyTest) { ++ testDM, _ := devicemanager.NewManagerStub() + testExcl := false +- policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), testCase.options, testExcl) ++ policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), cpuset.NewCPUSet(), topologymanager.NewFakeManager(), testCase.options, testDM, testExcl) + + st := &mockState{ + assignments: testCase.stAssignments, +@@ -573,6 +577,7 @@ func runStaticPolicyTestCase(t *testing.T, testCase staticPolicyTest) { + + func TestStaticPolicyRemove(t *testing.T) { + excludeReserved := false ++ testDM, _ := devicemanager.NewManagerStub() + testCases := []staticPolicyTest{ + { + description: "SingleSocketHT, DeAllocOneContainer", +@@ -631,7 +636,7 @@ func TestStaticPolicyRemove(t *testing.T) { + } + + for _, testCase := range testCases { +- policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testCase.excludeReserved) ++ policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testDM, excludeReserved) + + st := &mockState{ + assignments: testCase.stAssignments, +@@ -654,6 +659,7 @@ func TestStaticPolicyRemove(t *testing.T) { + + func TestTopologyAwareAllocateCPUs(t *testing.T) { + excludeReserved := false ++ testDM, _ := devicemanager.NewManagerStub() + testCases := []struct { + description string + topo *topology.CPUTopology +@@ -722,7 +728,8 @@ func TestTopologyAwareAllocateCPUs(t *testing.T) { + }, + } + for _, tc := range testCases { +- p, _ := NewStaticPolicy(tc.topo, 0, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, excludeReserved) ++ p, _ := NewStaticPolicy(tc.topo, 0, cpuset.NewCPUSet(), cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testDM, excludeReserved) ++ + policy := p.(*staticPolicy) + st := &mockState{ + assignments: tc.stAssignments, +@@ -755,6 +762,7 @@ type staticPolicyTestWithResvList struct { + topo *topology.CPUTopology + numReservedCPUs int + reserved cpuset.CPUSet ++ isolcpus cpuset.CPUSet + stAssignments state.ContainerCPUAssignments + stDefaultCPUSet cpuset.CPUSet + pod *v1.Pod +@@ -765,6 +773,8 @@ type staticPolicyTestWithResvList struct { + } + + func TestStaticPolicyStartWithResvList(t *testing.T) { ++ testDM, _ := devicemanager.NewManagerStub() ++ testExcl := false + testCases := []staticPolicyTestWithResvList{ + { + description: "empty cpuset", +@@ -794,11 +804,9 @@ func TestStaticPolicyStartWithResvList(t *testing.T) { + expNewErr: fmt.Errorf("[cpumanager] unable to reserve the required amount of CPUs (size of 0-1 did not equal 1)"), + }, + } +- testExcl := false + for _, testCase := range testCases { + t.Run(testCase.description, func(t *testing.T) { +- p, err := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, topologymanager.NewFakeManager(), nil, testExcl) +- ++ p, err := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testDM, testExcl) + if !reflect.DeepEqual(err, testCase.expNewErr) { + t.Errorf("StaticPolicy Start() error (%v). 
expected error: %v but got: %v", + testCase.description, testCase.expNewErr, err) +@@ -838,6 +846,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) { + topo: topoSingleSocketHT, + numReservedCPUs: 1, + reserved: cpuset.NewCPUSet(0), ++ isolcpus: cpuset.NewCPUSet(), + stAssignments: state.ContainerCPUAssignments{}, + stDefaultCPUSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7), + pod: makePod("fakePod", "fakeContainer2", "8000m", "8000m"), +@@ -850,6 +859,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) { + topo: topoSingleSocketHT, + numReservedCPUs: 2, + reserved: cpuset.NewCPUSet(0, 1), ++ isolcpus: cpuset.NewCPUSet(), + stAssignments: state.ContainerCPUAssignments{}, + stDefaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7), + pod: makePod("fakePod", "fakeContainer2", "1000m", "1000m"), +@@ -862,6 +872,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) { + topo: topoSingleSocketHT, + numReservedCPUs: 2, + reserved: cpuset.NewCPUSet(0, 1), ++ isolcpus: cpuset.NewCPUSet(), + stAssignments: state.ContainerCPUAssignments{ + "fakePod": map[string]cpuset.CPUSet{ + "fakeContainer100": cpuset.NewCPUSet(2, 3, 6, 7), +@@ -878,6 +889,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) { + topo: topoSingleSocketHT, + numReservedCPUs: 2, + reserved: cpuset.NewCPUSet(0, 1), ++ isolcpus: cpuset.NewCPUSet(), + stAssignments: state.ContainerCPUAssignments{ + "fakePod": map[string]cpuset.CPUSet{ + "fakeContainer100": cpuset.NewCPUSet(2, 3, 6, 7), +@@ -889,11 +901,29 @@ func TestStaticPolicyAddWithResvList(t *testing.T) { + expCPUAlloc: true, + expCSet: cpuset.NewCPUSet(0, 1), + }, ++ { ++ description: "InfraPod, SingleSocketHT, Isolcpus, ExpectAllocReserved", ++ topo: topoSingleSocketHT, ++ numReservedCPUs: 2, ++ reserved: cpuset.NewCPUSet(0, 1), ++ isolcpus: cpuset.NewCPUSet(1), ++ stAssignments: state.ContainerCPUAssignments{ ++ "fakePod": map[string]cpuset.CPUSet{ ++ "fakeContainer100": cpuset.NewCPUSet(2, 3, 6, 7), ++ }, ++ }, ++ stDefaultCPUSet: cpuset.NewCPUSet(4, 5), ++ pod: infraPod, ++ expErr: nil, ++ expCPUAlloc: true, ++ expCSet: cpuset.NewCPUSet(0), ++ }, + } + + testExcl := true ++ testDM, _ := devicemanager.NewManagerStub() + for _, testCase := range testCases { +- policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, topologymanager.NewFakeManager(), nil, testExcl) ++ policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, testCase.isolcpus, topologymanager.NewFakeManager(), nil, testDM, testExcl) + + st := &mockState{ + assignments: testCase.stAssignments, +-- +2.25.1 + diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubelet-cpumanager-keep-normal-containers-off-reserv.patch b/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubelet-cpumanager-keep-normal-containers-off-reserv.patch new file mode 100644 index 000000000..e9b457928 --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubelet-cpumanager-keep-normal-containers-off-reserv.patch @@ -0,0 +1,311 @@ +From f2186adb445f1420184aa5af5536bb777536a798 Mon Sep 17 00:00:00 2001 +From: Gleb Aronsky +Date: Mon, 24 Jan 2022 15:52:24 -0500 +Subject: [PATCH] kubelet cpumanager keep normal containers off reserved CPUs + +When starting the kubelet process, two separate sets of reserved CPUs +may be specified. With this change CPUs reserved via +'--system-reserved=cpu' +or '--kube-reserved=cpu' will be ignored by kubernetes itself. 
A small +tweak to the default CPU affinity ensures that "normal" Kubernetes +pods won't run on the reserved CPUs. + +Co-authored-by: Jim Gauld +Signed-off-by: Gleb Aronsky +--- + pkg/kubelet/cm/cpumanager/cpu_manager.go | 6 ++- + pkg/kubelet/cm/cpumanager/cpu_manager_test.go | 11 ++++-- + pkg/kubelet/cm/cpumanager/policy_static.go | 29 +++++++++++--- + .../cm/cpumanager/policy_static_test.go | 38 ++++++++++++++----- + 4 files changed, 64 insertions(+), 20 deletions(-) + +diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager.go b/pkg/kubelet/cm/cpumanager/cpu_manager.go +index 6ad289336ea..8336e7b0fd4 100644 +--- a/pkg/kubelet/cm/cpumanager/cpu_manager.go ++++ b/pkg/kubelet/cm/cpumanager/cpu_manager.go +@@ -191,7 +191,11 @@ func NewManager(cpuPolicyName string, cpuPolicyOptions map[string]string, reconc + // exclusively allocated. + reservedCPUsFloat := float64(reservedCPUs.MilliValue()) / 1000 + numReservedCPUs := int(math.Ceil(reservedCPUsFloat)) +- policy, err = NewStaticPolicy(topo, numReservedCPUs, specificCPUs, affinity, cpuPolicyOptions) ++ // NOTE: Set excludeReserved unconditionally to exclude reserved CPUs from default cpuset. ++ // This variable is primarily to make testing easier. ++ excludeReserved := true ++ policy, err = NewStaticPolicy(topo, numReservedCPUs, specificCPUs, affinity, cpuPolicyOptions, excludeReserved) ++ + if err != nil { + return nil, fmt.Errorf("new static policy error: %w", err) + } +diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go +index 9b3e24fc3b2..2c8349662c4 100644 +--- a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go ++++ b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go +@@ -215,6 +215,7 @@ func makeMultiContainerPod(initCPUs, appCPUs []struct{ request, limit string }) + } + + func TestCPUManagerAdd(t *testing.T) { ++ testExcl := false + testPolicy, _ := NewStaticPolicy( + &topology.CPUTopology{ + NumCPUs: 4, +@@ -230,7 +231,8 @@ func TestCPUManagerAdd(t *testing.T) { + 0, + cpuset.NewCPUSet(), + topologymanager.NewFakeManager(), +- nil) ++ nil, ++ testExcl) + testCases := []struct { + description string + updateErr error +@@ -479,8 +481,9 @@ func TestCPUManagerAddWithInitContainers(t *testing.T) { + }, + } + ++ testExcl := false + for _, testCase := range testCases { +- policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil) ++ policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testExcl) + + mockState := &mockState{ + assignments: testCase.stAssignments, +@@ -1228,6 +1231,7 @@ func TestReconcileState(t *testing.T) { + // above test cases are without kubelet --reserved-cpus cmd option + // the following tests are with --reserved-cpus configured + func TestCPUManagerAddWithResvList(t *testing.T) { ++ testExcl := false + testPolicy, _ := NewStaticPolicy( + &topology.CPUTopology{ + NumCPUs: 4, +@@ -1243,7 +1247,8 @@ func TestCPUManagerAddWithResvList(t *testing.T) { + 1, + cpuset.NewCPUSet(0), + topologymanager.NewFakeManager(), +- nil) ++ nil, ++ testExcl) + testCases := []struct { + description string + updateErr error +diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go +index f7ff26cd313..9697f4d4bb0 100644 +--- a/pkg/kubelet/cm/cpumanager/policy_static.go ++++ b/pkg/kubelet/cm/cpumanager/policy_static.go +@@ -96,6 +96,8 @@ type staticPolicy struct { + topology *topology.CPUTopology + // set of CPUs 
that is not available for exclusive assignment + reserved cpuset.CPUSet ++ // If true, default CPUSet should exclude reserved CPUs ++ excludeReserved bool + // topology manager reference to get container Topology affinity + affinity topologymanager.Store + // set of CPUs to reuse across allocations in a pod +@@ -110,7 +112,7 @@ var _ Policy = &staticPolicy{} + // NewStaticPolicy returns a CPU manager policy that does not change CPU + // assignments for exclusively pinned guaranteed containers after the main + // container process starts. +-func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reservedCPUs cpuset.CPUSet, affinity topologymanager.Store, cpuPolicyOptions map[string]string) (Policy, error) { ++func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reservedCPUs cpuset.CPUSet, affinity topologymanager.Store, cpuPolicyOptions map[string]string, excludeReserved bool) (Policy, error) { + opts, err := NewStaticPolicyOptions(cpuPolicyOptions) + if err != nil { + return nil, err +@@ -172,7 +174,15 @@ func (p *staticPolicy) validateState(s state.State) error { + } + // state is empty initialize + allCPUs := p.topology.CPUDetails.CPUs() +- s.SetDefaultCPUSet(allCPUs) ++ if p.excludeReserved { ++ // Exclude reserved CPUs from the default CPUSet to keep containers off them ++ // unless explicitly affined. ++ s.SetDefaultCPUSet(allCPUs.Difference(p.reserved)) ++ } else { ++ s.SetDefaultCPUSet(allCPUs) ++ } ++ klog.Infof("[cpumanager] static policy: CPUSet: allCPUs:%v, reserved:%v, default:%v\n", ++ allCPUs, p.reserved, s.GetDefaultCPUSet()) + return nil + } + +@@ -180,11 +190,12 @@ func (p *staticPolicy) validateState(s state.State) error { + // 1. Check if the reserved cpuset is not part of default cpuset because: + // - kube/system reserved have changed (increased) - may lead to some containers not being able to start + // - user tampered with file +- if !p.reserved.Intersection(tmpDefaultCPUset).Equals(p.reserved) { +- return fmt.Errorf("not all reserved cpus: \"%s\" are present in defaultCpuSet: \"%s\"", +- p.reserved.String(), tmpDefaultCPUset.String()) ++ if !p.excludeReserved { ++ if !p.reserved.Intersection(tmpDefaultCPUset).Equals(p.reserved) { ++ return fmt.Errorf("not all reserved cpus: \"%s\" are present in defaultCpuSet: \"%s\"", ++ p.reserved.String(), tmpDefaultCPUset.String()) ++ } + } +- + // 2. Check if state for static policy is consistent + for pod := range tmpAssignments { + for container, cset := range tmpAssignments[pod] { +@@ -211,6 +222,9 @@ func (p *staticPolicy) validateState(s state.State) error { + } + } + totalKnownCPUs = totalKnownCPUs.UnionAll(tmpCPUSets) ++ if p.excludeReserved { ++ totalKnownCPUs = totalKnownCPUs.Union(p.reserved) ++ } + if !totalKnownCPUs.Equals(p.topology.CPUDetails.CPUs()) { + return fmt.Errorf("current set of available CPUs \"%s\" doesn't match with CPUs in state \"%s\"", + p.topology.CPUDetails.CPUs().String(), totalKnownCPUs.String()) +@@ -296,6 +310,9 @@ func (p *staticPolicy) RemoveContainer(s state.State, podUID string, containerNa + klog.InfoS("Static policy: RemoveContainer", "podUID", podUID, "containerName", containerName) + if toRelease, ok := s.GetCPUSet(podUID, containerName); ok { + s.Delete(podUID, containerName) ++ if p.excludeReserved { ++ toRelease = toRelease.Difference(p.reserved) ++ } + // Mutate the shared pool, adding released cpus. 
+ s.SetDefaultCPUSet(s.GetDefaultCPUSet().Union(toRelease)) + } +diff --git a/pkg/kubelet/cm/cpumanager/policy_static_test.go b/pkg/kubelet/cm/cpumanager/policy_static_test.go +index d2b641fe3a0..80bd04a1f92 100644 +--- a/pkg/kubelet/cm/cpumanager/policy_static_test.go ++++ b/pkg/kubelet/cm/cpumanager/policy_static_test.go +@@ -33,6 +33,7 @@ type staticPolicyTest struct { + description string + topo *topology.CPUTopology + numReservedCPUs int ++ excludeReserved bool + podUID string + options map[string]string + containerName string +@@ -64,7 +65,8 @@ func (spt staticPolicyTest) PseudoClone() staticPolicyTest { + } + + func TestStaticPolicyName(t *testing.T) { +- policy, _ := NewStaticPolicy(topoSingleSocketHT, 1, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil) ++ testExcl := false ++ policy, _ := NewStaticPolicy(topoSingleSocketHT, 1, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testExcl) + + policyName := policy.Name() + if policyName != "static" { +@@ -94,6 +96,15 @@ func TestStaticPolicyStart(t *testing.T) { + stDefaultCPUSet: cpuset.NewCPUSet(), + expCSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), + }, ++ { ++ description: "empty cpuset exclude reserved", ++ topo: topoDualSocketHT, ++ numReservedCPUs: 2, ++ excludeReserved: true, ++ stAssignments: state.ContainerCPUAssignments{}, ++ stDefaultCPUSet: cpuset.NewCPUSet(), ++ expCSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 7, 8, 9, 10, 11), ++ }, + { + description: "reserved cores 0 & 6 are not present in available cpuset", + topo: topoDualSocketHT, +@@ -140,7 +151,8 @@ func TestStaticPolicyStart(t *testing.T) { + } + for _, testCase := range testCases { + t.Run(testCase.description, func(t *testing.T) { +- p, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil) ++ p, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, testCase.excludeReserved) ++ + policy := p.(*staticPolicy) + st := &mockState{ + assignments: testCase.stAssignments, +@@ -211,7 +223,7 @@ func TestStaticPolicyAdd(t *testing.T) { + "fakeContainer100": cpuset.NewCPUSet(2, 3, 6, 7), + }, + }, +- stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 4, 5), ++ stDefaultCPUSet: cpuset.NewCPUSet(4, 5), + pod: makePod("fakePod", "fakeContainer3", "2000m", "2000m"), + expErr: nil, + expCPUAlloc: true, +@@ -517,7 +529,8 @@ func TestStaticPolicyAdd(t *testing.T) { + } + + func runStaticPolicyTestCase(t *testing.T, testCase staticPolicyTest) { +- policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), testCase.options) ++ testExcl := false ++ policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), testCase.options, testExcl) + + st := &mockState{ + assignments: testCase.stAssignments, +@@ -559,6 +572,7 @@ func runStaticPolicyTestCase(t *testing.T, testCase staticPolicyTest) { + } + + func TestStaticPolicyRemove(t *testing.T) { ++ excludeReserved := false + testCases := []staticPolicyTest{ + { + description: "SingleSocketHT, DeAllocOneContainer", +@@ -617,7 +631,7 @@ func TestStaticPolicyRemove(t *testing.T) { + } + + for _, testCase := range testCases { +- policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil) ++ policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), 
topologymanager.NewFakeManager(), nil, testCase.excludeReserved) + + st := &mockState{ + assignments: testCase.stAssignments, +@@ -639,6 +653,7 @@ func TestStaticPolicyRemove(t *testing.T) { + } + + func TestTopologyAwareAllocateCPUs(t *testing.T) { ++ excludeReserved := false + testCases := []struct { + description string + topo *topology.CPUTopology +@@ -707,7 +722,7 @@ func TestTopologyAwareAllocateCPUs(t *testing.T) { + }, + } + for _, tc := range testCases { +- p, _ := NewStaticPolicy(tc.topo, 0, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil) ++ p, _ := NewStaticPolicy(tc.topo, 0, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil, excludeReserved) + policy := p.(*staticPolicy) + st := &mockState{ + assignments: tc.stAssignments, +@@ -779,9 +794,11 @@ func TestStaticPolicyStartWithResvList(t *testing.T) { + expNewErr: fmt.Errorf("[cpumanager] unable to reserve the required amount of CPUs (size of 0-1 did not equal 1)"), + }, + } ++ testExcl := false + for _, testCase := range testCases { + t.Run(testCase.description, func(t *testing.T) { +- p, err := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, topologymanager.NewFakeManager(), nil) ++ p, err := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, topologymanager.NewFakeManager(), nil, testExcl) ++ + if !reflect.DeepEqual(err, testCase.expNewErr) { + t.Errorf("StaticPolicy Start() error (%v). expected error: %v but got: %v", + testCase.description, testCase.expNewErr, err) +@@ -821,7 +838,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) { + numReservedCPUs: 1, + reserved: cpuset.NewCPUSet(0), + stAssignments: state.ContainerCPUAssignments{}, +- stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7), ++ stDefaultCPUSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7), + pod: makePod("fakePod", "fakeContainer2", "8000m", "8000m"), + expErr: fmt.Errorf("not enough cpus available to satisfy request"), + expCPUAlloc: false, +@@ -833,7 +850,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) { + numReservedCPUs: 2, + reserved: cpuset.NewCPUSet(0, 1), + stAssignments: state.ContainerCPUAssignments{}, +- stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7), ++ stDefaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7), + pod: makePod("fakePod", "fakeContainer2", "1000m", "1000m"), + expErr: nil, + expCPUAlloc: true, +@@ -857,8 +874,9 @@ func TestStaticPolicyAddWithResvList(t *testing.T) { + }, + } + ++ testExcl := true + for _, testCase := range testCases { +- policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, topologymanager.NewFakeManager(), nil) ++ policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, topologymanager.NewFakeManager(), nil, testExcl) + + st := &mockState{ + assignments: testCase.stAssignments, +-- +2.25.1 + diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubelet-sort-isolcpus-allocation-when-SMT-enabled.patch b/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubelet-sort-isolcpus-allocation-when-SMT-enabled.patch new file mode 100644 index 000000000..a58e47d5b --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubelet-sort-isolcpus-allocation-when-SMT-enabled.patch @@ -0,0 +1,50 @@ +From ba9ab333c8b7dca5252e604837914293dc232732 Mon Sep 17 00:00:00 2001 +From: Jim Gauld +Date: Fri, 11 Feb 2022 11:06:35 -0500 +Subject: [PATCH] kubelet: sort isolcpus allocation when SMT enabled + +The existing device manager code returns CPUs as 
devices in unsorted +order. This numerically sorts isolcpus allocations when SMT/HT is +enabled on the host. This logs SMT pairs, singletons, and algorithm +order details to make the algorithm understandable. + +Signed-off-by: Jim Gauld +--- + pkg/kubelet/cm/devicemanager/manager.go | 13 ++++++++++++- + 1 file changed, 12 insertions(+), 1 deletion(-) + +diff --git a/pkg/kubelet/cm/devicemanager/manager.go b/pkg/kubelet/cm/devicemanager/manager.go +index 609da8ed86b..a4b247714f7 100644 +--- a/pkg/kubelet/cm/devicemanager/manager.go ++++ b/pkg/kubelet/cm/devicemanager/manager.go +@@ -686,7 +686,16 @@ func order_devices_by_sibling(devices sets.String, needed int) ([]string, error) + return cpu_lst[0] + } + } ++ //Make post-analysis of selection algorithm obvious by numerical sorting ++ //the available isolated cpu_id. ++ cpu_ids := make([]int, 0, int(devices.Len())) + for cpu_id := range devices { ++ cpu_id_, _ := strconv.Atoi(cpu_id) ++ cpu_ids = append(cpu_ids, cpu_id_) ++ } ++ sort.Ints(cpu_ids) ++ for _, _cpu_id := range cpu_ids { ++ cpu_id := strconv.Itoa(_cpu_id) + // If we've already found cpu_id as a sibling, skip it. + if _, ok := _iterated_cpu[cpu_id]; ok { + continue +@@ -728,7 +737,9 @@ func order_devices_by_sibling(devices sets.String, needed int) ([]string, error) + } + } + } +- //klog.Infof("needed=%d ordered_cpu_list=%v", needed, dev_lst) ++ //This algorithm will get some attention. Show minimal details. ++ klog.Infof("order_devices_by_sibling: needed=%d, smtpairs=%v, singletons=%v, order=%v", ++ needed, sibling_lst, single_lst, dev_lst) + return dev_lst, nil + } + func smt_enabled() bool { +-- +2.25.1 + diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubernetes-make-isolcpus-allocation-SMT-aware.patch b/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubernetes-make-isolcpus-allocation-SMT-aware.patch new file mode 100644 index 000000000..dbc28d503 --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/kubernetes-make-isolcpus-allocation-SMT-aware.patch @@ -0,0 +1,151 @@ +From 95b7b6e1ddb25511c67a3d4018f62df1e76ee7bc Mon Sep 17 00:00:00 2001 +From: Tao Wang +Date: Tue, 25 Jan 2022 19:25:45 -0500 +Subject: [PATCH] kubernetes: make isolcpus allocation SMT-aware + +Enhance isolcpus support in Kubernetes to allocate isolated SMT +siblings to the same container when SMT/HT is enabled on the host. + +As it stands, the device manager code in Kubernetes is not SMT-aware +(since normally it doesn't deal with CPUs). However, StarlingX +exposes isolated CPUs as devices and if possible we want to allocate +all SMT siblings from a CPU core to the same container in order to +minimize cross- container interference due to resource contention +within the CPU core. + +The solution is basically to take the list of isolated CPUs and +re-order it so that the SMT siblings are next to each other. That +way the existing resource selection code will allocate the siblings +together. As an optimization, if it is known that an odd number +of isolated CPUs are desired, a singleton SMT sibling will be +inserted into the list to avoid breaking up sibling pairs. 
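+
+As an illustrative example (assumed host layout, not taken from the
+change itself): given isolated CPUs {3, 4, 7, 11, 12} where (3,11) and
+(4,12) are SMT sibling pairs and 7 is a singleton, a request for 3
+isolated CPUs is served from a list ordered roughly as
+[7, 3, 11, 4, 12], so the singleton is consumed first and the second
+sibling pair (4,12) stays intact for later allocations.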
+ +Signed-off-by: Tao Wang +--- + pkg/kubelet/cm/devicemanager/manager.go | 84 ++++++++++++++++++++++++- + 1 file changed, 83 insertions(+), 1 deletion(-) + +diff --git a/pkg/kubelet/cm/devicemanager/manager.go b/pkg/kubelet/cm/devicemanager/manager.go +index 60de14a9..609da8ed 100644 +--- a/pkg/kubelet/cm/devicemanager/manager.go ++++ b/pkg/kubelet/cm/devicemanager/manager.go +@@ -19,11 +19,14 @@ package devicemanager + import ( + "context" + "fmt" ++ "io/ioutil" + "net" + "os" + "path/filepath" + "runtime" + "sort" ++ "strconv" ++ "strings" + "sync" + "time" + +@@ -41,6 +44,7 @@ import ( + "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/kubelet/checkpointmanager" + "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors" ++ "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" + "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint" + "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" + "k8s.io/kubernetes/pkg/kubelet/config" +@@ -667,6 +671,75 @@ func (m *ManagerImpl) UpdateAllocatedDevices() { + m.allocatedDevices = m.podDevices.devices() + } + ++//Given a list of isolated CPUs in 'devices', and the number of desired CPUs in 'needed', ++//return an ordered list of isolated CPUs such that the first 'needed' CPUs in the list ++//contain as many hyperthread sibling pairs as possible. ++func order_devices_by_sibling(devices sets.String, needed int) ([]string, error) { ++ var dev_lst []string ++ var single_lst []string ++ sibling_lst := make([]string, 0, int(devices.Len())) ++ _iterated_cpu := make(map[string]string) ++ get_sibling := func(cpu string, cpu_lst []string) string { ++ if cpu_lst[0] == cpu { ++ return cpu_lst[1] ++ } else { ++ return cpu_lst[0] ++ } ++ } ++ for cpu_id := range devices { ++ // If we've already found cpu_id as a sibling, skip it. ++ if _, ok := _iterated_cpu[cpu_id]; ok { ++ continue ++ } ++ devPath := fmt.Sprintf("/sys/devices/system/cpu/cpu%s/topology/thread_siblings_list", cpu_id) ++ dat, err := ioutil.ReadFile(devPath) ++ if err != nil { ++ return dev_lst, fmt.Errorf("Can't read cpu[%s] thread_siblings_list", cpu_id) ++ } ++ cpustring := strings.TrimSuffix(string(dat), "\n") ++ cpu_pair_set, err := cpuset.Parse(cpustring) ++ if err != nil { ++ return dev_lst, fmt.Errorf("Unable to parse thread_siblings_list[%s] string to cpuset", cpustring) ++ } ++ var cpu_pair_lst []string ++ for _, v := range cpu_pair_set.ToSlice() { ++ cpu_pair_lst = append(cpu_pair_lst, strconv.Itoa(v)) ++ } ++ sibling_cpu_id := get_sibling(cpu_id, cpu_pair_lst) ++ if _, ok := devices[sibling_cpu_id]; ok { ++ sibling_lst = append(sibling_lst, cpu_id, sibling_cpu_id) ++ _iterated_cpu[sibling_cpu_id] = "" ++ } else { ++ single_lst = append(single_lst, cpu_id) ++ } ++ _iterated_cpu[cpu_id] = "" ++ } ++ if needed%2 == 0 { ++ dev_lst = append(sibling_lst, single_lst...) ++ } else { ++ if len(single_lst) > 1 { ++ _tmp_list := append(sibling_lst, single_lst[1:]...) ++ dev_lst = append(single_lst[0:1], _tmp_list...) ++ } else { ++ if len(single_lst) == 0 { ++ dev_lst = sibling_lst ++ } else { ++ dev_lst = append(single_lst, sibling_lst...) ++ } ++ } ++ } ++ //klog.Infof("needed=%d ordered_cpu_list=%v", needed, dev_lst) ++ return dev_lst, nil ++} ++func smt_enabled() bool { ++ dat, _ := ioutil.ReadFile("/sys/devices/system/cpu/smt/active") ++ state := strings.TrimSuffix(string(dat), "\n") ++ if state == "0" { ++ return false ++ } ++ return true ++} ++ + // Returns list of device Ids we need to allocate with Allocate rpc call. 
+ // Returns empty list in case we don't need to issue the Allocate rpc call. + func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, required int, reusableDevices sets.String) (sets.String, error) { +@@ -702,7 +775,16 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi + // Create a closure to help with device allocation + // Returns 'true' once no more devices need to be allocated. + allocateRemainingFrom := func(devices sets.String) bool { +- for device := range devices.Difference(allocated) { ++ availableDevices := devices.Difference(allocated).List() ++ // If we're dealing with isolcpus and SMT is enabled, reorder to group SMT siblings together. ++ if resource == "windriver.com/isolcpus" && len(devices) > 0 && smt_enabled() { ++ var err error ++ availableDevices, err = order_devices_by_sibling(devices.Difference(allocated), needed) ++ if err != nil { ++ klog.Errorf("error in order_devices_by_sibling: %v", err) ++ } ++ } ++ for _, device := range availableDevices { + m.allocatedDevices[resource].Insert(device) + allocated.Insert(device) + needed-- +-- +2.22.5 + diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/series b/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/series new file mode 100644 index 000000000..a00bce212 --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/patches/series @@ -0,0 +1,9 @@ +kubelet-cpumanager-disable-CFS-quota-throttling-for-.patch +kubelet-cpumanager-keep-normal-containers-off-reserv.patch +kubelet-cpumanager-infrastructure-pods-use-system-re.patch +kubelet-cpumanager-introduce-concept-of-isolated-CPU.patch +kubeadm-create-platform-pods-with-zero-CPU-resources.patch +enable-support-for-kubernetes-to-ignore-isolcpus.patch +Revert-use-subpath-for-coredns-only-for-default-repo.patch +kubernetes-make-isolcpus-allocation-SMT-aware.patch +kubelet-sort-isolcpus-allocation-when-SMT-enabled.patch diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/rules b/kubernetes/kubernetes-1.23.1/debian/deb_folder/rules new file mode 100755 index 000000000..ec7b8b83a --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/rules @@ -0,0 +1,138 @@ +#!/usr/bin/make -f + +kube_version := 1.23.1 +kube_git_version := v${kube_version} +name := kubernetes-${kube_version} +go_version := 1.17.5 +_stage1 := /usr/local/kubernetes/${kube_version}/stage1 +_stage2 := /usr/local/kubernetes/${kube_version}/stage2 +_bindir := /usr/bin +output_path := src/k8s.io/kubernetes/_output/bin +DEBIAN_DESTDIR := $(CURDIR)/debian/tmp +export DH_VERBOSE = 1 +export PATH := /usr/lib/go-1.17/bin:$(PATH) +export KUBE_GIT_TREE_STATE="clean" +export KUBE_GIT_COMMIT=${kube_version} +export KUBE_GIT_VERSION=${kube_git_version} +export KUBE_EXTRA_GOPATH=$(pwd)/Godeps/_workspace +export PBR_VERSION=${kube_git_version} + + +%: + dh $@ --with=bash-completion --builddirectory=src --without=build-stamp + +override_dh_auto_build: + + mkdir -pv src/k8s.io/kubernetes/ + mv -v $$(ls | grep -v "^src$$" | grep -v "^debian$$") src/k8s.io/kubernetes/. + + go version + which go + + cd src/k8s.io/kubernetes/ && make WHAT="cmd/kube-proxy cmd/kube-apiserver cmd/kube-controller-manager cmd/kubelet cmd/kubeadm cmd/kube-scheduler cmd/kubectl" + + # TODO convert md to man + #./hack/generate-docs.sh || true + #pushd docs ;\ + #pushd admin ;\ + #cp kube-apiserver.md kube-controller-manager.md kube-proxy.md kube-scheduler.md kubelet.md .. 
;\
+	#popd ;\
+	#bash genmanpages.sh ;\
+	#popd ;\
+	#popd ;\
+
+
+override_dh_install:
+
+	install -m 755 -d ${DEBIAN_DESTDIR}${_bindir}
+	install -m 755 -d ${DEBIAN_DESTDIR}${_stage1}${_bindir}
+	install -m 755 -d ${DEBIAN_DESTDIR}${_stage2}${_bindir}
+
+	echo "+++ INSTALLING kube-apiserver"
+	install -p -m 754 -t ${DEBIAN_DESTDIR}${_bindir} ${output_path}/kube-apiserver
+
+	echo "+++ INSTALLING kubeadm"
+	install -p -m 755 -t ${DEBIAN_DESTDIR}${_stage1}${_bindir} ${output_path}/kubeadm
+	install -d -m 0755 ${DEBIAN_DESTDIR}${_stage2}/etc/systemd/system/kubelet.service.d
+	install -p -m 0644 -t ${DEBIAN_DESTDIR}${_stage2}/etc/systemd/system/kubelet.service.d debian/kubeadm.conf
+
+	echo "+++ INSTALLING kubelet-cgroup-setup.sh"
+	install -p -m 0700 -t ${DEBIAN_DESTDIR}${_stage2}${_bindir} debian/kubelet-cgroup-setup.sh
+
+	echo "+++ INSTALLING kube-apiserver"
+	install -p -m 754 -t ${DEBIAN_DESTDIR}${_bindir} ${output_path}/kube-apiserver
+
+	echo "+++ INSTALLING kube-controller-manager"
+	install -p -m 754 -t ${DEBIAN_DESTDIR}${_bindir} ${output_path}/kube-controller-manager
+
+	echo "+++ INSTALLING kube-scheduler"
+	install -p -m 754 -t ${DEBIAN_DESTDIR}${_bindir} ${output_path}/kube-scheduler
+
+	echo "+++ INSTALLING kube-proxy"
+	install -p -m 754 -t ${DEBIAN_DESTDIR}${_bindir} ${output_path}/kube-proxy
+
+	echo "+++ INSTALLING kubelet"
+	install -p -m 754 -t ${DEBIAN_DESTDIR}${_stage2}${_bindir} ${output_path}/kubelet
+
+	echo "+++ INSTALLING kubectl"
+	install -p -m 754 -t ${DEBIAN_DESTDIR}${_stage2}${_bindir} ${output_path}/kubectl
+
+	# install the bash completion
+	install -d -m 0755 ${DEBIAN_DESTDIR}${_stage2}/usr/share/bash-completion/completions/
+	${DEBIAN_DESTDIR}${_stage2}${_bindir}/kubectl completion bash > ${DEBIAN_DESTDIR}${_stage2}/usr/share/bash-completion/completions/kubectl
+
+	# install specific cluster addons for optional use
+	install -d -m 0755 ${DEBIAN_DESTDIR}/etc/${name}/addons
+	# Addon: volumesnapshots
+	install -d -m 0755 ${DEBIAN_DESTDIR}/etc/${name}/addons/volumesnapshots
+	install -d -m 0755 ${DEBIAN_DESTDIR}/etc/${name}/addons/volumesnapshots/crd
+	install -m 0644 -t ${DEBIAN_DESTDIR}/etc/${name}/addons/volumesnapshots/crd src/k8s.io/kubernetes/cluster/addons/volumesnapshots/crd/*
+	install -d -m 0755 ${DEBIAN_DESTDIR}/etc/${name}/addons/volumesnapshots/volume-snapshot-controller
+	install -m 0644 -t ${DEBIAN_DESTDIR}/etc/${name}/addons/volumesnapshots/volume-snapshot-controller src/k8s.io/kubernetes/cluster/addons/volumesnapshots/volume-snapshot-controller/*
+
+	#TODO install manpages
+	#install -d ${DEBIAN_DESTDIR}/usr/share/man/man1
+	#install -p -m 644 docs/man/man1/* ${DEBIAN_DESTDIR}/usr/share/man/man1
+	#rm -Rf ${DEBIAN_DESTDIR}/usr/share/man/man1/cloud-controller-manager.*
+	# from k8s tarball copied docs/man/man1/*.1
+	#popd
+
+	#mv src/k8s.io/kubernetes/*.md .
+	#mv src/k8s.io/kubernetes/LICENSE .
+ + + # place files for unit-test rpm + install -d -m 0755 ${DEBIAN_DESTDIR}/var/lib/kubernetes-unit-test/ + # basically, everything from the root directory is needed + # unit-tests needs source code + # integration tests needs docs and other files + # test-cmd.sh atm needs cluster, examples and other + cp -a src ${DEBIAN_DESTDIR}/var/lib/kubernetes-unit-test/ + rm -rf ${DEBIAN_DESTDIR}/var/lib/kubernetes-unit-test/src/k8s.io/kubernetes/_output + + #TODO with install manpages + #cp -a *.md ${DEBIAN_DESTDIR}/var/lib/kubernetes-unit-test/src/k8s.io/kubernetes/ + + dh_install + +override_dh_auto_test: + + echo "******Testing the commands*****" + src/k8s.io/kubernetes/hack/test-cmd.sh + + echo "******Benchmarking kube********" + src/k8s.io/kubernetes/hack/benchmark-go.sh + + echo "******Testing the go code******" + src/k8s.io/kubernetes/hack/test-go.sh + + echo "******Testing integration******" + src/k8s.io/kubernetes/hack/test-integration.sh --use_go_build + +override_dh_fixperms: + dh_fixperms -Xkube-apiserver -Xkubeadm -Xkubeadm.conf \ + -Xkubelet-cgroup-setup.sh -Xkube-apiserver \ + -Xkube-controller-manager -Xkube-scheduler \ + -Xkube-proxy -Xkubelet -Xkubectl + +override_dh_usrlocal: diff --git a/kubernetes/kubernetes-1.23.1/debian/deb_folder/source/format b/kubernetes/kubernetes-1.23.1/debian/deb_folder/source/format new file mode 100644 index 000000000..163aaf8d8 --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/deb_folder/source/format @@ -0,0 +1 @@ +3.0 (quilt) diff --git a/kubernetes/kubernetes-1.23.1/debian/meta_data.yaml b/kubernetes/kubernetes-1.23.1/debian/meta_data.yaml new file mode 100644 index 000000000..9580c64fa --- /dev/null +++ b/kubernetes/kubernetes-1.23.1/debian/meta_data.yaml @@ -0,0 +1,9 @@ +debver: 1.23.1 +dl_path: + name: kubernetes-1.23.1.tar.gz + url: https://github.com/kubernetes/kubernetes/archive/refs/tags/v1.23.1.tar.gz + md5sum: dba0ea30d0a80065210a4ae857758f4a + sha256sum: 9529d76a623f133fd609765b040382d593c97803220f5166451f0dbca5c807d5 +revision: + dist: $STX_DIST + PKG_GITREVCOUNT: true
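
For reference, the sha256sum recorded in meta_data.yaml above can be checked against a locally downloaded kubernetes-1.23.1.tar.gz with a short standalone Go program (a minimal sketch; the expected digest is hard-coded here rather than parsed from the YAML):

    // Computes the SHA-256 of the source tarball and compares it with the
    // digest recorded in meta_data.yaml for v1.23.1.
    package main

    import (
        "crypto/sha256"
        "encoding/hex"
        "fmt"
        "io"
        "log"
        "os"
    )

    func main() {
        const expected = "9529d76a623f133fd609765b040382d593c97803220f5166451f0dbca5c807d5"

        f, err := os.Open("kubernetes-1.23.1.tar.gz")
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        h := sha256.New()
        if _, err := io.Copy(h, f); err != nil {
            log.Fatal(err)
        }
        got := hex.EncodeToString(h.Sum(nil))

        if got == expected {
            fmt.Println("sha256 OK")
        } else {
            fmt.Printf("sha256 mismatch: got %s, want %s\n", got, expected)
        }
    }
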