Merge remote-tracking branch 'gerrit/master' into f/centos76
Change-Id: Ie24450986ee993400c994f85d1f047b689052d4c
Signed-off-by: Saul Wold <sgw@linux.intel.com>
commit 1bcac9b7b6
.gitignore (vendored): 3 lines added
@@ -27,3 +27,6 @@ doc/build
 # Release Notes documentation
 releasenotes/build
 
+
+# docker registry token server vendor(dependencies)
+kubernetes/registry-token-server/src/vendor/
.zuul.yaml: 10 lines changed
@@ -7,6 +7,7 @@
         - build-openstack-releasenotes
         - openstack-tox-pep8
         - openstack-tox-linters
+        - stx-integ-pylint
         - stx-devstack-integ:
             voting: false
     gate:
@@ -14,6 +15,7 @@
         - build-openstack-releasenotes
         - openstack-tox-pep8
         - openstack-tox-linters
+        - stx-integ-pylint
     post:
       jobs:
         - publish-stx-tox
@@ -64,3 +66,11 @@
         LIBS_FROM_GIT: keystone
     files:
       - ^devstack/.*
+
+- job:
+    name: stx-integ-pylint
+    parent: openstack-tox-pylint
+    required-projects:
+      - openstack/stx-config
+      - openstack/stx-fault
+      - openstack/stx-update
@@ -1 +1 @@
-TIS_PATCH_VER=17
+TIS_PATCH_VER=18
@@ -2,3 +2,4 @@ spec-include-TiS-changes.patch
 stop-creating-shared-dirs.patch
 fix-build-failures-due-to-unwanted-sgid.patch
 0001-Update-package-versioning-for-TIS-format.patch
+ifup-alias-scope.patch
base/initscripts/centos/meta_patches/ifup-alias-scope.patch (new file): 34 lines

From ad85db9465da885a5f186db7f23655a3735a43c5 Mon Sep 17 00:00:00 2001
From: Teresa Ho <teresa.ho@windriver.com>
Date: Fri, 4 Jan 2019 10:49:27 -0500
Subject: [PATCH 1/1] Added ifup-alias-scope.patch

---
 SPECS/initscripts.spec | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/SPECS/initscripts.spec b/SPECS/initscripts.spec
index 6e9fc13..bff1e12 100644
--- a/SPECS/initscripts.spec
+++ b/SPECS/initscripts.spec
@@ -48,6 +48,7 @@ Patch9: sysconfig-unsafe-usage-of-linkdelay-variable.patch
 Patch10: ipv6-static-route-support.patch
 Patch11: ifup-eth-stop-waiting-if-link-is-up.patch
 Patch12: run-dhclient-as-daemon-for-ipv6.patch
+Patch13: ifup-alias-scope.patch
 
 %description
 The initscripts package contains basic system scripts used
@@ -80,7 +81,8 @@ Currently, this consists of various memory checking code.
 %patch10 -p1
 %patch11 -p1
 %patch12 -p1
-
+%patch13 -p1
+
 %build
 make
 
-- 
1.8.3.1
base/initscripts/centos/patches/ifup-alias-scope.patch (new file): 32 lines

From 59e30a344df4b661f30c0a5c629dbd13e9d88e8f Mon Sep 17 00:00:00 2001
From: Teresa Ho <teresa.ho@windriver.com>
Date: Mon, 17 Dec 2018 17:47:18 -0500
Subject: [PATCH 1/1] WRS: Patch13: ifup-alias-scope.patch

---
 sysconfig/network-scripts/ifup-aliases | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/sysconfig/network-scripts/ifup-aliases b/sysconfig/network-scripts/ifup-aliases
index 52d43ea..9086763 100755
--- a/sysconfig/network-scripts/ifup-aliases
+++ b/sysconfig/network-scripts/ifup-aliases
@@ -277,8 +277,14 @@ function new_interface ()
         fi
     fi
 
+    if [ "${parent_device}" = "lo" ]; then
+        SCOPE="scope host"
+    else
+        SCOPE=${SCOPE:-}
+    fi
+
     /sbin/ip addr add ${IPADDR}/${PREFIX} brd ${BROADCAST} \
-        dev ${parent_device} label ${DEVICE}
+        dev ${parent_device} ${SCOPE} label ${DEVICE}
 
     # update ARP cache of neighboring computers:
     if ! is_false "${ARPUPDATE}" && [ "${REALDEVICE}" != "lo" ]; then
-- 
1.8.3.1
@@ -4,8 +4,8 @@
 # SPDX-License-Identifier: Apache-2.0
 #
 
-import exception
-from i18n import _LI
+from ceph_manager import exception
+from ceph_manager.i18n import _LI
 # noinspection PyUnresolvedReferences
 from oslo_log import log as logging
 
@@ -4,7 +4,7 @@
 # SPDX-License-Identifier: Apache-2.0
 #
 
-from i18n import _
+from ceph_manager.i18n import _
 # noinspection PyUnresolvedReferences
 from sysinv.common import constants as sysinv_constants
 
@@ -5,7 +5,8 @@
 #
 
 # noinspection PyUnresolvedReferences
-from i18n import _, _LW
+from ceph_manager.i18n import _
+from ceph_manager.i18n import _LW
 # noinspection PyUnresolvedReferences
 from oslo_log import log as logging
 
@@ -14,10 +14,13 @@ from fm_api import constants as fm_constants
 from oslo_log import log as logging
 
 # noinspection PyProtectedMember
-from i18n import _, _LI, _LW, _LE
+from ceph_manager.i18n import _
+from ceph_manager.i18n import _LI
+from ceph_manager.i18n import _LW
+from ceph_manager.i18n import _LE
 
-import constants
-import exception
+from ceph_manager import constants
+from ceph_manager import exception
 
 LOG = logging.getLogger(__name__)
 
@@ -30,11 +30,12 @@ from oslo_service import loopingcall
 # noinspection PyUnresolvedReferences
 from cephclient import wrapper
 
-from monitor import Monitor
-import exception
-import constants
+from ceph_manager.monitor import Monitor
+from ceph_manager import exception
+from ceph_manager import constants
 
-from i18n import _LI, _LW
+from ceph_manager.i18n import _LI
+from ceph_manager.i18n import _LW
 from retrying import retry
 
 eventlet.monkey_patch(all=True)
@@ -1,11 +1,8 @@
+=======================
 stx-integ Documentation
 =======================
 
-StarlingX Integration and Packaging
+This is the documentation for StarlingX integration and packaging.
 
-.. toctree::
-   :maxdepth: 2
-   :caption: Contents:
-
 Release Notes
 -------------
@@ -16,7 +13,7 @@ Release Notes
 Release Notes <https://docs.starlingx.io/releasenotes/stx-integ>
 
 Links
-=====
+-----
 
 * Source: `stx-integ`_
 * Code Review: `Gerrit`_
@@ -25,9 +22,3 @@ Links
 .. _stx-integ: https://git.starlingx.io/cgit/stx-integ/
 .. _Gerrit: https://review.openstack.org/#/q/project:openstack/stx-integ
 .. _Storyboard: https://storyboard.openstack.org/#!/project/openstack/stx-integ
-
-Indices and Tables
-==================
-
-* :ref:`search`
-* :ref:`genindex`
@@ -1,4 +1,4 @@
-VERSION=2.11.0
+VERSION=2.12.1
 TAR_NAME=helm
 TAR="$TAR_NAME-v$VERSION-linux-amd64.tar.gz"
 COPY_LIST="${CGCS_BASE}/downloads/$TAR $FILES_BASE/*"
@@ -1,5 +1,5 @@
 Name: helm
-Version: 2.11.0
+Version: 2.12.1
 Release: 0%{?_tis_dist}.%{tis_patch_ver}
 Summary: The Kubernetes Package Manager
 License: Apache-2.0
kubernetes/registry-token-server/centos/build_srpm.data (new file): 4 lines

TAR_NAME="registry-token-server"
SRC_DIR="$PKG_BASE/src"
COPY_LIST="$FILES_BASE/*"
TIS_PATCH_VER=0
New file, 19 lines: systemd unit for the registry token server.

[Unit]
Description=v2 Registry token server for Docker

[Service]
Type=simple
EnvironmentFile=/etc/docker-distribution/registry/token_server.conf
ExecStart=/usr/bin/registry-token-server -addr=${REGISTRY_TOKEN_SERVER_ADDR} \
          -issuer=${REGISTRY_TOKEN_SERVER_ISSUER} \
          -endpoint=${REGISTRY_TOKEN_SERVER_KS_ENDPOINT} \
          -tlscert=${REGISTRY_TOKEN_SERVER_TLSCERT} \
          -tlskey=${REGISTRY_TOKEN_SERVER_TLSKEY} \
          -realm=${REGISTRY_TOKEN_SERVER_REALM} \
          -key=${REGISTRY_TOKEN_SERVER_KEY}
Restart=on-failure
ExecStartPost=/bin/bash -c 'echo $MAINPID > /var/run/registry-token-server.pid'
ExecStopPost=/bin/rm -f /var/run/registry-token-server.pid

[Install]
WantedBy=multi-user.target
New file, 19 lines: a bundled default certificate (PEM).
-----BEGIN CERTIFICATE-----
(base64 certificate body omitted)
-----END CERTIFICATE-----

New file, 27 lines: the matching default RSA private key (PEM).
-----BEGIN RSA PRIVATE KEY-----
(base64 key body omitted)
-----END RSA PRIVATE KEY-----
New file, 1 line: token_server.conf

# This is a puppet managed config file
New file, 61 lines: RPM spec for registry-token-server.

%if ! 0%{?gobuild:1}
%define gobuild(o:) go build -ldflags "${LDFLAGS:-} -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \\n')" -a -v -x %{?**};
%endif

Name: registry-token-server
Version: 1.0.0
Release: 1%{?_tis_dist}.%{tis_patch_ver}
Summary: Token server for use with Docker registry with Openstack Keystone back end
License: ASL 2.0
Source0: registry-token-server-%{version}.tar.gz
Source1: %{name}.service
Source2: token_server.conf

BuildRequires: systemd
Requires(post): systemd
Requires(preun): systemd
Requires(postun): systemd

BuildRequires: golang >= 1.6
BuildRequires: golang-dep
ExclusiveArch: %{?go_arches:%{go_arches}}%{!?go_arches:%{ix86} x86_64 %{arm}}

%description
%{summary}

%prep
%setup -q -n registry-token-server-%{version}

%build
mkdir -p ./_build/src/
ln -s $(pwd) ./_build/src/registry-token-server
export GOPATH=$(pwd)/_build:%{gopath}

cd ./_build/src/registry-token-server
dep ensure
%gobuild -o bin/registry-token-server registry-token-server

%install
install -d -p %{buildroot}%{_bindir}
install -p -m 0755 bin/registry-token-server %{buildroot}%{_bindir}

# install systemd/init scripts
install -d %{buildroot}%{_unitdir}
install -p -m 644 %{SOURCE1} %{buildroot}%{_unitdir}

# install directory to install default certificate
install -d -p %{buildroot}%{_sysconfdir}/ssl/private

# install environment variables file for service file
install -d -p %{buildroot}%{_sysconfdir}/%{name}/registry
install -p -m 644 %{SOURCE2} %{buildroot}%{_sysconfdir}/%{name}/registry

#define license tag if not already defined
%{!?_licensedir:%global license %doc}

%files
%doc LICENSE

%{_bindir}/registry-token-server
%{_unitdir}/%{name}.service
%{_sysconfdir}/%{name}/registry/token_server.conf
kubernetes/registry-token-server/src/Gopkg.lock (generated, new file): 85 lines

Autogenerated dep lock file ("This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'."). It pins:
  github.com/Sirupsen/logrus          revision 55eb11d21d2a31a3cc93838241d04800f52e823d
  github.com/docker/distribution      v2.6.2 (context, registry/api/errcode, registry/auth, registry/auth/token, uuid)
  github.com/docker/libtrust          revision fa567046d9b14f6aa788882a950d69651d230b21
  github.com/gophercloud/gophercloud  revision aa00757ee3ab58e53520b6cb910ca0543116400a (openstack, identity v2 tenants/tokens, identity v3 tokens, utils, pagination)
  github.com/gorilla/context          v1.1.1
  github.com/gorilla/mux              revision e444e69cbd2e2e3e0749a2f3c717cec491552bbf
  golang.org/x/net (branch master)    revision 03003ca0c849e57b6ea29a4bab8d3cb6e4d568fe (context)
Solver: gps-cdcl version 1; analyzer: dep version 1.
kubernetes/registry-token-server/src/Gopkg.toml (new file): 50 lines

# Gopkg.toml example
#
# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
#   name = "github.com/user/project"
#   version = "1.0.0"
#
# [[constraint]]
#   name = "github.com/user/project2"
#   branch = "dev"
#   source = "github.com/myfork/project2"
#
# [[override]]
#   name = "github.com/x/y"
#   version = "2.4.0"
#
# [prune]
#   non-go = false
#   go-tests = true
#   unused-packages = true


[[constraint]]
  name = "github.com/Sirupsen/logrus"
  revision = "55eb11d21d2a31a3cc93838241d04800f52e823d"

[[constraint]]
  name = "github.com/docker/distribution"
  version = "2.6.2"

[prune]
  go-tests = true
  unused-packages = true

[[constraint]]
  name = "github.com/docker/libtrust"
  revision = "fa567046d9b14f6aa788882a950d69651d230b21"

[[constraint]]
  name = "github.com/gorilla/mux"
  revision = "e444e69cbd2e2e3e0749a2f3c717cec491552bbf"

[[constraint]]
  name = "github.com/gophercloud/gophercloud"
  revision = "aa00757ee3ab58e53520b6cb910ca0543116400a"
kubernetes/registry-token-server/src/LICENSE (new file): 202 lines

The standard Apache License, Version 2.0, January 2004 text (http://www.apache.org/licenses/), including the appendix describing how to apply the license to your work.
kubernetes/registry-token-server/src/errors.go (new file): 43 lines

// Initial file was taken from https://github.com/docker/distribution 2018 Sept
//
// SPDX-License-Identifier: Apache-2.0
//

package main

import (
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
)

var (
	errGroup = "tokenserver"

	// ErrorBadTokenOption is returned when a token parameter is invalid
	ErrorBadTokenOption = errcode.Register(errGroup, errcode.ErrorDescriptor{
		Value:   "BAD_TOKEN_OPTION",
		Message: "bad token option",
		Description: `This error may be returned when a request for a
		token contains an option which is not valid`,
		HTTPStatusCode: http.StatusBadRequest,
	})

	// ErrorMissingRequiredField is returned when a required form field is missing
	ErrorMissingRequiredField = errcode.Register(errGroup, errcode.ErrorDescriptor{
		Value:   "MISSING_REQUIRED_FIELD",
		Message: "missing required field",
		Description: `This error may be returned when a request for a
		token does not contain a required form field`,
		HTTPStatusCode: http.StatusBadRequest,
	})

	// ErrorUnsupportedValue is returned when a form field has an unsupported value
	ErrorUnsupportedValue = errcode.Register(errGroup, errcode.ErrorDescriptor{
		Value:   "UNSUPPORTED_VALUE",
		Message: "unsupported value",
		Description: `This error may be returned when a request for a
		token contains a form field with an unsupported value`,
		HTTPStatusCode: http.StatusBadRequest,
	})
)
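These registered codes are served to clients as JSON through docker/distribution's errcode package; main.go's handleError does this with errcode.ServeJSON. The fragment below is a minimal, hypothetical sketch of that pattern (parseOffline and its wiring are not part of this commit); it would sit alongside errors.go in package main.

// Hypothetical helper (not in this commit) showing how a registered error
// code such as ErrorBadTokenOption typically reaches the client: the error
// is wrapped with detail and written as a JSON error body.
package main

import (
	"net/http"
	"strconv"

	"github.com/docker/distribution/registry/api/errcode"
)

// parseOffline reads the optional offline_token query parameter; on a bad
// value it serves BAD_TOKEN_OPTION and reports failure to the caller.
func parseOffline(w http.ResponseWriter, r *http.Request) (bool, bool) {
	offlineStr := r.URL.Query().Get("offline_token")
	if offlineStr == "" {
		return false, true
	}
	offline, err := strconv.ParseBool(offlineStr)
	if err != nil {
		// ServeJSON picks the HTTP status from the descriptor
		// (http.StatusBadRequest for BAD_TOKEN_OPTION).
		_ = errcode.ServeJSON(w, ErrorBadTokenOption.WithDetail(err))
		return false, false
	}
	return offline, true
}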
kubernetes/registry-token-server/src/keystone/access.go (new file): 96 lines

// Initial file was taken from https://github.com/docker/distribution 2018 Sept
//
// Copyright (c) 2018 Wind River Systems, Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
// Package keystone provides a simple authentication scheme that checks for the
// user credential against keystone with configuration-determined endpoint
//
// This authentication method MUST be used under TLS, as simple token-replay attack is possible.
package keystone

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/auth"
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack"
)

type accessController struct {
	realm    string
	endpoint string
}

var _ auth.AccessController = &accessController{}

func newAccessController(options map[string]interface{}) (auth.AccessController, error) {
	realm, present := options["realm"]
	if _, ok := realm.(string); !present || !ok {
		return nil, fmt.Errorf(`"realm" must be set for keystone access controller`)
	}

	endpoint, present := options["endpoint"]
	if _, ok := endpoint.(string); !present || !ok {
		return nil, fmt.Errorf(`"endpoint" must be set for keystone access controller`)
	}

	return &accessController{realm: realm.(string), endpoint: endpoint.(string)}, nil
}

func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) {
	req, err := context.GetRequest(ctx)
	if err != nil {
		return nil, err
	}

	username, password, ok := req.BasicAuth()
	if !ok {
		return nil, &challenge{
			realm: ac.realm,
			err:   auth.ErrInvalidCredential,
		}
	}

	opts := gophercloud.AuthOptions{
		IdentityEndpoint: ac.endpoint,
		Username:         username,
		Password:         password,
		DomainID:         "default",
	}

	if _, err := openstack.AuthenticatedClient(opts); err != nil {
		context.GetLogger(ctx).Errorf("error authenticating user %q: %v", username, err)
		return nil, &challenge{
			realm: ac.realm,
			err:   auth.ErrAuthenticationFailure,
		}
	}

	return auth.WithUser(ctx, auth.UserInfo{Name: username}), nil
}

// challenge implements the auth.Challenge interface.
type challenge struct {
	realm string
	err   error
}

var _ auth.Challenge = challenge{}

// SetHeaders sets the basic challenge header on the response.
func (ch challenge) SetHeaders(w http.ResponseWriter) {
	w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", ch.realm))
}

func (ch challenge) Error() string {
	return fmt.Sprintf("basic authentication challenge for realm %q: %s", ch.realm, ch.err)
}

func init() {
	auth.Register("keystone", auth.InitFunc(newAccessController))
}
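For reference, the sketch below (not part of this commit) shows how the controller registered by init() above is obtained and exercised; main.go wires it up the same way through auth.GetAccessController. The realm, Keystone endpoint, and listen address are placeholders, and the import path assumes the GOPATH layout the spec file sets up.

// Minimal sketch of driving the "keystone" access controller directly.
// A failed Basic-Auth check surfaces as an auth.Challenge, whose
// SetHeaders adds the WWW-Authenticate: Basic realm="..." header.
package main

import (
	"log"
	"net/http"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/auth"
	_ "registry-token-server/keystone" // init() registers the "keystone" controller
)

func main() {
	ac, err := auth.GetAccessController("keystone", map[string]interface{}{
		"realm":    "registry-token.example",   // placeholder
		"endpoint": "http://127.0.0.1:5000/v3", // placeholder Keystone URL
	})
	if err != nil {
		log.Fatalf("initializing access controller: %v", err)
	}

	http.HandleFunc("/check", func(w http.ResponseWriter, r *http.Request) {
		// The controller reads the Basic-Auth credentials from the request
		// carried in the context and validates them against Keystone.
		ctx := context.WithRequest(context.Background(), r)
		if _, err := ac.Authorized(ctx); err != nil {
			if ch, ok := err.(auth.Challenge); ok {
				ch.SetHeaders(w)
			}
			http.Error(w, "unauthorized", http.StatusUnauthorized)
			return
		}
		w.WriteHeader(http.StatusNoContent)
	})
	log.Fatal(http.ListenAndServe("localhost:9999", nil)) // placeholder address
}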
435
kubernetes/registry-token-server/src/main.go
Normal file
435
kubernetes/registry-token-server/src/main.go
Normal file
@ -0,0 +1,435 @@
|
|||||||
|
// Initial file was taken from https://github.com/docker/distribution 2018 Sept
|
||||||
|
//
|
||||||
|
// Copyright (c) 2018 Wind River Systems, Inc.
|
||||||
|
//
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
//
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"flag"
|
||||||
|
"math/rand"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/Sirupsen/logrus"
|
||||||
|
"github.com/docker/distribution/context"
|
||||||
|
"github.com/docker/distribution/registry/api/errcode"
|
||||||
|
"github.com/docker/distribution/registry/auth"
|
||||||
|
_ "registry-token-server/keystone"
|
||||||
|
"github.com/docker/libtrust"
|
||||||
|
"github.com/gorilla/mux"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
enforceRepoClass bool
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
var (
|
||||||
|
issuer = &TokenIssuer{}
|
||||||
|
pkFile string
|
||||||
|
addr string
|
||||||
|
debug bool
|
||||||
|
err error
|
||||||
|
|
||||||
|
keystoneEndpoint string
|
||||||
|
realm string
|
||||||
|
|
||||||
|
cert string
|
||||||
|
certKey string
|
||||||
|
)
|
||||||
|
|
||||||
|
flag.StringVar(&issuer.Issuer, "issuer", "distribution-token-server", "Issuer string for token")
|
||||||
|
flag.StringVar(&pkFile, "key", "", "Private key file")
|
||||||
|
flag.StringVar(&addr, "addr", "localhost:8080", "Address to listen on")
|
||||||
|
flag.BoolVar(&debug, "debug", false, "Debug mode")
|
||||||
|
|
||||||
|
flag.StringVar(&keystoneEndpoint, "endpoint", "", "Passwd file")
|
||||||
|
flag.StringVar(&realm, "realm", "", "Authentication realm")
|
||||||
|
|
||||||
|
flag.StringVar(&cert, "tlscert", "", "Certificate file for TLS")
|
||||||
|
flag.StringVar(&certKey, "tlskey", "", "Certificate key for TLS")
|
||||||
|
|
||||||
|
flag.BoolVar(&enforceRepoClass, "enforce-class", false, "Enforce policy for single repository class")
|
||||||
|
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
if debug {
|
||||||
|
logrus.SetLevel(logrus.DebugLevel)
|
||||||
|
}
|
||||||
|
|
||||||
|
if pkFile == "" {
|
||||||
|
issuer.SigningKey, err = libtrust.GenerateECP256PrivateKey()
|
||||||
|
if err != nil {
|
||||||
|
logrus.Fatalf("Error generating private key: %v", err)
|
||||||
|
}
|
||||||
|
logrus.Debugf("Using newly generated key with id %s", issuer.SigningKey.KeyID())
|
||||||
|
} else {
|
||||||
|
issuer.SigningKey, err = libtrust.LoadKeyFile(pkFile)
|
||||||
|
if err != nil {
|
||||||
|
logrus.Fatalf("Error loading key file %s: %v", pkFile, err)
|
||||||
|
}
|
||||||
|
logrus.Debugf("Loaded private key with id %s", issuer.SigningKey.KeyID())
|
||||||
|
}
|
||||||
|
|
||||||
|
if realm == "" {
|
||||||
|
logrus.Fatalf("Must provide realm")
|
||||||
|
}
|
||||||
|
|
||||||
|
ac, err := auth.GetAccessController("keystone", map[string]interface{}{
|
||||||
|
"realm": realm,
|
||||||
|
"endpoint": keystoneEndpoint,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
logrus.Fatalf("Error initializing access controller: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: Make configurable
|
||||||
|
issuer.Expiration = 15 * time.Minute
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
ts := &tokenServer{
|
||||||
|
issuer: issuer,
|
||||||
|
accessController: ac,
|
||||||
|
refreshCache: map[string]refreshToken{},
|
||||||
|
}
|
||||||
|
|
||||||
|
router := mux.NewRouter()
|
||||||
|
router.Path("/token/").Methods("GET").Handler(handlerWithContext(ctx, ts.getToken))
|
||||||
|
router.Path("/token/").Methods("POST").Handler(handlerWithContext(ctx, ts.postToken))
|
||||||
|
|
||||||
|
if cert == "" {
|
||||||
|
err = http.ListenAndServe(addr, router)
|
||||||
|
} else if certKey == "" {
|
||||||
|
logrus.Fatalf("Must provide certficate (-tlscert) and key (-tlskey)")
|
||||||
|
} else {
|
||||||
|
err = http.ListenAndServeTLS(addr, cert, certKey, router)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
logrus.Infof("Error serving: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// handlerWithContext wraps the given context-aware handler by setting up the
|
||||||
|
// request context from a base context.
|
||||||
|
func handlerWithContext(ctx context.Context, handler func(context.Context, http.ResponseWriter, *http.Request)) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx := context.WithRequest(ctx, r)
|
||||||
|
logger := context.GetRequestLogger(ctx)
|
||||||
|
ctx = context.WithLogger(ctx, logger)
|
||||||
|
|
||||||
|
handler(ctx, w, r)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleError(ctx context.Context, err error, w http.ResponseWriter) {
|
||||||
|
ctx, w = context.WithResponseWriter(ctx, w)
|
||||||
|
|
||||||
|
if serveErr := errcode.ServeJSON(w, err); serveErr != nil {
|
||||||
|
context.GetResponseLogger(ctx).Errorf("error sending error response: %v", serveErr)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
context.GetResponseLogger(ctx).Info("application error")
|
||||||
|
}
|
||||||
|
|
||||||
|
var refreshCharacters = []rune("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
|
||||||
|
|
||||||
|
const refreshTokenLength = 15
|
||||||
|
|
||||||
|
func newRefreshToken() string {
|
||||||
|
s := make([]rune, refreshTokenLength)
|
||||||
|
for i := range s {
|
||||||
|
s[i] = refreshCharacters[rand.Intn(len(refreshCharacters))]
|
||||||
|
}
|
||||||
|
return string(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
type refreshToken struct {
|
||||||
|
subject string
|
||||||
|
service string
|
||||||
|
}
|
||||||
|
|
||||||
|
type tokenServer struct {
|
||||||
|
issuer *TokenIssuer
|
||||||
|
accessController auth.AccessController
|
||||||
|
refreshCache map[string]refreshToken
|
||||||
|
}
|
||||||
|
|
||||||
|
type tokenResponse struct {
|
||||||
|
Token string `json:"access_token"`
|
||||||
|
RefreshToken string `json:"refresh_token,omitempty"`
|
||||||
|
ExpiresIn int `json:"expires_in,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
var repositoryClassCache = map[string]string{}
|
||||||
|
|
||||||
|
func filterAccessList(ctx context.Context, scope string, requestedAccessList []auth.Access) []auth.Access {
|
||||||
|
if !strings.HasSuffix(scope, "/") {
|
||||||
|
scope = scope + "/"
|
||||||
|
}
|
||||||
|
grantedAccessList := make([]auth.Access, 0, len(requestedAccessList))
|
||||||
|
for _, access := range requestedAccessList {
|
||||||
|
if access.Type == "repository" {
|
||||||
|
// filter access to repos if the user is not "admin"
|
||||||
|
// need to have a "/" at the end because it adds one at the beginning of the fcn
|
||||||
|
// probably to prevent people making accounts like "adminnot" to steal admin powers
|
||||||
|
if !strings.HasPrefix(access.Name, scope) && scope != "admin/" {
|
||||||
|
context.GetLogger(ctx).Debugf("Resource scope not allowed: %s", access.Name)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if enforceRepoClass {
|
||||||
|
if class, ok := repositoryClassCache[access.Name]; ok {
|
||||||
|
if class != access.Class {
|
||||||
|
context.GetLogger(ctx).Debugf("Different repository class: %q, previously %q", access.Class, class)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else if strings.EqualFold(access.Action, "push") {
|
||||||
|
repositoryClassCache[access.Name] = access.Class
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if access.Type == "registry" {
|
||||||
|
if access.Name != "catalog" {
|
||||||
|
context.GetLogger(ctx).Debugf("Unknown registry resource: %s", access.Name)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// TODO: Limit some actions to "admin" users
|
||||||
|
} else {
|
||||||
|
context.GetLogger(ctx).Debugf("Skipping unsupported resource type: %s", access.Type)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
grantedAccessList = append(grantedAccessList, access)
|
||||||
|
}
|
||||||
|
return grantedAccessList
|
||||||
|
}
|
||||||
|
|
||||||
|
type acctSubject struct{}
|
||||||
|
|
||||||
|
func (acctSubject) String() string { return "acctSubject" }
|
||||||
|
|
||||||
|
type requestedAccess struct{}
|
||||||
|
|
||||||
|
func (requestedAccess) String() string { return "requestedAccess" }
|
||||||
|
|
||||||
|
type grantedAccess struct{}
|
||||||
|
|
||||||
|
func (grantedAccess) String() string { return "grantedAccess" }
|
||||||
|
|
||||||
|
// getToken handles authenticating the request and authorizing access to the
|
||||||
|
// requested scopes.
|
||||||
|
func (ts *tokenServer) getToken(ctx context.Context, w http.ResponseWriter, r *http.Request) {
|
||||||
|
context.GetLogger(ctx).Info("getToken")
|
||||||
|
|
||||||
|
params := r.URL.Query()
|
||||||
|
service := params.Get("service")
|
||||||
|
scopeSpecifiers := params["scope"]
|
||||||
|
var offline bool
|
||||||
|
if offlineStr := params.Get("offline_token"); offlineStr != "" {
|
||||||
|
var err error
|
||||||
|
offline, err = strconv.ParseBool(offlineStr)
|
||||||
|
if err != nil {
|
||||||
|
handleError(ctx, ErrorBadTokenOption.WithDetail(err), w)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
requestedAccessList := ResolveScopeSpecifiers(ctx, scopeSpecifiers)
|
||||||
|
|
||||||
|
authorizedCtx, err := ts.accessController.Authorized(ctx, requestedAccessList...)
|
||||||
|
if err != nil {
|
||||||
|
challenge, ok := err.(auth.Challenge)
|
||||||
|
if !ok {
|
||||||
|
handleError(ctx, err, w)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get response context.
|
||||||
|
ctx, w = context.WithResponseWriter(ctx, w)
|
||||||
|
|
||||||
|
challenge.SetHeaders(w)
|
||||||
|
handleError(ctx, errcode.ErrorCodeUnauthorized.WithDetail(challenge.Error()), w)
|
||||||
|
|
||||||
|
context.GetResponseLogger(ctx).Info("get token authentication challenge")
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ctx = authorizedCtx
|
||||||
|
|
||||||
|
username := context.GetStringValue(ctx, "auth.user.name")
|
||||||
|
|
||||||
|
ctx = context.WithValue(ctx, acctSubject{}, username)
|
||||||
|
ctx = context.WithLogger(ctx, context.GetLogger(ctx, acctSubject{}))
|
||||||
|
|
||||||
|
context.GetLogger(ctx).Info("authenticated client")
|
||||||
|
|
||||||
|
ctx = context.WithValue(ctx, requestedAccess{}, requestedAccessList)
|
||||||
|
ctx = context.WithLogger(ctx, context.GetLogger(ctx, requestedAccess{}))
|
||||||
|
|
||||||
|
grantedAccessList := filterAccessList(ctx, username, requestedAccessList)
|
||||||
|
ctx = context.WithValue(ctx, grantedAccess{}, grantedAccessList)
|
||||||
|
ctx = context.WithLogger(ctx, context.GetLogger(ctx, grantedAccess{}))
|
||||||
|
|
||||||
|
token, err := ts.issuer.CreateJWT(username, service, grantedAccessList)
|
||||||
|
if err != nil {
|
||||||
|
handleError(ctx, err, w)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
context.GetLogger(ctx).Info("authorized client")
|
||||||
|
|
||||||
|
response := tokenResponse{
|
||||||
|
Token: token,
|
||||||
|
ExpiresIn: int(ts.issuer.Expiration.Seconds()),
|
||||||
|
}
|
||||||
|
|
||||||
|
if offline {
|
||||||
|
response.RefreshToken = newRefreshToken()
|
||||||
|
ts.refreshCache[response.RefreshToken] = refreshToken{
|
||||||
|
subject: username,
|
||||||
|
service: service,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, w = context.WithResponseWriter(ctx, w)
|
||||||
|
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
json.NewEncoder(w).Encode(response)
|
||||||
|
|
||||||
|
context.GetResponseLogger(ctx).Info("get token complete")
|
||||||
|
}
|
||||||
|
|
||||||
|
type postTokenResponse struct {
|
||||||
|
Token string `json:"access_token"`
|
||||||
|
Scope string `json:"scope,omitempty"`
|
||||||
|
ExpiresIn int `json:"expires_in,omitempty"`
|
||||||
|
IssuedAt string `json:"issued_at,omitempty"`
|
||||||
|
RefreshToken string `json:"refresh_token,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// postToken handles authenticating the request and authorizing access to the
|
||||||
|
// requested scopes.
|
||||||
|
func (ts *tokenServer) postToken(ctx context.Context, w http.ResponseWriter, r *http.Request) {
|
||||||
|
grantType := r.PostFormValue("grant_type")
|
||||||
|
if grantType == "" {
|
||||||
|
handleError(ctx, ErrorMissingRequiredField.WithDetail("missing grant_type value"), w)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
service := r.PostFormValue("service")
|
||||||
|
if service == "" {
|
||||||
|
handleError(ctx, ErrorMissingRequiredField.WithDetail("missing service value"), w)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
clientID := r.PostFormValue("client_id")
|
||||||
|
if clientID == "" {
|
||||||
|
        handleError(ctx, ErrorMissingRequiredField.WithDetail("missing client_id value"), w)
        return
    }

    var offline bool
    switch r.PostFormValue("access_type") {
    case "", "online":
    case "offline":
        offline = true
    default:
        handleError(ctx, ErrorUnsupportedValue.WithDetail("unknown access_type value"), w)
        return
    }

    requestedAccessList := ResolveScopeList(ctx, r.PostFormValue("scope"))

    var subject string
    var rToken string
    switch grantType {
    case "refresh_token":
        rToken = r.PostFormValue("refresh_token")
        if rToken == "" {
            handleError(ctx, ErrorUnsupportedValue.WithDetail("missing refresh_token value"), w)
            return
        }
        rt, ok := ts.refreshCache[rToken]
        if !ok || rt.service != service {
            handleError(ctx, errcode.ErrorCodeUnauthorized.WithDetail("invalid refresh token"), w)
            return
        }
        subject = rt.subject
    case "password":
        ca, ok := ts.accessController.(auth.CredentialAuthenticator)
        if !ok {
            handleError(ctx, ErrorUnsupportedValue.WithDetail("password grant type not supported"), w)
            return
        }
        subject = r.PostFormValue("username")
        if subject == "" {
            handleError(ctx, ErrorUnsupportedValue.WithDetail("missing username value"), w)
            return
        }
        password := r.PostFormValue("password")
        if password == "" {
            handleError(ctx, ErrorUnsupportedValue.WithDetail("missing password value"), w)
            return
        }
        if err := ca.AuthenticateUser(subject, password); err != nil {
            handleError(ctx, errcode.ErrorCodeUnauthorized.WithDetail("invalid credentials"), w)
            return
        }
    default:
        handleError(ctx, ErrorUnsupportedValue.WithDetail("unknown grant_type value"), w)
        return
    }

    ctx = context.WithValue(ctx, acctSubject{}, subject)
    ctx = context.WithLogger(ctx, context.GetLogger(ctx, acctSubject{}))

    context.GetLogger(ctx).Info("authenticated client")

    ctx = context.WithValue(ctx, requestedAccess{}, requestedAccessList)
    ctx = context.WithLogger(ctx, context.GetLogger(ctx, requestedAccess{}))

    grantedAccessList := filterAccessList(ctx, subject, requestedAccessList)
    ctx = context.WithValue(ctx, grantedAccess{}, grantedAccessList)
    ctx = context.WithLogger(ctx, context.GetLogger(ctx, grantedAccess{}))

    token, err := ts.issuer.CreateJWT(subject, service, grantedAccessList)
    if err != nil {
        handleError(ctx, err, w)
        return
    }

    context.GetLogger(ctx).Info("authorized client")

    response := postTokenResponse{
        Token:     token,
        ExpiresIn: int(ts.issuer.Expiration.Seconds()),
        IssuedAt:  time.Now().UTC().Format(time.RFC3339),
        Scope:     ToScopeList(grantedAccessList),
    }

    if offline {
        rToken = newRefreshToken()
        ts.refreshCache[rToken] = refreshToken{
            subject: subject,
            service: service,
        }
    }

    if rToken != "" {
        response.RefreshToken = rToken
    }

    ctx, w = context.WithResponseWriter(ctx, w)

    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(response)

    context.GetResponseLogger(ctx).Info("post token complete")
}
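For orientation, a hedged Python sketch of a client driving this handler. The endpoint URL, service name, credentials and scope are made up, and the JSON field names are assumed to follow the usual distribution token response ("token", "refresh_token", ...); this is not part of the token server.

# Illustrative client for the password and refresh_token grant types above.
import requests

TOKEN_URL = "https://registry.local:9000/token"   # assumed endpoint

form = {
    "grant_type": "password",
    "service": "registry.local",                  # must match the issued audience
    "client_id": "docker",
    "username": "admin",
    "password": "secret",
    "scope": "repository:mc/nginx:pull,push",
    "access_type": "offline",                     # ask the server for a refresh token
}
resp = requests.post(TOKEN_URL, data=form).json()
print(resp.get("token"), resp.get("refresh_token"))

# Later, trade the cached refresh token for a new bearer token.
renew = {
    "grant_type": "refresh_token",
    "service": "registry.local",
    "client_id": "docker",
    "refresh_token": resp.get("refresh_token", ""),
}
print(requests.post(TOKEN_URL, data=renew).json().get("token"))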
224 kubernetes/registry-token-server/src/token.go Normal file
@ -0,0 +1,224 @@
// Initial file was taken from https://github.com/docker/distribution 2018 Sept
//
// SPDX-License-Identifier: Apache-2.0
//

package main

import (
    "crypto"
    "crypto/rand"
    "encoding/base64"
    "encoding/json"
    "fmt"
    "io"
    "regexp"
    "strings"
    "time"

    "github.com/docker/distribution/context"
    "github.com/docker/distribution/registry/auth"
    "github.com/docker/distribution/registry/auth/token"
    "github.com/docker/libtrust"
)

// ResolveScopeSpecifiers converts a list of scope specifiers from a token
// request's `scope` query parameters into a list of standard access objects.
func ResolveScopeSpecifiers(ctx context.Context, scopeSpecs []string) []auth.Access {
    requestedAccessSet := make(map[auth.Access]struct{}, 2*len(scopeSpecs))

    for _, scopeSpecifier := range scopeSpecs {
        // There should be 3 parts, separated by a `:` character.
        parts := strings.SplitN(scopeSpecifier, ":", 3)

        if len(parts) != 3 {
            context.GetLogger(ctx).Infof("ignoring unsupported scope format %s", scopeSpecifier)
            continue
        }

        resourceType, resourceName, actions := parts[0], parts[1], parts[2]

        resourceType, resourceClass := splitResourceClass(resourceType)
        if resourceType == "" {
            continue
        }

        // Actions should be a comma-separated list of actions.
        for _, action := range strings.Split(actions, ",") {
            requestedAccess := auth.Access{
                Resource: auth.Resource{
                    Type:  resourceType,
                    Class: resourceClass,
                    Name:  resourceName,
                },
                Action: action,
            }

            // Add this access to the requested access set.
            requestedAccessSet[requestedAccess] = struct{}{}
        }
    }

    requestedAccessList := make([]auth.Access, 0, len(requestedAccessSet))
    for requestedAccess := range requestedAccessSet {
        requestedAccessList = append(requestedAccessList, requestedAccess)
    }

    return requestedAccessList
}

var typeRegexp = regexp.MustCompile(`^([a-z0-9]+)(\([a-z0-9]+\))?$`)

func splitResourceClass(t string) (string, string) {
    matches := typeRegexp.FindStringSubmatch(t)
    if len(matches) < 2 {
        return "", ""
    }
    if len(matches) == 2 || len(matches[2]) < 2 {
        return matches[1], ""
    }
    return matches[1], matches[2][1 : len(matches[2])-1]
}

// ResolveScopeList converts a scope list from a token request's
// `scope` parameter into a list of standard access objects.
func ResolveScopeList(ctx context.Context, scopeList string) []auth.Access {
    scopes := strings.Split(scopeList, " ")
    return ResolveScopeSpecifiers(ctx, scopes)
}

func scopeString(a auth.Access) string {
    if a.Class != "" {
        return fmt.Sprintf("%s(%s):%s:%s", a.Type, a.Class, a.Name, a.Action)
    }
    return fmt.Sprintf("%s:%s:%s", a.Type, a.Name, a.Action)
}

// ToScopeList converts a list of access to a
// scope list string
func ToScopeList(access []auth.Access) string {
    var s []string
    for _, a := range access {
        s = append(s, scopeString(a))
    }
    return strings.Join(s, ",")
}

// TokenIssuer represents an issuer capable of generating JWT tokens
type TokenIssuer struct {
    Issuer     string
    SigningKey libtrust.PrivateKey
    Expiration time.Duration
}

// CreateJWT creates and signs a JSON Web Token for the given subject and
// audience with the granted access.
func (issuer *TokenIssuer) CreateJWT(subject string, audience string, grantedAccessList []auth.Access) (string, error) {
    // Make a set of access entries to put in the token's claimset.
    resourceActionSets := make(map[auth.Resource]map[string]struct{}, len(grantedAccessList))
    for _, access := range grantedAccessList {
        actionSet, exists := resourceActionSets[access.Resource]
        if !exists {
            actionSet = map[string]struct{}{}
            resourceActionSets[access.Resource] = actionSet
        }
        actionSet[access.Action] = struct{}{}
    }

    accessEntries := make([]*token.ResourceActions, 0, len(resourceActionSets))
    for resource, actionSet := range resourceActionSets {
        actions := make([]string, 0, len(actionSet))
        for action := range actionSet {
            actions = append(actions, action)
        }

        accessEntries = append(accessEntries, &token.ResourceActions{
            Type:    resource.Type,
            Class:   resource.Class,
            Name:    resource.Name,
            Actions: actions,
        })
    }

    randomBytes := make([]byte, 15)
    _, err := io.ReadFull(rand.Reader, randomBytes)
    if err != nil {
        return "", err
    }
    randomID := base64.URLEncoding.EncodeToString(randomBytes)

    now := time.Now()

    signingHash := crypto.SHA256
    var alg string
    switch issuer.SigningKey.KeyType() {
    case "RSA":
        alg = "RS256"
    case "EC":
        alg = "ES256"
    default:
        panic(fmt.Errorf("unsupported signing key type %q", issuer.SigningKey.KeyType()))
    }

    joseHeader := token.Header{
        Type:       "JWT",
        SigningAlg: alg,
    }

    if x5c := issuer.SigningKey.GetExtendedField("x5c"); x5c != nil {
        joseHeader.X5c = x5c.([]string)
    } else {
        var jwkMessage json.RawMessage
        jwkMessage, err = issuer.SigningKey.PublicKey().MarshalJSON()
        if err != nil {
            return "", err
        }
        joseHeader.RawJWK = &jwkMessage
    }

    exp := issuer.Expiration
    if exp == 0 {
        exp = 5 * time.Minute
    }

    claimSet := token.ClaimSet{
        Issuer:     issuer.Issuer,
        Subject:    subject,
        Audience:   audience,
        Expiration: now.Add(exp).Unix(),
        NotBefore:  now.Unix(),
        IssuedAt:   now.Unix(),
        JWTID:      randomID,

        Access: accessEntries,
    }

    var (
        joseHeaderBytes []byte
        claimSetBytes   []byte
    )

    if joseHeaderBytes, err = json.Marshal(joseHeader); err != nil {
        return "", fmt.Errorf("unable to encode jose header: %s", err)
    }
    if claimSetBytes, err = json.Marshal(claimSet); err != nil {
        return "", fmt.Errorf("unable to encode claim set: %s", err)
    }

    encodedJoseHeader := joseBase64Encode(joseHeaderBytes)
    encodedClaimSet := joseBase64Encode(claimSetBytes)
    encodingToSign := fmt.Sprintf("%s.%s", encodedJoseHeader, encodedClaimSet)

    var signatureBytes []byte
    if signatureBytes, _, err = issuer.SigningKey.Sign(strings.NewReader(encodingToSign), signingHash); err != nil {
        return "", fmt.Errorf("unable to sign jwt payload: %s", err)
    }

    signature := joseBase64Encode(signatureBytes)

    return fmt.Sprintf("%s.%s", encodingToSign, signature), nil
}

func joseBase64Encode(data []byte) string {
    return strings.TrimRight(base64.URLEncoding.EncodeToString(data), "=")
}
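As an illustration of the scope grammar handled above, a small Python sketch that mirrors ResolveScopeSpecifiers and splitResourceClass; it is a reading aid only and not part of the token server.

# Illustrative Python equivalent of the Go scope parsing above. A specifier is
# "type[(class)]:name:action[,action...]"; malformed entries are skipped.
import re

TYPE_RE = re.compile(r'^([a-z0-9]+)(\([a-z0-9]+\))?$')

def resolve_scope_specifiers(scope_specs):
    access = set()
    for spec in scope_specs:
        parts = spec.split(':', 2)
        if len(parts) != 3:
            continue                      # unsupported format, ignore it
        rtype, name, actions = parts
        m = TYPE_RE.match(rtype)
        if not m:
            continue
        rtype, rclass = m.group(1), (m.group(2) or '')[1:-1]
        for action in actions.split(','):
            access.add((rtype, rclass, name, action))
    return sorted(access)

print(resolve_scope_specifiers(["repository:samalba/my-app:pull,push",
                                "repository(plugin):foo/bar:pull"]))
# first entry: ('repository', '', 'samalba/my-app', 'pull')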
@ -8,6 +8,7 @@ SPDX-License-Identifier: Apache-2.0
 ###################
 # IMPORTS
 ###################
+from __future__ import absolute_import
 import logging
 import logging.handlers
 import time
@ -19,7 +20,7 @@ import sys

 from daemon import runner

-import prepostrotate
+from logmgmt import prepostrotate

 ###################
 # CONSTANTS
@ -16,4 +16,4 @@ COPY_LIST="$PKG_BASE/src/LICENSE \
         $PKG_BASE/src/example.py \
         $PKG_BASE/src/example.conf"

-TIS_PATCH_VER=4
+TIS_PATCH_VER=5
@ -870,6 +870,9 @@ def _database_setup(database):
                        (PLUGIN, database, retention))
         collectd.info("%s influxdb:%s is setup" % (PLUGIN, database))
         PluginObject.database_setup = True
+    else:
+        collectd.error("%s influxdb:%s setup %s" %
+                       (PLUGIN, database, error_str))


 def _clear_alarm_for_missing_filesystems():
@ -1,16 +1,12 @@
-#<Plugin "ntpq">
-#  Interval 60
-#</Plugin>
-
 <Plugin "threshold">
    <Plugin "ntpq">
        <Type "absolute">
-           Instance "state"
+           Instance "reachable"
            Persist true
            PersistOK true
            WarningMin 1
            FailureMin 0
-           # Hits 2
+           Hits 2
            Invert false
        </Type>
    </Plugin>
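For context, a hedged sketch of the sample this threshold evaluates. The ntpq plugin further down dispatches a single absolute value with type_instance 'reachable'; the hostname here is illustrative and the collectd module is only importable inside the collectd daemon.

# Illustrative only: the dispatch that the threshold block above watches.
import collectd

val = collectd.Values(host='controller-0')      # hostname is an example
val.plugin = 'ntpq'
val.type = 'absolute'
val.type_instance = 'reachable'
# A value of 0 falls below WarningMin 1; with Hits 2 it must stay there for
# two consecutive reads before collectd issues a notification.
val.dispatch(values=[0])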
@ -1,9 +1,58 @@
|
|||||||
|
############################################################################
|
||||||
# Copyright (c) 2018 Wind River Systems, Inc.
|
# Copyright (c) 2018-2019 Wind River Systems, Inc.
|
||||||
#
|
#
|
||||||
# SPDX-License-Identifier: Apache-2.0
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
#
|
#
|
||||||
|
#############################################################################
|
||||||
#
|
#
|
||||||
|
# This is the NTP connectivity monitor plugin for collectd.
|
||||||
|
#
|
||||||
|
# This plugin uses the industry standard ntpq exec to query NTP attributes.
|
||||||
|
#
|
||||||
|
# This plugin executes 'ntpq -np' to determine which provisioned servers
|
||||||
|
# are reachable. The ntpq output includes a Tally Code. The Tally Code is
|
||||||
|
# represented by the first character in each server's line item.
|
||||||
|
#
|
||||||
|
# The only ntpq output looked at by this plugin are the Tally Codes and
|
||||||
|
# associated IPs.
|
||||||
|
#
|
||||||
|
# Tally Code Summary:
|
||||||
|
#
|
||||||
|
# A server is considered reachable only when the Tally Code is a * or a +.
|
||||||
|
# A server is considered unreachable if the Tally Code is a ' ' (space)
|
||||||
|
# A server with a '*' Tally Code is the 'selected' server.
|
||||||
|
#
|
||||||
|
# Here is an example of the ntpq command output
|
||||||
|
#
|
||||||
|
# remote refid st t when poll reach delay offset jitter
|
||||||
|
# =============================================================================
|
||||||
|
# +192.168.204.104 206.108.0.133 2 u 203 1024 377 0.226 -3.443 1.137
|
||||||
|
# +97.107.129.217 200.98.196.212 2 u 904 1024 377 21.677 5.577 0.624
|
||||||
|
# 192.95.27.155 24.150.203.150 2 u 226 1024 377 15.867 0.381 1.124
|
||||||
|
# -97.107.129.217 200.98.196.212 2 u 904 1024 377 21.677 5.577 0.624
|
||||||
|
# *182.95.27.155 24.150.203.150 2 u 226 1024 377 15.867 0.381 1.124
|
||||||
|
#
|
||||||
|
# The local controller node is not to be considered a reachable server and is
|
||||||
|
# never alarmed if it is not reachable.
|
||||||
|
#
|
||||||
|
# Normal running modes with no alarms include
|
||||||
|
#
|
||||||
|
# 0 - All NTP servers are reachable and one is selected
|
||||||
|
# 1 - No NTP servers are provisioned
|
||||||
|
#
|
||||||
|
# Failure modes that warrant alarms include
|
||||||
|
#
|
||||||
|
# 2 - None of the NTP servers are reachable - major alarm
|
||||||
|
# 3 - Some NTP servers reachable and one is selected - server IP minor alarm
|
||||||
|
# 4 - Some NTP servers reachable but none is selected - major alarm
|
||||||
|
#
|
||||||
|
# None of these failures result in a host being degraded.
|
||||||
|
#
|
||||||
|
# This script will only be run on the controller nodes.
|
||||||
|
#
|
||||||
|
# This script logs to daemon.log with the 'collectd' process label
|
||||||
|
#
|
||||||
|
###############################################################################
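As a reading aid for the tally-code rules described above, a small illustrative parser over the sample output from this header; it is a sketch only, the real handling lives in read_func below.

# Illustrative only: '*' = selected, '+' = reachable, anything else
# (including a leading space) = unreachable. Input is the 'ntpq -pn'
# body without its two header lines.
sample = [
    "+192.168.204.104 206.108.0.133  2 u 203 1024 377 0.226 -3.443 1.137",
    " 192.95.27.155   24.150.203.150 2 u 226 1024 377 15.867 0.381 1.124",
    "*182.95.27.155   24.150.203.150 2 u 226 1024 377 15.867 0.381 1.124",
]

selected, reachable, unreachable = None, [], []
for line in sample:
    tally = line[0]
    ip = line[1:].split()[0]          # strip the tally column, keep the address
    if tally == '*':
        selected = ip
        reachable.append(ip)
    elif tally == '+':
        reachable.append(ip)
    else:
        unreachable.append(ip)

print(selected, reachable, unreachable)
# -> 182.95.27.155 ['192.168.204.104', '182.95.27.155'] ['192.95.27.155']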
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import subprocess
|
import subprocess
|
||||||
@ -16,149 +65,106 @@ import tsconfig.tsconfig as tsc
|
|||||||
api = fm_api.FaultAPIs()
|
api = fm_api.FaultAPIs()
|
||||||
|
|
||||||
PLUGIN = 'NTP query plugin'
|
PLUGIN = 'NTP query plugin'
|
||||||
|
PLUGIN_INTERVAL = 600 # audit interval in secs
|
||||||
PLUGIN_SCRIPT = '/etc/rmonfiles.d/query_ntp_servers.sh'
|
PLUGIN_CONF = '/etc/ntp.conf'
|
||||||
PLUGIN_RESULT = '/tmp/ntpq_server_info'
|
PLUGIN_EXEC = '/usr/sbin/ntpq'
|
||||||
|
PLUGIN_EXEC_OPTIONS = '-pn'
|
||||||
# static variables
|
PLUGIN_ALARMID = "100.114"
|
||||||
ALARM_ID__NTPQ = "100.114"
|
|
||||||
|
|
||||||
|
|
||||||
# define a class here that will persist over read calls
|
# define a class here that will persist over read calls
|
||||||
class NtpqObject:
|
class NtpqObject:
|
||||||
hostname = ''
|
|
||||||
base_eid = ''
|
# static variables set in init
|
||||||
severity = 'clear'
|
hostname = '' # the name of this host
|
||||||
|
base_eid = '' # the eid for the major alarm
|
||||||
|
config_complete = False # set to true once config is complete
|
||||||
|
alarm_raised = False # True when the major alarm is asserted
|
||||||
|
|
||||||
|
server_list_conf = [] # list of servers in the /etc/ntp.conf file
|
||||||
|
server_list_ntpq = [] # list of servers in the ntpq -np output
|
||||||
|
unreachable_servers = [] # list of unreachable servers
|
||||||
|
reachable_servers = [] # list of reachable servers
|
||||||
|
selected_server = 'None' # the ip address of the selected server
|
||||||
|
selected_server_save = 'None' # the last selected server ; note change
|
||||||
|
|
||||||
|
# variables used to raise alarms to FM
|
||||||
suppression = True
|
suppression = True
|
||||||
service_affecting = False
|
service_affecting = False
|
||||||
status = 0
|
|
||||||
last_result = ''
|
|
||||||
this_result = ''
|
|
||||||
id = ALARM_ID__NTPQ
|
|
||||||
name = "NTP"
|
name = "NTP"
|
||||||
alarm_type = fm_constants.FM_ALARM_TYPE_1
|
alarm_type = fm_constants.FM_ALARM_TYPE_1
|
||||||
cause = fm_constants.ALARM_PROBABLE_CAUSE_UNKNOWN
|
cause = fm_constants.ALARM_PROBABLE_CAUSE_UNKNOWN
|
||||||
repair = "Monitor and if condition persists, "
|
repair = "Monitor and if condition persists, "
|
||||||
repair += "contact next level of support."
|
repair += "contact next level of support."
|
||||||
|
|
||||||
|
# This plugin's class object - persists over read calls
|
||||||
obj = NtpqObject()
|
obj = NtpqObject()
|
||||||
|
|
||||||
|
|
||||||
def is_uuid_like(val):
|
###############################################################################
|
||||||
"""Returns validation of a value as a UUID."""
|
#
|
||||||
try:
|
# Name : _add_unreachable_server
|
||||||
return str(uuid.UUID(val)) == val
|
#
|
||||||
except (TypeError, ValueError, AttributeError):
|
# Description: This private interface is used to add an ip to the
|
||||||
return False
|
# unreachable servers list.
|
||||||
|
#
|
||||||
|
# Parameters : IP address
|
||||||
|
#
|
||||||
|
###############################################################################
|
||||||
|
|
||||||
|
def _add_unreachable_server(ip=None):
|
||||||
|
""" Add ip to unreachable_servers list """
|
||||||
|
|
||||||
# The config function - called once on collectd process startup
|
if ip:
|
||||||
def config_func(config):
|
if ip not in obj.unreachable_servers:
|
||||||
"""
|
collectd.debug("%s adding '%s' to unreachable servers list: %s" %
|
||||||
Configure the plugin
|
(PLUGIN, ip, obj.unreachable_servers))
|
||||||
"""
|
|
||||||
|
|
||||||
collectd.debug('%s config function' % PLUGIN)
|
obj.unreachable_servers.append(ip)
|
||||||
return 0
|
|
||||||
|
|
||||||
|
collectd.info("%s added '%s' to unreachable servers list: %s" %
|
||||||
# The init function - called once on collectd process startup
|
(PLUGIN, ip, obj.unreachable_servers))
|
||||||
def init_func():
|
|
||||||
|
|
||||||
# ntp query is for controllers only
|
|
||||||
if tsc.nodetype != 'controller':
|
|
||||||
return 0
|
|
||||||
|
|
||||||
# get current hostname
|
|
||||||
obj.hostname = os.uname()[1]
|
|
||||||
obj.base_eid = 'host=' + obj.hostname + '.ntp'
|
|
||||||
collectd.info("%s on %s with entity id '%s'" % PLUGIN, obj.hostname, obj.base_eid)
|
|
||||||
return 0
|
|
||||||
|
|
||||||
|
|
||||||
# The sample read function - called on every audit interval
|
|
||||||
def read_func():
|
|
||||||
|
|
||||||
# ntp query is for controllers only
|
|
||||||
if tsc.nodetype != 'controller':
|
|
||||||
return 0
|
|
||||||
|
|
||||||
result = int(0)
|
|
||||||
# Query ntp
|
|
||||||
try:
|
|
||||||
result = os.system(PLUGIN_SCRIPT)
|
|
||||||
except Exception as e:
|
|
||||||
collectd.error("%s Could not run '%s' (%s)" %
|
|
||||||
(PLUGIN, e))
|
|
||||||
return 0
|
|
||||||
|
|
||||||
obj.status = int(result)/0x100
|
|
||||||
|
|
||||||
collectd.info("%s Query Result: %s" % (PLUGIN, obj.status))
|
|
||||||
|
|
||||||
if os.path.exists(PLUGIN_RESULT) is False:
|
|
||||||
collectd.error("%s produced no result file '%s'" %
|
|
||||||
(PLUGIN, PLUGIN_RESULT))
|
|
||||||
return 0
|
|
||||||
|
|
||||||
# read the query result file.
|
|
||||||
# format is in the PLUGIN_SCRIPT file.
|
|
||||||
# This code only wants the second line.
|
|
||||||
# It contains list of unreachable ntp servers that need alarm management.
|
|
||||||
count = 0
|
|
||||||
with open(PLUGIN_RESULT, 'r') as infile:
|
|
||||||
for line in infile:
|
|
||||||
count += 1
|
|
||||||
collectd.info("%s Query Result: %s" % (PLUGIN, line))
|
|
||||||
if count == 0:
|
|
||||||
collectd.error("%s produced empty result file '%s'" %
|
|
||||||
(PLUGIN, PLUGIN_RESULT))
|
|
||||||
return 0
|
|
||||||
|
|
||||||
sample = 1
|
|
||||||
|
|
||||||
# Dispatch usage value to collectd
|
|
||||||
val = collectd.Values(host=obj.hostname)
|
|
||||||
val.plugin = 'ntpq'
|
|
||||||
val.plugin_instance = 'some.ntp.server.ip'
|
|
||||||
val.type = 'absolute'
|
|
||||||
val.type_instance = 'state'
|
|
||||||
val.dispatch(values=[sample])
|
|
||||||
|
|
||||||
severity = 'clear'
|
|
||||||
obj.severity = 'clear'
|
|
||||||
|
|
||||||
# if there is no severity change then consider exiting
|
|
||||||
if obj.severity == severity:
|
|
||||||
|
|
||||||
# unless the current severity is 'minor'
|
|
||||||
if severity == 'minor':
|
|
||||||
# TODO: check to see if the failing IP address is changed
|
|
||||||
collectd.info("%s NEED TO CHECK IP ADDRESSES" % (PLUGIN))
|
|
||||||
else:
|
else:
|
||||||
return 0
|
collectd.debug("%s ip '%s' already in unreachable_servers list" %
|
||||||
|
(PLUGIN, ip))
|
||||||
|
else:
|
||||||
|
collectd.error("%s _add_unreachable_server called with no IP" % PLUGIN)
|
||||||
|
|
||||||
# if current severity is clear but previous severity is not then
|
|
||||||
# prepare to clear the alarms
|
|
||||||
if severity == 'clear':
|
|
||||||
_alarm_state = fm_constants.FM_ALARM_STATE_CLEAR
|
|
||||||
|
|
||||||
# TODO: loop over all raises alarms and clear them
|
###############################################################################
|
||||||
collectd.info("%s NEED CLEAR ALL ALARMS" % (PLUGIN))
|
#
|
||||||
if api.clear_fault(obj.id, obj.base_eid) is False:
|
# Name : _raise_alarm
|
||||||
collectd.error("%s %s:%s clear_fault failed" %
|
#
|
||||||
(PLUGIN, obj.id, obj.base_eid))
|
# Description: This private interface is used to raise NTP alarms.
|
||||||
return 0
|
#
|
||||||
|
# Parameters : Optional IP address
|
||||||
|
#
|
||||||
|
# If called with no or empty IP then a generic major alarm is raised.
|
||||||
|
# If called with an IP then an IP specific minor alarm is raised.
|
||||||
|
#
|
||||||
|
# Returns : Error indication.
|
||||||
|
#
|
||||||
|
# True : is error. FM call failed to set the
|
||||||
|
# alarm and needs to be retried.
|
||||||
|
#
|
||||||
|
# False: no error. FM call succeeds
|
||||||
|
#
|
||||||
|
###############################################################################
|
||||||
|
|
||||||
|
def _raise_alarm(ip=None):
|
||||||
|
""" Assert an NTP alarm """
|
||||||
|
|
||||||
|
if not ip:
|
||||||
|
# Don't re-raise the alarm if its already raised
|
||||||
|
if obj.alarm_raised is True:
|
||||||
|
return False
|
||||||
|
|
||||||
elif severity == 'major':
|
|
||||||
reason = "NTP configuration does not contain any valid "
|
reason = "NTP configuration does not contain any valid "
|
||||||
reason += "or reachable NTP servers."
|
reason += "or reachable NTP servers."
|
||||||
eid = obj.base_eid
|
eid = obj.base_eid
|
||||||
fm_severity = fm_constants.FM_ALARM_SEVERITY_MAJOR
|
fm_severity = fm_constants.FM_ALARM_SEVERITY_MAJOR
|
||||||
|
|
||||||
else:
|
else:
|
||||||
# TODO: There can be up to 3 inacessable servers
|
|
||||||
ip = 'some.server.ip.addr'
|
|
||||||
reason = "NTP address "
|
reason = "NTP address "
|
||||||
reason += ip
|
reason += ip
|
||||||
reason += " is not a valid or a reachable NTP server."
|
reason += " is not a valid or a reachable NTP server."
|
||||||
@ -166,7 +172,7 @@ def read_func():
|
|||||||
fm_severity = fm_constants.FM_ALARM_SEVERITY_MINOR
|
fm_severity = fm_constants.FM_ALARM_SEVERITY_MINOR
|
||||||
|
|
||||||
fault = fm_api.Fault(
|
fault = fm_api.Fault(
|
||||||
alarm_id=obj.id,
|
alarm_id=PLUGIN_ALARMID,
|
||||||
alarm_state=fm_constants.FM_ALARM_STATE_SET,
|
alarm_state=fm_constants.FM_ALARM_STATE_SET,
|
||||||
entity_type_id=fm_constants.FM_ENTITY_TYPE_HOST,
|
entity_type_id=fm_constants.FM_ENTITY_TYPE_HOST,
|
||||||
entity_instance_id=eid,
|
entity_instance_id=eid,
|
||||||
@ -179,12 +185,593 @@ def read_func():
|
|||||||
suppression=obj.suppression)
|
suppression=obj.suppression)
|
||||||
|
|
||||||
alarm_uuid = api.set_fault(fault)
|
alarm_uuid = api.set_fault(fault)
|
||||||
if is_uuid_like(alarm_uuid) is False:
|
if _is_uuid_like(alarm_uuid) is False:
|
||||||
|
|
||||||
|
# Don't _add_unreachable_server list if the fm call failed.
|
||||||
|
# That way it will be retried at a later time.
|
||||||
collectd.error("%s %s:%s set_fault failed:%s" %
|
collectd.error("%s %s:%s set_fault failed:%s" %
|
||||||
(PLUGIN, obj.id, eid, alarm_uuid))
|
(PLUGIN, PLUGIN_ALARMID, eid, alarm_uuid))
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
collectd.info("%s raised alarm %s:%s" % (PLUGIN, PLUGIN_ALARMID, eid))
|
||||||
|
if ip:
|
||||||
|
_add_unreachable_server(ip)
|
||||||
|
else:
|
||||||
|
obj.alarm_raised = True
|
||||||
|
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
###############################################################################
|
||||||
|
#
|
||||||
|
# Name : _clear_base_alarm
|
||||||
|
#
|
||||||
|
# Description: This private interface is used to clear the NTP base alarm.
|
||||||
|
#
|
||||||
|
# Parameters : None
|
||||||
|
#
|
||||||
|
# Returns : Error indication.
|
||||||
|
#
|
||||||
|
# True : is error. FM call failed to clear the
|
||||||
|
# alarm and needs to be retried.
|
||||||
|
#
|
||||||
|
# False: no error. FM call succeeds
|
||||||
|
#
|
||||||
|
###############################################################################
|
||||||
|
|
||||||
|
def _clear_base_alarm():
|
||||||
|
""" Clear the NTP base alarm """
|
||||||
|
|
||||||
|
if api.get_fault(PLUGIN_ALARMID, obj.base_eid) is not None:
|
||||||
|
if api.clear_fault(PLUGIN_ALARMID, obj.base_eid) is False:
|
||||||
|
collectd.error("%s failed to clear alarm %s:%s" %
|
||||||
|
(PLUGIN, PLUGIN_ALARMID, obj.base_eid))
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
collectd.info("%s cleared alarm %s:%s" %
|
||||||
|
(PLUGIN, PLUGIN_ALARMID, obj.base_eid))
|
||||||
|
obj.alarm_raised = False
|
||||||
|
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
###############################################################################
|
||||||
|
#
|
||||||
|
# Name : _remove_ip_from_unreachable_list
|
||||||
|
#
|
||||||
|
# Description: This private interface is used to remove the specified IP
|
||||||
|
# from the unreachable servers list and clear its alarm if raised.
|
||||||
|
#
|
||||||
|
# Parameters : IP address
|
||||||
|
#
|
||||||
|
# Returns : Error indication.
|
||||||
|
#
|
||||||
|
# True : is error. FM call failed to clear the
|
||||||
|
# alarm and needs to be retried.
|
||||||
|
#
|
||||||
|
# False: no error. FM call succeeds
|
||||||
|
#
|
||||||
|
###############################################################################
|
||||||
|
|
||||||
|
def _remove_ip_from_unreachable_list(ip):
|
||||||
|
"""
|
||||||
|
Remove an IP address from the unreachable list and
|
||||||
|
clear any NTP alarm that might be asserted for it.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# remove from unreachable list if its there
|
||||||
|
if ip and ip in obj.unreachable_servers:
|
||||||
|
eid = obj.base_eid + '=' + ip
|
||||||
|
collectd.debug("%s trying to clear alarm %s" % (PLUGIN, eid))
|
||||||
|
# clear the alarm if its asserted
|
||||||
|
if api.get_fault(PLUGIN_ALARMID, eid) is not None:
|
||||||
|
if api.clear_fault(PLUGIN_ALARMID, eid) is True:
|
||||||
|
collectd.info("%s cleared %s:%s alarm" %
|
||||||
|
(PLUGIN, PLUGIN_ALARMID, eid))
|
||||||
|
obj.unreachable_servers.remove(ip)
|
||||||
|
else:
|
||||||
|
# Handle clear failure by not removing the IP from the list.
|
||||||
|
# It will retry on next audit.
|
||||||
|
# Error should only occur if FM is not running at the time
|
||||||
|
# this get or clear is called
|
||||||
|
collectd.error("%s failed alarm clear %s:%s" %
|
||||||
|
(PLUGIN, PLUGIN_ALARMID, eid))
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
obj.unreachable_servers.remove(ip)
|
||||||
|
collectd.info("%s alarm %s not raised" % (PLUGIN, eid))
|
||||||
|
|
||||||
|
return False
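For reference, the two entity-id shapes used by the alarm helpers above, with an illustrative hostname and server address.

# Illustrative only: how the base (major) and per-server (minor) alarm
# entity ids are built; hostname and IP are examples.
hostname = 'controller-0'
base_eid = 'host=' + hostname + '.ntp'        # major "no reachable server" alarm
server_eid = base_eid + '=' + '10.10.10.42'   # minor per-server alarm
print(base_eid)     # host=controller-0.ntp
print(server_eid)   # host=controller-0.ntp=10.10.10.42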
|
||||||
|
|
||||||
|
|
||||||
|
###############################################################################
|
||||||
|
#
|
||||||
|
# Name : _add_ip_to_ntpq_server_list
|
||||||
|
#
|
||||||
|
# Description: This private interface is used to create a list of servers
|
||||||
|
# found in the ntpq output.
|
||||||
|
#
|
||||||
|
# This list is used to detect and handle servers that might come
|
||||||
|
# and go between readings that might otherwise result in stuck
|
||||||
|
# alarms.
|
||||||
|
#
|
||||||
|
# Parameters : IP address
|
||||||
|
#
|
||||||
|
# Returns : nothing
|
||||||
|
#
|
||||||
|
###############################################################################
|
||||||
|
|
||||||
|
def _add_ip_to_ntpq_server_list(ip):
|
||||||
|
""" Add this IP to the list of servers that ntpq reports against. """
|
||||||
|
|
||||||
|
if ip not in obj.server_list_ntpq:
|
||||||
|
obj.server_list_ntpq.append(ip)
|
||||||
|
|
||||||
|
|
||||||
|
##############################################################################
|
||||||
|
#
|
||||||
|
# Name : _cleanup_stale_servers
|
||||||
|
#
|
||||||
|
# Description: This private interface walks through each server tracking list
|
||||||
|
# removing any that it finds that are not in the ntpq server list.
|
||||||
|
#
|
||||||
|
# Alarms are cleared as needed to avoid stale alarms
|
||||||
|
#
|
||||||
|
# Parameters : None
|
||||||
|
#
|
||||||
|
# Returns : nothing
|
||||||
|
#
|
||||||
|
###############################################################################
|
||||||
|
|
||||||
|
def _cleanup_stale_servers():
|
||||||
|
""" Cleanup the server IP tracking lists """
|
||||||
|
|
||||||
|
collectd.debug("%s CLEANUP REACHABLE: %s %s" %
|
||||||
|
(PLUGIN, obj.server_list_ntpq, obj.reachable_servers))
|
||||||
|
for ip in obj.reachable_servers:
|
||||||
|
if ip not in obj.server_list_ntpq:
|
||||||
|
collectd.info("%s removing missing '%s' server from reachable "
|
||||||
|
"server list" % (PLUGIN, ip))
|
||||||
|
obj.reachable_servers.remove(ip)
|
||||||
|
|
||||||
|
collectd.debug("%s CLEANUP UNREACHABLE: %s %s" %
|
||||||
|
(PLUGIN, obj.server_list_ntpq, obj.unreachable_servers))
|
||||||
|
for ip in obj.unreachable_servers:
|
||||||
|
if ip not in obj.server_list_ntpq:
|
||||||
|
collectd.info("%s removing missing '%s' server from unreachable "
|
||||||
|
"server list" % (PLUGIN, ip))
|
||||||
|
_remove_ip_from_unreachable_list(ip)
|
||||||
|
|
||||||
|
|
||||||
|
###############################################################################
|
||||||
|
#
|
||||||
|
# Name : _get_ntp_servers
|
||||||
|
#
|
||||||
|
# Description: This private interface reads the list of ntp servers from the
|
||||||
|
# ntp.conf file
|
||||||
|
#
|
||||||
|
# Parameters : None
|
||||||
|
#
|
||||||
|
# Returns : nothing
|
||||||
|
#
|
||||||
|
# Updates : server_list_conf
|
||||||
|
#
|
||||||
|
###############################################################################
|
||||||
|
|
||||||
|
def _get_ntp_servers():
|
||||||
|
""" Read the provisioned servers from the ntp conf file """
|
||||||
|
|
||||||
|
with open(PLUGIN_CONF, 'r') as infile:
|
||||||
|
for line in infile:
|
||||||
|
if line.startswith('server '):
|
||||||
|
ip = line.rstrip().split(' ')[1]
|
||||||
|
if ip not in obj.server_list_conf:
|
||||||
|
obj.server_list_conf.append(ip)
|
||||||
|
if len(obj.server_list_conf):
|
||||||
|
collectd.info("%s server list: %s" %
|
||||||
|
(PLUGIN, obj.server_list_conf))
|
||||||
|
else:
|
||||||
|
##################################################################
|
||||||
|
#
|
||||||
|
# Handle NTP_NOT_PROVISIONED (1) case
|
||||||
|
#
|
||||||
|
# There is no alarming for this case.
|
||||||
|
# Clear any that may have been raised.
|
||||||
|
#
|
||||||
|
##################################################################
|
||||||
|
collectd.info("%s No NTP servers are provisioned" % PLUGIN)
|
||||||
|
|
||||||
|
# clear all alarms
|
||||||
|
if obj.alarm_raised:
|
||||||
|
_clear_base_alarm()
|
||||||
|
|
||||||
|
if obj.unreachable_servers:
|
||||||
|
for ip in obj.unreachable_servers:
|
||||||
|
_remove_ip_from_unreachable_list(ip)
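A small worked example of the 'server' line parsing done by _get_ntp_servers above, against a made-up ntp.conf fragment.

# Illustrative only: the configuration content is invented.
conf = """driftfile /var/lib/ntp/drift
server 10.10.10.41 iburst
server 10.10.10.42
"""
servers = [line.rstrip().split(' ')[1]
           for line in conf.splitlines() if line.startswith('server ')]
print(servers)   # ['10.10.10.41', '10.10.10.42']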
|
||||||
|
|
||||||
|
|
||||||
|
###############################################################################
|
||||||
|
#
|
||||||
|
# Name : is_controller
|
||||||
|
#
|
||||||
|
# Description: This private interface returns a True if the specified ip is
|
||||||
|
# associated with a local controller.
|
||||||
|
#
|
||||||
|
# Parameters : IP address
|
||||||
|
#
|
||||||
|
# Returns : True or False
|
||||||
|
#
|
||||||
|
###############################################################################
|
||||||
|
|
||||||
|
def _is_controller(ip):
|
||||||
|
""" Returns True if this IP corresponds to one of the controllers """
|
||||||
|
|
||||||
|
collectd.debug("%s check if '%s' is a controller ip" % (PLUGIN, ip))
|
||||||
|
with open('/etc/hosts', 'r') as infile:
|
||||||
|
for line in infile:
|
||||||
|
# skip over file comment lines prefixed with '#'
|
||||||
|
if line[0] == '#':
|
||||||
|
continue
|
||||||
|
# line format is 'ip' 'name' ....
|
||||||
|
split_line = line.split()
|
||||||
|
if len(split_line) >= 2:
|
||||||
|
# look for exact match ip that contains controller in its name
|
||||||
|
if split_line[0] == ip and 'controller' in line:
|
||||||
|
collectd.debug("%s %s is a controller" % (PLUGIN, ip))
|
||||||
|
return True
|
||||||
|
return False
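A worked example of the matching rule in _is_controller above, using a made-up /etc/hosts fragment.

# Illustrative only: the rule is "exact IP match on the first column and the
# word 'controller' somewhere on the line". The hosts content is invented.
hosts = """127.0.0.1 localhost
192.168.204.2 controller-0
192.168.204.3 controller-1
10.10.10.42 ntp.example.org
"""

def is_controller(ip):
    for line in hosts.splitlines():
        if line.startswith('#'):
            continue
        cols = line.split()
        if len(cols) >= 2 and cols[0] == ip and 'controller' in line:
            return True
    return False

print(is_controller('192.168.204.2'), is_controller('10.10.10.42'))  # True False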
|
||||||
|
|
||||||
|
|
||||||
|
###############################################################################
|
||||||
|
#
|
||||||
|
# Name : is_uuid_like
|
||||||
|
#
|
||||||
|
# Description: This private interface returns a True if the specified value is
|
||||||
|
# a valid uuid.
|
||||||
|
#
|
||||||
|
# Parameters : val is a uuid string
|
||||||
|
#
|
||||||
|
# Returns : True or False
|
||||||
|
#
|
||||||
|
###############################################################################
|
||||||
|
|
||||||
|
def _is_uuid_like(val):
|
||||||
|
"""Returns validation of a value as a UUID."""
|
||||||
|
try:
|
||||||
|
return str(uuid.UUID(val)) == val
|
||||||
|
except (TypeError, ValueError, AttributeError):
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
###############################################################################
|
||||||
|
#
|
||||||
|
# Name : config_func
|
||||||
|
#
|
||||||
|
# Description: The configuration interface this plugin publishes to collectd.
|
||||||
|
#
|
||||||
|
# collectd calls this interface one time on its process startup
|
||||||
|
# when it loads this plugin.
|
||||||
|
#
|
||||||
|
# There are currently no specific configuration options to parse
|
||||||
|
# for this plugin.
|
||||||
|
#
|
||||||
|
# Parameters : collectd config object
|
||||||
|
#
|
||||||
|
# Returns : zero
|
||||||
|
#
|
||||||
|
###############################################################################
|
||||||
|
|
||||||
|
def config_func(config):
|
||||||
|
""" Configure the plugin """
|
||||||
|
|
||||||
|
collectd.debug('%s config function' % PLUGIN)
|
||||||
|
return 0
|
||||||
|
|
||||||
|
|
||||||
|
###############################################################################
|
||||||
|
#
|
||||||
|
# Name : init_func
|
||||||
|
#
|
||||||
|
# Description: The initialization interface this plugin publishes to collectd.
|
||||||
|
#
|
||||||
|
# collectd calls this interface one time on its process startup
|
||||||
|
# when it loads this plugin.
|
||||||
|
#
|
||||||
|
# 1. get hostname
|
||||||
|
# 2. build base entity id for the NTP alarm
|
||||||
|
# 3. query FM for existing NTP alarms
|
||||||
|
# - base alarm is maintained and state loaded if it exists
|
||||||
|
# - ntp ip minor alarms are cleared on init. This is done to
|
||||||
|
# auto correct ntp server IP address changes over process
|
||||||
|
# restart ; avoid stuck alarms.
|
||||||
|
#
|
||||||
|
# Parameters : None
|
||||||
|
#
|
||||||
|
# Returns : zero
|
||||||
|
#
|
||||||
|
###############################################################################
|
||||||
|
|
||||||
|
def init_func():
|
||||||
|
|
||||||
|
# ntp query is for controllers only
|
||||||
|
if tsc.nodetype != 'controller':
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
# TODO: clear the object alarm state
|
# do nothing till config is complete.
|
||||||
|
# init_func will be called again by read_func once config is complete.
|
||||||
|
if os.path.exists(tsc.VOLATILE_CONTROLLER_CONFIG_COMPLETE) is False:
|
||||||
|
return 0
|
||||||
|
|
||||||
|
# get current hostname
|
||||||
|
obj.hostname = os.uname()[1]
|
||||||
|
if not obj.hostname:
|
||||||
|
collectd.error("%s failed to get hostname" % PLUGIN)
|
||||||
|
return 1
|
||||||
|
|
||||||
|
obj.base_eid = 'host=' + obj.hostname + '.ntp'
|
||||||
|
collectd.debug("%s on %s with entity id '%s'" %
|
||||||
|
(PLUGIN, obj.hostname, obj.base_eid))
|
||||||
|
|
||||||
|
# get a list of provisioned ntp servers
|
||||||
|
_get_ntp_servers()
|
||||||
|
|
||||||
|
# manage existing alarms.
|
||||||
|
alarms = api.get_faults_by_id(PLUGIN_ALARMID)
|
||||||
|
if alarms:
|
||||||
|
for alarm in alarms:
|
||||||
|
eid = alarm.entity_instance_id
|
||||||
|
# ignore alarms not for this host
|
||||||
|
if obj.hostname not in eid:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# maintain only the base alarm.
|
||||||
|
if alarm.entity_instance_id != obj.base_eid:
|
||||||
|
# clear any ntp server specific alarms over process restart
|
||||||
|
# this is done to avoid the potential for stuck ntp ip alarms
|
||||||
|
collectd.info("%s clearing found startup alarm '%s'" %
|
||||||
|
(PLUGIN, alarm.entity_instance_id))
|
||||||
|
rc = api.clear_fault(PLUGIN_ALARMID, alarm.entity_instance_id)
|
||||||
|
if rc is False:
|
||||||
|
# if we can't clear the alarm now then lets load it and
|
||||||
|
# manage it like it just happened. When the server starts
|
||||||
|
# responding then the alarm will get cleared at that time.
|
||||||
|
collectd.error("%s failed to clear alarm %s:%s" %
|
||||||
|
(PLUGIN, PLUGIN_ALARMID,
|
||||||
|
alarm.entity_instance_id))
|
||||||
|
|
||||||
|
ip = alarm.entity_instance_id.split('=')[2]
|
||||||
|
if ip and ip not in obj.unreachable_servers:
|
||||||
|
_add_unreachable_server(ip)
|
||||||
|
else:
|
||||||
|
obj.alarm_raised = True
|
||||||
|
collectd.info("%s found alarm %s:%s" %
|
||||||
|
(PLUGIN,
|
||||||
|
PLUGIN_ALARMID,
|
||||||
|
alarm.entity_instance_id))
|
||||||
|
|
||||||
|
# ensure the base alarm is cleared if there are no
|
||||||
|
# provisioned servers.
|
||||||
|
if not obj.server_list_conf:
|
||||||
|
_clear_base_alarm()
|
||||||
|
|
||||||
|
else:
|
||||||
|
collectd.info("%s no major startup alarms found" % PLUGIN)
|
||||||
|
|
||||||
|
obj.config_complete = True
|
||||||
|
|
||||||
|
return 0
|
||||||
|
|
||||||
|
|
||||||
|
###############################################################################
|
||||||
|
#
|
||||||
|
# Name : read_func
|
||||||
|
#
|
||||||
|
# Description: The sample read interface this plugin publishes to collectd.
|
||||||
|
#
|
||||||
|
# collectd calls this interface every audit interval.
|
||||||
|
#
|
||||||
|
# Runs ntpq -np to query NTP status and manages alarms based on
|
||||||
|
# the result.
|
||||||
|
#
|
||||||
|
# See file header (above) for more specific behavioral detail.
|
||||||
|
#
|
||||||
|
# Should only run on controller nodes ; both controllers run it
|
||||||
|
#
|
||||||
|
# Parameters : None
|
||||||
|
#
|
||||||
|
# Returns : zero or non-zero on significant error
|
||||||
|
#
|
||||||
|
###############################################################################
|
||||||
|
|
||||||
|
def read_func():
|
||||||
|
|
||||||
|
# ntp query is for controllers only
|
||||||
|
if tsc.nodetype != 'controller':
|
||||||
|
return 0
|
||||||
|
|
||||||
|
if obj.config_complete is False:
|
||||||
|
if os.path.exists(tsc.VOLATILE_CONTROLLER_CONFIG_COMPLETE) is False:
|
||||||
|
return 0
|
||||||
|
else:
|
||||||
|
collectd.info("%s controller config complete ; "
|
||||||
|
"invoking init_func" % PLUGIN)
|
||||||
|
if init_func() != 0:
|
||||||
|
return 1
|
||||||
|
|
||||||
|
# get a list of provisioned ntp servers
|
||||||
|
_get_ntp_servers()
|
||||||
|
|
||||||
|
# nothing to do while there are no provisioned NTP servers
|
||||||
|
if len(obj.server_list_conf) == 0:
|
||||||
|
return 0
|
||||||
|
|
||||||
|
# Do NTP Query
|
||||||
|
data = subprocess.check_output([PLUGIN_EXEC, PLUGIN_EXEC_OPTIONS])
|
||||||
|
|
||||||
|
# Keep this FIT test code but make it commented out for security
|
||||||
|
#
|
||||||
|
# if os.path.exists('/var/run/fit/ntpq_data'):
|
||||||
|
# data = ''
|
||||||
|
# collectd.info("%s using ntpq FIT data" % PLUGIN)
|
||||||
|
# with open('/var/run/fit/ntpq_data', 'r') as infile:
|
||||||
|
# for line in infile:
|
||||||
|
# data += line
|
||||||
|
|
||||||
|
if not data:
|
||||||
|
collectd.error("%s no data from query" % PLUGIN)
|
||||||
|
return 1
|
||||||
|
|
||||||
|
# Get the ntp query output into a list of lines
|
||||||
|
obj.ntpq = data.split('\n')
|
||||||
|
|
||||||
|
# keep track of changes ; only log on changes
|
||||||
|
reachable_list_changed = False
|
||||||
|
unreachable_list_changed = False
|
||||||
|
|
||||||
|
# Manage the selected server name
|
||||||
|
#
|
||||||
|
# save the old value so we can print a log if the selected server changes
|
||||||
|
if obj.selected_server:
|
||||||
|
obj.selected_server_save = obj.selected_server
|
||||||
|
# always assume no selected server ; till its learned
|
||||||
|
obj.selected_server = ''
|
||||||
|
|
||||||
|
# start with a fresh empty list for this new run to populate
|
||||||
|
obj.server_list_ntpq = []
|
||||||
|
|
||||||
|
# Loop through the ntpq output.
|
||||||
|
# Ignore the first 2 lines ; just header data.
|
||||||
|
for i in range(2, len(obj.ntpq)):
|
||||||
|
|
||||||
|
# ignore empty or lines that are not long enough
|
||||||
|
if len(obj.ntpq[i]) < 10:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# log the ntpq output ; minus the 2 lines of header
|
||||||
|
collectd.info("NTPQ: %s" % obj.ntpq[i])
|
||||||
|
|
||||||
|
# Unreachable servers are ones whose line starts with a space
|
||||||
|
ip = ''
|
||||||
|
if obj.ntpq[i][0] == ' ':
|
||||||
|
# get the ip address
|
||||||
|
# example format of line:['', '132.163.4.102', '', '', '.INIT.',
|
||||||
|
# get ip from index [1] of the list
|
||||||
|
unreachable = obj.ntpq[i].split(' ')[1]
|
||||||
|
if unreachable:
|
||||||
|
# check to see if its a controller ip
|
||||||
|
# we skip over controller ips
|
||||||
|
if _is_controller(unreachable) is False:
|
||||||
|
_add_ip_to_ntpq_server_list(unreachable)
|
||||||
|
if unreachable not in obj.unreachable_servers:
|
||||||
|
if _raise_alarm(unreachable) is False:
|
||||||
|
unreachable_list_changed = True
|
||||||
|
# if the FM call to raise the alarm worked then
|
||||||
|
# add this ip to the unreachable list if its not
|
||||||
|
# already in it
|
||||||
|
_add_unreachable_server(unreachable)
|
||||||
|
|
||||||
|
# Reachable servers are ones whose line start with a '+'
|
||||||
|
elif obj.ntpq[i][0] == '+':
|
||||||
|
# remove the '+' and get the ip
|
||||||
|
ip = obj.ntpq[i].split(' ')[0][1:]
|
||||||
|
|
||||||
|
elif obj.ntpq[i][0] == '*':
|
||||||
|
# remove the '*' and get the ip
|
||||||
|
ip = obj.ntpq[i].split(' ')[0][1:]
|
||||||
|
if ip:
|
||||||
|
if _is_controller(ip) is False:
|
||||||
|
if obj.selected_server:
|
||||||
|
# don't update the selected server if more selections
|
||||||
|
# are found. go with the first one found.
|
||||||
|
collectd.info("%s additional selected server found"
|
||||||
|
" '%s'; current selection is '%s'" %
|
||||||
|
(PLUGIN, ip, obj.selected_server))
|
||||||
|
else:
|
||||||
|
# update the selected server list
|
||||||
|
obj.selected_server = ip
|
||||||
|
collectd.debug("%s selected server is '%s'" %
|
||||||
|
(PLUGIN, obj.selected_server))
|
||||||
|
else:
|
||||||
|
collectd.debug("%s local controller '%s' marked "
|
||||||
|
"as selected server ; ignoring" %
|
||||||
|
(PLUGIN, ip))
|
||||||
|
|
||||||
|
# anything else is unreachable
|
||||||
|
else:
|
||||||
|
unreachable = obj.ntpq[i][1:].split(' ')[0]
|
||||||
|
if _is_controller(unreachable) is False:
|
||||||
|
_add_ip_to_ntpq_server_list(unreachable)
|
||||||
|
if unreachable not in obj.unreachable_servers:
|
||||||
|
if _raise_alarm(unreachable) is False:
|
||||||
|
unreachable_list_changed = True
|
||||||
|
# if the FM call to raise the alarm worked then
|
||||||
|
# add this ip to the unreachable list if its not
|
||||||
|
# already in it
|
||||||
|
_add_unreachable_server(unreachable)
|
||||||
|
|
||||||
|
if ip:
|
||||||
|
# if the ip is valid then manage it
|
||||||
|
if _is_controller(ip) is False:
|
||||||
|
_add_ip_to_ntpq_server_list(ip)
|
||||||
|
# add the ip to the reachable servers list
|
||||||
|
# if its not already there
|
||||||
|
if ip not in obj.reachable_servers:
|
||||||
|
obj.reachable_servers.append(ip)
|
||||||
|
reachable_list_changed = True
|
||||||
|
# make sure this IP is no longer in the unreachable
|
||||||
|
# list and that alarms for it are cleared
|
||||||
|
_remove_ip_from_unreachable_list(ip)
|
||||||
|
|
||||||
|
_cleanup_stale_servers()
|
||||||
|
|
||||||
|
if obj.selected_server:
|
||||||
|
if obj.selected_server != obj.selected_server_save:
|
||||||
|
collectd.info("%s selected server changed from '%s' to '%s'" %
|
||||||
|
(PLUGIN,
|
||||||
|
obj.selected_server_save,
|
||||||
|
obj.selected_server))
|
||||||
|
obj.selected_server_save = obj.selected_server
|
||||||
|
if obj.alarm_raised is True:
|
||||||
|
_clear_base_alarm()
|
||||||
|
|
||||||
|
elif obj.alarm_raised is False:
|
||||||
|
collectd.error("%s no selected server" % PLUGIN)
|
||||||
|
if _raise_alarm() is False:
|
||||||
|
obj.selected_server_save = 'None'
|
||||||
|
|
||||||
|
# only log and act on changes
|
||||||
|
if reachable_list_changed is True:
|
||||||
|
if obj.reachable_servers:
|
||||||
|
collectd.info("%s reachable servers: %s" %
|
||||||
|
(PLUGIN, obj.reachable_servers))
|
||||||
|
if obj.alarm_raised is True:
|
||||||
|
if obj.selected_server and obj.reachable_servers:
|
||||||
|
_clear_base_alarm()
|
||||||
|
else:
|
||||||
|
collectd.error("%s no reachable servers" % PLUGIN)
|
||||||
|
_raise_alarm()
|
||||||
|
|
||||||
|
# only log changes
|
||||||
|
if unreachable_list_changed is True:
|
||||||
|
if obj.unreachable_servers:
|
||||||
|
collectd.info("%s unreachable servers: %s" %
|
||||||
|
(PLUGIN, obj.unreachable_servers))
|
||||||
|
else:
|
||||||
|
collectd.info("%s all servers are reachable" % PLUGIN)
|
||||||
|
|
||||||
|
# The sample published to the database is simply the number
|
||||||
|
# of reachable servers if one is selected
|
||||||
|
if not obj.selected_server:
|
||||||
|
sample = 0
|
||||||
|
else:
|
||||||
|
sample = len(obj.reachable_servers)
|
||||||
|
|
||||||
|
# Dispatch usage value to collectd
|
||||||
|
val = collectd.Values(host=obj.hostname)
|
||||||
|
val.plugin = 'ntpq'
|
||||||
|
val.type = 'absolute'
|
||||||
|
val.type_instance = 'reachable'
|
||||||
|
val.dispatch(values=[sample])
|
||||||
|
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
@ -192,4 +779,4 @@ def read_func():
 # register the config, init and read functions
 collectd.register_config(config_func)
 collectd.register_init(init_func)
-collectd.register_read(read_func)
+collectd.register_read(read_func, interval=PLUGIN_INTERVAL)
@ -9,12 +9,7 @@ LoadPlugin python
   <Module "memory">
     Path "/proc/meminfo"
   </Module>
-#  Import "example"
-#  <Module "example">
-#    Data "1 50"
-#  </Module>
-#  Import "interface"
-#  Import "ntpq"
+  Import "ntpq"
   LogTraces = true
   Encoding "utf-8"
 </Plugin>
236 pylint.rc Executable file
@ -0,0 +1,236 @@
|
|||||||
|
[MASTER]
|
||||||
|
# Specify a configuration file.
|
||||||
|
rcfile=pylint.rc
|
||||||
|
|
||||||
|
# Python code to execute, usually for sys.path manipulation such as pygtk.require().
|
||||||
|
#init-hook=
|
||||||
|
|
||||||
|
# Add files or directories to the blacklist. They should be base names, not paths.
|
||||||
|
ignore=unit_test.py
|
||||||
|
|
||||||
|
# Pickle collected data for later comparisons.
|
||||||
|
persistent=yes
|
||||||
|
|
||||||
|
# List of plugins (as comma separated values of python modules names) to load,
|
||||||
|
# usually to register additional checkers.
|
||||||
|
load-plugins=
|
||||||
|
|
||||||
|
|
||||||
|
[MESSAGES CONTROL]
|
||||||
|
# Enable the message, report, category or checker with the given id(s). You can
|
||||||
|
# either give multiple identifier separated by comma (,) or put this option
|
||||||
|
# multiple time.
|
||||||
|
#enable=
|
||||||
|
|
||||||
|
# Disable the message, report, category or checker with the given id(s). You
|
||||||
|
# can either give multiple identifier separated by comma (,) or put this option
|
||||||
|
# multiple time (only on the command line, not in the configuration file where
|
||||||
|
# it should appear only once).
|
||||||
|
# E0203 access-member-before-definition
|
||||||
|
# E0602 undefined-variable
|
||||||
|
# E1101 no-member
|
||||||
|
# E1205 logging-too-many-args
|
||||||
|
# fixme
|
||||||
|
# W0102 dangerous-default-value
|
||||||
|
# W0105 pointless-string-statement
|
||||||
|
# W0106 expression-not-assigned
|
||||||
|
# W0201 attribute-defined-outside-init
|
||||||
|
# W0212 protected-access
|
||||||
|
# W0221 arguments-differ
|
||||||
|
# W0231 super-init-not-called
|
||||||
|
# W0235 useless-super-delegation
|
||||||
|
# W0611 unused-import
|
||||||
|
# W0612 Unused variable warning
|
||||||
|
# W0613 Unused argument warning
|
||||||
|
# W0621 redefined-outer-name
|
||||||
|
# W0622 redefined-builtin
|
||||||
|
# W0702 bare-except
|
||||||
|
# W0703 broad except warning
|
||||||
|
# W1201 logging-not-lazy
|
||||||
|
# W1401 anomalous-backslash-in-string
|
||||||
|
disable=C, R, E0203, E0602, E1101, E1205, fixme,
|
||||||
|
W0102, W0105, W0106, W0201, W0212, W0221, W0231, W0235,
|
||||||
|
W0611, W0612, W0613, W0621, W0622, W0702, W0703, W1201, W1401
|
||||||
|
|
||||||
|
|
||||||
|
[REPORTS]
|
||||||
|
# Set the output format. Available formats are text, parseable, colorized, msvs
|
||||||
|
# (visual studio) and html
|
||||||
|
output-format=text
|
||||||
|
|
||||||
|
# Put messages in a separate file for each module / package specified on the
|
||||||
|
# command line instead of printing them on stdout. Reports (if any) will be
|
||||||
|
# written in a file name "pylint_global.[txt|html]".
|
||||||
|
files-output=no
|
||||||
|
|
||||||
|
# Tells whether to display a full report or only the messages
|
||||||
|
reports=no
|
||||||
|
|
||||||
|
# Python expression which should return a note less than 10 (10 is the highest
|
||||||
|
# note). You have access to the variables errors warning, statement which
|
||||||
|
# respectively contain the number of errors / warnings messages and the total
|
||||||
|
# number of statements analyzed. This is used by the global evaluation report
|
||||||
|
# (RP0004).
|
||||||
|
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
|
||||||
|
|
||||||
|
|
||||||
|
[SIMILARITIES]
|
||||||
|
# Minimum lines number of a similarity.
|
||||||
|
min-similarity-lines=4
|
||||||
|
|
||||||
|
# Ignore comments when computing similarities.
|
||||||
|
ignore-comments=yes
|
||||||
|
|
||||||
|
# Ignore docstrings when computing similarities.
|
||||||
|
ignore-docstrings=yes
|
||||||
|
|
||||||
|
|
||||||
|
[FORMAT]
|
||||||
|
# Maximum number of characters on a single line.
|
||||||
|
max-line-length=85
|
||||||
|
|
||||||
|
# Maximum number of lines in a module
|
||||||
|
max-module-lines=1000
|
||||||
|
|
||||||
|
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 tab).
|
||||||
|
indent-string=' '
|
||||||
|
|
||||||
|
|
||||||
|
[TYPECHECK]
|
||||||
|
# Tells whether missing members accessed in mixin class should be ignored. A
|
||||||
|
# mixin class is detected if its name ends with "mixin" (case insensitive).
|
||||||
|
ignore-mixin-members=yes
|
||||||
|
|
||||||
|
# List of classes names for which member attributes should not be checked
|
||||||
|
# (useful for classes with attributes dynamically set).
|
||||||
|
ignored-classes=SQLObject,sqlalchemy,scoped_session,_socketobject
|
||||||
|
|
||||||
|
# List of members which are set dynamically and missed by pylint inference
|
||||||
|
# system, and so shouldn't trigger E0201 when accessed. Python regular
|
||||||
|
# expressions are accepted.
|
||||||
|
generated-members=REQUEST,acl_users,aq_parent
|
||||||
|
|
||||||
|
|
||||||
|
[BASIC]
|
||||||
|
# List of builtins function names that should not be used, separated by a comma
|
||||||
|
bad-functions=map,filter,apply,input
|
||||||
|
|
||||||
|
# Regular expression which should only match correct module names
|
||||||
|
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
|
||||||
|
|
||||||
|
# Regular expression which should only match correct module level names
|
||||||
|
const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
|
||||||
|
|
||||||
|
# Regular expression which should only match correct class names
|
||||||
|
class-rgx=[A-Z_][a-zA-Z0-9]+$
|
||||||
|
|
||||||
|
# Regular expression which should only match correct function names
|
||||||
|
function-rgx=[a-z_][a-z0-9_]{2,30}$
|
||||||
|
|
||||||
|
# Regular expression which should only match correct method names
|
||||||
|
method-rgx=[a-z_][a-z0-9_]{2,30}$
|
# Regular expression which should only match correct instance attribute names
attr-rgx=[a-z_][a-z0-9_]{2,30}$

# Regular expression which should only match correct argument names
argument-rgx=[a-z_][a-z0-9_]{2,30}$

# Regular expression which should only match correct variable names
variable-rgx=[a-z_][a-z0-9_]{2,30}$

# Regular expression which should only match correct list comprehension /
# generator expression variable names
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$

# Good variable names which should always be accepted, separated by a comma
good-names=i,j,k,ex,Run,_

# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata

# Regular expression which should only match functions or classes name which do
# not require a docstring
no-docstring-rgx=__.*__


[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,XXX,TODO


[VARIABLES]
# Tells whether we should check for unused import in __init__ files.
init-import=no

# A regular expression matching the beginning of the name of dummy variables
# (i.e. not used).
dummy-variables-rgx=_|dummy

# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
additional-builtins=


[IMPORTS]
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=regsub,string,TERMIOS,Bastion,rexec

# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled)
import-graph=

# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled)
ext-import-graph=

# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled)
int-import-graph=


[DESIGN]
# Maximum number of arguments for function / method
max-args=5

# Argument names that match this expression will be ignored. Default to name
# with leading underscore
ignored-argument-names=_.*

# Maximum number of locals for function / method body
max-locals=15

# Maximum number of return / yield for function / method body
max-returns=6

# Maximum number of branch for function / method body
max-branchs=12

# Maximum number of statements in function / method body
max-statements=50

# Maximum number of parents for a class (see R0901).
max-parents=7

# Maximum number of attributes for a class (see R0902).
max-attributes=7

# Minimum number of public methods for a class (see R0903).
min-public-methods=2

# Maximum number of public methods for a class (see R0904).
max-public-methods=20


[CLASSES]
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,__new__,setUp

# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls


[EXCEPTIONS]
# Exceptions that will emit a warning when being caught. Defaults to
# "Exception"
overgeneral-exceptions=Exception
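Taken together, the naming regexes and the [DESIGN] limits above describe the conventions the new pylint run checks for. As a rough, hypothetical illustration (none of these names come from the repo), a helper that stays within the argument/variable regexes, the good-names list, and max-args=5 could look like:

    # Hypothetical sketch only: argument and variable names are lowercase,
    # 3 to 31 characters (per the regexes above), "_" is accepted via
    # good-names, and the argument count stays at or below max-args=5.
    def parse_entry(line, sep=",", strict=False):
        key, _, value = line.partition(sep)   # "_" as a throwaway name
        if strict and not value:
            raise ValueError("missing value in %r" % line)
        return key.strip(), value.strip()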
@@ -6,8 +6,3 @@ stx-integ Release Notes
    :maxdepth: 2

    unreleased
-
-Search
-------
-
-:ref:`search`
@@ -207,8 +207,8 @@ function print_help()
     echo "collect --all --start-date 20150101 ... logs dated on and after Jan 1 2015 from all hosts"
     echo "collect --all --start-date 20151101 --end-date 20160201 ... logs dated between Nov 1, 2015 and Feb 1 2016 from all hosts"
     echo "collect --start-date 20151101 --end-date 20160201 ... only logs dated between Nov 1, 2015 and Feb 1 2016 for current host"
-    echo "collect --list controller-0 compute-0 storage-0 ... all logs from specified host list"
-    echo "collect --list controller-0 compute-1 --end-date 20160201 ... only logs before Nov 1, 2015 for host list"
+    echo "collect --list controller-0 worker-0 storage-0 ... all logs from specified host list"
+    echo "collect --list controller-0 worker-1 --end-date 20160201 ... only logs before Nov 1, 2015 for host list"
     echo "collect --list controller-1 storage-0 --start-date 20160101 ... only logs after Jan 1 2016 for controller-1 and storage-0"
     echo ""
     exit 0
@@ -164,8 +164,8 @@ function collect_extra()
     delimiter ${LOGFILE} "pstree --arguments --ascii --long --show-pids"
     pstree --arguments --ascii --long --show-pids >> ${LOGFILE}

-    # Collect process, thread and scheduling info (compute subfunction only)
-    # (also gets process 'affinity' which is useful on computes;
+    # Collect process, thread and scheduling info (worker subfunction only)
+    # (also gets process 'affinity' which is useful on workers;
     which ps-sched.sh >/dev/null 2>&1
     if [ $? -eq 0 ]; then
         delimiter ${LOGFILE} "ps-sched.sh"
@@ -226,7 +226,7 @@ function collect_extra()
         facter >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
     fi

-    if [[ "$nodetype" == "compute" || "$subfunction" == *"compute"* ]] ; then
+    if [[ "$nodetype" == "worker" || "$subfunction" == *"worker"* ]] ; then
         delimiter ${LOGFILE} "topology"
         topology >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
     fi
@@ -323,18 +323,18 @@ function collect_extra()
         delimiter ${LOGFILE} "targetcli sessions detail"
         targetcli sessions detail >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}

-    elif [[ "$nodetype" == "compute" || "$subfunction" == *"compute"* ]] ; then
-        # Compute - iSCSI initiator information
+    elif [[ "$nodetype" == "worker" || "$subfunction" == *"worker"* ]] ; then
+        # Worker - iSCSI initiator information
         collect_dir=${EXTRA_DIR}/iscsi_initiator_info
         mkdir -p ${collect_dir}
         cp -rf /run/iscsi-cache/nodes/* ${collect_dir}
         find ${collect_dir} -type d -exec chmod 750 {} \;

-        # Compute - iSCSI initiator active sessions
+        # Worker - iSCSI initiator active sessions
         delimiter ${LOGFILE} "iscsiadm -m session"
         iscsiadm -m session >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}

-        # Compute - iSCSI udev created nodes
+        # Worker - iSCSI udev created nodes
         delimiter ${LOGFILE} "ls -la /dev/disk/by-path | grep \"iqn\""
         ls -la /dev/disk/by-path | grep "iqn" >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}
     fi
@@ -44,9 +44,9 @@ iptables -L -v -x -n -t mangle >> ${LOGFILE} 2>>${COLLECT_ERROR_LOG}


 ###############################################################################
-# Only Compute
+# Only Worker
 ###############################################################################
-if [[ "$nodetype" = "compute" || "$subfunction" == *"compute"* ]] ; then
+if [[ "$nodetype" = "worker" || "$subfunction" == *"worker"* ]] ; then
     NAMESPACES=($(ip netns))
     for NS in ${NAMESPACES[@]}; do
         delimiter ${LOGFILE} "${NS}"
@@ -17,9 +17,9 @@ LOGFILE="${extradir}/${SERVICE}.info"


 ###############################################################################
-# Only Compute Nodes
+# Only Worker Nodes
 ###############################################################################
-if [[ "$nodetype" == "compute" || "$subfunction" == *"compute"* ]] ; then
+if [[ "$nodetype" == "worker" || "$subfunction" == *"worker"* ]] ; then

    if [[ "$vswitch_type" == *ovs* ]]; then
        echo "${hostname}: OVS Info ..........: ${LOGFILE}"
@@ -76,7 +76,7 @@ if [ -e ${PLATFORM_CONF} ] ; then
    source ${PLATFORM_CONF}
 fi

-if [ "${nodetype}" != "controller" -a "${nodetype}" != "compute" -a "${nodetype}" != "storage" ] ; then
+if [ "${nodetype}" != "controller" -a "${nodetype}" != "worker" -a "${nodetype}" != "storage" ] ; then
    logger -t ${COLLECT_TAG} "could not identify nodetype ($nodetype)"
    exit $FAIL_NODETYPE
 fi
@@ -16,8 +16,10 @@ import logging
 from six.moves import configparser
 import itertools
 import six
-from multiprocessing import Process, cpu_count
-from subprocess import Popen, PIPE
+from multiprocessing import Process
+from multiprocessing import cpu_count
+from subprocess import Popen
+from subprocess import PIPE
 from collections import OrderedDict
 from six.moves import input

@@ -482,7 +482,7 @@ def list_to_range(L=[]):
     """ Convert a list into a string of comma separate ranges.
     E.g., [1,2,3,8,9,15] is converted to '1-3,8-9,15'
     """
-    G = (list(x) for _, x in groupby(enumerate(L), lambda (i, x): i - x))
+    G = (list(x) for _, x in groupby(enumerate(L), lambda i_x: i_x[0] - i_x[1]))
     return ",".join(
         "-".join(map(str, (g[0][1], g[-1][1])[:len(g)])) for g in G)

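The key-function change above is one of several that remove Python 2 tuple parameter unpacking, which Python 3 dropped (PEP 3113). A minimal runnable sketch of the same pattern, using made-up data:

    from itertools import groupby

    values = [1, 2, 3, 8, 9, 15]

    # Python 2 only: lambda (i, x): i - x unpacked the (index, value) pair in
    # the signature. The portable form takes one argument and indexes into it.
    groups = [list(g) for _, g in
              groupby(enumerate(values), lambda i_x: i_x[0] - i_x[1])]
    ranges = ",".join("-".join(map(str, (g[0][1], g[-1][1])[:len(g)]))
                      for g in groups)
    print(ranges)  # prints '1-3,8-9,15'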
@@ -505,7 +505,8 @@ def timeout_handler(signum, frame):
     raise TimeoutError('timeout')


-def libvirt_domain_info_worker((host)):
+def libvirt_domain_info_worker(tuple_hosts):
+    (host) = tuple_hosts
     pid = os.getpid()
     active_pids.update({pid: (host, time.time())})
     error = None
@@ -519,11 +520,12 @@ def libvirt_domain_info_worker((host)):
     return (host, domain, topology, time.time(), error)


-def do_libvirt_domain_info((host)):
+def do_libvirt_domain_info(tuple_hosts):
     """
     Connect to libvirt for specified host, and retrieve per-domain information
     including cpu affinity per vcpu.
     """
+    (host) = tuple_hosts
     domains = {}
     topology = {}
     if not host:
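The def f((host)) signatures removed here are the same Python 2 syntax at the function level; the replacement takes one positional argument and unpacks it in the body, so callers that pass a single value or tuple (for example, multiprocessing workers) stay unchanged. A small self-contained sketch with hypothetical worker logic:

    from multiprocessing import Pool

    # Hypothetical stand-in for a per-host worker; the real code queries libvirt.
    def domain_info_worker(tuple_host):
        (host, timeout) = tuple_host   # unpack in the body, not in the signature
        return (host, 'ok', timeout)

    if __name__ == '__main__':
        work = [('host-0', 10), ('host-1', 10)]
        pool = Pool(2)
        try:
            print(pool.map(domain_info_worker, work))
        finally:
            pool.close()
            pool.join()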
@@ -899,7 +901,7 @@ def print_all_tables(tenants=None,
               'memory', 'U:memory', 'A:mem_4K', 'A:mem_2M', 'A:mem_1G']:
         pt.align[C] = 'r'
     for host_name, H in sorted(hypervisors.items(),
-                               key=lambda (k, v): (natural_keys(k))):
+                               key=lambda k_v1: (natural_keys(k_v1[0]))):
         A = list(agg_h[host_name].keys())

         try:
@@ -1020,7 +1022,7 @@ def print_all_tables(tenants=None,
     print
     print('LOGICAL CPU TOPOLOGY (compute hosts):')
     for host_name, topology in sorted(topologies.items(),
-                                      key=lambda (k, v): (natural_keys(k))):
+                                      key=lambda k_v2: (natural_keys(k_v2[0]))):
         H = hypervisors[host_name]
         try:
             topology_idx = topologies_idx[host_name]
@@ -1084,7 +1086,7 @@ def print_all_tables(tenants=None,
     print
     print('LOGICAL CPU TOPOLOGY (compute hosts):')
     for host_name, topology in sorted(topologies.items(),
-                                      key=lambda (k, v): (natural_keys(k))):
+                                      key=lambda k_v3: (natural_keys(k_v3[0]))):
         H = hypervisors[host_name]
         try:
             topology_idx = topologies_idx[host_name]
@@ -1161,10 +1163,10 @@ def print_all_tables(tenants=None,
     for C in ['in_libvirt']:
         pt.align[C] = 'c'
     for _, S in sorted(servers.items(),
-                       key=lambda (k, v): (natural_keys(v.host),
-                                           v.server_group,
-                                           v.instance_name)
-                       if (v.host is not None) else 'None'
+                       key=lambda k_v4: (natural_keys(k_v4[1].host),
+                                         k_v4[1].server_group,
+                                         k_v4[1].instance_name)
+                       if (k_v4[1].host is not None) else 'None'
                        ):
         if S.server_group is not None and S.server_group:
             match = re_server_group.search(S.server_group)
@@ -1257,9 +1259,9 @@ def print_all_tables(tenants=None,
     for C in ['in_nova']:
         pt.align[C] = 'c'
     for host, D in sorted(domains.items(),
-                          key=lambda (k, v): (natural_keys(k))):
+                          key=lambda k_v5: (natural_keys(k_v5[0]))):
         for _, S in sorted(D.items(),
-                           key=lambda (k, v): (v['name'])):
+                           key=lambda k_v: (k_v[1]['name'])):
             in_nova = True if S['uuid'] in servers else False
             pt.add_row(
                 [S['uuid'],
@@ -1292,7 +1294,7 @@ def print_all_tables(tenants=None,
                 ])
     pt.align = 'l'
     for _, M in sorted(migrations.items(),
-                       key=lambda (k, v): (k)):
+                       key=lambda k_v6: (k_v6[0])):
         pt.add_row(
             [M.instance_uuid,
              M.status,
@@ -1328,7 +1330,7 @@ def print_all_tables(tenants=None,
               'rxtx_factor']:
         pt.align[C] = 'r'
     for _, F in sorted(flavors.items(),
-                       key=lambda (k, v): (k)):
+                       key=lambda k_v7: (k_v7[0])):
         if F.id in flavors_in_use:
             pt.add_row(
                 [F.id,
@@ -1362,7 +1364,7 @@ def print_all_tables(tenants=None,
     for C in ['id', 'min_disk', 'min_ram', 'status']:
         pt.align[C] = 'r'
     for _, I in sorted(images.items(),
-                       key=lambda (k, v): (k)):
+                       key=lambda k_v8: (k_v8[0])):
         if I.id in images_in_use:
             pt.add_row(
                 [I.id,
@@ -1388,7 +1390,7 @@ def print_all_tables(tenants=None,
             ])
     pt.align = 'l'
     for _, S in sorted(server_groups.items(),
-                       key=lambda (k, v): (k)):
+                       key=lambda k_v9: (k_v9[0])):
         if S.id in server_groups_in_use:
             tenant = tenants[S.project_id].name
             pt.add_row(
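All of the sorted(...) key rewrites above follow that same single-argument pattern. Where the key is simply the dictionary key or a field of the value (no natural_keys transform involved), operator.itemgetter is an equivalent, Python 3-friendly alternative; a tiny sketch with made-up data:

    from operator import itemgetter

    flavors = {'m1.small': {'vcpus': 1}, 'm1.large': {'vcpus': 4}}

    # key=lambda k_v: k_v[0] and itemgetter(0) sort on the same thing here.
    by_name = sorted(flavors.items(), key=itemgetter(0))
    print([name for name, _ in by_name])   # prints ['m1.large', 'm1.small']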
tox.ini (36 lines changed)
@@ -2,6 +2,7 @@
 envlist = linters
 minversion = 2.3
 skipsdist = True
+stxdir = {toxinidir}/..

 [testenv]
 install_command = pip install -U {opts} {packages}
@@ -74,7 +75,6 @@ commands =
 # H233: Python 3.x incompatible use of print operator
 # H237: module exception is removed in Python 3
 # H238: old style class declaration, use new style
-# H301: one import per line
 # H306: imports not in alphabetical order
 # H401: docstring should not start with a space
 # H404: multi line docstring should start without a leading new line
@@ -94,7 +94,7 @@ commands =
 # F841 local variable '_alarm_state' is assigned to but never used
 ignore = E101,E121,E123,E124,E125,E126,E127,E128,E201,E202,E203,E211,E221,E222,E225,E226,E231,E251,E261,E265,E266,
     E302,E303,E305,E402,E501,E711,E722,E741,E999,
-    H101,H102,H104,H201,H238,H233,H237,H301,H306,H401,H404,H405,
+    H101,H102,H104,H201,H238,H233,H237,H306,H401,H404,H405,
     W191,W291,W391,W503,
     B001,B007,B301,B306,
     F401,F841
@@ -110,6 +110,38 @@ deps =
 commands =
     flake8

+[testenv:pylint]
+basepython = python2.7
+deps = -r{toxinidir}/test-requirements.txt
+       -e{[tox]stxdir}/stx-update/tsconfig/tsconfig
+       -e{[tox]stxdir}/stx-fault/fm-api
+       -e{[tox]stxdir}/stx-config/sysinv/sysinv/sysinv
+       -e{[tox]stxdir}/stx-config/sysinv/cgts-client/cgts-client
+       docutils
+       keyring
+       libvirt-python
+       oslo_i18n
+       oslo_log
+       oslo_messaging
+       oslo_service
+       python-cephclient
+       python-cinderclient
+       python-glanceclient
+       python-keystoneclient
+       python-novaclient
+       SQLAlchemy
+       retrying
+       python-daemon==2.1.2
+       pylint
+
+# There are currently 5 python modules with a setup.py file
+commands = pylint --rcfile=./pylint.rc \
+           ceph/ceph-manager/ceph-manager/ceph_manager \
+           logging/logmgmt/logmgmt/logmgmt \
+           tools/storage-topology/storage-topology/storage_topology \
+           tools/vm-topology/vm-topology/vm_topology \
+           utilities/platform-util/platform-util/platform_util
+
 [testenv:venv]
 basepython = python3
 commands = {posargs}
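With this environment defined, a local `tox -e pylint` run uses the pylint.rc settings shown earlier (the commands line points at --rcfile=./pylint.rc), and the stxdir variable added at the top of tox.ini is what the -e{[tox]stxdir}/... dependency paths resolve against, so the sibling StarlingX repositories are presumably checked out beside this one.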
@@ -3,14 +3,23 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 #
-from ctypes import cdll,util,c_bool,c_int,c_char_p,pointer,create_string_buffer
+from ctypes import cdll
+from ctypes import util
+from ctypes import c_bool
+from ctypes import c_int
+from ctypes import c_char_p
+from ctypes import pointer
+from ctypes import create_string_buffer
 import logging
 import os
-from platform_util.license import constants,exception
+from platform_util.license import constants
+from platform_util.license import exception
 import re
 import sys
 from sysinv.common import constants as sysinv_constants
-from tsconfig.tsconfig import system_type,system_mode, SW_VERSION
+from tsconfig.tsconfig import system_type
+from tsconfig.tsconfig import system_mode
+from tsconfig.tsconfig import SW_VERSION

 LOG = logging.getLogger(__name__)
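The import splitting here and in the vm_topology imports above lines up with dropping H301 (one import per line) from the flake8 ignore list in tox.ini; a minimal before/after sketch using an unrelated standard-library module:

    # Flagged once H301 (one import per line) is enforced:
    #     from collections import OrderedDict, defaultdict
    # Compliant form:
    from collections import OrderedDict
    from collections import defaultdict

    counts = defaultdict(int)
    counts['worker'] += 1
    print(OrderedDict(sorted(counts.items())))   # OrderedDict([('worker', 1)])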