Introduce an application to deploy Rook Ceph, with support for Helm v3

Rook is enabled with the following procedure:
1. Deploy the system without adding a Ceph storage backend.
2. Update the OSD info with a Helm override:
    $ system helm-override-update rook-ceph-apps rook-ceph --values value.yaml
3. Apply the rook-ceph application:
    $ system application-apply rook-ceph-apps

Sample value.yaml to provision an OSD on device sdb of host 'controller-0':
cluster:
  storage:
    nodes:
    - devices:
      - config:
          journalSizeMB: 1024
          storeType: bluestore
        name: sdb
      name: controller-0
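
For reference, a typical end-to-end sequence (the tarball path follows the
package's install location; the version suffix is illustrative, and some
releases also expect an explicit namespace on the helm-override commands):
    $ system application-upload /usr/local/share/applications/helm/rook-ceph-apps-1.0-<patch>.tgz
    $ system helm-override-update rook-ceph-apps rook-ceph --values value.yaml
    $ system helm-override-show rook-ceph-apps rook-ceph
    $ system application-apply rook-ceph-apps
    $ system application-list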

Story: 2005527
Task: 39214

Depends-On: https://review.opendev.org/#/c/713084/

Change-Id: Ie8f43082a5022b4e3507f0ac8fe0a2654e2a3302
Signed-off-by: Martin, Chen <haochuan.z.chen@intel.com>

.zuul.yaml

@@ -0,0 +1,185 @@
---
- project:
    check:
      jobs:
        - openstack-tox-linters
        - k8sapp-rook-ceph-tox-py27
        - k8sapp-rook-ceph-tox-py36
        - k8sapp-rook-ceph-tox-flake8
        - k8sapp-rook-ceph-tox-pylint
        - k8sapp-rook-ceph-tox-bandit
    gate:
      jobs:
        - openstack-tox-linters
        - k8sapp-rook-ceph-tox-py27
        - k8sapp-rook-ceph-tox-py36
        - k8sapp-rook-ceph-tox-flake8
        - k8sapp-rook-ceph-tox-pylint
        - k8sapp-rook-ceph-tox-bandit
    post:
      jobs:
        - stx-rook-ceph-apps-upload-git-mirror

- job:
    name: k8sapp-rook-ceph-tox-py27
    parent: tox
    description: |
      Run py27 test for k8sapp_rook_ceph
    nodeset: ubuntu-xenial
    required-projects:
      - starlingx/config
      - starlingx/fault
      - starlingx/update
      - starlingx/utilities
    files:
      - python-k8sapp-rook/*
    vars:
      tox_envlist: py27
      tox_extra_args: -c python-k8sapp-rook/k8sapp_rook/tox.ini

- job:
    name: k8sapp-rook-ceph-tox-py36
    parent: tox
    description: |
      Run py36 test for k8sapp_rook_ceph
    nodeset: ubuntu-bionic
    required-projects:
      - starlingx/config
      - starlingx/fault
      - starlingx/update
      - starlingx/utilities
    files:
      - python-k8sapp-rook/*
    vars:
      tox_envlist: py36
      tox_extra_args: -c python-k8sapp-rook/k8sapp_rook/tox.ini

- job:
    name: k8sapp-rook-ceph-tox-flake8
    parent: tox
    description: |
      Run flake8 test for k8sapp_rook_ceph
    files:
      - python-k8sapp-rook/*
    vars:
      tox_envlist: flake8
      tox_extra_args: -c python-k8sapp-rook/k8sapp_rook/tox.ini

- job:
    name: k8sapp-rook-ceph-tox-pylint
    parent: tox
    description: |
      Run pylint test for k8sapp_rook_ceph
    required-projects:
      - starlingx/config
      - starlingx/fault
      - starlingx/update
      - starlingx/utilities
    files:
      - python-k8sapp-rook/*
    vars:
      tox_envlist: pylint
      tox_extra_args: -c python-k8sapp-rook/k8sapp_rook/tox.ini

- job:
    name: k8sapp-rook-ceph-tox-bandit
    parent: tox
    description: |
      Run bandit test for k8sapp_rook_ceph
    files:
      - python-k8sapp-rook/*
    vars:
      tox_envlist: bandit
      tox_extra_args: -c python-k8sapp-rook/k8sapp_rook/tox.ini

- job:
    name: stx-rook-ceph-apps-upload-git-mirror
    parent: upload-git-mirror
    description: >
      Mirrors opendev.org/starlingx/rook-ceph to
      github.com/starlingx/rook-ceph
    vars:
      git_mirror_repository: starlingx/rook-ceph
    secrets:
      - name: git_mirror_credentials
        secret: stx-rook-ceph-apps-github-secret
        pass-to-parent: true

- secret:
    name: stx-rook-ceph-apps-github-secret
    data:
      user: git
      host: github.com
      # yamllint disable-line rule:line-length
      host_key: github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
      ssh_key: !encrypted/pkcs1-oaep
- RBkNQekpxNA4MtCrQg5rJBrJ0+39cqupC07+3y4ba1cu/Ub4/psRGSVvanCjB2Jyh+62W
Vw9rQVkqSQ+9RXO+6aZr7sfptPGnTq7oYO52NhsM2wz6GsxJYUQPTwZkuuddWEJr6j70S
FGbk4DJGI2xhQ1aLhdpKQIsVu5ullpgCQwBc5XI5WaWEae7SEcBJc0CgDat8Necsi/5Fg
+2LHMZwGAVChvpQj7MqAr6c1CkDCgQ9R8w7JvgNG1q46pB5g5oLzVjHH+EmhwlkDAjLH6
PCOPaTBjKh6a3kFNBvAPBPu1ufnNK/tXOOZUHx81UTKTmy39hnYGA378cDfYfDWYjgUZQ
WGrENjktBAIFqyh7cGrIa4WreCq/YYbsOmgIVsxB2VmRoZz1pJHw+ciQ2aWW5IN5H0rfJ
cGIPILChxsbf6Lp1AZ3UScE1jQZginedoCfHSF4OFYicnvXaDGc8kz5umDBMhgbs38CBD
42J6SGQiY/jylFZl1Paq7SSLm5VwvMxTHfBkg+cd8qxYscAWTRSKaealZJy6qZBuOD1Hr
7cbWHiRQ/Kr+Txuaa9rNjdiZYftxqbLRij2p2AwVguEAwmJ2UaWpJkyJef3EoJYCLxCws
oEzcijEkrOKPWMrYB3SIemV3/xZIE6YOmBl2uPiCVEhMSJ3Zp+DQ3R0Vbgak1c=
- SqjXh5ylJEFSDVLDlK5vDs9t1RQyVYiKJrtEzi9c2mX69nQ0SHbIUJhzXj8/pDJnvtvZc
aPhAXAy1GJtlQVSOEkFfgtXfcFkoaSoafrGW5pMlDetqnir/Ta1yGjtxL+qN0g5y+ajBP
nEzRN8YwSOfFNi3MnZmZJJWTf9VtTPKvXv+tldK50WTsVTddeAptqNumUsVEXshiKYQrd
x3CIdqWVrZ4FNA7IA6v+oen5H2bmuYghrib/KwoPBoa2mK4QcfPixp01bNVycE1/Fy2Aa
G9Uqr1dOr+o8i618RVddPiogLtee9ofHu8StsGaqsuArh+1Ln8QwYNIGkk4XR1vFRp6Ad
6Be1mJodwt9M68GpnRxgwdPlaj9k8mp9twh7bfXhdPkkevb9IzJsdm74GitJM5/Gj2SZG
WjLU3d2enzpAiSs/ByLb4MIw8mJvuC3XEGVnQjCGKQ2i6qGzn1Ev+ynK8PZa8j/xuI3Nj
G/C+6hSEB/wXPAN23EvBG+Ow6NC0QaBAGhHEALFMYThChTxn5Ae2ECm10v1d8OydfMFLj
mzrQvk0vetJA+ao//K4Z34PIBVRKocez4LYyUOot9+CyZiS+NsYIxMN6llHrKpGCpftgE
E2VuEKQLuz5SHZuA2gxxvqiYGuoi9z6AJyU9kbdhc1xpgjevJeDDtJ/wS8u9jc=
- PSmeLpCC4m+kSH+pZVL71HSBU0xspfhAntNdP01sK6UhdFMd/ts5dObVkEjRBQIFPJ2hD
8AGSrjHOQ7jEfq2zbsxHtlYFvToqAZ+dq9qh42qd3k36BUu67QiUAsd+IEWbtLTf+tmRM
O2zu2d+nIj8n+hpkziKXQjXdI2NaA4KT0n0zNHNdnFznDyPKYvBN0/PXKlV4NSrDnGrQo
adiZ8fSomYoDka/2jBA94m2QpkyyWVPIoE2FyBAnKBHH3mSaehIuY8d66uC3Qee1A7JIl
Yt3uKVmYqW971lKoz1wijktn738uZrsdEXGjF5VgXyVe94xoIIQ9mNQn6CMAhRCF3ccJb
ytlIe9lQCl+2+dRRd5d83viU+Nnh68UwQgl4gfrSJAkd0aHId5ZFJ9ZEpUvk5jr19oab3
jIrde7ZUtIdupxlsaiwJRhro2c1h6kABY3gbngUipQ53UyaMs/XwAr7yXFavFIp/BOPlo
3w9nq7NbAWdaTDx+kiH6wNlPQXkw3DUg2z/7nNmx8l1eUrbupXMgsGV/rhLfYMIpYlLW6
KNfouNZtW0pSKeGUsPGVtYw8gK8WWWUt7Uc0Kkg2Aqj2oFCQzObIHLbcgIBJhEMynhhLC
ivuDFzqSgHiVvdFygGUlBgutu7Vr+HHh+BeFX8m1kxIhID1HnD5Hl8MJVpMISU=
- BN5eCwoftd9I5C3HcJRymwpnVIGui2K8xYt7RcQoUrXHC73aalogRYxRDvJQicZ2Jcrxu
Hc/6HlhPpsIB9nRxweGhgD6l6KFLIVDZIB5Bl/17X0VVDMHtedLZDhGf2Ft410Jq3QNbX
xREkdHSc6suFah8vJKqN32Hk35PQVnzq4My5zmNzkBci16ZcvIrZkULbunFE40X297nA0
WdAD/qOYY3uULwuFW1yteDh9zh+PwpB/n7eRSF57Ah0bJFGeRcLgBnSRmk65jGFs/fZH0
hHFNsnJi8q1RcsHisiBYcqI+H74q6JvqhNxu2ZVU3Sh9EQnU/d1gwWvXtX91GluIZCE6X
C3DXG0FnKdNbEji7BNY2yAX6X84dN25cL304k+tqVv2MTJHSPtQ/puiafV5y4mqocq0U0
box/1zaaMI/VTngAxGuNSixd/qW8c1VJkr/i5DseX0qaytCZO9R1UeBCHfiIs9pmNEuBR
dfXaRGuWafJjt0SKnLIIeFv9s6A2LkXEgcz4uF9DlmHJJfQaPT/QJClxAW64/7E7/e/2S
HYG+EE6TOVJhcw6db/7cPiPV/jmxSiOgRvdqvlUSBqLdKeaxPRiik/HFmvT2yJtn68HdR
U8r5MvQR4oyNQWfDd+9jeH3z1gNzDN3J/fk6LeEcjuFEWhtaA2+geqGmylBcNU=
- iCOm/PYRs7hQqs3/ROzUlYj8+zw9AqjjyVPatDqyi9n04mVn+S52elWBUkliyyPuON/f7
9cJBrbUmgg/jRw+OtIFMNr5+QpbqGBkLTf4+k8srPL8oFg0v4B1eXHS7IQB6CCktHWyGj
eEN2imXOF8d0zFLv6TPPY1mlv7C+6RH/WQiYOBfTHBDSGQtbluiyvxZw9XrtOsre/25b5
+k9qFVLsy9iwhILNQhXvpK+A1vGKixBujqvKluijtLDwB51MVqSh/PhWR0eEncxy/iocY
TDJmQNBqy6/AL/QkAsZTOs+txpg4oV4xA6TzT5m9fLe8ekrkkVq+aNJ9EoQm4gglBm4qs
JmXcuIXlzOe4jhUB6z22DbErEgcWy96Yr2fhoklRj+HMfCr0NNXKtPg8qKZJngZ+RQ+w8
+LpgMAoRGPw6JU+Cz+akPNryFxAXUq3J+GEpRl2D8MyPTcwo8rCqbVOPJcRriVr5cHK2K
AidM4KzzC8smkhRpv+MMSXz5T7YMl5KRBJr3pEk9rxPr551LJzz7SSMPv2RYBC78xO5Ow
8VJUSkvPygwcz4yp9uJz5wwUGNjXMpuqegmNpGydyrXSSGfhb7x0qw5cNhB5MvuPSrH1B
wI9XH7zIDTx9uu5uEaT1KX8ilPmBz+e4mJaY1O9HkQLAyWm130G7KwJHewO0Fs=
- f6FNZRR0oMLoDJNLJoMH7IpBbelYIvxH7Qudnb/4sEnojEnpdyjn3Hf4QVbyHyJ50YI6T
922Vu8dZhf6Siuta6un1bHB+gXlRm5GsJ6m7xaoAds569bp/KHGxtFvLnDqhiJ5Pli8Xu
U6HC57gdludUQxq+BiuHixB1AbEzZpbP61coozc3anAuMIVbpeAPdPzPnvDDKqnFUxrIH
mroDYUZWqy5aWXxNJbsQ8m6U8o+cMPrEng6twrUaKBIPWwTzqESq+8rWyFLOGzsjbqYVG
90gEkmUlFheVPEt240AWU8ghVjdA8vRqXQ7FCMuKxktmMHKhs9ok+cyNT1lZ7o1KqbWOq
1jJVL6m784BL3Hhg3ZPEdv26YmXYgxAwvidFBDN8uDGzIpvw/y45JofijERaTymKwXpxa
dYncc/qxmsJO7Ik0XBPI/V0wcE/LZiQcVcj3gYpNbVmA5yooPeayjc73r+LAWa2winbbN
TLTc1uT5ZXbCyQqurIvvAj4CpWDSej4IYY438TXgLSBWsv0KHi6kNoLgDs/kbpKjXQEqJ
4fMOhOC9q6KDShrq0xLtNqTvYFQRz8m+bM35e9a73Q3TVHVEJURzM5/vFvR39LCGhga3Q
T6sCjE794O++C4zAdmHZ9gjfqGSVln8wE5reXdmLicII4akY8jJzMwfYESovUw=
- eHBgHyQrh16uH1Ec8jv3V8TpI6CzcRdC1Jjo8mh0xaX0J6NVASdrPiPuzqDfzVveiwbRj
RW00ObXftd60GOGf8+kYAyDiNv1i3q1NLzPAC9kztD/cVW+Rf99nTd6/UBogL/LD+9FI0
SYfGwREdk8fk+sakYp5UxrvScEiFXusyyyU9ArTrH5sfXcBLB3ClsV4uNsY+fmcuimTMW
Sq9AZ1DOTIkD4IrdxUZbbQGRUXckXueul9k7UZisSFk2bJnJRgYZdpgQWvFOsYGgCg7gt
9BaINkxGsXqM7mXumSOYc5lBJu9CghxjUe73Pv72N88r6uXV425r40MtqDQH2q6Dz9woW
+olEwWL4mO9dQJt9r8aUVDnrcNli4A8eR8muUSOX5kX3SaSGbUfqfeMf2J2zWtbaIUY+n
E1JZL4b1wjAwBkH7MRSIe/W7wAZiwuDmLVbnXLBd1LiCjQ8iuQHVBURD76TXqDg48LirA
7iZPgDlkzC1PfllTkkF12tZ/Jg+5mXXd2ceeumc3giQaWenmqUay8rhQaIcAUSBA023c2
5IOYGSpfWjacJYl3S70/+8htKI189Sn4MZbBYFHJ6U87jDxluihsreSM5mllbdqpp5kaG
QZduODZO9ALffN9cUTmmLHLoueRyVIXizoQY0c7XIDxEMwkMGxtNS9Bt3R2zSs=

CONTRIBUTING.rst

@@ -0,0 +1,16 @@
If you would like to contribute to the development of OpenStack,
you must follow the steps in this page:

   https://docs.openstack.org/infra/manual/developers.html

Once those steps have been completed, changes to OpenStack
should be submitted for review via the Gerrit tool, following
the workflow documented at:

   https://docs.openstack.org/infra/manual/developers.html#development-workflow

Pull requests submitted through GitHub will be ignored.

Bugs should be filed in Launchpad:

   https://bugs.launchpad.net/starlingx

HACKING.rst

@@ -0,0 +1,17 @@
StarlingX Rook-Ceph-Armada-App Style Commandments
=================================================

- Step 1: Read the OpenStack style commandments
  https://docs.openstack.org/hacking/latest/
- Step 2: Read on

Rook-Ceph-Armada-App Specific Commandments
------------------------------------------

None so far

Running tests
-------------

The approach to running tests is to simply run the command ``tox``. This will
create virtual environments, populate them with dependencies and run all of
the tests that OpenStack CI systems run.

centos_build_layer.cfg

@@ -0,0 +1 @@
flock

centos_iso_image.inc

@@ -0,0 +1 @@
stx-rook-ceph

centos_pkg_dirs

@@ -0,0 +1,2 @@
stx-rook-ceph
python-k8sapp-rook

@@ -0,0 +1,3 @@
SRC_DIR="k8sapp_rook"
TIS_BASE_SRCREV=10c623509a68acad945d4e0c06a86b3e8486ad5b
TIS_PATCH_VER=PKG_GITREVCOUNT

@@ -0,0 +1,56 @@
%global pypi_name k8sapp-rook
%global sname k8sapp_rook
Name: python-%{pypi_name}
Version: 1.0
Release: %{tis_patch_ver}%{?_tis_dist}
Summary: StarlingX sysinv extensions: Rook Ceph K8S app
License: Apache-2.0
Source0: %{name}-%{version}.tar.gz
BuildArch: noarch
BuildRequires: python-setuptools
BuildRequires: python-pbr
BuildRequires: python2-pip
BuildRequires: python2-wheel
%description
StarlingX sysinv extensions: Rook Ceph K8S app
%prep
%setup
# Remove bundled egg-info
rm -rf %{pypi_name}.egg-info
%build
export PBR_VERSION=%{version}
%{__python2} setup.py build
%py2_build_wheel
%install
export PBR_VERSION=%{version}.%{tis_patch_ver}
export SKIP_PIP_INSTALL=1
%{__python2} setup.py install --skip-build --root %{buildroot}
mkdir -p ${RPM_BUILD_ROOT}/plugins
install -m 644 dist/*.whl ${RPM_BUILD_ROOT}/plugins/
%files
%{python2_sitelib}/%{sname}
%{python2_sitelib}/%{sname}-*.egg-info
%package wheels
Summary: %{name} wheels
%description wheels
Contains python wheels for %{name}
%files wheels
/plugins/*
%changelog
* Tue Jun 2 2020 Martin Chen <haochuan.z.chen@intel.com>
- Initial version

@@ -0,0 +1,35 @@
# Compiled files
*.py[co]
*.a
*.o
*.so
# Sphinx
_build
doc/source/api/
# Packages/installer info
*.egg
*.egg-info
dist
build
eggs
parts
var
sdist
develop-eggs
.installed.cfg
# Other
*.DS_Store
.stestr
.testrepository
.tox
.venv
.*.swp
.coverage
bandit.xml
cover
AUTHORS
ChangeLog
*.sqlite

@@ -0,0 +1,4 @@
[DEFAULT]
test_path=./k8sapp_rook/tests
top_dir=./k8sapp_rook
#parallel_class=True

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2020 Intel Corporation, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -0,0 +1,7 @@
k8sapp-rook-ceph
================
This project contains StarlingX Kubernetes application specific python plugins
for the rook ceph application. These plugins are required to
integrate the application into the StarlingX application framework and to
support the various StarlingX deployments.

@@ -0,0 +1,19 @@
#
# Copyright (c) 2020 Intel Corporation, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import yaml


class quoted_str(str):
    pass


# force strings to be single-quoted to avoid interpretation as numeric values
def quoted_presenter(dumper, data):
    return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style="'")


yaml.add_representer(quoted_str, quoted_presenter)
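
For illustration (not part of the file): once the representer is registered,
dumping a quoted_str forces the single-quoted style, so values such as '64'
survive as strings rather than being re-read as integers:

print(yaml.dump({'chunk_size': quoted_str('64')}))
# -> chunk_size: '64'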

@@ -0,0 +1,36 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2020 Intel Corporation, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# All Rights Reserved.
#
""" System inventory Armada manifest operator."""
from k8sapp_rook.helm.rook_ceph import RookCephHelm
from k8sapp_rook.helm.rook_ceph_provisioner import RookCephProvisionerHelm
from k8sapp_rook.helm.rook_operator import RookOperatorHelm
from sysinv.common import constants
from sysinv.helm import manifest_generic as generic
class RookCephArmadaManifestOperator(generic.GenericArmadaManifestOperator):

    APP = constants.HELM_APP_ROOK_CEPH
    ARMADA_MANIFEST = 'rook-ceph-manifest'

    CHART_GROUP_ROOK = 'starlingx-rook-charts'

    CHART_GROUPS_LUT = {
        RookOperatorHelm.CHART: CHART_GROUP_ROOK,
        RookCephHelm.CHART: CHART_GROUP_ROOK,
        RookCephProvisionerHelm.CHART: CHART_GROUP_ROOK,
    }

    CHARTS_LUT = {
        RookOperatorHelm.CHART: 'kube-system-rook-operator',
        RookCephHelm.CHART: 'kube-system-rook-ceph',
        RookCephProvisionerHelm.CHART: 'kube-system-rook-ceph-provisioner',
    }
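
For context, a sketch of how these lookup tables are consulted when a chart
is disabled (the call mirrors execute_manifest_updates in the helm plugins
below; the standalone framing is illustrative):

chart = RookCephHelm.CHART  # 'rook-ceph'
operator.chart_group_chart_delete(
    operator.CHART_GROUPS_LUT[chart],  # -> 'starlingx-rook-charts'
    operator.CHARTS_LUT[chart])        # -> 'kube-system-rook-ceph'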

@@ -0,0 +1,11 @@
#
# Copyright (c) 2020 Intel Corporation, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# Helm: Supported charts:
# These values match the names in the chart package's Chart.yaml
HELM_CHART_ROOK_OPERATOR = 'rook-operator'
HELM_CHART_ROOK_CEPH = 'rook-ceph'
HELM_CHART_ROOK_CEPH_PROVISIONER = 'rook-ceph-provisioner'

@@ -0,0 +1,19 @@
#
# Copyright (c) 2020 Intel Corporation, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import yaml


class quoted_str(str):
    pass


# force strings to be single-quoted to avoid interpretation as numeric values
def quoted_presenter(dumper, data):
    return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style="'")


yaml.add_representer(quoted_str, quoted_presenter)

@@ -0,0 +1,115 @@
#
# Copyright (c) 2018 Intel Corporation, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from k8sapp_rook.common import constants as app_constants

import socket

from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import utils as cutils

from sysinv.helm import base
from sysinv.helm import common


class RookCephHelm(base.BaseHelm):
    """Class to encapsulate helm operations for the rook-ceph chart"""

    CHART = app_constants.HELM_CHART_ROOK_CEPH
    SUPPORTED_NAMESPACES = base.BaseHelm.SUPPORTED_NAMESPACES + \
        [common.HELM_NS_STORAGE_PROVISIONER]

    SUPPORTED_APP_NAMESPACES = {
        constants.HELM_APP_ROOK_CEPH:
            base.BaseHelm.SUPPORTED_NAMESPACES +
            [common.HELM_NS_STORAGE_PROVISIONER],
    }

    def execute_manifest_updates(self, operator):
        # On application load this chart is enabled. Only disable if
        # specified by the user.
        if not self._is_enabled(operator.APP, self.CHART,
                                common.HELM_NS_STORAGE_PROVISIONER):
            operator.chart_group_chart_delete(
                operator.CHART_GROUPS_LUT[self.CHART],
                operator.CHARTS_LUT[self.CHART])

    def get_namespaces(self):
        return self.SUPPORTED_NAMESPACES

    def get_overrides(self, namespace=None):
        overrides = {
            common.HELM_NS_STORAGE_PROVISIONER: {
                'cluster': self._get_cluster_override(),
                'hook': self._get_hook_override(),
            }
        }

        if namespace in self.SUPPORTED_NAMESPACES:
            return overrides[namespace]
        elif namespace:
            raise exception.InvalidHelmNamespace(chart=self.CHART,
                                                 namespace=namespace)
        else:
            return overrides

    def _get_cluster_override(self):
        cluster = {
            'mon': {
                'count': self._get_mon_count(),
            },
        }
        return cluster

    def _get_mon_count(self):
        # change it with deployment configs:
        # AIO simplex/duplex have 1 mon, multi-node has 3 mons,
        # 2 controllers + first mon (and cannot reconfig)
        if cutils.is_aio_system(self.dbapi):
            return 1
        else:
            return 3

    def _get_hook_override(self):
        hook = {
            'cleanup': {
                'mon_hosts': self._get_mon_hosts(),
            },
            'duplexPreparation': self._get_duplex_preparation(),
        }
        return hook

    def _get_mon_hosts(self):
        ceph_mon_label = "ceph-mon-placement=enabled"
        mon_hosts = []

        hosts = self.dbapi.ihost_get_list()
        for h in hosts:
            labels = self.dbapi.label_get_by_host(h.uuid)
            for label in labels:
                if (ceph_mon_label ==
                        str(label.label_key) + '=' + str(label.label_value)):
                    mon_hosts.append(h.hostname.encode('utf8', 'strict'))

        return mon_hosts

    def _get_duplex_preparation(self):
        duplex = {
            'enable': cutils.is_aio_duplex_system(self.dbapi)
        }

        if cutils.is_aio_duplex_system(self.dbapi):
            hosts = self.dbapi.ihost_get_by_personality(
                constants.CONTROLLER)
            for host in hosts:
                if host['hostname'] == socket.gethostname():
                    duplex.update({'activeController':
                                   host['hostname'].encode('utf8', 'strict')})

            pools = self.dbapi.address_pools_get_all()
            for pool in pools:
                if pool.name == 'management':
                    duplex.update({'floatIP': pool.floating_address})

        return duplex
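
To make the shape of these overrides concrete: on an AIO-SX system with one
labeled monitor host, get_overrides would reduce to roughly the following
YAML for the storage-provisioner namespace (host name illustrative):

cluster:
  mon:
    count: 1
hook:
  cleanup:
    mon_hosts:
    - controller-0
  duplexPreparation:
    enable: false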

@@ -0,0 +1,161 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from k8sapp_rook.common import constants as app_constants

from kubernetes.client.rest import ApiException

from oslo_log import log as logging

from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import kubernetes
from sysinv.common import utils

from sysinv.helm import base
from sysinv.helm import common

LOG = logging.getLogger(__name__)


class RookCephProvisionerHelm(base.BaseHelm):
    """Class to encapsulate helm operations for the rook-ceph-provisioner chart"""

    CHART = app_constants.HELM_CHART_ROOK_CEPH_PROVISIONER
    SUPPORTED_NAMESPACES = base.BaseHelm.SUPPORTED_NAMESPACES + \
        [common.HELM_NS_STORAGE_PROVISIONER]

    SUPPORTED_APP_NAMESPACES = {
        constants.HELM_APP_ROOK_CEPH:
            base.BaseHelm.SUPPORTED_NAMESPACES +
            [common.HELM_NS_STORAGE_PROVISIONER],
    }

    SERVICE_NAME = app_constants.HELM_CHART_ROOK_CEPH_PROVISIONER

    def execute_manifest_updates(self, operator):
        # On application load this chart is enabled. Only disable if
        # specified by the user.
        if not self._is_enabled(operator.APP, self.CHART,
                                common.HELM_NS_STORAGE_PROVISIONER):
            operator.chart_group_chart_delete(
                operator.CHART_GROUPS_LUT[self.CHART],
                operator.CHARTS_LUT[self.CHART])

    def get_namespaces(self):
        return self.SUPPORTED_NAMESPACES

    def get_overrides(self, namespace=None):
        base_name = 'ceph-pool'
        secret_name = base_name + '-' + constants.CEPH_POOL_KUBE_NAME

        if utils.is_aio_simplex_system(self.dbapi):
            replica = 1
        else:
            replica = 2

        audit = utils.is_aio_duplex_system(self.dbapi)

        overrides = {
            common.HELM_NS_STORAGE_PROVISIONER: {
                "global": {
                    "job_ceph_mon_audit": audit,
                },
                "provisionStorage": {
                    "defaultStorageClass": constants.K8S_RBD_PROV_STOR_CLASS_NAME,
                    "classdefaults": {
                        "monitors": self._get_monitors(),
                        "adminId": constants.K8S_RBD_PROV_USER_NAME,
                        "adminSecretName": constants.K8S_RBD_PROV_ADMIN_SECRET_NAME,
                    },
                    "classes": {
                        "name": constants.K8S_RBD_PROV_STOR_CLASS_NAME,
                        "pool": {
                            "pool_name": constants.CEPH_POOL_KUBE_NAME,
                            "replication": replica,
                            "crush_rule_name": "storage_tier_ruleset",
                            "chunk_size": 64,
                        },
                        "secret": {
                            "userId": constants.CEPH_POOL_KUBE_NAME,
                            "userSecretName": secret_name,
                        }
                    },
                },
                "host_provision": {
                    "controller_hosts": self._get_controller_hosts(),
                },
                "ceph_mon_audit_jobs": self._get_ceph_mon_audit(),
            }
        }

        if namespace in self.SUPPORTED_NAMESPACES:
            return overrides[namespace]
        elif namespace:
            raise exception.InvalidHelmNamespace(chart=self.CHART,
                                                 namespace=namespace)
        else:
            return overrides

    def _get_rook_mon_ip(self):
        try:
            kube = kubernetes.KubeOperator()
            mon_ip_name = 'rook-ceph-mon-endpoints'
            configmap = kube.kube_read_config_map(
                mon_ip_name, common.HELM_NS_STORAGE_PROVISIONER)
            if configmap is not None:
                data = configmap.data['data']
                LOG.info('rook configmap data is %s' % data)
                mons = data.split(',')
                lists = []
                for mon in mons:
                    mon = mon.split('=')
                    lists.append(mon[1])
                ip_str = ','.join(lists)
                LOG.info('rook mon ip is %s' % ip_str)
                return ip_str
        except Exception as e:
            LOG.error("Kubernetes exception in rook mon ip: %s" % e)
            raise

        return ''

    def _is_rook_ceph(self):
        try:
            # check function getLabels in rook/pkg/operator/ceph/cluster/mon/spec.go
            # rook will assign label "mon_cluster=kube-system" to monitor pods
            label = "mon_cluster=" + common.HELM_NS_STORAGE_PROVISIONER
            kube = kubernetes.KubeOperator()
            pods = kube.kube_get_pods_by_selector(
                common.HELM_NS_STORAGE_PROVISIONER, label, "")
            if len(pods) > 0:
                return True
        except ApiException as ae:
            LOG.error("get monitor pod exception: %s" % ae)
        except exception.SysinvException as se:
            LOG.error("get sysinv exception: %s" % se)

        return False

    def _get_monitors(self):
        if self._is_rook_ceph():
            return self._get_rook_mon_ip()
        else:
            return ''

    def _get_controller_hosts(self):
        controller_hosts = []

        hosts = self.dbapi.ihost_get_by_personality(constants.CONTROLLER)
        for h in hosts:
            controller_hosts.append(h.hostname.encode('utf8', 'strict'))

        return controller_hosts

    def _get_ceph_mon_audit(self):
        audit = {}

        if utils.is_aio_duplex_system(self.dbapi):
            pools = self.dbapi.address_pools_get_all()
            for pool in pools:
                if pool.name == 'management':
                    audit.update({'floatIP': pool.floating_address})

        return audit
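
The rook-ceph-mon-endpoints ConfigMap stores its data as comma-separated
name=ip:port pairs, so the parsing in _get_rook_mon_ip reduces to this
standalone sketch (addresses made up):

data = 'a=192.168.204.3:6789,b=192.168.204.4:6789'
ip_str = ','.join(mon.split('=')[1] for mon in data.split(','))
print(ip_str)  # 192.168.204.3:6789,192.168.204.4:6789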

@@ -0,0 +1,65 @@
#
# Copyright (c) 2021 Intel Corporation, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from k8sapp_rook.common import constants as app_constants

from sysinv.common import constants
from sysinv.common import exception

from sysinv.helm import base
from sysinv.helm import common


class RookOperatorHelm(base.BaseHelm):
    """Class to encapsulate helm operations for the rook-operator chart"""

    CHART = app_constants.HELM_CHART_ROOK_OPERATOR
    SUPPORTED_NAMESPACES = base.BaseHelm.SUPPORTED_NAMESPACES + \
        [common.HELM_NS_STORAGE_PROVISIONER]

    SUPPORTED_APP_NAMESPACES = {
        constants.HELM_APP_ROOK_CEPH:
            base.BaseHelm.SUPPORTED_NAMESPACES +
            [common.HELM_NS_STORAGE_PROVISIONER],
    }

    def execute_manifest_updates(self, operator):
        # On application load this chart is enabled. Only disable if
        # specified by the user.
        if not self._is_enabled(operator.APP, self.CHART,
                                common.HELM_NS_STORAGE_PROVISIONER):
            operator.chart_group_chart_delete(
                operator.CHART_GROUPS_LUT[self.CHART],
                operator.CHARTS_LUT[self.CHART])

    def get_namespaces(self):
        return self.SUPPORTED_NAMESPACES

    def get_overrides(self, namespace=None):
        secrets = [{"name": "default-registry-key"}]

        overrides = {
            common.HELM_NS_STORAGE_PROVISIONER: {
                'operator': self._get_operator_override(),
                'imagePullSecrets': secrets,
            }
        }

        if namespace in self.SUPPORTED_NAMESPACES:
            return overrides[namespace]
        elif namespace:
            raise exception.InvalidHelmNamespace(chart=self.CHART,
                                                 namespace=namespace)
        else:
            return overrides

    def _get_operator_override(self):
        operator = {
            'csi': {
                'enableRbdDriver': True
            },
            'enableFlexDriver': False,
            'logLevel': 'DEBUG',
        }
        return operator

@@ -0,0 +1,34 @@
#
# SPDX-License-Identifier: Apache-2.0
#

from sysinv.common import constants
from sysinv.tests.db import base as dbbase
from sysinv.tests.helm.test_helm import HelmOperatorTestSuiteMixin


class K8SAppRookAppMixin(object):
    app_name = constants.HELM_APP_ROOK_CEPH
    path_name = app_name + '.tgz'

    def setUp(self):
        super(K8SAppRookAppMixin, self).setUp()


# Test Configuration:
# - Controller
# - IPv6
class K8SAppRookControllerTestCase(K8SAppRookAppMixin,
                                   dbbase.BaseIPv6Mixin,
                                   HelmOperatorTestSuiteMixin,
                                   dbbase.ControllerHostTestCase):
    pass


# Test Configuration:
# - AIO
# - IPv4
class K8SAppRookAIOTestCase(K8SAppRookAppMixin,
                            HelmOperatorTestSuiteMixin,
                            dbbase.AIOSimplexHostTestCase):
    pass

@@ -0,0 +1,240 @@
[MASTER]
# Specify a configuration file.
rcfile=pylint.rc
# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=
# Add files or directories to the blacklist. Should be base names, not paths.
ignore=tests
# Pickle collected data for later comparisons.
persistent=yes
# List of plugins (as comma separated values of python modules names) to load,
# usually to register additional checkers.
load-plugins=
# Use multiple processes to speed up Pylint.
jobs=4
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code
extension-pkg-whitelist=lxml.etree,greenlet
[MESSAGES CONTROL]
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time.
#enable=
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once).
# See "Messages Control" section of
# https://pylint.readthedocs.io/en/latest/user_guide
# We are disabling (C)onvention
# We are disabling (R)efactor
# We are selectively disabling (W)arning
# We are not disabling (F)atal, (E)rror
disable=C, R
[REPORTS]
# Set the output format. Available formats are text, parseable, colorized, msvs
# (visual studio) and html
output-format=text
# Put messages in a separate file for each module / package specified on the
# command line instead of printing them on stdout. Reports (if any) will be
# written in a file name "pylint_global.[txt|html]".
files-output=no
# Tells whether to display a full report or only the messages
reports=yes
# Python expression which should return a note less than 10 (10 is the highest
# note). You have access to the variables errors warning, statement which
# respectively contain the number of errors / warnings messages and the total
# number of statements analyzed. This is used by the global evaluation report
# (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
[SIMILARITIES]
# Minimum lines number of a similarity.
min-similarity-lines=4
# Ignore comments when computing similarities.
ignore-comments=yes
# Ignore docstrings when computing similarities.
ignore-docstrings=yes
[FORMAT]
# Maximum number of characters on a single line.
max-line-length=80
# Maximum number of lines in a module
max-module-lines=1000
# String used as indentation unit. This is usually 4 spaces or "\t" (1 tab).
indent-string=' '
[TYPECHECK]
# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
ignore-mixin-members=yes
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis
ignored-modules=distutils,eventlet.green.subprocess,six,six.moves
# List of classes names for which member attributes should not be checked
# (useful for classes with attributes dynamically set).
# pylint is confused by sqlalchemy Table, as well as sqlalchemy Enum types
# ie: (unprovisioned, identity)
# LookupDict in requests library confuses pylint
ignored-classes=SQLObject, optparse.Values, thread._local, _thread._local,
                Table, unprovisioned, identity, LookupDict
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E0201 when accessed. Python regular
# expressions are accepted.
generated-members=REQUEST,acl_users,aq_parent
[BASIC]
# List of builtins function names that should not be used, separated by a comma
bad-functions=map,filter,apply,input
# Regular expression which should only match correct module names
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
# Regular expression which should only match correct module level names
const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
# Regular expression which should only match correct class names
class-rgx=[A-Z_][a-zA-Z0-9]+$
# Regular expression which should only match correct function names
function-rgx=[a-z_][a-z0-9_]{2,30}$
# Regular expression which should only match correct method names
method-rgx=[a-z_][a-z0-9_]{2,30}$
# Regular expression which should only match correct instance attribute names
attr-rgx=[a-z_][a-z0-9_]{2,30}$
# Regular expression which should only match correct argument names
argument-rgx=[a-z_][a-z0-9_]{2,30}$
# Regular expression which should only match correct variable names
variable-rgx=[a-z_][a-z0-9_]{2,30}$
# Regular expression which should only match correct list comprehension /
# generator expression variable names
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
# Good variable names which should always be accepted, separated by a comma
good-names=i,j,k,ex,Run,_
# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata
# Regular expression which should only match functions or classes name which do
# not require a docstring
no-docstring-rgx=__.*__
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,XXX,TODO
[VARIABLES]
# Tells whether we should check for unused import in __init__ files.
init-import=no
# A regular expression matching the beginning of the name of dummy variables
# (i.e. not used).
dummy-variables-rgx=_|dummy
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
additional-builtins=
[IMPORTS]
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=regsub,string,TERMIOS,Bastion,rexec
# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled)
import-graph=
# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled)
ext-import-graph=
# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled)
int-import-graph=
[DESIGN]
# Maximum number of arguments for function / method
max-args=5
# Argument names that match this expression will be ignored. Default to name
# with leading underscore
ignored-argument-names=_.*
# Maximum number of locals for function / method body
max-locals=15
# Maximum number of return / yield for function / method body
max-returns=6
# Maximum number of branch for function / method body
max-branchs=12
# Maximum number of statements in function / method body
max-statements=50
# Maximum number of parents for a class (see R0901).
max-parents=7
# Maximum number of attributes for a class (see R0902).
max-attributes=7
# Minimum number of public methods for a class (see R0903).
min-public-methods=2
# Maximum number of public methods for a class (see R0904).
max-public-methods=20
[CLASSES]
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,__new__,setUp
# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls
[EXCEPTIONS]
# Exceptions that will emit a warning when being caught. Defaults to
# "Exception"
overgeneral-exceptions=Exception

@@ -0,0 +1,2 @@
pbr>=0.5
PyYAML>=3.10

@@ -0,0 +1,46 @@
[metadata]
name = k8sapp-rook
summary = StarlingX sysinv extensions for rook-ceph-apps
long_description = file: README.rst
long_description_content_type = text/x-rst
license = Apache 2.0
author = StarlingX
author-email = starlingx-discuss@lists.starlingx.io
home-page = https://www.starlingx.io/
classifier =
    Environment :: OpenStack
    Intended Audience :: Information Technology
    Intended Audience :: System Administrators
    License :: OSI Approved :: Apache Software License
    Operating System :: POSIX :: Linux
    Programming Language :: Python
    Programming Language :: Python :: 2
    Programming Language :: Python :: 2.7
    Programming Language :: Python :: 3
    Programming Language :: Python :: 3.5
    Programming Language :: Python :: 3.6
    Programming Language :: Python :: 3.7
    Programming Language :: Python :: 3.8

[files]
packages =
    k8sapp_rook

[global]
setup-hooks =
    pbr.hooks.setup_hook

[entry_points]
systemconfig.helm_applications =
    rook-ceph-apps = systemconfig.helm_plugins.rook_ceph_apps

systemconfig.helm_plugins.rook_ceph_apps =
    001_rook-operator = k8sapp_rook.helm.rook_operator:RookOperatorHelm
    002_rook-ceph = k8sapp_rook.helm.rook_ceph:RookCephHelm
    003_rook-ceph-provisioner = k8sapp_rook.helm.rook_ceph_provisioner:RookCephProvisionerHelm

systemconfig.armada.manifest_ops =
    rook-ceph-apps = k8sapp_rook.armada.manifest_rook_ceph:RookCephArmadaManifestOperator

[wheel]
universal = 1
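
These entry points are how sysinv discovers the plugins at runtime. A minimal
discovery sketch, assuming the conventional pkg_resources lookup (the real
loading code lives in sysinv, not in this change):

import pkg_resources

# Iterate the helm plugin namespace declared in setup.cfg above.
for ep in pkg_resources.iter_entry_points('systemconfig.helm_plugins.rook_ceph_apps'):
    plugin_cls = ep.load()  # e.g. RookOperatorHelm for '001_rook-operator'
    print(ep.name, plugin_cls.CHART)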

@@ -0,0 +1,12 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import setuptools

setuptools.setup(
    setup_requires=['pbr>=2.0.0'],
    pbr=True)

@@ -0,0 +1,27 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
hacking>=1.1.0,<=2.0.0 # Apache-2.0
bandit;python_version>="3.0"
coverage>=3.6
fixtures>=3.0.0 # Apache-2.0/BSD
mock>=2.0.0 # BSD
passlib>=1.7.0
psycopg2-binary
python-barbicanclient
python-subunit>=0.0.18
requests-mock>=0.6.0 # Apache-2.0
sphinx
oslosphinx
oslotest>=3.2.0 # Apache-2.0
stestr>=1.0.0 # Apache-2.0
testrepository>=0.0.18
testtools!=1.2.0,>=0.9.36
isort<5;python_version>="3.0"
pylint<2.1.0;python_version<"3.0" # GPLv2
pylint<2.4.0;python_version>="3.0" # GPLv2
pytest
pyudev
migrate
python-ldap>=3.1.0
markupsafe

@@ -0,0 +1,125 @@
[tox]
envlist = flake8,py27,py36,pylint,bandit
minversion = 1.6
# skipsdist = True
#,pip-missing-reqs

# tox does not work if the path to the workdir is too long, so move it to /tmp
toxworkdir = /tmp/{env:USER}_k8srooktox
stxdir = {toxinidir}/../../..
distshare={toxworkdir}/.tox/distshare

[testenv]
# usedevelop = True
# enabling usedevelop results in py27 develop-inst:
# Exception: Versioning for this project requires either an sdist tarball,
# or access to an upstream git repository.

# Note. site-packages is true and rpm-python must be yum installed on your dev machine.
sitepackages = True

# tox is silly... these need to be separated by a newline....
whitelist_externals = bash
                      find

install_command = pip install \
    -v -v -v \
    -c{toxinidir}/upper-constraints.txt \
    -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/stable/stein/upper-constraints.txt} \
    {opts} {packages}

# Note the hash seed is set to 0 until can be tested with a
# random hash seed successfully.
setenv = VIRTUAL_ENV={envdir}
         PYTHONHASHSEED=0
         PYTHONDONTWRITEBYTECODE=1
         OS_TEST_PATH=./k8sapp_rook/tests
         LANG=en_US.UTF-8
         LANGUAGE=en_US:en
         LC_ALL=C
         SYSINV_TEST_ENV=True
         TOX_WORK_DIR={toxworkdir}
         PYLINTHOME={toxworkdir}

deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt
       -e{[tox]stxdir}/config/sysinv/sysinv/sysinv
       -e{[tox]stxdir}/config/tsconfig/tsconfig
       -e{[tox]stxdir}/fault/fm-api
       -e{[tox]stxdir}/fault/python-fmclient/fmclient
       -e{[tox]stxdir}/utilities/ceph/python-cephclient/python-cephclient
       -e{[tox]stxdir}/update/cgcs-patch/cgcs-patch

commands =
  find . -type f -name "*.pyc" -delete

[flake8]
exclude = build,dist,tools,.eggs
max-line-length=120

[testenv:flake8]
basepython = python3
deps = -r{toxinidir}/test-requirements.txt
commands =
  flake8 {posargs} .

[testenv:py27]
basepython = python2.7
commands =
  {[testenv]commands}
  stestr run {posargs}
  stestr slowest

[testenv:py36]
basepython = python3.6
commands =
  {[testenv]commands}
  stestr run {posargs}
  stestr slowest

[testenv:pep8]
# testenv:flake8 clone
basepython = {[testenv:flake8]basepython}
deps = {[testenv:flake8]deps}
commands = {[testenv:flake8]commands}

[testenv:venv]
commands = {posargs}

[bandit]
exclude = tests

[testenv:bandit]
basepython = python3
deps = -r{toxinidir}/test-requirements.txt
commands = bandit --ini tox.ini -n 5 -r k8sapp_rook

[testenv:pylint]
basepython = python2.7
sitepackages = False
deps = {[testenv]deps}
commands =
  pylint {posargs} k8sapp_rook --rcfile=./pylint.rc

[testenv:cover]
basepython = python2.7
deps = {[testenv]deps}
setenv = {[testenv]setenv}
         PYTHON=coverage run --parallel-mode
commands =
  {[testenv]commands}
  coverage erase
  stestr run {posargs}
  coverage combine
  coverage html -d cover
  coverage xml -o cover/coverage.xml
  coverage report

[testenv:pip-missing-reqs]
# do not install test-requirements as that will pollute the virtualenv for
# determining missing packages
# this also means that pip-missing-reqs must be installed separately, outside
# of the requirements.txt files
deps = pip_missing_reqs
       -rrequirements.txt
commands=pip-missing-reqs -d --ignore-file=/k8sapp_rook/tests k8sapp_rook

@@ -0,0 +1 @@
# Override upstream constraints based on StarlingX load

requirements.txt

@@ -0,0 +1 @@
# Nothing

@@ -0,0 +1,6 @@
SRC_DIR="stx-rook-ceph"
COPY_LIST="files/*"
EXCLUDE_LIST_FROM_TAR=".stestr"
TIS_PATCH_VER=PKG_GITREVCOUNT

@@ -0,0 +1,92 @@
# Application tunables (maps to metadata)
%global app_name rook-ceph-apps
%global helm_repo stx-platform
# Install location
%global app_folder /usr/local/share/applications/helm
# Build variables
%global helm_folder /usr/lib/helm
#%global toolkit_version 0.1.0
%global rook_version 1.2.7
Summary: StarlingX K8S application: Rook Ceph
Name: stx-rook-ceph
Version: 1.0
Release: %{tis_patch_ver}%{?_tis_dist}
License: Apache-2.0
Group: base
Packager: Intel
URL: unknown
Source0: %{name}-%{version}.tar.gz
Source1: rook-mon-exit.sh
BuildArch: noarch
BuildRequires: helm
BuildRequires: openstack-helm-infra
BuildRequires: chartmuseum
BuildRequires: python-k8sapp-rook
BuildRequires: python-k8sapp-rook-wheels
%description
The StarlingX K8S application for Rook Ceph
%prep
%setup
%build
# Host a server for the charts
chartmuseum --debug --port=8879 --context-path='/charts' --storage="local" --storage-local-rootdir="./helm-charts" &
sleep 2
helm repo add local http://localhost:8879/charts
# Make the charts. These produce a tgz file
cd helm-charts
make rook-operator
make rook-ceph
make rook-ceph-provisioner
cd -
# Terminate helm server (the last backgrounded task)
kill %1
# Create a chart tarball compliant with sysinv kube-app.py
%define app_staging %{_builddir}/staging
%define app_tarball %{app_name}-%{version}-%{tis_patch_ver}.tgz
# Setup staging
mkdir -p %{app_staging}
cp files/metadata.yaml %{app_staging}
cp manifests/manifest.yaml %{app_staging}
mkdir -p %{app_staging}/charts
cp helm-charts/*.tgz %{app_staging}/charts
cd %{app_staging}
# Populate metadata
sed -i 's/@APP_NAME@/%{app_name}/g' %{app_staging}/metadata.yaml
sed -i 's/@APP_VERSION@/%{version}-%{tis_patch_ver}/g' %{app_staging}/metadata.yaml
sed -i 's/@HELM_REPO@/%{helm_repo}/g' %{app_staging}/metadata.yaml
# Copy the plugins: installed in the buildroot
mkdir -p %{app_staging}/plugins
cp /plugins/*.whl %{app_staging}/plugins
# package it up
find . -type f ! -name '*.md5' -print0 | xargs -0 md5sum > checksum.md5
tar -zcf %{_builddir}/%{app_tarball} -C %{app_staging}/ .
# Cleanup staging
rm -fr %{app_staging}
%install
install -d -m 755 %{buildroot}/%{app_folder}
install -d -m 755 %{buildroot}%{_initrddir}
install -p -D -m 755 %{_builddir}/%{app_tarball} %{buildroot}/%{app_folder}
install -m 750 %{SOURCE1} %{buildroot}%{_initrddir}/rook-mon-exit
%files
%defattr(-,root,root,-)
%{app_folder}/*
%{_initrddir}/rook-mon-exit
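
Based on the staging steps above, the resulting application tarball would
contain roughly the following (the wheel file name and version suffix are
illustrative; chart versions follow each chart's Chart.yaml):

rook-ceph-apps-1.0-<patch>.tgz
    metadata.yaml
    manifest.yaml
    checksum.md5
    charts/rook-operator-<ver>.tgz
    charts/rook-ceph-<ver>.tgz
    charts/rook-ceph-provisioner-<ver>.tgz
    plugins/k8sapp_rook-<ver>-py2-none-any.whl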

@@ -0,0 +1,79 @@
#!/bin/bash
#
# Copyright (c) 2020 Intel Corporation, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
RETVAL=0

################################################################################
# Start Action
################################################################################
function start {
    return
}

################################################################################
# Stop Action
################################################################################
function stop {
    pgrep ceph-mon
    if [ x"$?" = x"0" ]; then
        kubectl --kubeconfig=/etc/kubernetes/admin.conf delete \
            deployments.apps -n kube-system rook-ceph-mon-a
        kubectl --kubeconfig=/etc/kubernetes/admin.conf delete po \
            -n kube-system --selector="app=rook-ceph-mon,mon=a"
    fi

    pgrep ceph-osd
    if [ x"$?" = x"0" ]; then
        kubectl --kubeconfig=/etc/kubernetes/admin.conf delete \
            deployments.apps -n kube-system \
            --selector="app=rook-ceph-osd,failure-domain=$(hostname)"
        kubectl --kubeconfig=/etc/kubernetes/admin.conf delete po \
            --selector="app=rook-ceph-osd,failure-domain=$(hostname)" \
            -n kube-system
    fi
}

################################################################################
# Status Action
################################################################################
function status {
    pgrep sysinv-api
    RETVAL=$?
    return
}

################################################################################
# Main Entry
################################################################################
case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    restart)
        stop
        start
        ;;
    status)
        status
        ;;
    *)
        echo "usage: $0 { start | stop | status | restart }"
        exit 1
        ;;
esac
exit $RETVAL

@@ -0,0 +1,6 @@
This directory contains all StarlingX charts that need to be built to support
platform integration immediately after installation. Some charts are common
across applications. These common charts reside in the
stx-config/kubernetes/helm-charts directory. To include these in this
application, update the build_srpm.data file and use the COPY_LIST_TO_TAR
mechanism to populate these common charts.

@@ -0,0 +1,4 @@
app_name: @APP_NAME@
app_version: @APP_VERSION@
helm_repo: @HELM_REPO@
helm_toolkit_required: false

@@ -0,0 +1,43 @@
#
# Copyright 2017 The Openstack-Helm Authors.
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# It's necessary to set this because some environments don't link sh -> bash.
SHELL := /bin/bash
TASK := build

EXCLUDES := helm-toolkit doc tests tools logs tmp
CHARTS := helm-toolkit $(filter-out $(EXCLUDES), $(patsubst %/.,%,$(wildcard */.)))

.PHONY: $(EXCLUDES) $(CHARTS)

all: $(CHARTS)

$(CHARTS):
	@if [ -d $@ ]; then \
		echo; \
		echo "===== Processing [$@] chart ====="; \
		make $(TASK)-$@; \
	fi

init-%:
	if [ -f $*/Makefile ]; then make -C $*; fi
	if [ -f $*/requirements.yaml ]; then helm dep up $*; fi

lint-%: init-%
	if [ -d $* ]; then helm lint $*; fi

build-%: lint-%
	if [ -d $* ]; then helm package $*; fi

clean:
	@echo "Clean all build artifacts"
	rm -f */templates/_partials.tpl */templates/_globals.tpl
	rm -f *tgz */charts/*tgz */requirements.lock
	rm -rf */charts */tmpcharts

%:
	@:
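
As driven from the spec file's %build section above, building a single chart
with these pattern rules looks like (assuming the chart directory exists and
the 'local' helm repo has been added):

    $ make rook-ceph
    ===== Processing [rook-ceph] chart =====
    # runs init-rook-ceph and lint-rook-ceph, then 'helm package rook-ceph',
    # leaving rook-ceph-<version>.tgz in the working directory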

@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@@ -0,0 +1,11 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
# Copyright (c) 2020 Intel, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
appVersion: "1.0"
description: A Helm chart for Kubernetes
name: rook-ceph-provisioner
version: 0.1.0

@@ -0,0 +1,5 @@
dependencies:
# - name: rook-ceph
#repository: http://localhost:8879
#version: 0.1.0
#alias: rook-operator

@@ -0,0 +1,2 @@
The rook-ceph-provisioner has been installed.

@@ -0,0 +1,52 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.global.rbac }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ .Values.rbac.clusterRole }}
rules:
- apiGroups: [""]
  resources: ["persistentvolumes"]
  verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch", "patch"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "update", "patch"]
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["kube-dns"]
  verbs: ["list", "get"]
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get", "create", "list", "update", "delete"]
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "create", "list", "update", "delete", "patch"]
- apiGroups: ["extensions", "apps"]
  resources: ["deployments"]
  verbs: ["get", "list", "update", "patch", "delete"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "update", "delete"]
- apiGroups: ["batch"]
  resources: ["jobs"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["namespaces"]
  verbs: ["get", "create", "list", "update"]
{{- end}}

@@ -0,0 +1,22 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.global.rbac }}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ .Values.rbac.clusterRoleBinding }}
subjects:
- kind: ServiceAccount
  name: {{ .Values.rbac.serviceAccount }}
  namespace: {{ .Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: {{ .Values.rbac.clusterRole }}
  apiGroup: rbac.authorization.k8s.io
{{- end}}

@@ -0,0 +1,22 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Values.global.configmap_key_init | quote }}
  namespace: {{ .Release.Namespace }}
data:
  provision.sh: |-
    #!/bin/bash

    if [ "${MON_HOST}"x == ""x ]; then
        MON_HOST=$(echo ${ROOK_MONS} | sed 's/[a-z]\+=//g')
    fi

    cat > /etc/ceph/ceph.conf << EOF
    [global]
    mon_host = $MON_HOST
    EOF

    admin_keyring=$(echo $ADMIN_KEYRING | cut -f4 -d' ')
    cat > /etc/ceph/ceph.client.admin.keyring << EOF
    [client.admin]
    key = $admin_keyring
    EOF
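
The sed expression strips the per-monitor name prefixes that Rook stores in
ROOK_MONS, e.g. (addresses illustrative):

    ROOK_MONS='a=192.168.204.3:6789,b=192.168.204.4:6789'
    MON_HOST='192.168.204.3:6789,192.168.204.4:6789'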

@@ -0,0 +1,111 @@
{{/*
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.global.job_ceph_mon_audit }}
apiVersion: v1
kind: ConfigMap
metadata:
name: ceph-mon-audit-bin
namespace: {{ .Release.Namespace }}
data:
audit.sh: |-
#!/bin/bash
source /etc/build.info
node=$(hostname)
stat /opt/platform/.keyring/${SW_VERSION}/.CREDENTIAL > /dev/null 2>&1
if [ $? -ne 0 ]; then
if [ x"$node" = x"controller-0" ]; then
active="controller-1"
else
active="controller-0"
fi
else
active=$node
fi
controller_node=$(kubectl get pods -n kube-system --selector=app="rook-ceph-mon,ceph_daemon_id=a" -o wide | awk '/controller/ {print $7}')
if [ x"$active" = x"$controller_node" ]; then
echo "mon-a pod is running on active controler"
exit 0
fi
# update configmap
cat > endpoint.yaml << EOF
apiVersion: v1
kind: ConfigMap
metadata:
name: rook-ceph-mon-endpoints
namespace: $NAMESPACE
data:
data: a=$FLOAT_IP:6789
mapping: '{"node":{"a":{"Name":"$active","Hostname":"$active","Address":"$FLOAT_IP"}}}'
maxMonId: "0"
EOF
kubectl apply -f endpoint.yaml --overwrite=true
rm -f endpoint.yaml
# delete mon-a deployment and pod
kubectl delete deployments.apps -n kube-system rook-ceph-mon-a
kubectl delete pods -n kube-system --selector="app=rook-ceph-mon,ceph_daemon_id=a"
kubectl delete po -n kube-system --selector="app=rook-ceph-operator"
---
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: stx-ceph-mon-audit
spec:
schedule: {{ .Values.ceph_mon_audit_jobs.audit.cron | quote }}
successfulJobsHistoryLimit: {{ .Values.ceph_mon_audit_jobs.audit.history.success }}
failedJobsHistoryLimit: {{ .Values.ceph_mon_audit_jobs.audit.history.failed }}
concurrencyPolicy: Forbid
jobTemplate:
metadata:
name: stx-ceph-mon-audit
namespace: {{ .Release.Namespace }}
labels:
app: ceph-mon-audit
spec:
template:
metadata:
labels:
app: ceph-mon-audit
spec:
serviceAccountName: {{ .Values.rbac.serviceAccount }}
restartPolicy: OnFailure
hostNetwork: true
{{- if .Values.global.nodeSelector }}
nodeSelector:
{{ .Values.global.nodeSelector | toYaml | trim | indent 10 }}
{{- end }}
volumes:
- name: ceph-mon-audit-bin
configMap:
name: ceph-mon-audit-bin
defaultMode: 0555
- name: platform
hostPath:
path: /opt/platform
containers:
- name: ceph-mon-audit
image: {{ .Values.images.tags.ceph_config_helper | quote }}
command: [ "/bin/bash", "/tmp/mount/audit.sh" ]
env:
- name: NAMESPACE
value: {{ .Release.Namespace }}
- name: FLOAT_IP
value: {{ .Values.ceph_mon_audit_jobs.floatIP }}
volumeMounts:
- name: platform
mountPath: /opt/platform
readOnly: true
- name: ceph-mon-audit-bin
mountPath: /tmp/mount
{{- end }}
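
The audit is off by default (global.job_ceph_mon_audit: false); once enabled, the cron job and its runs can be inspected with (namespace assumed from this application):
    $ kubectl -n kube-system get cronjob stx-ceph-mon-audit
    $ kubectl -n kube-system get jobs | grep stx-ceph-mon-audit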


@ -0,0 +1,123 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.global.job_ceph_mgr_provision }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: ceph-mgr-provision-bin
namespace: {{ .Release.Namespace }}
data:
provision.sh: |-
#!/bin/bash
# Check if ceph is accessible
echo "===================================="
ceph -s
if [ $? -ne 0 ]; then
echo "Error: Ceph cluster is not accessible, check Pod logs for details."
exit 1
fi
cat > /tmp/controller << EOF
[req]
req_extensions = v3_ca
distinguished_name = req_distinguished_name
[v3_ca]
subjectAltName= @alt_names
basicConstraints = CA:true
[req_distinguished_name]
0.organizationName = IT
commonName = ceph-restful
[alt_names]
DNS.1 = controller-0
DNS.2 = controller-1
EOF
openssl req -new -nodes -x509 -subj /O=IT/CN=controller -days 3650 -config /tmp/controller -out /tmp/controller.crt -keyout /tmp/controller.key -extensions v3_ca
for i in "a" "controller-0" "controller-1"
do
ceph config-key set mgr/restful/$i/crt -i /tmp/controller.crt
ceph config-key set mgr/restful/$i/key -i /tmp/controller.key
done
ceph config set mgr mgr/restful/server_port 7999
ceph mgr module disable restful
ceph mgr module enable restful
ceph restful create-key admin
    rm -rf /tmp/controller /tmp/controller.crt /tmp/controller.key
---
apiVersion: batch/v1
kind: Job
metadata:
name: ceph-mgr-provision
namespace: {{ .Release.Namespace }}
labels:
heritage: {{ .Release.Service | quote }}
release: {{ .Release.Name | quote }}
chart: "{{.Chart.Name}}-{{.Chart.Version}}"
spec:
  backoffLimit: 5 # Limit the number of job restarts in case of failure: ~5 minutes.
template:
metadata:
name: ceph-mgr-provision
namespace: {{ .Release.Namespace }}
labels:
heritage: {{ .Release.Service | quote }}
release: {{ .Release.Name | quote }}
chart: "{{.Chart.Name}}-{{.Chart.Version}}"
spec:
restartPolicy: OnFailure
volumes:
- name: ceph-mgr-provision-bin
configMap:
name: ceph-mgr-provision-bin
- name: config-key-provision
configMap:
name: {{ .Values.global.configmap_key_init }}
- name: ceph-config
emptyDir: {}
initContainers:
- name: init
image: {{ .Values.images.tags.ceph_config_helper | quote }}
command: [ "/bin/bash", "/tmp/mount/provision.sh" ]
env:
- name: ADMIN_KEYRING
valueFrom:
secretKeyRef:
name: rook-ceph-admin-keyring
key: keyring
- name: ROOK_MONS
valueFrom:
configMapKeyRef:
name: rook-ceph-mon-endpoints
key: data
volumeMounts:
- mountPath: /etc/ceph
name: ceph-config
- name: config-key-provision
mountPath: /tmp/mount
containers:
- name: provision
image: {{ .Values.images.tags.ceph_config_helper | quote }}
command: [ "/bin/bash", "/tmp/mount/provision.sh" ]
env:
- name: NAMESPACE
value: {{ .Release.Namespace }}
volumeMounts:
- mountPath: /etc/ceph
name: ceph-config
- name: ceph-mgr-provision-bin
mountPath: /tmp/mount/
{{- if .Values.global.nodeSelector }}
nodeSelector:
{{ .Values.global.nodeSelector | toYaml | trim | indent 8 }}
{{- end }}
{{- end }}
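
Once the job has run, the restful module state can be checked from any node with /etc/ceph provisioned (standard ceph commands, shown as a sketch):
    $ ceph mgr module ls | grep restful
    $ ceph mgr services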


@ -0,0 +1,66 @@
{{/*
#
# Copyright (c) 2020 Intel Corporation, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.global.job_host_provision }}
{{ $root := . }}
{{- range $controller_host := $root.Values.host_provision.controller_hosts }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: "rook-ceph-host-provision-{{ $controller_host }}"
namespace: {{ $root.Release.Namespace }}
labels:
heritage: {{ $root.Release.Service | quote }}
release: {{ $root.Release.Name | quote }}
chart: "{{$root.Chart.Name}}-{{$root.Chart.Version}}"
annotations:
"helm.sh/hook": "post-install"
"helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
spec:
template:
metadata:
name: "rook-ceph-host-provision-{{ $controller_host }}"
namespace: {{ $root.Release.Namespace }}
labels:
heritage: {{ $root.Release.Service | quote }}
release: {{ $root.Release.Name | quote }}
chart: "{{$root.Chart.Name}}-{{$root.Chart.Version}}"
spec:
restartPolicy: OnFailure
volumes:
- name: rook-conf
hostPath:
path: /etc/ceph/
- name: config-key-provision
configMap:
name: {{ $root.Values.global.configmap_key_init }}
containers:
- name: host-provision
image: {{ $root.Values.images.tags.ceph_config_helper | quote }}
command: [ "/bin/bash", "/tmp/mount/provision.sh" ]
env:
- name: ADMIN_KEYRING
valueFrom:
secretKeyRef:
name: rook-ceph-admin-keyring
key: keyring
- name: ROOK_MONS
valueFrom:
configMapKeyRef:
name: rook-ceph-mon-endpoints
key: data
volumeMounts:
- name: rook-conf
mountPath: /etc/ceph/
- name: config-key-provision
mountPath: /tmp/mount
nodeName: {{ $controller_host }}
{{- end }}
{{- end }}
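
Each job is pinned to one controller via nodeName and reuses the key-init script, so a quick check on the host (controller-0 per the default values) is:
    $ ls /etc/ceph
    ceph.client.admin.keyring  ceph.conf
    $ ceph -s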


@ -0,0 +1,190 @@
{{/*
#
# Copyright (c) 2020 Intel Corporation, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.global.provision_storage }}
{{ $root := . }}
{{ $defaults := .Values.provisionStorage.classdefaults}}
{{ $mount := "/tmp/mount" }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: config-rook-ceph-provisioner
namespace: {{ $root.Release.Namespace }}
data:
provision.sh: |-
#!/bin/bash
# Check if ceph is accessible
echo "===================================="
ceph -s
if [ $? -ne 0 ]; then
echo "Error: Ceph cluster is not accessible, check Pod logs for details."
exit 1
fi
if [[ -z "${USER_ID}" && -z "${CEPH_USER_SECRET}" ]]; then
echo "No need to create secrets for pool ${POOL_NAME}"
exit 0
fi
set -ex
# Make sure the pool exists.
ceph osd pool stats ${POOL_NAME}
if [ $? -ne 0 ]; then
echo "Error: no pool for storge class"
exit 1
fi
ceph osd pool set ${POOL_NAME} size ${POOL_REPLICATION}
ceph osd pool set ${POOL_NAME} pg_num ${POOL_CHUNK_SIZE}
# Make sure crush rule exists.
ceph osd crush rule create-replicated ${POOL_CRUSH_RULE_NAME} default host
ceph osd pool set ${POOL_NAME} crush_rule ${POOL_CRUSH_RULE_NAME}
if [ $? -ne 0 ]; then
echo "Error: set pool crush rule failed"
fi
set +ex
kubectl get configmap ceph-etc -n ${NAMESPACE} | grep ceph-etc
    if [ $? -eq 0 ]; then
      echo "Delete out-of-date configmap ceph-etc"
      kubectl delete configmap -n ${NAMESPACE} ceph-etc
fi
kubectl create configmap ceph-etc --from-file=/etc/ceph/ceph.conf -n ${NAMESPACE}
if [ $? -ne 0 ]; then
echo "Error creating configmap ceph-etc, exit"
exit 1
fi
if [ -n "${CEPH_ADMIN_SECRET}" ]; then
kubectl get secret ${CEPH_ADMIN_SECRET} -n ${NAMESPACE} | grep ${CEPH_ADMIN_SECRET}
      if [ $? -eq 0 ]; then
        echo "Delete out-of-date ${CEPH_ADMIN_SECRET} secret"
        kubectl delete secret -n ${NAMESPACE} ${CEPH_ADMIN_SECRET}
fi
echo "Create ${CEPH_ADMIN_SECRET} secret"
admin_keyring=$(echo $ADMIN_KEYRING | cut -f4 -d' ')
kubectl create secret generic ${CEPH_ADMIN_SECRET} --type="kubernetes.io/rbd" --from-literal=key=$admin_keyring --namespace=${NAMESPACE}
if [ $? -ne 0 ]; then
echo "Error creating secret ${CEPH_ADMIN_SECRET}, exit"
exit 1
fi
fi
KEYRING=$(ceph auth get-or-create client.${USER_ID} mon "allow r" osd "allow rwx pool=${POOL_NAME}" | sed -n 's/^[[:blank:]]*key[[:blank:]]\+=[[:blank:]]\(.*\)/\1/p')
if [ -n "${CEPH_USER_SECRET}" ]; then
kubectl get secret -n ${NAMESPACE} ${CEPH_USER_SECRET} 2>/dev/null
      if [ $? -eq 0 ]; then
        echo "Delete out-of-date ${CEPH_USER_SECRET} secret"
        kubectl delete secret -n ${NAMESPACE} ${CEPH_USER_SECRET}
fi
echo "Create ${CEPH_USER_SECRET} secret"
kubectl create secret generic -n ${NAMESPACE} ${CEPH_USER_SECRET} --type="kubernetes.io/rbd" --from-literal=key=$KEYRING
if [ $? -ne 0 ]; then
echo"Error creating secret ${CEPH_USER_SECRET} in ${NAMESPACE}, exit"
exit 1
fi
fi
---
apiVersion: batch/v1
kind: Job
metadata:
name: "rook-ceph-provision"
namespace: {{ $root.Release.Namespace }}
labels:
heritage: {{$root.Release.Service | quote }}
release: {{$root.Release.Name | quote }}
chart: "{{$root.Chart.Name}}-{{$root.Chart.Version}}"
annotations:
"helm.sh/hook": "post-install, pre-upgrade, pre-rollback"
"helm.sh/hook-delete-policy": "before-hook-creation"
spec:
  backoffLimit: 5 # Limit the number of job restarts in case of failure: ~5 minutes.
template:
metadata:
name: "rook-ceph-provision"
namespace: {{ $root.Release.Namespace }}
labels:
heritage: {{$root.Release.Service | quote }}
release: {{$root.Release.Name | quote }}
chart: "{{$root.Chart.Name}}-{{$root.Chart.Version}}"
spec:
serviceAccountName: {{ $root.Values.rbac.serviceAccount }}
restartPolicy: OnFailure
volumes:
- name: config-volume-rook-ceph-provisioner
configMap:
name: config-rook-ceph-provisioner
- name: config-key-provision
configMap:
name: {{ .Values.global.configmap_key_init }}
- name: ceph-config
emptyDir: {}
initContainers:
- name: init
image: {{ $root.Values.images.tags.ceph_config_helper | quote }}
command: [ "/bin/bash", "{{ $mount }}/provision.sh" ]
env:
- name: MON_HOST
value: "{{ $defaults.monitors }}"
- name: ADMIN_KEYRING
valueFrom:
secretKeyRef:
name: rook-ceph-admin-keyring
key: keyring
- name: ROOK_MONS
valueFrom:
configMapKeyRef:
name: rook-ceph-mon-endpoints
key: data
volumeMounts:
- mountPath: /etc/ceph
name: ceph-config
- name: config-key-provision
mountPath: /tmp/mount
containers:
{{ $classConfig := $root.Values.provisionStorage.classes }}
- name: storage-init-{{- $classConfig.name }}
image: {{ $root.Values.images.tags.ceph_config_helper | quote }}
command: [ "/bin/bash", "{{ $mount }}/provision.sh" ]
env:
- name: NAMESPACE
value: {{ $root.Release.Namespace }}
- name: CEPH_ADMIN_SECRET
value: {{ $defaults.adminSecretName }}
- name: CEPH_USER_SECRET
value: {{ $classConfig.secret.userSecretName }}
- name: USER_ID
value: {{ $classConfig.secret.userId }}
- name: POOL_NAME
value: {{ $classConfig.pool.pool_name }}
- name: POOL_REPLICATION
value: {{ $classConfig.pool.replication | quote }}
- name: POOL_CRUSH_RULE_NAME
value: {{ $classConfig.pool.crush_rule_name | quote }}
- name: POOL_CHUNK_SIZE
value: {{ $classConfig.pool.chunk_size | quote }}
- name: ADMIN_KEYRING
valueFrom:
secretKeyRef:
name: rook-ceph-admin-keyring
key: keyring
volumeMounts:
- name: config-volume-rook-ceph-provisioner
mountPath: {{ $mount }}
- name: ceph-config
mountPath: /etc/ceph
readOnly: true
{{- if .Values.global.nodeSelector }}
nodeSelector:
{{ .Values.global.nodeSelector | toYaml | trim | indent 8 }}
{{- end }}
{{- end }}
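
A sketch of post-run verification, using the default names from values.yaml and assuming the kube-system namespace:
    $ kubectl -n kube-system get configmap ceph-etc
    $ kubectl -n kube-system get secret ceph-secret ceph-secret-kube
    $ ceph osd pool ls detail | grep kube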


@ -0,0 +1,69 @@
{{/*
#
# Copyright (c) 2020 Intel Corporation, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.global.job_cleanup }}
{{ $root := . }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: config-rook-provisioner-cleanup
namespace: {{ .Release.Namespace }}
data:
rook_clean_up.sh: |-
#!/bin/bash
kubectl delete configmap -n ${NAMESPACE} ceph-etc
    kubectl delete secret -n ${NAMESPACE} ${CEPH_ADMIN_SECRET}
    kubectl delete secret -n ${NAMESPACE} ${CEPH_USER_SECRET}
echo "rook ceph provisioner cleanup"
---
apiVersion: batch/v1
kind: Job
metadata:
name: rook-provisioner-cleanup
namespace: {{ .Release.Namespace }}
labels:
heritage: {{ .Release.Service | quote }}
release: {{ .Release.Name | quote }}
chart: "{{$root.Chart.Name}}-{{$root.Chart.Version}}"
annotations:
"helm.sh/hook": "pre-delete"
"helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
spec:
template:
metadata:
name: rook-provisioner-cleanup
namespace: {{ .Release.Namespace }}
labels:
heritage: {{ .Release.Service | quote }}
release: {{ .Release.Name | quote }}
chart: "{{$root.Chart.Name}}-{{$root.Chart.Version}}"
spec:
restartPolicy: OnFailure
serviceAccountName: {{ .Values.rbac.serviceAccount }}
volumes:
- name: config-rook-provisioner-cleanup
configMap:
name: config-rook-provisioner-cleanup
containers:
- name: rook-provisioner-cleanup
image: {{ .Values.images.tags.ceph_config_helper | quote }}
command: [ "/bin/bash", "/tmp/mount/rook_clean_up.sh" ]
env:
- name: NAMESPACE
value: {{ .Release.Namespace }}
- name: CEPH_ADMIN_SECRET
value: {{ .Values.provisionStorage.classdefaults.adminSecretName }}
- name: CEPH_USER_SECRET
value: {{ .Values.provisionStorage.classes.secret.userSecretName }}
volumeMounts:
- name: config-rook-provisioner-cleanup
mountPath: /tmp/mount
{{- end }}


@ -0,0 +1,25 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.global.rbac }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ .Values.rbac.role }}
namespace: {{ .Release.Namespace }}
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "create", "list", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "create", "list", "update"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "create", "list", "update"]
{{- end}}


@ -0,0 +1,23 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.global.rbac }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ .Values.rbac.roleBinding }}
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ .Values.rbac.role }}
subjects:
- kind: ServiceAccount
name: {{ .Values.rbac.serviceAccount }}
namespace: {{ .Release.Namespace }}
{{- end}}


@ -0,0 +1,17 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.global.rbac }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Values.rbac.serviceAccount }}
namespace: {{ .Release.Namespace }}
imagePullSecrets:
- name: default-registry-key
{{- end }}


@ -0,0 +1,55 @@
{{- if .Values.global.provision_storage }}
{{ $namespace := .Release.Namespace }}
{{ $defaults := .Values.provisionStorage.classdefaults}}
{{ $provisioner := .Values.provisionStorage.provisioner_name }}
{{ $defaultSC := .Values.provisionStorage.defaultStorageClass }}
{{ $releaseGroup := .Values.release_group | default .Release.Name }}
{{ $element := .Values.provisionStorage.classes }}
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
name: {{ $element.pool.pool_name }}
namespace: {{ $namespace }}
spec:
failureDomain: host
replicated:
size: {{ $element.pool.replication }}
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: {{ $element.name }}
{{- if eq $defaultSC $element.name}}
annotations:
"storageclass.kubernetes.io/is-default-class": "true"
{{- end }}
labels:
release_group: {{ $releaseGroup }}
provisioner: {{ $provisioner }}
parameters:
# clusterID is the namespace where the rook cluster is running
# If you change this namespace, also change the namespace below where the secret namespaces are defined
clusterID: kube-system
# Ceph pool into which the RBD image shall be created
pool: {{ $element.pool.pool_name }}
# RBD image format. Defaults to "2".
imageFormat: "2"
# RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature.
imageFeatures: layering
# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: kube-system
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: kube-system
# Specify the filesystem type of the volume. If not specified, csi-provisioner
# will set default as `ext4`.
csi.storage.k8s.io/fstype: ext4
# uncomment the following to use rbd-nbd as mounter on supported nodes
#mounter: rbd-nbd
reclaimPolicy: Delete
{{- end }}
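
A minimal PVC against the class created here could look as follows (claim name and size are invented; the class name is the default from values.yaml):
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
spec:
  storageClassName: rook-ceph
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi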


@ -0,0 +1,85 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
# Copyright (c) 2020 Intel Corporation, Inc
#
# SPDX-License-Identifier: Apache-2.0
#
#
# Global options.
# Defaults should be fine in most cases.
global:
configmap_key_init: ceph-key-init-bin
#
provision_storage: true
job_ceph_mgr_provision: true
job_ceph_mon_audit: false
job_host_provision: true
job_cleanup: true
# Defines whether to generate service account and role bindings.
rbac: true
# Node Selector
nodeSelector: { node-role.kubernetes.io/master: "" }
#
# RBAC options.
# Defaults should be fine in most cases.
rbac:
clusterRole: rook-ceph-provisioner
clusterRoleBinding: rook-ceph-provisioner
role: rook-ceph-provisioner
roleBinding: rook-ceph-provisioner
serviceAccount: rook-ceph-provisioner
images:
tags:
ceph_config_helper: docker.io/starlingx/ceph-config-helper:v1.15.0
provisionStorage:
# Defines the name of the provisioner associated with a set of storage classes
provisioner_name: kube-system.rbd.csi.ceph.com
# Enable this storage class as the system default storage class
defaultStorageClass: rook-ceph
# Configure storage classes.
# Defaults for storage classes. Update this if you have a single Ceph storage cluster.
# No need to add them to each class.
classdefaults:
# Define ip addresses of Ceph Monitors
monitors: 192.168.204.3:6789,192.168.204.4:6789,192.168.204.1:6789
# Ceph admin account
adminId: admin
# K8 secret name for the admin context
adminSecretName: ceph-secret
# Configure storage classes.
# This section should be tailored to your setup. It allows you to define multiple storage
# classes for the same cluster (e.g. if you have tiers of drives with different speeds).
# If you have multiple Ceph clusters take attributes from classdefaults and add them here.
classes:
name: rook-ceph # Name of storage class.
secret:
      # K8s secret name with the key for accessing the Ceph pool
userSecretName: ceph-secret-kube
# Ceph user name to access this pool
userId: kube
pool:
pool_name: kube
replication: 1
crush_rule_name: storage_tier_ruleset
chunk_size: 8
host_provision:
controller_hosts:
- controller-0
ceph_mon_audit_jobs:
floatIP: 192.168.204.2
audit:
cron: "*/3 * * * *"
history:
success: 1
failed: 1
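
For a two-controller (duplex) setup, the monitor list and pool replication are the values most likely to need overriding; a sample value.yaml fragment (addresses invented):
provisionStorage:
  classdefaults:
    monitors: 192.168.204.2:6789
  classes:
    pool:
      replication: 2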


@ -0,0 +1,7 @@
apiVersion: v1
description: File, Block, and Object Storage Services for your Cloud-Native Environment
name: rook-ceph
version: 0.1.0
icon: https://rook.io/images/rook-logo.svg
sources:
- https://github.com/rook/rook


@ -0,0 +1 @@
See the [Operator Helm Chart](/Documentation/helm-operator.md) documentation.


@ -0,0 +1,106 @@
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
name: ceph-cluster
namespace: {{ .Release.Namespace }}
spec:
cephVersion:
image: "{{ .Values.cluster.image.repository }}:{{ .Values.cluster.image.tag }}"
allowUnsupported: false
dataDirHostPath: /var/lib/ceph
skipUpgradeChecks: false
continueUpgradeAfterChecksEvenIfNotHealthy: false
mon:
count: {{ .Values.cluster.mon.count }}
allowMultiplePerNode: {{ .Values.cluster.mon.allowMultiplePerNode }}
dashboard:
enabled: true
# urlPrefix: /ceph-dashboard
# port: 8443
ssl: true
monitoring:
enabled: false
rulesNamespace: {{ .Release.Namespace }}
network:
hostNetwork: {{ .Values.cluster.hostNetwork }}
rbdMirroring:
workers: 0
placement:
mon:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: ceph-mon-placement
operator: In
values:
- enabled
mgr:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: ceph-mgr-placement
operator: In
values:
- enabled
annotations:
resources:
removeOSDsIfOutAndSafeToRemove: false
storage: # cluster level storage configuration and selection
useAllNodes: false
useAllDevices: false
deviceFilter:
location:
{{ if .Values.cluster.storage.nodes }}
nodes:
{{- range $nodeConfig := .Values.cluster.storage.nodes }}
- name: {{ $nodeConfig.name }}
{{ if $nodeConfig.devices }}
devices:
{{- range $osdConfig := $nodeConfig.devices }}
- name: {{ $osdConfig.name }}
{{ if $osdConfig.config }}
config:
storeType: {{ $osdConfig.config.storeType }}
{{ if eq "$osdConfig.config.storeType" "bluestore" }}
metadataDevice: {{ $osdConfig.config.metadataDevice }}
databaseSizeMB: {{ $osdConfig.config.databaseSizeMB }}
{{ else if eq "$osdConfig.config.storeType" "filestore" }}
journalSizeMB: {{ $osdConfig.config.journalSizeMB }}
{{ end }}
{{ end }}
{{- end }}
{{ end }}
{{ if $nodeConfig.config }}
config:
storeType: {{ $nodeConfig.config.storeType }}
{{ if eq "$nodeConfig.config.storeType" "bluestore" }}
metadataDevice: {{ $nodeConfig.config.metadataDevice }}
databaseSizeMB: {{ $nodeConfig.config.databaseSizeMB }}
{{ else if eq "$nodeConfig.config.storeType" "filestore" }}
journalSizeMB: {{ $nodeConfig.config.journalSizeMB }}
{{ end }}
{{ end }}
{{ if $nodeConfig.directories }}
directories:
{{- range $directoriesConfig := $nodeConfig.directories }}
- path: {{ $directoriesConfig }}
{{- end }}
{{ end }}
{{- end }}
{{ end }}
config:
storeType: {{ .Values.cluster.storage.storeType }}
{{ if eq ".Values.cluster.storage.storeType" "bluestore" }}
metadataDevice: {{ .Values.cluster.storage.metadataDevice }}
databaseSizeMB: {{ .Values.cluster.storage.databaseSizeMB }}
{{ else if eq ".Values.cluster.storage.storeType" "filestore" }}
journalSizeMB: {{ .Values.cluster.storage.journalSizeMB }}
{{ end }}
osdsPerDevice: "1" # this value can be overridden at the node or device level
# encryptedDevice: "true" # the default value for this option is "false"
{{ if .Values.cluster.storage.dataPath }}
directories:
- path: "{{ .Values.cluster.storage.dataPath }}"
{{ end }}
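
As an invented example, a node override that provisions a bluestore OSD on sdb of controller-0 would make the storage section above render roughly as:
    nodes:
    - name: controller-0
      devices:
      - name: sdb
        config:
          storeType: bluestore
          databaseSizeMB: 1024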


@ -0,0 +1,24 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{ if .Values.hook.cleanup.enable }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.hook.cleanup.rbac.clusterRole }}
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "create", "list", "update", "delete"]
- apiGroups: ["extensions", "apps"]
resources: ["deployments"]
verbs: ["get", "create", "list", "update", "delete"]
- apiGroups: ["apps"]
resources: ["daemonsets"]
verbs: ["get", "create", "list", "update", "delete"]
{{- end}}


@ -0,0 +1,22 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{ if .Values.hook.cleanup.enable }}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.hook.cleanup.rbac.clusterRoleBinding }}
subjects:
- kind: ServiceAccount
name: {{ .Values.hook.cleanup.rbac.serviceAccount }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ .Values.hook.cleanup.rbac.clusterRole }}
apiGroup: rbac.authorization.k8s.io
{{- end}}


@ -0,0 +1,76 @@
{{/*
#
# Copyright (c) 2020 Intel Corporation, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.hook.cleanup.enable }}
{{ $root := . }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: config-rook-ceph-host-cleanup
namespace: {{ $root.Release.Namespace }}
annotations:
"helm.sh/hook": "post-delete"
"helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
data:
rook_host_cleanup.sh: |-
#!/bin/bash
rm -rf /var/lib/ceph/*
stat /var/lib/ceph/mon-a/ > /dev/null 2>&1
if [ x"$?" = x"0" ]; then
rm -rf /var/lib/ceph/mon-a/*
fi
exit 0
{{- range $mon_host := $root.Values.hook.cleanup.mon_hosts }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: "rook-ceph-cleanup-{{ $mon_host }}"
namespace: {{ $root.Release.Namespace }}
labels:
heritage: {{ $root.Release.Service | quote }}
release: {{ $root.Release.Name | quote }}
chart: "{{$root.Chart.Name}}-{{$root.Chart.Version}}"
annotations:
"helm.sh/hook": "post-delete"
"helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
spec:
template:
metadata:
name: "rook-ceph-cleanup-{{ $mon_host }}"
namespace: {{ $root.Release.Namespace }}
labels:
heritage: {{ $root.Release.Service | quote }}
release: {{ $root.Release.Name | quote }}
chart: "{{$root.Chart.Name}}-{{$root.Chart.Version}}"
spec:
restartPolicy: OnFailure
volumes:
- name: rook-data
hostPath:
path: /var/lib/ceph
- name: config-rook-ceph-host-cleanup
configMap:
name: config-rook-ceph-host-cleanup
containers:
- name: host-cleanup
image: {{ $root.Values.hook.image }}
command: [ "/bin/bash", "/tmp/mount/rook_host_cleanup.sh" ]
volumeMounts:
- name: rook-data
mountPath: /var/lib/ceph
- name: config-rook-ceph-host-cleanup
mountPath: /tmp/mount
nodeName: {{ $mon_host }}
{{- end }}
{{- end }}


@ -0,0 +1,72 @@
{{/*
#
# Copyright (c) 2020 Intel Corporation, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.hook.cleanup.enable }}
{{ $root := . }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: config-rook-ceph-cleanup
namespace: {{ $root.Release.Namespace }}
data:
rook_clean_up.sh: |-
#!/bin/bash
kubectl delete deployment -n ${NAMESPACE} --selector="app=rook-ceph-osd"
kubectl delete deployment -n ${NAMESPACE} --selector="app=rook-ceph-mgr"
kubectl delete deployment -n ${NAMESPACE} csi-cephfsplugin-provisioner
kubectl delete deployment -n ${NAMESPACE} csi-rbdplugin-provisioner
    kubectl delete daemonsets.apps -n ${NAMESPACE} csi-cephfsplugin
    kubectl delete daemonsets.apps -n ${NAMESPACE} csi-rbdplugin
kubectl delete configmap -n ${NAMESPACE} rook-ceph-csi-config
echo "rook ceph cluster cleanup"
---
apiVersion: batch/v1
kind: Job
metadata:
name: rook-ceph-cleanup
namespace: {{ $root.Release.Namespace }}
labels:
heritage: {{$root.Release.Service | quote }}
release: {{$root.Release.Name | quote }}
chart: "{{$root.Chart.Name}}-{{$root.Chart.Version}}"
annotations:
"helm.sh/hook": "pre-delete"
"helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
spec:
template:
metadata:
name: rook-ceph-cleanup
namespace: {{ $root.Release.Namespace }}
labels:
heritage: {{$root.Release.Service | quote }}
release: {{$root.Release.Name | quote }}
chart: "{{$root.Chart.Name}}-{{$root.Chart.Version}}"
spec:
restartPolicy: OnFailure
serviceAccountName: {{ $root.Values.hook.cleanup.rbac.serviceAccount }}
volumes:
- name: config-rook-ceph-cleanup
configMap:
name: config-rook-ceph-cleanup
containers:
- name: rook-cleanup
image: {{ .Values.hook.image }}
command: [ "/bin/bash", "/tmp/mount/rook_clean_up.sh" ]
env:
- name: NAMESPACE
value: {{ $root.Release.Namespace }}
volumeMounts:
- name: config-rook-ceph-cleanup
mountPath: /tmp/mount
{{- end }}


@ -0,0 +1,82 @@
{{/*
#
# Copyright (c) 2020 Intel Corporation, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.hook.duplexPreparation.enable }}
{{ $root := . }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: config-rook-ceph-duplex-preparation
namespace: {{ $root.Release.Namespace }}
annotations:
"helm.sh/hook": "pre-install"
"helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
data:
rook_duplex_preparation.sh: |-
#!/bin/bash
cat > endpoint.yaml << EOF
apiVersion: v1
kind: ConfigMap
metadata:
name: rook-ceph-mon-endpoints
namespace: $NAMESPACE
data:
data: a=$FLOAT_IP:6789
mapping: '{"node":{"a":{"Name":"$ACTIVE_CONTROLLER","Hostname":"$ACTIVE_CONTROLLER","Address":"$FLOAT_IP"}}}'
maxMonId: "0"
EOF
kubectl apply -f endpoint.yaml
rm -f endpoint.yaml
---
apiVersion: batch/v1
kind: Job
metadata:
name: rook-ceph-duplex-preparation
namespace: {{ $root.Release.Namespace }}
labels:
heritage: {{$root.Release.Service | quote }}
release: {{$root.Release.Name | quote }}
chart: "{{$root.Chart.Name}}-{{$root.Chart.Version}}"
annotations:
"helm.sh/hook": "pre-install"
"helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
spec:
template:
metadata:
name: rook-ceph-duplex-preparation
namespace: {{ $root.Release.Namespace }}
labels:
heritage: {{$root.Release.Service | quote }}
release: {{$root.Release.Name | quote }}
chart: "{{$root.Chart.Name}}-{{$root.Chart.Version}}"
spec:
serviceAccountName: rook-ceph-system
restartPolicy: OnFailure
volumes:
- name: config-rook-ceph-duplex-preparation
configMap:
name: config-rook-ceph-duplex-preparation
containers:
- name: duplex-preparation
image: {{ .Values.hook.image }}
command: [ "/bin/bash", "/tmp/mount/rook_duplex_preparation.sh" ]
env:
- name: NAMESPACE
value: {{ $root.Release.Namespace }}
- name: ACTIVE_CONTROLLER
value: {{ $root.Values.hook.duplexPreparation.activeController }}
- name: FLOAT_IP
value: {{ $root.Values.hook.duplexPreparation.floatIP }}
volumeMounts:
- name: config-rook-ceph-duplex-preparation
mountPath: /tmp/mount
{{- end }}
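
duplexPreparation is disabled by default; enabling it takes a values fragment along these lines (floating IP invented):
hook:
  duplexPreparation:
    enable: true
    activeController: controller-0
    floatIP: 192.168.204.2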


@ -0,0 +1,25 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{ if .Values.hook.cleanup.enable }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ .Values.hook.cleanup.rbac.role }}
namespace: {{ .Release.Namespace }}
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "create", "list", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "create", "list", "update"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "create", "list", "update"]
{{- end}}


@ -0,0 +1,23 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{ if .Values.hook.cleanup.enable }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ .Values.hook.cleanup.rbac.roleBinding }}
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ .Values.hook.cleanup.rbac.role }}
subjects:
- kind: ServiceAccount
name: {{ .Values.hook.cleanup.rbac.serviceAccount }}
namespace: {{ .Release.Namespace }}
{{- end}}


@ -0,0 +1,17 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{ if .Values.hook.cleanup.enable }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Values.hook.cleanup.rbac.serviceAccount }}
namespace: {{ .Release.Namespace }}
imagePullSecrets:
- name: default-registry-key
{{- end }}


@ -0,0 +1,49 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: rook-ceph-tools
namespace: kube-system
labels:
app: rook-ceph-tools
spec:
replicas: 1
selector:
matchLabels:
app: rook-ceph-tools
template:
metadata:
labels:
app: rook-ceph-tools
spec:
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: rook-ceph-tools
image: "{{ .Values.toolbox.image.repository }}:{{ .Values.toolbox.image.tag }}"
command: ["/tini"]
args: ["-g", "--", "/usr/local/bin/toolbox.sh"]
imagePullPolicy: IfNotPresent
env:
- name: ROOK_ADMIN_SECRET
valueFrom:
secretKeyRef:
name: rook-ceph-mon
key: admin-secret
volumeMounts:
- mountPath: /etc/ceph
name: ceph-config
- name: mon-endpoint-volume
mountPath: /etc/rook
volumes:
- name: mon-endpoint-volume
configMap:
name: rook-ceph-mon-endpoints
items:
- key: data
path: mon-endpoints
- name: ceph-config
emptyDir: {}
tolerations:
- key: "node.kubernetes.io/unreachable"
operator: "Exists"
effect: "NoExecute"
tolerationSeconds: 5
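
The toolbox gives interactive access to the cluster; a typical invocation (namespace as deployed above):
    $ kubectl -n kube-system exec -it deploy/rook-ceph-tools -- ceph status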


@ -0,0 +1,52 @@
# Default values for ceph-cluster
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
cluster:
image:
repository: ceph/ceph
tag: v14.2.10
pullPolicy: IfNotPresent
# Tolerations for the ceph-cluster to allow it to run on nodes with particular taints
tolerations: []
mon:
count: 3
allowMultiplePerNode: false
hostNetwork: true
storage:
storeType: bluestore
databaseSizeMB: 1024
## Annotations to be added to pod
annotations: {}
## LogLevel can be set to: TRACE, DEBUG, INFO, NOTICE, WARNING, ERROR or CRITICAL
logLevel: INFO
# Writing to the hostPath is required for the Ceph mon and osd pods. Given the restricted permissions in OpenShift with SELinux,
# the pod must run privileged in order to write to the hostPath volume; in that case this must be set to true.
hostpathRequiresPrivileged: false
# Disable automatic orchestration when new devices are discovered.
disableDeviceHotplug: false
toolbox:
image:
prefix: rook
repository: rook/ceph
tag: v1.2.7
pullPolicy: IfNotPresent
hook:
image: docker.io/starlingx/ceph-config-helper:v1.15.0
duplexPreparation:
enable: false
activeController: controller-0
    floatIP: 192.168.204.2
cleanup:
enable: true
rbac:
clusterRole: rook-ceph-cleanup
clusterRoleBinding: rook-ceph-cleanup
role: rook-ceph-cleanup
roleBinding: rook-ceph-cleanup
serviceAccount: rook-ceph-cleanup
mon_hosts:
- controller-0


@ -0,0 +1,7 @@
apiVersion: v1
description: File, Block, and Object Storage Services for your Cloud-Native Environment
name: rook-operator
version: 0.1.0
icon: https://rook.io/images/rook-logo.svg
sources:
- https://github.com/rook/rook


@ -0,0 +1 @@
See the [Operator Helm Chart](/Documentation/helm-operator.md) documentation.


@ -0,0 +1,20 @@
The Rook Operator has been installed. Check its status by running:
kubectl --namespace {{ .Release.Namespace }} get pods -l "app=rook-ceph-operator"
Visit https://rook.io/docs/rook/master for instructions on how to create and configure Rook clusters
Note: You cannot just create a CephCluster resource; you also need to create a namespace and
install suitable RBAC roles and role bindings for the cluster. The Rook Operator will not do
this for you. Sample CephCluster manifest templates that include RBAC resources are available:
- https://rook.github.io/docs/rook/master/ceph-quickstart.html
- https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/cluster.yaml
Important Notes:
- The links above are for the unreleased master version, if you deploy a different release you must find matching manifests.
- You must customise the 'CephCluster' resource at the bottom of the sample manifests to meet your situation.
- Each CephCluster must be deployed to its own namespace, the samples use `rook-ceph` for the cluster.
- The sample manifests assume you also installed the rook-ceph operator in the `rook-ceph` namespace.
- The helm chart includes all the RBAC required to create a CephCluster CRD in the same namespace.
- Any disk devices you add to the cluster in the 'CephCluster' must be empty (no filesystem and no partitions).
- In the 'CephCluster' you must refer to disk devices by their '/dev/something' name, e.g. 'sdb' or 'xvde'.


@ -0,0 +1,26 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "fullname" -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Define imagePullSecrets option to pass to all service accounts
*/}}
{{- define "imagePullSecrets" }}
{{- if .Values.imagePullSecrets -}}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets }}
{{- end -}}
{{- end -}}
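
Other templates consume these helpers through the template action; a generic usage sketch:
    metadata:
      name: {{ template "fullname" . }}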


@ -0,0 +1,525 @@
{{- if .Values.operator.rbacEnable }}
# The cluster role for managing all the cluster-specific resources in a namespace
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: rook-ceph-cluster-mgmt
labels:
operator: rook
storage-backend: ceph
aggregationRule:
clusterRoleSelectors:
- matchLabels:
rbac.rook.ceph.io/aggregate-to-rook-ceph-cluster-mgmt: "true"
rules: []
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: rook-ceph-cluster-mgmt-rules
labels:
operator: rook
storage-backend: ceph
rbac.rook.ceph.io/aggregate-to-rook-ceph-cluster-mgmt: "true"
rules:
- apiGroups:
- ""
resources:
- secrets
- pods
- pods/log
- services
- configmaps
verbs:
- get
- list
- watch
- patch
- create
- update
- delete
- apiGroups:
- apps
resources:
- deployments
- daemonsets
verbs:
- get
- list
- watch
- create
- update
- delete
---
# The cluster role for managing the Rook CRDs
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: rook-ceph-global
labels:
operator: rook
storage-backend: ceph
aggregationRule:
clusterRoleSelectors:
- matchLabels:
rbac.ceph.rook.io/aggregate-to-rook-ceph-global: "true"
rules: []
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: rook-ceph-global-rules
labels:
operator: rook
storage-backend: ceph
rbac.ceph.rook.io/aggregate-to-rook-ceph-global: "true"
rules:
- apiGroups:
- ""
resources:
# Pod access is needed for fencing
- pods
# Node access is needed for determining nodes where mons should run
- nodes
- nodes/proxy
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
# PVs and PVCs are managed by the Rook provisioner
- persistentvolumes
- persistentvolumeclaims
- endpoints
verbs:
- get
- list
- watch
- patch
- create
- update
- delete
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- get
- list
- watch
- apiGroups:
- batch
resources:
- jobs
- cronjobs
verbs:
- get
- list
- watch
- create
- update
- delete
- apiGroups:
- ceph.rook.io
resources:
- "*"
verbs:
- "*"
- apiGroups:
- rook.io
resources:
- "*"
verbs:
- "*"
- apiGroups:
- policy
- apps
resources:
# This is for the clusterdisruption controller
- poddisruptionbudgets
# This is for both clusterdisruption and nodedrain controllers
- deployments
- replicasets
verbs:
- "*"
- apiGroups:
- healthchecking.openshift.io
resources:
- machinedisruptionbudgets
verbs:
- get
- list
- watch
- create
- update
- delete
- apiGroups:
- machine.openshift.io
resources:
- machines
verbs:
- get
- list
- watch
- create
- update
- delete
- apiGroups:
- storage.k8s.io
resources:
- csidrivers
verbs:
- create
---
# Aspects of ceph-mgr that require cluster-wide access
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-mgr-cluster
labels:
operator: rook
storage-backend: ceph
aggregationRule:
clusterRoleSelectors:
- matchLabels:
rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster: "true"
rules: []
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-mgr-cluster-rules
labels:
operator: rook
storage-backend: ceph
rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster: "true"
rules:
- apiGroups:
- ""
resources:
- configmaps
- nodes
- nodes/proxy
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- list
- get
- watch
---
# Aspects of ceph-mgr that require access to the system namespace
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-mgr-system
aggregationRule:
clusterRoleSelectors:
- matchLabels:
rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-system: "true"
rules: []
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-mgr-system-rules
labels:
rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-system: "true"
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-object-bucket
labels:
operator: rook
storage-backend: ceph
rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster: "true"
rules:
- apiGroups:
- ""
verbs:
- "*"
resources:
- secrets
- configmaps
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- get
- list
- watch
- apiGroups:
- "objectbucket.io"
verbs:
- "*"
resources:
- "*"
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-osd
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
{{- if and .Values.operator.agent .Values.operator.agent.mountSecurityMode (ne .Values.operator.agent.mountSecurityMode "Any") }}
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: rook-ceph-agent-mount
labels:
operator: rook
storage-backend: ceph
aggregationRule:
clusterRoleSelectors:
- matchLabels:
rbac.ceph.rook.io/aggregate-to-rook-ceph-agent-mount: "true"
rules: []
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: rook-ceph-agent-mount-rules
labels:
operator: rook
storage-backend: ceph
rbac.ceph.rook.io/aggregate-to-rook-ceph-agent-mount: "true"
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
{{- end }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-nodeplugin
aggregationRule:
clusterRoleSelectors:
- matchLabels:
rbac.ceph.rook.io/aggregate-to-cephfs-csi-nodeplugin: "true"
rules: []
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-nodeplugin-rules
labels:
rbac.ceph.rook.io/aggregate-to-cephfs-csi-nodeplugin: "true"
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "update"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-external-provisioner-runner
aggregationRule:
clusterRoleSelectors:
- matchLabels:
rbac.ceph.rook.io/aggregate-to-cephfs-external-provisioner-runner: "true"
rules: []
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-external-provisioner-runner-rules
labels:
rbac.ceph.rook.io/aggregate-to-cephfs-external-provisioner-runner: "true"
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-nodeplugin
aggregationRule:
clusterRoleSelectors:
- matchLabels:
rbac.ceph.rook.io/aggregate-to-rbd-csi-nodeplugin: "true"
rules: []
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-nodeplugin-rules
labels:
rbac.ceph.rook.io/aggregate-to-rbd-csi-nodeplugin: "true"
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "update"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-external-provisioner-runner
aggregationRule:
clusterRoleSelectors:
- matchLabels:
rbac.ceph.rook.io/aggregate-to-rbd-external-provisioner-runner: "true"
rules: []
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-external-provisioner-runner-rules
labels:
rbac.ceph.rook.io/aggregate-to-rbd-external-provisioner-runner: "true"
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create", "list", "watch", "delete", "get", "update"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots/status"]
verbs: ["update"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
{{- end }}
{{- if .Values.operator.pspEnable }}
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: rook-ceph-system-psp-user
labels:
operator: rook
storage-backend: ceph
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
aggregationRule:
clusterRoleSelectors:
- matchLabels:
rbac.ceph.rook.io/aggregate-to-rook-ceph-system-psp-user: "true"
rules: []
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: 'psp:rook'
labels:
operator: rook
storage-backend: ceph
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
rbac.ceph.rook.io/aggregate-to-rook-ceph-system-psp-user: "true"
rules:
- apiGroups:
- policy
resources:
- podsecuritypolicies
resourceNames:
- 00-rook-ceph-operator
verbs:
- use
{{- end }}


@ -0,0 +1,260 @@
{{- if .Values.operator.rbacEnable }}
# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-global
labels:
operator: rook
storage-backend: ceph
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rook-ceph-global
subjects:
- kind: ServiceAccount
name: rook-ceph-system
namespace: {{ .Release.Namespace }}
---
# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-mgr-cluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rook-ceph-mgr-cluster
subjects:
- kind: ServiceAccount
name: rook-ceph-mgr
namespace: {{ .Release.Namespace }}
---
# Allow the ceph osd to access cluster-wide resources necessary for determining their topology location
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-osd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rook-ceph-osd
subjects:
- kind: ServiceAccount
name: rook-ceph-osd
namespace: {{ .Release.Namespace }}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-object-bucket
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rook-ceph-object-bucket
subjects:
- kind: ServiceAccount
name: rook-ceph-system
namespace: {{ .Release.Namespace }}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-nodeplugin
subjects:
- kind: ServiceAccount
name: rook-csi-cephfs-plugin-sa
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: cephfs-csi-nodeplugin
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-provisioner-role
subjects:
- kind: ServiceAccount
name: rook-csi-cephfs-provisioner-sa
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: cephfs-external-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-provisioner-role
subjects:
- kind: ServiceAccount
name: rook-csi-rbd-provisioner-sa
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: rbd-external-provisioner-runner
apiGroup: rbac.authorization.k8s.io
{{- if .Values.operator.pspEnable }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: rook-ceph-system-psp
labels:
operator: rook
storage-backend: ceph
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'psp:rook'
subjects:
- kind: ServiceAccount
name: rook-ceph-system
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: rook-ceph-default-psp
namespace: {{ .Release.Namespace }}
labels:
operator: rook
storage-backend: ceph
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'psp:rook'
subjects:
- kind: ServiceAccount
name: default
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: rook-ceph-system-psp-users
labels:
operator: rook
storage-backend: ceph
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rook-ceph-system-psp-user
subjects:
- kind: ServiceAccount
name: rook-ceph-system
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: rook-csi-cephfs-provisioner-sa-psp
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'psp:rook'
subjects:
- kind: ServiceAccount
name: rook-csi-cephfs-provisioner-sa
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: rook-csi-cephfs-plugin-sa-psp
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'psp:rook'
subjects:
- kind: ServiceAccount
name: rook-csi-cephfs-plugin-sa
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: rook-csi-rbd-plugin-sa-psp
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'psp:rook'
subjects:
- kind: ServiceAccount
name: rook-csi-rbd-plugin-sa
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: rook-csi-rbd-provisioner-sa-psp
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'psp:rook'
subjects:
- kind: ServiceAccount
name: rook-csi-rbd-provisioner-sa
namespace: {{ .Release.Namespace }}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-nodeplugin
subjects:
- kind: ServiceAccount
name: rook-csi-rbd-plugin-sa
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: rbd-csi-nodeplugin
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: rook-ceph-osd-psp
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'psp:rook'
subjects:
- kind: ServiceAccount
name: rook-ceph-osd
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: rook-ceph-mgr-psp
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'psp:rook'
subjects:
- kind: ServiceAccount
name: rook-ceph-mgr
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: rook-ceph-cmd-reporter-psp
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'psp:rook'
subjects:
- kind: ServiceAccount
name: rook-ceph-cmd-reporter
namespace: {{ .Release.Namespace }}
{{- end }}
{{- end }}


@ -0,0 +1,282 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: config-sa-init
namespace: {{ .Release.Namespace }}
data:
check_sa_default.sh: |-
#!/bin/bash
kubectl describe sa default -n $NAMESPACE | grep "${IMAGE_PULL_SECRET}"
if [ $? -ne 0 ]; then
echo "add image pull secret to default account"
kubectl patch sa default -n $NAMESPACE --type=merge -p '{"imagePullSecrets": [{"name": "'${IMAGE_PULL_SECRET}'"}]}'
else
echo "${IMAGE_PULL_SECRET} already set to default service account"
fi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: rook-ceph-operator
namespace: {{ .Release.Namespace }}
labels:
operator: rook
storage-backend: ceph
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
spec:
replicas: 1
selector:
matchLabels:
app: rook-ceph-operator
template:
metadata:
labels:
app: rook-ceph-operator
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
{{- if .Values.operator.annotations }}
annotations:
{{ toYaml .Values.operator.annotations | indent 8 }}
{{- end }}
spec:
volumes:
- name: config-sa-volume-init
configMap:
name: config-sa-init
initContainers:
- name: rook-sa-init
image: "{{ .Values.saInit.images.tags.sa_init_provisioner }}"
imagePullPolicy: IfNotPresent
command: [ "/bin/bash", "/tmp/mount/check_sa_default.sh" ]
env:
- name: NAMESPACE
value: {{ .Release.Namespace }}
- name: IMAGE_PULL_SECRET
value: default-registry-key
volumeMounts:
- name: config-sa-volume-init
mountPath: /tmp/mount
containers:
- name: rook-ceph-operator
image: "{{ .Values.operator.image.repository }}:{{ .Values.operator.image.tag }}"
imagePullPolicy: {{ .Values.operator.image.pullPolicy }}
args: ["ceph", "operator"]
env:
- name: ROOK_CURRENT_NAMESPACE_ONLY
value: {{ .Values.operator.currentNamespaceOnly | quote }}
{{- if not .Values.operator.rbacEnable }}
- name: RBAC_ENABLED
value: "false"
{{- end }}
{{- if .Values.operator.agent }}
{{- if .Values.operator.agent.toleration }}
- name: AGENT_TOLERATION
value: {{ .Values.operator.agent.toleration }}
{{- end }}
{{- if .Values.operator.agent.tolerationKey }}
- name: AGENT_TOLERATION_KEY
value: {{ .Values.operator.agent.tolerationKey }}
{{- end }}
{{- if .Values.operator.agent.tolerations }}
- name: AGENT_TOLERATIONS
value: {{ toYaml .Values.operator.agent.tolerations | quote }}
{{- end }}
{{- if .Values.operator.agent.nodeAffinity }}
- name: AGENT_NODE_AFFINITY
value: {{ .Values.operator.agent.nodeAffinity }}
{{- end }}
{{- if .Values.operator.agent.priorityClassName }}
- name: AGENT_PRIORITY_CLASS_NAME
value: {{ .Values.operator.agent.priorityClassName }}
{{- end }}
{{- if .Values.operator.agent.mountSecurityMode }}
- name: AGENT_MOUNT_SECURITY_MODE
value: {{ .Values.operator.agent.mountSecurityMode }}
{{- end }}
{{- if .Values.operator.agent.flexVolumeDirPath }}
- name: FLEXVOLUME_DIR_PATH
value: {{ .Values.operator.agent.flexVolumeDirPath }}
{{- end }}
{{- if .Values.operator.agent.libModulesDirPath }}
- name: LIB_MODULES_DIR_PATH
value: {{ .Values.operator.agent.libModulesDirPath }}
{{- end }}
{{- if .Values.operator.agent.mounts }}
- name: AGENT_MOUNTS
value: {{ .Values.operator.agent.mounts }}
{{- end }}
{{- end }}
{{- if .Values.operator.discover }}
{{- if .Values.operator.discover.toleration }}
- name: DISCOVER_TOLERATION
value: {{ .Values.operator.discover.toleration }}
{{- end }}
{{- if .Values.operator.discover.tolerationKey }}
- name: DISCOVER_TOLERATION_KEY
value: {{ .Values.operator.discover.tolerationKey }}
{{- end }}
{{- if .Values.operator.discover.tolerations }}
- name: DISCOVER_TOLERATIONS
value: {{ toYaml .Values.operator.discover.tolerations | quote }}
{{- end }}
{{- if .Values.operator.discover.priorityClassName }}
- name: DISCOVER_PRIORITY_CLASS_NAME
value: {{ .Values.operator.discover.priorityClassName }}
{{- end }}
{{- if .Values.operator.discover.nodeAffinity }}
- name: DISCOVER_AGENT_NODE_AFFINITY
value: {{ .Values.operator.discover.nodeAffinity }}
{{- end }}
{{- end }}
- name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED
value: "{{ .Values.operator.hostpathRequiresPrivileged }}"
- name: ROOK_LOG_LEVEL
value: {{ .Values.operator.logLevel }}
- name: ROOK_ENABLE_SELINUX_RELABELING
value: "{{ .Values.operator.enableSelinuxRelabeling }}"
- name: ROOK_DISABLE_DEVICE_HOTPLUG
value: "{{ .Values.operator.disableDeviceHotplug }}"
{{- if .Values.operator.csi }}
- name: ROOK_CSI_ENABLE_RBD
value: {{ .Values.operator.csi.enableRbdDriver | quote }}
- name: ROOK_CSI_ENABLE_CEPHFS
value: {{ .Values.operator.csi.enableCephfsDriver | quote }}
- name: CSI_ENABLE_SNAPSHOTTER
value: {{ .Values.operator.csi.enableSnapshotter | quote }}
{{- if .Values.operator.csi.cephFSPluginUpdateStrategy }}
- name: CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY
value: {{ .Values.operator.csi.cephFSPluginUpdateStrategy | quote }}
{{- end }}
{{- if .Values.operator.csi.rbdPluginUpdateStrategy }}
- name: CSI_RBD_PLUGIN_UPDATE_STRATEGY
value: {{ .Values.operator.csi.rbdPluginUpdateStrategy | quote }}
{{- end }}
{{- if .Values.operator.csi.kubeletDirPath }}
- name: ROOK_CSI_KUBELET_DIR_PATH
value: {{ .Values.operator.csi.kubeletDirPath | quote }}
{{- end }}
- name: ROOK_CSI_ENABLE_GRPC_METRICS
value: {{ .Values.operator.csi.enableGrpcMetrics | quote }}
{{- if .Values.operator.csi.cephcsi }}
{{- if .Values.operator.csi.cephcsi.image }}
- name: ROOK_CSI_CEPH_IMAGE
value: {{ .Values.operator.csi.cephcsi.image | quote }}
{{- end }}
{{- end }}
{{- if .Values.operator.csi.registrar }}
{{- if .Values.operator.csi.registrar.image }}
- name: ROOK_CSI_REGISTRAR_IMAGE
value: {{ .Values.operator.csi.registrar.image | quote }}
{{- end }}
{{- end }}
{{- if .Values.operator.csi.provisioner }}
{{- if .Values.operator.csi.provisioner.image }}
- name: ROOK_CSI_PROVISIONER_IMAGE
value: {{ .Values.operator.csi.provisioner.image | quote }}
{{- end }}
{{- end }}
{{- if .Values.operator.csi.snapshotter }}
{{- if .Values.operator.csi.snapshotter.image }}
- name: ROOK_CSI_SNAPSHOTTER_IMAGE
value: {{ .Values.operator.csi.snapshotter.image | quote }}
{{- end }}
{{- end }}
{{- if .Values.operator.csi.attacher }}
{{- if .Values.operator.csi.attacher.image }}
- name: ROOK_CSI_ATTACHER_IMAGE
value: {{ .Values.operator.csi.attacher.image | quote }}
{{- end }}
{{- end }}
{{- if .Values.operator.csi.resizer }}
{{- if .Values.operator.csi.resizer.image }}
- name: ROOK_CSI_RESIZER_IMAGE
value: {{ .Values.operator.csi.resizer.image | quote }}
{{- end }}
{{- end }}
{{- if .Values.operator.csi.provisionerTolerations }}
- name: CSI_PROVISIONER_TOLERATIONS
value: {{ toYaml .Values.operator.csi.provisionerTolerations | quote }}
{{- end }}
{{- if .Values.operator.csi.provisionerNodeAffinity }}
- name: CSI_PROVISIONER_NODE_AFFINITY
value: {{ .Values.operator.csi.provisionerNodeAffinity }}
{{- end }}
{{- if .Values.operator.csi.pluginTolerations }}
- name: CSI_PLUGIN_TOLERATIONS
value: {{ toYaml .Values.operator.csi.pluginTolerations | quote }}
{{- end }}
{{- if .Values.operator.csi.pluginNodeAffinity }}
- name: CSI_PLUGIN_NODE_AFFINITY
value: {{ .Values.operator.csi.pluginNodeAffinity }}
{{- end }}
{{- if .Values.operator.csi.cephfsGrpcMetricsPort }}
- name: CSI_CEPHFS_GRPC_METRICS_PORT
value: {{ .Values.operator.csi.cephfsGrpcMetricsPort | quote }}
{{- end }}
{{- if .Values.operator.csi.cephfsLivenessMetricsPort }}
- name: CSI_CEPHFS_LIVENESS_METRICS_PORT
value: {{ .Values.operator.csi.cephfsLivenessMetricsPort | quote }}
{{- end }}
{{- if .Values.operator.csi.rbdGrpcMetricsPort }}
- name: CSI_RBD_GRPC_METRICS_PORT
value: {{ .Values.operator.csi.rbdGrpcMetricsPort | quote }}
{{- end }}
{{- if .Values.operator.csi.rbdLivenessMetricsPort }}
- name: CSI_RBD_LIVENESS_METRICS_PORT
value: {{ .Values.operator.csi.rbdLivenessMetricsPort | quote }}
{{- end }}
{{- if .Values.operator.csi.forceCephFSKernelClient }}
- name: CSI_FORCE_CEPHFS_KERNEL_CLIENT
value: {{ .Values.operator.csi.forceCephFSKernelClient | quote }}
{{- end }}
{{- end }}
- name: ROOK_ENABLE_FLEX_DRIVER
value: "{{ .Values.operator.enableFlexDriver }}"
- name: ROOK_ENABLE_DISCOVERY_DAEMON
value: "{{ .Values.operator.enableDiscoveryDaemon }}"
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- if .Values.operator.cephStatusCheckInterval }}
- name: ROOK_CEPH_STATUS_CHECK_INTERVAL
value: {{ .Values.operator.cephStatusCheckInterval }}
{{- end }}
{{- if .Values.operator.mon }}
{{- if .Values.operator.mon.healthCheckInterval }}
- name: ROOK_MON_HEALTHCHECK_INTERVAL
value: {{ .Values.operator.mon.healthCheckInterval }}
{{- end }}
{{- if .Values.operator.mon.monOutTimeout }}
- name: ROOK_MON_OUT_TIMEOUT
value: {{ .Values.operator.mon.monOutTimeout }}
{{- end }}
{{- end }}
{{- if .Values.operator.unreachableNodeTolerationSeconds }}
- name: ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS
value: {{ .Values.operator.unreachableNodeTolerationSeconds | quote }}
{{- end }}
resources:
{{ toYaml .Values.operator.resources | indent 10 }}
{{- if .Values.operator.useOperatorHostNetwork }}
hostNetwork: true
{{- end }}
{{- if .Values.operator.nodeSelector }}
nodeSelector:
{{ toYaml .Values.operator.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.operator.tolerations }}
tolerations:
{{ toYaml .Values.operator.tolerations | indent 8 }}
{{- end }}
{{- if .Values.operator.rbacEnable }}
serviceAccountName: rook-ceph-system
{{- end }}
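
The sa-init initContainer above runs check_sa_default.sh before the operator starts, so pods that use the default service account can pull from the private registry. A minimal sketch of the account it should leave behind (namespace assumed to be kube-system, per the Armada manifest later in this change):

apiVersion: v1
kind: ServiceAccount
metadata:
  name: default
  namespace: kube-system
imagePullSecrets:
- name: default-registry-key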


@ -0,0 +1,66 @@
{{/*
#
# Copyright (c) 2020 Intel Corporation, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.cleanup.enable }}
{{ $root := . }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: config-rook-cleanup
namespace: {{ $root.Release.Namespace }}
data:
rook_clean_up.sh: |-
#!/bin/bash
    kubectl delete cronjob -n "${NAMESPACE}" stx-ceph-mon-audit --ignore-not-found
    kubectl delete cephclusters.ceph.rook.io "${CLUSTER_NAME}" -n "${NAMESPACE}"
    echo "ceph cluster ${CLUSTER_NAME} deleted"
---
apiVersion: batch/v1
kind: Job
metadata:
name: rook-cleanup
namespace: {{ $root.Release.Namespace }}
labels:
heritage: {{$root.Release.Service | quote }}
release: {{$root.Release.Name | quote }}
chart: "{{$root.Chart.Name}}-{{$root.Chart.Version}}"
annotations:
"helm.sh/hook": "pre-delete"
"helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
spec:
template:
metadata:
name: rook-pre-delete-cleanup
namespace: {{ $root.Release.Namespace }}
labels:
heritage: {{$root.Release.Service | quote }}
release: {{$root.Release.Name | quote }}
chart: "{{$root.Chart.Name}}-{{$root.Chart.Version}}"
spec:
restartPolicy: OnFailure
serviceAccountName: rook-ceph-system
volumes:
- name: config-rook-cleanup
configMap:
name: config-rook-cleanup
containers:
- name: rook-cleanup
image: {{ .Values.saInit.images.tags.sa_init_provisioner }}
command: [ "/bin/bash", "/tmp/mount/rook_clean_up.sh" ]
env:
- name: NAMESPACE
value: {{ $root.Release.Namespace }}
- name: CLUSTER_NAME
value: {{ .Values.cleanup.cluster_cleanup }}
volumeMounts:
- name: config-rook-cleanup
mountPath: /tmp/mount
{{- end }}
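
The pre-delete hook above runs only when cleanup.enable is true; a minimal override sketch to keep the Ceph cluster in place when the chart is deleted (keys match this chart's values.yaml below):

cleanup:
  enable: false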


@ -0,0 +1,80 @@
{{- if .Values.pspEnable }}
# PSP for rook-ceph-operator
# Most teams follow the Kubernetes docs and already have these PSPs:
# * privileged (for the kube-system namespace)
# * restricted (for all logged-in users)
#
# If this PSP were named `rook-ceph-operator`, it would sort right after `restricted` alphabetically,
# and the `restricted` capabilities would be applied to `rook-system`. That is why it is named
# `00-rook-ceph-operator`: it sorts near the top, so `rook-system` gets the intended PSP.
#
# More info on PSP ordering: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#policy-order
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: 00-rook-ceph-operator
spec:
privileged: true
allowedCapabilities:
# required by CSI
- SYS_ADMIN
# fsGroup - the flexVolume agent has fsGroup capabilities and could potentially be any group
fsGroup:
rule: RunAsAny
# runAsUser, supplementalGroups - Rook needs to run some pods as root
# Ceph pods could be run as the Ceph user, but that user isn't always known ahead of time
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
# seLinux - seLinux context is unknown ahead of time; set if this is well-known
seLinux:
rule: RunAsAny
volumes:
# recommended minimum set
- configMap
- downwardAPI
- emptyDir
- persistentVolumeClaim
- secret
- projected
# required for Rook
- hostPath
- flexVolume
# allowedHostPaths can be set to Rook's known host volume mount points when they are fully-known
# directory-based OSDs make this hard to nail down
# allowedHostPaths:
# - pathPrefix: "/run/udev" # for OSD prep
# readOnly: false
# - pathPrefix: "/dev" # for OSD prep
# readOnly: false
# - pathPrefix: "/var/lib/rook" # or whatever the dataDirHostPath value is set to
# readOnly: false
# Ceph requires host IPC for setting up encrypted devices
hostIPC: true
# Ceph OSDs need to share the same PID namespace
hostPID: true
# hostNetwork can be set to 'false' if host networking isn't used
hostNetwork: true
hostPorts:
# Ceph messenger protocol v1
- min: 6789
max: 6790 # <- support old default port
# Ceph messenger protocol v2
- min: 3300
max: 3300
# Ceph RADOS ports for OSDs, MDSes
- min: 6800
max: 7300
# # Ceph dashboard port HTTP (not recommended)
# - min: 7000
# max: 7000
# Ceph dashboard port HTTPS
- min: 8443
max: 8443
# Ceph mgr Prometheus Metrics
- min: 9283
max: 9283
{{- end }}
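
The policy above is rendered only when pspEnable is set; a minimal override sketch to turn it on (the template reads .Values.pspEnable at the chart's top level, so the key is assumed to live there; adjust if your values nest it elsewhere):

pspEnable: true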


@ -0,0 +1,499 @@
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: cephclusters.ceph.rook.io
spec:
group: ceph.rook.io
names:
kind: CephCluster
listKind: CephClusterList
plural: cephclusters
singular: cephcluster
scope: Namespaced
version: v1
validation:
openAPIV3Schema:
properties:
spec:
properties:
annotations: {}
cephVersion:
properties:
allowUnsupported:
type: boolean
image:
type: string
dashboard:
properties:
enabled:
type: boolean
urlPrefix:
type: string
port:
type: integer
minimum: 0
maximum: 65535
ssl:
type: boolean
dataDirHostPath:
pattern: ^/(\S+)
type: string
disruptionManagement:
properties:
machineDisruptionBudgetNamespace:
type: string
managePodBudgets:
type: boolean
osdMaintenanceTimeout:
type: integer
manageMachineDisruptionBudgets:
type: boolean
skipUpgradeChecks:
type: boolean
continueUpgradeAfterChecksEvenIfNotHealthy:
type: boolean
mon:
properties:
allowMultiplePerNode:
type: boolean
count:
maximum: 9
minimum: 0
type: integer
volumeClaimTemplate: {}
mgr:
properties:
modules:
items:
properties:
name:
type: string
enabled:
type: boolean
network:
properties:
hostNetwork:
type: boolean
provider:
type: string
selectors: {}
storage:
properties:
disruptionManagement:
properties:
machineDisruptionBudgetNamespace:
type: string
managePodBudgets:
type: boolean
osdMaintenanceTimeout:
type: integer
manageMachineDisruptionBudgets:
type: boolean
useAllNodes:
type: boolean
nodes:
items:
properties:
name:
type: string
config:
properties:
metadataDevice:
type: string
storeType:
type: string
pattern: ^(filestore|bluestore)$
databaseSizeMB:
type: string
walSizeMB:
type: string
journalSizeMB:
type: string
osdsPerDevice:
type: string
encryptedDevice:
type: string
pattern: ^(true|false)$
useAllDevices:
type: boolean
deviceFilter: {}
directories:
type: array
items:
properties:
path:
type: string
devices:
type: array
items:
properties:
name:
type: string
config: {}
resources: {}
type: array
useAllDevices:
type: boolean
deviceFilter: {}
directories:
type: array
items:
properties:
path:
type: string
config: {}
storageClassDeviceSets: {}
monitoring:
properties:
enabled:
type: boolean
rulesNamespace:
type: string
rbdMirroring:
properties:
workers:
type: integer
removeOSDsIfOutAndSafeToRemove:
type: boolean
external:
properties:
enable:
type: boolean
placement: {}
resources: {}
additionalPrinterColumns:
- name: DataDirHostPath
type: string
description: Directory used on the K8s nodes
JSONPath: .spec.dataDirHostPath
- name: MonCount
type: string
description: Number of MONs
JSONPath: .spec.mon.count
- name: Age
type: date
JSONPath: .metadata.creationTimestamp
- name: State
type: string
description: Current State
JSONPath: .status.state
- name: Health
type: string
description: Ceph Health
JSONPath: .status.ceph.health
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: cephfilesystems.ceph.rook.io
spec:
group: ceph.rook.io
names:
kind: CephFilesystem
listKind: CephFilesystemList
plural: cephfilesystems
singular: cephfilesystem
scope: Namespaced
version: v1
validation:
openAPIV3Schema:
properties:
spec:
properties:
metadataServer:
properties:
activeCount:
minimum: 1
maximum: 10
type: integer
activeStandby:
type: boolean
annotations: {}
placement: {}
resources: {}
metadataPool:
properties:
failureDomain:
type: string
replicated:
properties:
size:
minimum: 0
maximum: 10
type: integer
erasureCoded:
properties:
dataChunks:
minimum: 0
maximum: 10
type: integer
codingChunks:
minimum: 0
maximum: 10
type: integer
dataPools:
type: array
items:
properties:
failureDomain:
type: string
replicated:
properties:
size:
minimum: 0
maximum: 10
type: integer
erasureCoded:
properties:
dataChunks:
minimum: 0
maximum: 10
type: integer
codingChunks:
minimum: 0
maximum: 10
type: integer
preservePoolsOnDelete:
type: boolean
additionalPrinterColumns:
- name: ActiveMDS
type: string
description: Number of desired active MDS daemons
JSONPath: .spec.metadataServer.activeCount
- name: Age
type: date
JSONPath: .metadata.creationTimestamp
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: cephnfses.ceph.rook.io
spec:
group: ceph.rook.io
names:
kind: CephNFS
listKind: CephNFSList
plural: cephnfses
singular: cephnfs
shortNames:
- nfs
scope: Namespaced
version: v1
validation:
openAPIV3Schema:
properties:
spec:
properties:
rados:
properties:
pool:
type: string
namespace:
type: string
server:
properties:
active:
type: integer
annotations: {}
placement: {}
resources: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: cephobjectstores.ceph.rook.io
spec:
group: ceph.rook.io
names:
kind: CephObjectStore
listKind: CephObjectStoreList
plural: cephobjectstores
singular: cephobjectstore
scope: Namespaced
version: v1
validation:
openAPIV3Schema:
properties:
spec:
properties:
gateway:
properties:
type:
type: string
sslCertificateRef: {}
port:
type: integer
securePort: {}
instances:
type: integer
annotations: {}
placement: {}
resources: {}
metadataPool:
properties:
failureDomain:
type: string
replicated:
properties:
size:
type: integer
erasureCoded:
properties:
dataChunks:
type: integer
codingChunks:
type: integer
dataPool:
properties:
failureDomain:
type: string
replicated:
properties:
size:
type: integer
erasureCoded:
properties:
dataChunks:
type: integer
codingChunks:
type: integer
preservePoolsOnDelete:
type: boolean
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: cephobjectstoreusers.ceph.rook.io
spec:
group: ceph.rook.io
names:
kind: CephObjectStoreUser
listKind: CephObjectStoreUserList
plural: cephobjectstoreusers
singular: cephobjectstoreuser
shortNames:
- rcou
- objectuser
scope: Namespaced
version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: cephblockpools.ceph.rook.io
spec:
group: ceph.rook.io
names:
kind: CephBlockPool
listKind: CephBlockPoolList
plural: cephblockpools
singular: cephblockpool
scope: Namespaced
version: v1
validation:
openAPIV3Schema:
properties:
spec:
properties:
failureDomain:
type: string
replicated:
properties:
size:
type: integer
minimum: 0
maximum: 9
targetSizeRatio:
type: number
erasureCoded:
properties:
dataChunks:
type: integer
minimum: 0
maximum: 9
codingChunks:
type: integer
minimum: 0
maximum: 9
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: volumes.rook.io
spec:
group: rook.io
names:
kind: Volume
listKind: VolumeList
plural: volumes
singular: volume
shortNames:
- rv
scope: Namespaced
version: v1alpha2
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: objectbuckets.objectbucket.io
spec:
group: objectbucket.io
versions:
- name: v1alpha1
served: true
storage: true
names:
kind: ObjectBucket
listKind: ObjectBucketList
plural: objectbuckets
singular: objectbucket
shortNames:
- ob
- obs
scope: Cluster
subresources:
status: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: objectbucketclaims.objectbucket.io
spec:
versions:
- name: v1alpha1
served: true
storage: true
group: objectbucket.io
names:
kind: ObjectBucketClaim
listKind: ObjectBucketClaimList
plural: objectbucketclaims
singular: objectbucketclaim
shortNames:
- obc
- obcs
scope: Namespaced
subresources:
status: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: cephclients.ceph.rook.io
spec:
group: ceph.rook.io
names:
kind: CephClient
listKind: CephClientList
plural: cephclients
singular: cephclient
scope: Namespaced
version: v1
validation:
openAPIV3Schema:
properties:
spec:
properties:
caps:
type: object
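
The cephclusters schema above constrains the storage section that OSD configuration ultimately feeds into; a sketch of a CephCluster spec fragment that validates against it (host and device names are illustrative, and the size fields are strings per the schema):

spec:
  storage:
    useAllNodes: false
    nodes:
    - name: worker-0
      devices:
      - name: sdc
        config:
          storeType: bluestore
          journalSizeMB: "1024"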


@ -0,0 +1,133 @@
{{- if .Values.operator.rbacEnable }}
# The role for the operator to manage resources in its own namespace
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: rook-ceph-system
labels:
operator: rook
storage-backend: ceph
rules:
- apiGroups:
- ""
resources:
- pods
- configmaps
- services
- serviceaccounts
verbs:
- get
- list
- watch
- patch
- create
- update
- delete
- apiGroups:
- apps
resources:
- daemonsets
- statefulsets
  - deployments
verbs:
- get
- list
- watch
- create
- update
- delete
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-osd
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: [ "get", "list", "watch", "create", "update", "delete" ]
- apiGroups: ["ceph.rook.io"]
resources: ["cephclusters", "cephclusters/finalizers"]
verbs: [ "get", "list", "create", "update", "delete" ]
---
# Aspects of ceph-mgr that operate within the cluster's namespace
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-mgr
rules:
- apiGroups:
- ""
resources:
- pods
- services
verbs:
- get
- list
- watch
- apiGroups:
- batch
resources:
- jobs
verbs:
- get
- list
- watch
- create
- update
- delete
- apiGroups:
- ceph.rook.io
resources:
- "*"
verbs:
- "*"
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-cmd-reporter
rules:
- apiGroups:
- ""
resources:
- pods
- configmaps
verbs:
- get
- list
- watch
- create
- update
- delete
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-external-provisioner-cfg
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-external-provisioner-cfg
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
{{- end }}


@ -0,0 +1,121 @@
{{- if .Values.operator.rbacEnable }}
# Grant the operator, agent, and discovery agents access to resources in the rook-ceph-system namespace
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-system
namespace: {{ .Release.Namespace }}
labels:
operator: rook
storage-backend: ceph
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: rook-ceph-system
subjects:
- kind: ServiceAccount
name: rook-ceph-system
namespace: {{ .Release.Namespace }}
---
# Allow the operator to create resources in this cluster's namespace
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-cluster-mgmt
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rook-ceph-cluster-mgmt
subjects:
- kind: ServiceAccount
name: rook-ceph-system
namespace: {{ .Release.Namespace }}
---
# Allow the osd pods in this namespace to work with configmaps
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-osd
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: rook-ceph-osd
subjects:
- kind: ServiceAccount
name: rook-ceph-osd
namespace: {{ .Release.Namespace }}
---
# Allow the ceph mgr to access the cluster-specific resources necessary for the mgr modules
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-mgr
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: rook-ceph-mgr
subjects:
- kind: ServiceAccount
name: rook-ceph-mgr
namespace: {{ .Release.Namespace }}
---
# Allow the ceph mgr to access the rook system resources necessary for the mgr modules
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-mgr-system
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rook-ceph-mgr-system
subjects:
- kind: ServiceAccount
name: rook-ceph-mgr
namespace: {{ .Release.Namespace }}
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-cmd-reporter
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: rook-ceph-cmd-reporter
subjects:
- kind: ServiceAccount
name: rook-ceph-cmd-reporter
namespace: {{ .Release.Namespace }}
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-provisioner-role-cfg
namespace: {{ .Release.Namespace }}
subjects:
- kind: ServiceAccount
name: rook-csi-cephfs-provisioner-sa
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role
name: cephfs-external-provisioner-cfg
apiGroup: rbac.authorization.k8s.io
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-provisioner-role-cfg
namespace: {{ .Release.Namespace }}
subjects:
- kind: ServiceAccount
name: rook-csi-rbd-provisioner-sa
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role
name: rbd-external-provisioner-cfg
apiGroup: rbac.authorization.k8s.io
{{- end }}


@ -0,0 +1,32 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: rook-config-override
namespace: {{ .Release.Namespace }}
data:
config: |
[global]
osd_journal_size = 1024
osd_pool_default_size = 1
osd_pool_default_min_size = 1
osd_pool_default_pg_num = 64
osd_pool_default_pgp_num = 64
osd_crush_chooseleaf_type = 1
setuser match path = /var/lib/ceph/$type/$cluster-$id
mon_osd_min_down_reporters = 1
osd_mon_report_interval_max = 120
mon_max_pg_per_osd = 2048
osd_max_pg_per_osd_hard_ratio = 1.2
ms_bind_ipv6 = false
[osd]
osd_mkfs_type = xfs
osd_mkfs_options_xfs = "-f"
osd_mount_options_xfs = "rw,noatime,inode64,logbufs=8,logbsize=256k"
[mon]
mon warn on legacy crush tunables = false
mon pg warn max per osd = 2048
mon pg warn max object skew = 0
mon clock drift allowed = .1
mon warn on pool no redundancy = false
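
Rook merges this ConfigMap (the rook-config-override name is fixed) into each daemon's ceph.conf; a sketch of a post-deployment edit raising the default pool size (namespace assumed to be kube-system; affected daemons must be restarted to pick up the change):

apiVersion: v1
kind: ConfigMap
metadata:
  name: rook-config-override
  namespace: kube-system
data:
  config: |
    [global]
    osd_pool_default_size = 2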


@ -0,0 +1,70 @@
# Service account for the operator
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-ceph-system
labels:
operator: rook
storage-backend: ceph
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
{{ template "imagePullSecrets" . }}
---
# Service account for the Ceph OSDs. Must exist and cannot be renamed.
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-ceph-osd
labels:
operator: rook
storage-backend: ceph
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
{{ template "imagePullSecrets" . }}
---
# Service account for the Ceph Mgr. Must exist and cannot be renamed.
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-ceph-mgr
labels:
operator: rook
storage-backend: ceph
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
{{ template "imagePullSecrets" . }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-ceph-cmd-reporter
labels:
operator: rook
storage-backend: ceph
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
{{ template "imagePullSecrets" . }}
---
# Service account for the cephfs csi driver
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-csi-cephfs-plugin-sa
{{ template "imagePullSecrets" . }}
---
# Service account for the cephfs csi provisioner
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-csi-cephfs-provisioner-sa
{{ template "imagePullSecrets" . }}
---
# Service account for the rbd csi driver
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-csi-rbd-plugin-sa
{{ template "imagePullSecrets" . }}
---
# Service account for the rbd csi provisioner
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-csi-rbd-provisioner-sa
{{ template "imagePullSecrets" . }}
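
Each service account above includes the imagePullSecrets named template, which lives in the chart's helpers and is not part of this hunk; given the imagePullSecrets list in values.yaml below, it presumably renders as:

imagePullSecrets:
- name: default-registry-key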


@ -0,0 +1,150 @@
# Default values for rook-ceph-operator
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
operator:
image:
prefix: rook
repository: rook/ceph
tag: v1.2.7
pullPolicy: IfNotPresent
resources:
limits:
cpu: 500m
memory: 256Mi
requests:
cpu: 100m
memory: 256Mi
# Tolerations for the rook-ceph-operator to allow it to run on nodes with particular taints
tolerations: []
# Delay to use in node.kubernetes.io/unreachable toleration
unreachableNodeTolerationSeconds: 5
# Whether rook watches its current namespace for CRDs or the entire cluster, defaults to false
currentNamespaceOnly: false
# Interval at which to get the ceph status and update the cluster custom resource status
cephStatusCheckInterval: "60s"
mon:
healthCheckInterval: "45s"
monOutTimeout: "600s"
## Annotations to be added to pod
annotations: {}
## LogLevel can be set to: TRACE, DEBUG, INFO, NOTICE, WARNING, ERROR or CRITICAL
logLevel: INFO
## If true, create & use RBAC resources
##
rbacEnable: true
## If true, create & use PSP resources
##
pspEnable: false
## Settings for whether to disable the drivers or other daemons if they are not
## needed
csi:
enableRbdDriver: true
enableCephfsDriver: true
enableGrpcMetrics: true
enableSnapshotter: true
    # CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
    # Default value is RollingUpdate.
    #rbdPluginUpdateStrategy: OnDelete
    # CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
    # Default value is RollingUpdate.
    #cephFSPluginUpdateStrategy: OnDelete
    # Set provisionerTolerations and provisionerNodeAffinity for the provisioner pod.
    # The CSI provisioner is best started on the same nodes as the other ceph daemons.
# provisionerTolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# provisionerNodeAffinity: key1=value1,value2; key2=value3
# Set pluginTolerations and pluginNodeAffinity for plugin daemonset pods.
# The CSI plugins need to be started on all the nodes where the clients need to mount the storage.
# pluginTolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# pluginNodeAffinity: key1=value1,value2; key2=value3
#cephfsGrpcMetricsPort: 9091
#cephfsLivenessMetricsPort: 9081
#rbdGrpcMetricsPort: 9090
    # Enable Ceph kernel clients on kernels < 4.17. If your kernel does not support CephFS quotas,
    # you may want to disable this setting; however, that will cause an issue during upgrades
    # with the FUSE client. See the upgrade guide: https://rook.io/docs/rook/v1.2/ceph-upgrade.html
forceCephFSKernelClient: true
#rbdLivenessMetricsPort: 9080
kubeletDirPath: /var/lib/kubelet
cephcsi:
image: quay.io/cephcsi/cephcsi:v2.0.0
registrar:
image: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
provisioner:
image: quay.io/k8scsi/csi-provisioner:v1.4.0
snapshotter:
image: quay.io/k8scsi/csi-snapshotter:v1.2.2
attacher:
image: quay.io/k8scsi/csi-attacher:v2.1.0
resizer:
image: quay.io/k8scsi/csi-resizer:v0.4.0
enableFlexDriver: false
enableDiscoveryDaemon: true
## if true, run rook operator on the host network
## useOperatorHostNetwork: true
## Rook Agent configuration
## toleration: NoSchedule, PreferNoSchedule or NoExecute
## tolerationKey: Set this to the specific key of the taint to tolerate
## tolerations: Array of tolerations in YAML format which will be added to agent deployment
## nodeAffinity: Set to labels of the node to match
## flexVolumeDirPath: The path where the Rook agent discovers the flex volume plugins
## libModulesDirPath: The path where the Rook agent can find kernel modules
# agent:
# toleration: NoSchedule
# tolerationKey: key
# tolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# nodeAffinity: key1=value1,value2; key2=value3
# mountSecurityMode: Any
## For information on FlexVolume path, please refer to https://rook.io/docs/rook/master/flexvolume.html
# flexVolumeDirPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/
# libModulesDirPath: /lib/modules
# mounts: mount1=/host/path:/container/path,/host/path2:/container/path2
## Rook Discover configuration
## toleration: NoSchedule, PreferNoSchedule or NoExecute
## tolerationKey: Set this to the specific key of the taint to tolerate
## tolerations: Array of tolerations in YAML format which will be added to agent deployment
## nodeAffinity: Set to labels of the node to match
# discover:
# toleration: NoSchedule
# tolerationKey: key
# tolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# nodeAffinity: key1=value1,value2; key2=value3
# In some situations SELinux relabelling breaks (times out) on large filesystems, and doesn't work with cephfs ReadWriteMany volumes (last relabel wins).
# Disable it here if you have similar issues.
# For more details see https://github.com/rook/rook/issues/2417
enableSelinuxRelabeling: true
  # Writing to the hostPath is required for the Ceph mon and osd pods. Given the restricted
  # permissions in OpenShift with SELinux, the pod must run privileged in order to write to the
  # hostPath volume; in that case this must be set to true.
hostpathRequiresPrivileged: false
# Disable automatic orchestration when new devices are discovered.
disableDeviceHotplug: false
# Blacklist certain disks according to the regex provided.
discoverDaemonUdev:
# The imagePullSecrets option allows pulling docker images from a private docker registry. The option is passed to all service accounts.
imagePullSecrets:
- name: default-registry-key
saInit:
name: sa-init
images:
tags:
sa_init_provisioner: docker.io/starlingx/ceph-config-helper:v1.15.0
cleanup:
enable: true
cluster_cleanup: ceph-cluster
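
The commented agent and discover blocks above document the accepted formats; a minimal override sketch that tolerates a dedicated storage taint on both daemons (the taint key is illustrative):

operator:
  agent:
    toleration: NoSchedule
    tolerationKey: dedicated-storage
  discover:
    toleration: NoSchedule
    tolerationKey: dedicated-storage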


@ -0,0 +1,99 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kube-system-rook-operator
data:
chart_name: rook-operator
release: rook-operator
namespace: kube-system
wait:
resources:
- type: pod
labels:
app: rook-ceph-operator
timeout: 1800
install:
no_hooks: false
upgrade:
no_hooks: false
source:
type: tar
location: http://172.17.0.1:8080/helm_charts/stx-platform/rook-operator-0.1.0.tgz
subpath: rook-operator
reference: master
dependencies: []
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kube-system-rook-ceph
data:
chart_name: rook-ceph
release: rook-ceph
namespace: kube-system
wait:
resources:
- type: pod
labels:
app: rook-ceph-mgr
- type: pod
labels:
app: rook-ceph-mon
- type: pod
labels:
app: rook-ceph-osd
timeout: 1800
install:
no_hooks: false
upgrade:
no_hooks: false
source:
type: tar
location: http://172.17.0.1:8080/helm_charts/stx-platform/rook-ceph-0.1.0.tgz
subpath: rook-ceph
reference: master
dependencies: []
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kube-system-rook-ceph-provisioner
data:
chart_name: rook-ceph-provisioner
release: rook-ceph-provisioner
namespace: kube-system
wait:
resources:
- type: job
labels:
release: stx-rook-ceph-provisioner
install:
no_hooks: false
source:
type: tar
location: http://172.17.0.1:8080/helm_charts/stx-platform/rook-ceph-provisioner-0.1.0.tgz
subpath: rook-ceph-provisioner
reference: master
dependencies: []
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: starlingx-rook-charts
data:
description: StarlingX Rook Ceph Charts
sequenced: true
chart_group:
- kube-system-rook-operator
- kube-system-rook-ceph
- kube-system-rook-ceph-provisioner
---
schema: armada/Manifest/v1
metadata:
schema: metadata/Document/v1
name: rook-ceph-manifest
data:
release_prefix: stx
chart_groups:
- starlingx-rook-charts

test-requirements.txt

@ -0,0 +1,2 @@
# hacking pulls in flake8
bashate >= 0.2

tox.ini

@ -0,0 +1,50 @@
[tox]
envlist = linters
minversion = 2.3
skipsdist = True
#sitepackages=False

[testenv]
install_command = pip install \
-v -v -v \
-c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/stable/stein/upper-constraints.txt} \
{opts} {packages}
setenv =
VIRTUAL_ENV={envdir}
OS_STDOUT_CAPTURE=1
OS_STDERR_CAPTURE=1
OS_DEBUG=1
OS_LOG_CAPTURE=1
deps =
-r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
whitelist_externals =
bash

[testenv:bashate]
# Treat all E* codes as Errors rather than warnings using: -e 'E*'
commands =
bash -c "find {toxinidir} \
-not \( -type d -name .?\* -prune \) \
-type f \
-not -name \*~ \
-not -name \*.md \
-name \*.sh \
-print0 | xargs -r -n 1 -0 bashate -v \
-e 'E*'"

[testenv:linters]
commands =
{[testenv:bashate]commands}

[testenv:flake8]
basepython = python3
description = Dummy environment to allow flake8 to be run in subdir tox

[testenv:pylint]
basepython = python3
description = Dummy environment to allow pylint to be run in subdir tox

[testenv:bandit]
basepython = python3
description = Dummy environment to allow bandit to be run in subdir tox