Adds init functionality, PSP and ArmadaManifestOperator

The manager container handles the initialization of Vault and performs the
auto-unseal operation if a Vault pod is restarted. This change also integrates
the podsecuritypolicy plugin and the ArmadaManifestOperator plugin.

Story: 2007718
Task: 40326

Change-Id: Ibdd62197d95089b69035707c176788e8599121fd
Signed-off-by: Cole Walker <cole.walker@windriver.com>
Cole Walker 2020-07-13 08:56:35 -04:00
parent 431f8e0874
commit 77715cf914
21 changed files with 1057 additions and 19 deletions

View File

@@ -1,2 +1,3 @@
stx-vault-helm
vault-helm
python-k8sapp-vault

View File

@@ -0,0 +1,19 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import yaml


class quoted_str(str):
    pass


# force strings to be single-quoted to avoid interpretation as numeric values
def quoted_presenter(dumper, data):
    return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style="'")


yaml.add_representer(quoted_str, quoted_presenter)
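
A minimal usage sketch (self-contained; it repeats the registration above, and the 'version' key is purely illustrative): wrapping a value in quoted_str forces the single-quoted style regardless of content, so consumers of the emitted YAML cannot coerce the value to a number:

import yaml

class quoted_str(str):
    pass

def quoted_presenter(dumper, data):
    return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style="'")

yaml.add_representer(quoted_str, quoted_presenter)

# Prints: version: '1.0' -- always single-quoted, unlike a plain str,
# whose quoting depends on PyYAML's implicit resolver.
print(yaml.dump({'version': quoted_str('1.0')}))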

View File

@@ -0,0 +1,38 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# All Rights Reserved.
#
""" System inventory Armada manifest operator."""
from k8sapp_vault.helm.vault import VaultHelm

from sysinv.common import constants
from sysinv.helm import manifest_base as base


class VaultArmadaManifestOperator(base.ArmadaManifestOperator):

    APP = constants.HELM_APP_VAULT
    ARMADA_MANIFEST = 'vault-manifest'

    CHART_GROUP_VAULT = 'vault'
    CHART_GROUPS_LUT = {
        VaultHelm.CHART: CHART_GROUP_VAULT
    }

    CHARTS_LUT = {
        VaultHelm.CHART: 'vault'
    }

    def platform_mode_manifest_updates(self, dbapi, mode):
        """ Update the application manifest based on the platform

        :param dbapi: DB api object
        :param mode: mode to control how to apply the application manifest
        """
        pass
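
For context, these lookup tables let the base operator translate a Helm chart name into the Armada chart-group and chart document names when it edits the manifest; a minimal illustration of the lookups (the values shown are simply what the tables above yield, the real traversal lives in sysinv.helm.manifest_base):

chart = VaultHelm.CHART                                      # 'vault'
group = VaultArmadaManifestOperator.CHART_GROUPS_LUT[chart]  # -> 'vault' chart group document
doc = VaultArmadaManifestOperator.CHARTS_LUT[chart]          # -> 'vault' chart document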

View File

@@ -4,7 +4,10 @@ TAR_NAME=helm-charts-vault-0-6-0
VERSION=1.0.0
TAR="$TAR_NAME.tar.gz"
COPY_LIST="${CGCS_BASE}/downloads/$TAR $PKG_BASE/$SRC_DIR/files/* $PKG_BASE/$SRC_DIR/manifests/*"
COPY_LIST_TO_TAR="\
$STX_BASE/helm-charts/psp-rolebinding/psp-rolebinding/helm-charts \
"
#COPY_LIST="${CGCS_BASE}/downloads/$TAR $PKG_BASE/$SRC_DIR/files/* $PKG_BASE/$SRC_DIR/manifests/* $PKG_BASE/$SRC_DIR/helm-charts/*"
TIS_PATCH_VER=0

View File

@@ -21,23 +21,28 @@ Packager: Wind River <info@windriver.com>
URL: unknown
Source0: helm-charts-vault-0-6-0.tar.gz
Source1: repositories.yaml
Source2: index.yaml
Source3: Makefile
Source4: metadata.yaml
Source5: vault_manifest.yaml
#Source1: repositories.yaml
#Source2: index.yaml
#Source3: Makefile
#Source4: metadata.yaml
#Source5: vault_manifest.yaml
#Source6: vault-init.yaml
#Source7: vault-certificates.yaml
#Source8: _helpers-CA.tpl
BuildArch: noarch
BuildRequires: helm
BuildRequires: vault-helm
BuildRequires: python-k8sapp-vault
BuildRequires: python-k8sapp-vault-wheels
Requires: vault-helm
%description
StarlingX Vault Helm Charts
%prep
%setup -n helm-charts-vault
%setup -n helm-charts-vault-0-6-0-1.0.0
%build
# initialize helm and build the toolkit
@@ -54,10 +59,10 @@ mkdir %{helm_home}/cache
mkdir %{helm_home}/cache/archive
# Stage a repository file that only has a local repo
cp %{SOURCE1} %{helm_home}/repository/repositories.yaml
cp files/repositories.yaml %{helm_home}/repository/repositories.yaml
# Stage a local repo index that can be updated by the build
cp %{SOURCE2} %{helm_home}/repository/local/index.yaml
cp files/index.yaml %{helm_home}/repository/local/index.yaml
# Host a server for the charts
helm serve --repo-path . &
@@ -65,13 +70,16 @@ helm repo rm local
helm repo add local http://localhost:8879/charts
# Create the tgz file
cp %{SOURCE3} ./
mkdir ./vault
cp ./Chart.yaml ./vault
mv ./values.yaml ./vault
mv ./templates ./vault/templates
#cp %{SOURCE3} ./
#mkdir ./vault
#cp ./Chart.yaml ./vault
#mv ./values.yaml ./vault
#cp %{SOURCE6} ./templates
#cp %{SOURCE7} ./templates
#cat %{SOURCE8} >> ./templates/_helpers.tpl
#mv ./templates ./vault/templates
make vault
cd helm-charts
make psp-rolebinding
cd -
@@ -84,10 +92,11 @@ kill %1
# Setup staging
mkdir -p %{app_staging}
cp %{SOURCE4} %{app_staging}
cp %{SOURCE5} %{app_staging}
cp files/metadata.yaml %{app_staging}
cp manifests/*.yaml %{app_staging}
mkdir -p %{app_staging}/charts
cp ./helm-charts-vault/*.tgz %{app_staging}/charts
cp helm-charts/*.tgz %{app_staging}/charts
cp %{helm_folder}/vault*.tgz %{app_staging}/charts
cd %{app_staging}
# Populate metadata

View File

@@ -0,0 +1,11 @@
{{/*
Generate certificates for vault CA
*/}}
{{- define "vault.gen-certs" -}}
{{- $altNames := list ( printf "%s.%s" (include "vault.name" .) .Release.Namespace ) ( printf "%s.%s.svc" (include "vault.name" .) .Release.Namespace ) -}}
{{- $ca := genCA "vault-ca" 365 -}}
{{- $cert := genSignedCert ( include "vault.name" . ) nil $altNames 365 $ca -}}
tls.crt: {{ $ca.Cert | b64enc }}
tls.key: {{ $ca.Key | b64enc }}
{{- end -}}

View File

@@ -0,0 +1,64 @@
apiVersion: v1
kind: Secret
type: kubernetes.io/tls
metadata:
  name: {{ template "vault.name" . }}-ca
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "vault.name" . }}
    chart: {{ template "vault.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
  annotations:
    "helm.sh/hook": "pre-install"
    "helm.sh/hook-delete-policy": "before-hook-creation"
data:
{{ ( include "vault.gen-certs" . ) | indent 2 }}
---
apiVersion: cert-manager.io/v1alpha2
kind: Issuer
metadata:
  name: ca-issuer
  namespace: {{ .Release.Namespace }}
spec:
  ca:
    secretName: {{ template "vault.name" . }}-ca
---
apiVersion: cert-manager.io/v1alpha2
kind: Certificate
metadata:
  name: vault-server-tls
  namespace: {{ .Release.Namespace }}
spec:
  # Secret names are always required.
  secretName: vault-server-tls
  duration: 2160h # 90d
  renewBefore: 360h # 15d
  organization:
  - stx
  isCA: false
  keySize: 2048
  keyAlgorithm: rsa
  keyEncoding: pkcs1
  usages:
  - server auth
  - client auth
  # At least one of a DNS Name, URI, or IP address is required.
  dnsNames:
  - sva-{{ template "vault.name" . }}
  - '*.sva-{{ template "vault.name" . }}-internal'
  - '*.{{ .Release.Namespace }}.pod.cluster.local'
  - sva-{{ template "vault.name" . }}.{{ .Release.Namespace }}
  - sva-{{ template "vault.name" . }}.{{ .Release.Namespace }}.svc
  - sva-{{ template "vault.name" . }}.{{ .Release.Namespace }}.svc.cluster.local
  ipAddresses:
  - 127.0.0.1
  # Issuer references are always required.
  issuerRef:
    name: ca-issuer
    # We can reference ClusterIssuers by changing the kind here.
    # The default value is Issuer (i.e. a locally namespaced Issuer)
    kind: Issuer
    # This is optional since cert-manager will default to this value however
    # if you are using an external issuer, change this to that issuer group.
    group: cert-manager.io
View File

@@ -0,0 +1,217 @@
apiVersion: v1
data:
  init.sh: |
    #!/bin/bash
    CERT=$CA_CERT # Get the CA path from environment vars
    CA_ONELINE=$(awk '{printf "%s\\n", $0}' $CERT) # Store cert as a oneliner for curl purposes
    DOMAIN={{ .Release.Namespace }}.pod.cluster.local # Set the domain for resolving pod names
    WORKDIR=$PVCDIR # PVC location so that keys can be persisted

    # FUNCTIONS

    # Creates a list of all k8s vault pods and stores in text file.
    # Converts ips from X.X.X.X to X-X-X-X for use as pod dns names
    function getVaultPods {
      kubectl get pods -n {{ .Release.Namespace }} -l component=server,app.kubernetes.io/name=vault -o=jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.podIPs[].ip}{"\n"}{end}' > $WORKDIR/pods.txt
      sed -i 's/\./-/g' $WORKDIR/pods.txt
    }

    # Wait for the vault servers in the stateful set to be created before initializing
    function waitForPods {
      CURRENT_PODS=$(kubectl get pods -l component=server,app.kubernetes.io/name=vault \
        -o=jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.podIPs[].ip}{"\t"}{.status.phase}{"\n"} \
        {end}' | grep Running | wc -l)
      DESIRED_PODS={{ .Values.server.ha.replicas }}
      while [ $CURRENT_PODS != $DESIRED_PODS ]; do
        sleep 5
        echo "Waiting for {{ template "vault.fullname" . }} statefulset running pods ($CURRENT_PODS) to equal desired pods ($DESIRED_PODS)"
        CURRENT_PODS=$(kubectl get pods -l component=server,app.kubernetes.io/name=vault \
          -o=jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.podIPs[].ip}{"\t"}{.status.phase}{"\n"} \
          {end}' | grep Running | wc -l)
      done
    }

    # Initializes the first vault pod, only needs to be performed once after deploying the helm chart
    # Stores the root token and master key shards in plaintext in working directory as cluster_keys.json - insecure.
    function initVault {
      V0=$(awk 'NR==1{print $2}' $WORKDIR/pods.txt)
      echo "Initializing $V0"
      curl -s --cacert $CERT --request POST --data '{"secret_shares": 5, "secret_threshold": 3}' https://$V0.$DOMAIN:8200/v1/sys/init > $WORKDIR/cluster_keys.json
    }

    # Uses the master key shards in cluster_keys.json to unseal vault
    function unsealVault {
      for shard in $(cat $WORKDIR/cluster_keys.json | jq -r .keys_base64[]); do
        echo {\"key\": \"$shard\"} | curl -s --cacert $CERT --request POST -d @- https://$VAULT.$DOMAIN:8200/v1/sys/unseal > /dev/null
        sleep 3 #Some sleep is required to allow Raft convergence
      done
    }

    # Takes the address of vault-0 as the cluster leader and joins other nodes to raft
    function joinRaft {
      CLUSTER_LEAD=$(awk 'NR==1{print $2}' $WORKDIR/pods.txt)
      ROOT_TOKEN=$(cat $WORKDIR/cluster_keys.json | jq -r .root_token)
      curl -s --cacert $CERT -H "X-Vault-Token: $ROOT_TOKEN" --request POST --data "{\"leader_api_addr\": \"https://$CLUSTER_LEAD.$DOMAIN:8200\", \"leader_ca_cert\": \"$CA_ONELINE\"}" https://$row.$DOMAIN:8200/v1/sys/storage/raft/join
    }

    # Simply calls the status check of a vault, used to check if it is initialized, unsealed, or part of raft cluster
    function vaultServerStatus {
      curl --cacert $CERT -s https://$row.$DOMAIN:8200/v1/sys/health | jq
    }

    #
    # LOGIC
    #

    # Waiting for vault servers to come up
    waitForPods

    echo ""
    echo "Putting a list of vault pods and ip in $WORKDIR/pods.txt"
    getVaultPods

    echo ""
    row=$(awk 'NR==1{print $2}' $WORKDIR/pods.txt)
    vaultServerStatus > $WORKDIR/healthcheck.txt
    TEMP=$(cat $WORKDIR/healthcheck.txt | jq -r .initialized)
    grep $row $WORKDIR/pods.txt & echo "Initialized status is $TEMP"

    if [ ! -z $TEMP ] && [ $TEMP = false ]; then
      echo "Initializing the vault on vault-0 and storing keys in $WORKDIR/cluster_keys.json"
      initVault
      sleep 10 #Some sleep required to allow convergence

      echo ""
      echo "Unsealing vault-0 using the init shards"
      for row in $(awk 'NR==1{print $2}' $WORKDIR/pods.txt); do
        VAULT=$row
        unsealVault
      done

      echo ""
      echo "Joining other vault servers to the HA Raft cluster"
      for row in $(awk 'NR>1{print $2}' $WORKDIR/pods.txt); do
        grep $row $WORKDIR/pods.txt
        joinRaft
        sleep 5
      done

      echo ""
      echo "Unsealing the remaining vaults"
      for row in $(awk 'NR>1{print $2}' $WORKDIR/pods.txt); do
        grep $row $WORKDIR/pods.txt
        VAULT=$row
        unsealVault
        sleep 10
      done
    fi

    # Loop forever to check the seal status of vaults and unseal if required
    while true; do
      sleep 5
      echo "Checking vault pods seal status"
      rm $WORKDIR/pods.txt
      getVaultPods
      for row in $(awk '{print $2}' $WORKDIR/pods.txt); do
        vaultServerStatus > $WORKDIR/healthcheck.txt
        TEMP=$(cat $WORKDIR/healthcheck.txt | jq -r .sealed)
        grep $row $WORKDIR/pods.txt & echo "Sealed status is $TEMP"
        if [ ! -z $TEMP ] && [ $TEMP = true ]; then
          VAULT=$row
          echo "Unsealing $row"
          unsealVault
        fi
      done
    done
kind: ConfigMap
metadata:
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:data:
        .: {}
        f:init.sh: {}
    manager: vault-init-unseal
  name: vault-init-unseal
  namespace: {{ .Release.Namespace }}
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: manager-pvc
  namespace: {{ .Release.Namespace }}
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: general
  resources:
    requests:
      storage: 1Gi
---
{{- if and (eq (.Values.injector.enabled | toString) "true" ) (eq (.Values.global.enabled | toString) "true") }}
# Deployment for the unsealer
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "vault.fullname" . }}-manager
  namespace: {{ .Release.Namespace }}
  labels:
    app.kubernetes.io/name: {{ include "vault.name" . }}-manager
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    component: webhook
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/instance: {{ .Release.Name }}
      component: webhook
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ template "vault.name" . }}-manager
        app.kubernetes.io/instance: {{ .Release.Name }}
        component: webhook
    spec:
      serviceAccountName: "{{ template "vault.fullname" . }}"
      containers:
      - name: manager
        image: cwalops/k8
        imagePullPolicy: "{{ .Values.injector.image.pullPolicy }}"
        args:
        - bash
        - /opt/script/init.sh
        env:
        - name: PVCDIR
          value: /mnt/data
        - name: CA_CERT
          value: /mnt/data/ca/tls.crt
        volumeMounts:
        - name: vault-init-unseal
          mountPath: /opt/script
          readOnly: false
        - name: manager-pvc
          mountPath: /mnt/data
          readOnly: false
        - name: vault-ca
          mountPath: /mnt/data/ca
          readOnly: true
      volumes:
      - name: vault-init-unseal
        configMap:
          name: vault-init-unseal
      - name: manager-pvc
        persistentVolumeClaim:
          claimName: manager-pvc
      - name: vault-ca
        secret:
          secretName: vault-ca
      {{- if .Values.global.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml .Values.global.imagePullSecrets | nindent 8 }}
      {{- end }}
{{ end }}
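
For reference, the HTTP exchanges this script drives are small JSON calls against Vault's sys API; below is a standalone Python sketch of the same health-check-then-unseal round trip. The pod address, CA path, and key-file location are assumptions mirroring the script's environment, not part of this change:

import json
import requests

VAULT = 'https://vault-0.vault.pod.cluster.local:8200'  # hypothetical pod DNS name
CA = '/mnt/data/ca/tls.crt'

# /v1/sys/health reports "initialized" and "sealed" in the response body
# (the HTTP status code also encodes state, e.g. 503 while sealed), which
# is what the script extracts with jq.
health = requests.get(VAULT + '/v1/sys/health', verify=CA).json()

if health['sealed']:
    # Replay key shards up to the unseal threshold, as unsealVault does.
    shards = json.load(open('/mnt/data/cluster_keys.json'))['keys_base64']
    for shard in shards[:3]:  # secret_threshold is 3 in the init call above
        requests.post(VAULT + '/v1/sys/unseal', json={'key': shard}, verify=CA)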

View File

@@ -1,5 +1,35 @@
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: vault-psp-rolebinding
data:
  chart_name: psp-rolebinding
  release: vault-psp-rolebinding
  namespace: vault
  values:
    rolebindingNamespace: vault
    serviceAccount: vault
  source:
    location: http://172.17.0.1:8080/helm_charts/stx-platform/psp-rolebinding-0.1.0.tgz
    subpath: psp-rolebinding
    type: tar
    reference: master
  upgrade:
    no_hooks: false
    pre:
      delete:
      - labels:
          release_group: vault-psp-rolebinding
        type: job
  wait:
    labels:
      release_group: vault-psp-rolebinding
    resources: []
    timeout: 1800
  dependencies: []
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: vault
@@ -23,6 +53,7 @@ data:
  values:
    global:
      enabled: true
      tlsDisable: false
    injector:
      enabled: true
      image:
@@ -43,8 +74,30 @@ data:
        replicas: 3
        raft:
          enabled: true
          config: |
            ui = true
            listener "tcp" {
              tls_disable = 0
              address = "[::]:8200"
              cluster_address = "[::]:8201"
              tls_cert_file = "/vault/userconfig/vault-server-tls/tls.crt"
              tls_key_file = "/vault/userconfig/vault-server-tls/tls.key"
              tls_client_ca_file = "/vault/userconfig/vault-server-tls/ca.crt"
            }
            storage "raft" {
              path = "/vault/data"
            }
            service_registration "kubernetes" {}
      extraLabels:
        app: vault
      extraEnvironmentVars:
        VAULT_CACERT: /vault/userconfig/vault-server-tls/ca.crt
      extraVolumes:
      - type: secret
        name: vault-server-tls
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/stx-platform/vault-0.6.0.tgz
@@ -61,12 +114,13 @@ data:
  sequenced: false
  chart_group:
  - vault
  - vault-psp-rolebinding
---
schema: armada/Manifest/v1
metadata:
  schema: metadata/Document/v1
  name: vault-manifest
data:
  release_prefix: stx
  release_prefix: sva
  chart_groups:
  - vault
View File

@@ -0,0 +1,14 @@
SRC_DIR="vault-helm"
TAR_NAME=helm-charts-vault-0-6-0
VERSION=1.0.0
TAR="$TAR_NAME.tar.gz"
COPY_LIST="${CGCS_BASE}/downloads/$TAR $PKG_BASE/$SRC_DIR/files/* $PKG_BASE/$SRC_DIR/manifests/* $PKG_BASE/$SRC_DIR/helm-charts/*"
TIS_PATCH_VER=0
# Keep the SRCREV in sync with python-k8sapp-cert-manager so the app version is
# the same as the plugin version
#TIS_BASE_SRCREV=94d4c26f982e2e8c222517900c504580d1e3a09d
#TIS_PATCH_VER=GITREVCOUNT

View File

@@ -0,0 +1,121 @@
# Application tunables (maps to metadata)
%global app_name vault
%global helm_repo stx-platform
%global armada_folder /usr/lib/armada
# Install location
%global app_folder /usr/local/share/applications/helm
# Build variables
%global helm_folder /usr/lib/helm
%global toolkit_version 0.1.0
Summary: StarlingX Vault Armada Helm Charts
Name: vault-helm
Version: 1.0
Release: %{tis_patch_ver}%{?_tis_dist}
License: Apache-2.0
Group: base
Packager: Wind River <info@windriver.com>
URL: unknown
Source0: helm-charts-vault-0-6-0.tar.gz
Source1: repositories.yaml
Source2: index.yaml
Source3: Makefile
Source4: metadata.yaml
Source5: vault_manifest.yaml
Source6: vault-init.yaml
Source7: vault-certificates.yaml
Source8: _helpers-CA.tpl
BuildArch: noarch
BuildRequires: helm
%description
StarlingX Vault Helm Charts
%prep
%setup -n helm-charts-vault
%build
# initialize helm and build the toolkit
# helm init --client-only does not work if there is no networking
# The following commands do essentially the same as: helm init
%define helm_home %{getenv:HOME}/.helm
mkdir %{helm_home}
mkdir %{helm_home}/repository
mkdir %{helm_home}/repository/cache
mkdir %{helm_home}/repository/local
mkdir %{helm_home}/plugins
mkdir %{helm_home}/starters
mkdir %{helm_home}/cache
mkdir %{helm_home}/cache/archive
# Stage a repository file that only has a local repo
cp %{SOURCE1} %{helm_home}/repository/repositories.yaml
# Stage a local repo index that can be updated by the build
cp %{SOURCE2} %{helm_home}/repository/local/index.yaml
# Host a server for the charts
helm serve --repo-path . &
helm repo rm local
helm repo add local http://localhost:8879/charts
# Create the tgz file
cp %{SOURCE3} ./
mkdir ./vault
cp ./Chart.yaml ./vault
mv ./values.yaml ./vault
cp %{SOURCE6} ./templates
cp %{SOURCE7} ./templates
cat %{SOURCE8} >> ./templates/_helpers.tpl
mv ./templates ./vault/templates
make vault
cd -
# Terminate helm server (the last backgrounded task)
kill %1
# Create a chart tarball compliant with sysinv kube-app.py
%define app_staging %{_builddir}/staging
%define app_tarball %{app_name}-%{version}-%{tis_patch_ver}.tgz
# Setup staging
mkdir -p %{app_staging}
cp %{SOURCE4} %{app_staging}
cp %{SOURCE5} %{app_staging}
mkdir -p %{app_staging}/charts
cp ./helm-charts-vault/*.tgz %{app_staging}/charts
cd %{app_staging}
# Populate metadata
#sed -i 's/@APP_NAME@/%{app_name}/g' %{app_staging}/metadata.yaml
#sed -i 's/@APP_VERSION@/%{version}-%{tis_patch_ver}/g' %{app_staging}/metadata.yaml
#sed -i 's/@HELM_REPO@/%{helm_repo}/g' %{app_staging}/metadata.yaml
# Copy the plugins: installed in the buildroot
#mkdir -p %{app_staging}/plugins
#cp /plugins/%{app_name}/*.whl %{app_staging}/plugins
# package it up
find . -type f ! -name '*.md5' -print0 | xargs -0 md5sum > checksum.md5
tar -zcf %{_builddir}/%{app_tarball} -C %{app_staging}/ .
# Cleanup staging
#rm -fr %{app_staging}
%install
install -d -m 755 ${RPM_BUILD_ROOT}%{helm_folder}
install -p -D -m 755 %{app_staging}/charts/*.tgz ${RPM_BUILD_ROOT}%{helm_folder}
%files
%defattr(-,root,root,-)
%{helm_folder}/*

View File

@@ -0,0 +1,5 @@
This directory contains all StarlingX charts that need to be built for this
application. Some charts are common across applications; these common charts
reside in the stx-config/kubernetes/helm-charts directory. To include them in
this application, update the build_srpm.data file and use the COPY_LIST_TO_TAR
mechanism to populate these common charts, as the example below shows.
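
For example, the stx-vault-helm build_srpm.data earlier in this change uses the mechanism to pull in the common psp-rolebinding chart:

COPY_LIST_TO_TAR="\
$STX_BASE/helm-charts/psp-rolebinding/psp-rolebinding/helm-charts \
"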

View File

@@ -0,0 +1,43 @@
#
# Copyright 2017 The Openstack-Helm Authors.
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# It's necessary to set this because some environments don't link sh -> bash.
SHELL := /bin/bash
TASK := build

EXCLUDES := helm-toolkit doc tests tools logs tmp
CHARTS := helm-toolkit $(filter-out $(EXCLUDES), $(patsubst %/.,%,$(wildcard */.)))

.PHONY: $(EXCLUDES) $(CHARTS)

all: $(CHARTS)

$(CHARTS):
	@if [ -d $@ ]; then \
		echo; \
		echo "===== Processing [$@] chart ====="; \
		make $(TASK)-$@; \
	fi

init-%:
	if [ -f $*/Makefile ]; then make -C $*; fi
	if [ -f $*/requirements.yaml ]; then helm dep up $*; fi

lint-%: init-%
	if [ -d $* ]; then helm lint $*; fi

build-%:
	if [ -d $* ]; then helm package $*; fi

clean:
	@echo "Clean all build artifacts"
	rm -f */templates/_partials.tpl */templates/_globals.tpl
	rm -f *tgz */charts/*tgz */requirements.lock
	rm -rf */charts */tmpcharts

%:
	@:
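
Usage sketch: with this Makefile beside the chart directories, each chart name is a target (the packaged filename below assumes the chart's 0.6.0 version from Chart.yaml):

make vault   # matches $(CHARTS) -> runs build-vault -> helm package vault -> vault-0.6.0.tgz
make clean   # removes packaged charts, dependency locks and temporary chart dirs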

View File

@@ -0,0 +1,3 @@
apiVersion: v1
entries: {}
generated: 2019-01-07T12:33:46.098166523-06:00

View File

@@ -0,0 +1,6 @@
maintain_user_overrides: true
app_name: @APP_NAME@
app_version: @APP_VERSION@
helm_repo: @HELM_REPO@

View File

@@ -0,0 +1,12 @@
apiVersion: v1
generated: 2019-01-02T15:19:36.215111369-06:00
repositories:
- caFile: ""
  cache: /builddir/.helm/repository/cache/local-index.yaml
  certFile: ""
  keyFile: ""
  name: local
  password: ""
  url: http://127.0.0.1:8879/charts
  username: ""

View File

@@ -0,0 +1,11 @@
{{/*
Generate certificates for vault CA
*/}}
{{- define "vault.gen-certs" -}}
{{- $altNames := list ( printf "%s.%s" (include "vault.name" .) .Release.Namespace ) ( printf "%s.%s.svc" (include "vault.name" .) .Release.Namespace ) -}}
{{- $ca := genCA "vault-ca" 365 -}}
{{- $cert := genSignedCert ( include "vault.name" . ) nil $altNames 365 $ca -}}
tls.crt: {{ $ca.Cert | b64enc }}
tls.key: {{ $ca.Key | b64enc }}
{{- end -}}

View File

@@ -0,0 +1,64 @@
apiVersion: v1
kind: Secret
type: kubernetes.io/tls
metadata:
  name: {{ template "vault.name" . }}-ca
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "vault.name" . }}
    chart: {{ template "vault.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
  annotations:
    "helm.sh/hook": "pre-install"
    "helm.sh/hook-delete-policy": "before-hook-creation"
data:
{{ ( include "vault.gen-certs" . ) | indent 2 }}
---
apiVersion: cert-manager.io/v1alpha2
kind: Issuer
metadata:
  name: ca-issuer
  namespace: {{ .Release.Namespace }}
spec:
  ca:
    secretName: {{ template "vault.name" . }}-ca
---
apiVersion: cert-manager.io/v1alpha2
kind: Certificate
metadata:
  name: vault-server-tls
  namespace: {{ .Release.Namespace }}
spec:
  # Secret names are always required.
  secretName: vault-server-tls
  duration: 2160h # 90d
  renewBefore: 360h # 15d
  organization:
  - stx
  isCA: false
  keySize: 2048
  keyAlgorithm: rsa
  keyEncoding: pkcs1
  usages:
  - server auth
  - client auth
  # At least one of a DNS Name, URI, or IP address is required.
  dnsNames:
  - sva-{{ template "vault.name" . }}
  - '*.sva-{{ template "vault.name" . }}-internal'
  - '*.{{ .Release.Namespace }}.pod.cluster.local'
  - sva-{{ template "vault.name" . }}.{{ .Release.Namespace }}
  - sva-{{ template "vault.name" . }}.{{ .Release.Namespace }}.svc
  - sva-{{ template "vault.name" . }}.{{ .Release.Namespace }}.svc.cluster.local
  ipAddresses:
  - 127.0.0.1
  # Issuer references are always required.
  issuerRef:
    name: ca-issuer
    # We can reference ClusterIssuers by changing the kind here.
    # The default value is Issuer (i.e. a locally namespaced Issuer)
    kind: Issuer
    # This is optional since cert-manager will default to this value however
    # if you are using an external issuer, change this to that issuer group.
    group: cert-manager.io

View File

@@ -0,0 +1,217 @@
apiVersion: v1
data:
  init.sh: |
    #!/bin/bash
    CERT=$CA_CERT # Get the CA path from environment vars
    CA_ONELINE=$(awk '{printf "%s\\n", $0}' $CERT) # Store cert as a oneliner for curl purposes
    DOMAIN={{ .Release.Namespace }}.pod.cluster.local # Set the domain for resolving pod names
    WORKDIR=$PVCDIR # PVC location so that keys can be persisted

    # FUNCTIONS

    # Creates a list of all k8s vault pods and stores in text file.
    # Converts ips from X.X.X.X to X-X-X-X for use as pod dns names
    function getVaultPods {
      kubectl get pods -n {{ .Release.Namespace }} -l component=server,app.kubernetes.io/name=vault -o=jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.podIPs[].ip}{"\n"}{end}' > $WORKDIR/pods.txt
      sed -i 's/\./-/g' $WORKDIR/pods.txt
    }

    # Wait for the vault servers in the stateful set to be created before initializing
    function waitForPods {
      CURRENT_PODS=$(kubectl get pods -l component=server,app.kubernetes.io/name=vault \
        -o=jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.podIPs[].ip}{"\t"}{.status.phase}{"\n"} \
        {end}' | grep Running | wc -l)
      DESIRED_PODS={{ .Values.server.ha.replicas }}
      while [ $CURRENT_PODS != $DESIRED_PODS ]; do
        sleep 5
        echo "Waiting for {{ template "vault.fullname" . }} statefulset running pods ($CURRENT_PODS) to equal desired pods ($DESIRED_PODS)"
        CURRENT_PODS=$(kubectl get pods -l component=server,app.kubernetes.io/name=vault \
          -o=jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.podIPs[].ip}{"\t"}{.status.phase}{"\n"} \
          {end}' | grep Running | wc -l)
      done
    }

    # Initializes the first vault pod, only needs to be performed once after deploying the helm chart
    # Stores the root token and master key shards in plaintext in working directory as cluster_keys.json - insecure.
    function initVault {
      V0=$(awk 'NR==1{print $2}' $WORKDIR/pods.txt)
      echo "Initializing $V0"
      curl -s --cacert $CERT --request POST --data '{"secret_shares": 5, "secret_threshold": 3}' https://$V0.$DOMAIN:8200/v1/sys/init > $WORKDIR/cluster_keys.json
    }

    # Uses the master key shards in cluster_keys.json to unseal vault
    function unsealVault {
      for shard in $(cat $WORKDIR/cluster_keys.json | jq -r .keys_base64[]); do
        echo {\"key\": \"$shard\"} | curl -s --cacert $CERT --request POST -d @- https://$VAULT.$DOMAIN:8200/v1/sys/unseal > /dev/null
        sleep 3 #Some sleep is required to allow Raft convergence
      done
    }

    # Takes the address of vault-0 as the cluster leader and joins other nodes to raft
    function joinRaft {
      CLUSTER_LEAD=$(awk 'NR==1{print $2}' $WORKDIR/pods.txt)
      ROOT_TOKEN=$(cat $WORKDIR/cluster_keys.json | jq -r .root_token)
      curl -s --cacert $CERT -H "X-Vault-Token: $ROOT_TOKEN" --request POST --data "{\"leader_api_addr\": \"https://$CLUSTER_LEAD.$DOMAIN:8200\", \"leader_ca_cert\": \"$CA_ONELINE\"}" https://$row.$DOMAIN:8200/v1/sys/storage/raft/join
    }

    # Simply calls the status check of a vault, used to check if it is initialized, unsealed, or part of raft cluster
    function vaultServerStatus {
      curl --cacert $CERT -s https://$row.$DOMAIN:8200/v1/sys/health | jq
    }

    #
    # LOGIC
    #

    # Waiting for vault servers to come up
    waitForPods

    echo ""
    echo "Putting a list of vault pods and ip in $WORKDIR/pods.txt"
    getVaultPods

    echo ""
    row=$(awk 'NR==1{print $2}' $WORKDIR/pods.txt)
    vaultServerStatus > $WORKDIR/healthcheck.txt
    TEMP=$(cat $WORKDIR/healthcheck.txt | jq -r .initialized)
    grep $row $WORKDIR/pods.txt & echo "Initialized status is $TEMP"

    if [ ! -z $TEMP ] && [ $TEMP = false ]; then
      echo "Initializing the vault on vault-0 and storing keys in $WORKDIR/cluster_keys.json"
      initVault
      sleep 10 #Some sleep required to allow convergence

      echo ""
      echo "Unsealing vault-0 using the init shards"
      for row in $(awk 'NR==1{print $2}' $WORKDIR/pods.txt); do
        VAULT=$row
        unsealVault
      done

      echo ""
      echo "Joining other vault servers to the HA Raft cluster"
      for row in $(awk 'NR>1{print $2}' $WORKDIR/pods.txt); do
        grep $row $WORKDIR/pods.txt
        joinRaft
        sleep 5
      done

      echo ""
      echo "Unsealing the remaining vaults"
      for row in $(awk 'NR>1{print $2}' $WORKDIR/pods.txt); do
        grep $row $WORKDIR/pods.txt
        VAULT=$row
        unsealVault
        sleep 10
      done
    fi

    # Loop forever to check the seal status of vaults and unseal if required
    while true; do
      sleep 5
      echo "Checking vault pods seal status"
      rm $WORKDIR/pods.txt
      getVaultPods
      for row in $(awk '{print $2}' $WORKDIR/pods.txt); do
        vaultServerStatus > $WORKDIR/healthcheck.txt
        TEMP=$(cat $WORKDIR/healthcheck.txt | jq -r .sealed)
        grep $row $WORKDIR/pods.txt & echo "Sealed status is $TEMP"
        if [ ! -z $TEMP ] && [ $TEMP = true ]; then
          VAULT=$row
          echo "Unsealing $row"
          unsealVault
        fi
      done
    done
kind: ConfigMap
metadata:
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:data:
        .: {}
        f:init.sh: {}
    manager: vault-init-unseal
  name: vault-init-unseal
  namespace: {{ .Release.Namespace }}
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: manager-pvc
  namespace: {{ .Release.Namespace }}
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: general
  resources:
    requests:
      storage: 1Gi
---
{{- if and (eq (.Values.injector.enabled | toString) "true" ) (eq (.Values.global.enabled | toString) "true") }}
# Deployment for the unsealer
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "vault.fullname" . }}-manager
  namespace: {{ .Release.Namespace }}
  labels:
    app.kubernetes.io/name: {{ include "vault.name" . }}-manager
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    component: webhook
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/instance: {{ .Release.Name }}
      component: webhook
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ template "vault.name" . }}-manager
        app.kubernetes.io/instance: {{ .Release.Name }}
        component: webhook
    spec:
      serviceAccountName: "{{ template "vault.fullname" . }}"
      containers:
      - name: manager
        image: cwalops/k8
        imagePullPolicy: "{{ .Values.injector.image.pullPolicy }}"
        args:
        - bash
        - /opt/script/init.sh
        env:
        - name: PVCDIR
          value: /mnt/data
        - name: CA_CERT
          value: /mnt/data/ca/tls.crt
        volumeMounts:
        - name: vault-init-unseal
          mountPath: /opt/script
          readOnly: false
        - name: manager-pvc
          mountPath: /mnt/data
          readOnly: false
        - name: vault-ca
          mountPath: /mnt/data/ca
          readOnly: true
      volumes:
      - name: vault-init-unseal
        configMap:
          name: vault-init-unseal
      - name: manager-pvc
        persistentVolumeClaim:
          claimName: manager-pvc
      - name: vault-ca
        secret:
          secretName: vault-ca
      {{- if .Values.global.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml .Values.global.imagePullSecrets | nindent 8 }}
      {{- end }}
{{ end }}

View File

@@ -0,0 +1,126 @@
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: vault-psp-rolebinding
data:
  chart_name: psp-rolebinding
  release: vault-psp-rolebinding
  namespace: vault
  values:
    rolebindingNamespace: vault
    serviceAccount: vault
  source:
    location: http://172.17.0.1:8080/helm_charts/stx-platform/psp-rolebinding-0.1.0.tgz
    subpath: psp-rolebinding
    type: tar
    reference: master
  upgrade:
    no_hooks: false
    pre:
      delete:
      - labels:
          release_group: vault-psp-rolebinding
        type: job
  wait:
    labels:
      release_group: vault-psp-rolebinding
    resources: []
    timeout: 1800
  dependencies: []
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: vault
data:
  chart_name: vault
  release: vault
  namespace: vault
  wait:
    timeout: 1800
    labels:
      app: vault
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
      - type: job
        labels:
          app: vault
  values:
    global:
      enabled: true
      tlsDisable: false
    injector:
      enabled: true
      image:
        repository: hashicorp/vault-k8s
        tag: 0.4.0
      agentImage:
        repository: vault
        tag: 1.4.2
    server:
      image:
        repository: vault
        tag: 1.4.2
      auditStorage:
        enabled: true
        size: 10Gi
      ha:
        enabled: true
        replicas: 3
        raft:
          enabled: true
          config: |
            ui = true
            listener "tcp" {
              tls_disable = 0
              address = "[::]:8200"
              cluster_address = "[::]:8201"
              tls_cert_file = "/vault/userconfig/vault-server-tls/tls.crt"
              tls_key_file = "/vault/userconfig/vault-server-tls/tls.key"
              tls_client_ca_file = "/vault/userconfig/vault-server-tls/ca.crt"
            }
            storage "raft" {
              path = "/vault/data"
            }
            service_registration "kubernetes" {}
      extraLabels:
        app: vault
      extraEnvironmentVars:
        VAULT_CACERT: /vault/userconfig/vault-server-tls/ca.crt
      extraVolumes:
      - type: secret
        name: vault-server-tls
  source:
    type: tar
    location: http://172.17.0.1/helm_charts/stx-platform/vault-0.6.0.tgz
    subpath: vault
    reference: master
  dependencies: []
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: vault
data:
  description: "Deploy Vault"
  sequenced: false
  chart_group:
  - vault
  - vault-psp-rolebinding
---
schema: armada/Manifest/v1
metadata:
  schema: metadata/Document/v1
  name: vault-manifest
data:
  release_prefix: sva
  chart_groups:
  - vault