Merge pull request #11 from mark-burnett/config-overhaul

Configuration overhaul
This commit is contained in:
Mark Burnett 2017-06-21 08:00:06 -05:00 committed by GitHub
commit 66c173e6da
84 changed files with 834 additions and 664 deletions

View File

@ -16,8 +16,8 @@ FROM python:3.6
ENV CNI_VERSION=v0.5.2 \
HELM_VERSION=v2.4.2 \
KUBECTL_VERSION=v1.6.2 \
KUBELET_VERSION=v1.6.2
KUBECTL_VERSION=v1.6.4 \
KUBELET_VERSION=v1.6.4
VOLUME /etc/promenade
VOLUME /target
@ -28,30 +28,23 @@ WORKDIR /promenade
RUN set -ex \
&& export BIN_DIR=/assets/usr/local/bin \
&& mkdir -p $BIN_DIR \
&& curl -sLo $BIN_DIR/kubelet http://storage.googleapis.com/kubernetes-release/release/$KUBELET_VERSION/bin/linux/amd64/kubelet \
&& curl -sLo $BIN_DIR/kubectl http://storage.googleapis.com/kubernetes-release/release/$KUBECTL_VERSION/bin/linux/amd64/kubectl \
&& curl -sLo $BIN_DIR/kubelet https://storage.googleapis.com/kubernetes-release/release/$KUBELET_VERSION/bin/linux/amd64/kubelet \
&& curl -sLo $BIN_DIR/kubectl https://storage.googleapis.com/kubernetes-release/release/$KUBECTL_VERSION/bin/linux/amd64/kubectl \
&& chmod 555 $BIN_DIR/kubelet \
&& chmod 555 $BIN_DIR/kubectl \
&& mkdir -p /assets/opt/cni/bin \
&& curl -sL https://github.com/containernetworking/cni/releases/download/$CNI_VERSION/cni-amd64-$CNI_VERSION.tgz | tar -zxv -C /assets/opt/cni/bin/ \
&& curl -sL https://storage.googleapis.com/kubernetes-helm/helm-${HELM_VERSION}-linux-amd64.tar.gz | tar -zxv -C /tmp linux-amd64/helm \
&& mv /tmp/linux-amd64/helm $BIN_DIR/helm \
&& chmod 555 $BIN_DIR/helm
RUN set -ex \
&& chmod 555 $BIN_DIR/helm \
&& curl -sLo /usr/local/bin/cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 \
&& chmod 555 /usr/local/bin/cfssl \
&& apt-get update -qq \
&& apt-get install --no-install-recommends -y \
libyaml-dev \
openssl \
rsync \
&& rm -rf /var/lib/apt/lists/*
RUN set -ex \
&& curl -sLo /usr/local/bin/cfssl https://pkg.cfssl.org/R1.1/cfssl_linux-amd64 \
&& chmod 555 /usr/local/bin/cfssl \
&& curl -sLo /usr/local/bin/cfssljson https://pkg.cfssl.org/R1.1/cfssljson_linux-amd64 \
&& chmod 555 /usr/local/bin/cfssljson
COPY requirements-frozen.txt /promenade
RUN pip install --no-cache-dir -r requirements-frozen.txt

View File

@ -7,6 +7,13 @@ Promenade is tool for deploying self-hosted, highly resilient Kubernetes cluster
Make sure you have [Vagrant](https://vagrantup.com) and
[VirtualBox](https://www.virtualbox.org/wiki/Downloads) installed.
Generate the certificates and keys to be used:
```bash
mkdir configs
docker run --rm -t -v $(pwd):/target quay.io/attcomdev/promenade:experimental promenade -v generate -c /target/example/vagrant-input-config.yaml -o /target/configs
```
Start the VMs:
```bash
@ -16,26 +23,20 @@ vagrant up
Start the genesis node:
```bash
vagrant ssh n0 -c 'sudo /vagrant/genesis.sh /vagrant/example/vagrant-config.yaml'
vagrant ssh n0 -c 'sudo /vagrant/genesis.sh /vagrant/configs/n0.yaml'
```
Join the master nodes:
```bash
vagrant ssh n1 -c 'sudo /vagrant/join.sh /vagrant/example/vagrant-config.yaml'
vagrant ssh n2 -c 'sudo /vagrant/join.sh /vagrant/example/vagrant-config.yaml'
vagrant ssh n1 -c 'sudo /vagrant/join.sh /vagrant/configs/n1.yaml'
vagrant ssh n2 -c 'sudo /vagrant/join.sh /vagrant/configs/n2.yaml'
```
Join the worker node:
```bash
vagrant ssh n3 -c 'sudo /vagrant/join.sh /vagrant/example/vagrant-config.yaml'
```
## Building the image
```bash
docker build -t quay.io/attcomdev/promenade:experimental .
vagrant ssh n3 -c 'sudo /vagrant/join.sh /vagrant/configs/n3.yaml'
```
## Using Promenade Behind a Proxy
@ -48,5 +49,32 @@ cd /vagrant
export DOCKER_HTTP_PROXY="http://proxy.server.com:8080"
export DOCKER_HTTPS_PROXY="https://proxy.server.com:8080"
export DOCKER_NO_PROXY="localhost,127.0.0.1"
sudo -E /vagrant/genesis.sh /vagrant/example/vagrant-config.yaml
sudo -E /vagrant/genesis.sh /vagrant/configs/n0.yaml
```
## Building the image
```bash
docker build -t quay.io/attcomdev/promenade:experimental .
```
For development, you may wish to save it and have the `genesis.sh` and
`join.sh` scripts load it:
```bash
docker save -o promenade.tar quay.io/attcomdev/promenade:experimental
```
Then on a node:
```bash
PROMENADE_LOAD_IMAGE=/vagrant/promenade.tar /vagrant/genesis.sh /vagrant/path/to/node-config.yaml
```
To build the image from behind a proxy, you can:
```bash
export http_proxy=...
export no_proxy=...
docker build --build-arg http_proxy=$http_proxy --build-arg https_proxy=$http_proxy --build-arg no_proxy=$no_proxy -t quay.io/attcomdev/promenade:experimental .
```

View File

@ -1,129 +0,0 @@
---
network:
cluster_domain: cluster.local
cluster_dns: 10.96.0.10
kube_service_ip: 10.96.0.1
pod_ip_cidr: 10.97.0.0/16
service_ip_cidr: 10.96.0.0/16
nodes:
n0:
ip: 192.168.77.10
roles:
- master
- genesis
additional_labels:
- beta.kubernetes.io/arch=amd64
n1:
ip: 192.168.77.11
roles:
- master
additional_labels:
- beta.kubernetes.io/arch=amd64
n2:
ip: 192.168.77.12
roles:
- master
additional_labels:
- beta.kubernetes.io/arch=amd64
n3:
ip: 192.168.77.13
roles:
- worker
additional_labels:
- beta.kubernetes.io/arch=amd64
pki:
cluster-ca: |-
-----BEGIN CERTIFICATE-----
MIIDzjCCAragAwIBAgIUKwePtKtZf/KbwdhRke8d38V294IwDQYJKoZIhvcNAQEL
BQAwbTELMAkGA1UEBhMCVVMxETAPBgNVBAgTCE1pc3NvdXJpMRQwEgYDVQQHEwtT
YWludCBMb3VpczETMBEGA1UEChMKS3ViZXJuZXRlczELMAkGA1UECxMCQ0ExEzAR
BgNVBAMTCkt1YmVybmV0ZXMwHhcNMTcwNjEzMTY1NzAwWhcNMjIwNjEyMTY1NzAw
WjBtMQswCQYDVQQGEwJVUzERMA8GA1UECBMITWlzc291cmkxFDASBgNVBAcTC1Nh
aW50IExvdWlzMRMwEQYDVQQKEwpLdWJlcm5ldGVzMQswCQYDVQQLEwJDQTETMBEG
A1UEAxMKS3ViZXJuZXRlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AO8vjAoGyv6KigTnF6WZMoskzfCC2ZsLT22y457/irOe2EYazHbeXz/7Jlb8LwWn
uMSaGlu/x5XfF3VGlMkq392S2CsfqLOO8AjUTn4YGOUx5IU++hh3SQ+cFrv/CF5l
jeeXZGoSbMLhMvaWc3MHGCTNktBe4Q+DyRCyw81fMH+2C9dZtjH+cKOBUvhcMIDW
z7i3MJ0th23PLyd9ZwVHDgyqUkzaY/zTgmSk1V++VJ9BwIn41/J/bW8peqPW1/cq
B8BX45SBgyKgVRkQGppsDH7+MqDFzU2ZWP0R1EtGz68+TJObtl6yHY/pj8ksg7T1
uQgZXzMrzsVoQNkiBKpiau0CAwEAAaNmMGQwDgYDVR0PAQH/BAQDAgEGMBIGA1Ud
EwEB/wQIMAYBAf8CAQIwHQYDVR0OBBYEFOGG+QV7EZ7kGhFxzB3P+ve4MdCsMB8G
A1UdIwQYMBaAFOGG+QV7EZ7kGhFxzB3P+ve4MdCsMA0GCSqGSIb3DQEBCwUAA4IB
AQCvth3gmSivlS+6dBuoKxK52pqzqtVCMr3YSqc1ORsWh6FQA+2M2ZSHKgfgkqfK
WaDkgV0FZl5IIQ2t3V8ZQEj+WI2crnoR6cTTz+vXOJXm780IpH717d3PTYKBv4sU
t8BpNhePPNeH7ZrW5P9+EVZ0ZFPSICbI9k8MFGlSJp5zgM6sinXmRaK59cnfBgEc
cCnjvuY/BzNIiABBSsg8Pj2hOduIVK0xP3DnqGkPV5BEQP/dmhe81CG1v6WQZpev
qC+jSvZYETWMg3sCQoyYveBRBce9vo94VqcA99FNnDoYsf16dZnKO6mP8rta21zp
O1G/5Sc5HA/MvMldKvLrtqG4
-----END CERTIFICATE-----
cluster-ca-key: |-
-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEA7y+MCgbK/oqKBOcXpZkyiyTN8ILZmwtPbbLjnv+Ks57YRhrM
dt5fP/smVvwvBae4xJoaW7/Hld8XdUaUySrf3ZLYKx+os47wCNROfhgY5THkhT76
GHdJD5wWu/8IXmWN55dkahJswuEy9pZzcwcYJM2S0F7hD4PJELLDzV8wf7YL11m2
Mf5wo4FS+FwwgNbPuLcwnS2Hbc8vJ31nBUcODKpSTNpj/NOCZKTVX75Un0HAifjX
8n9tbyl6o9bX9yoHwFfjlIGDIqBVGRAammwMfv4yoMXNTZlY/RHUS0bPrz5Mk5u2
XrIdj+mPySyDtPW5CBlfMyvOxWhA2SIEqmJq7QIDAQABAoIBAQCwCyLbTlyiNH2Z
Vi2FaNhWqWQaHXTkNNLlPsFiCVuhEMzF7HuJEeqxQLzbUQma8/N+YJ394Y2YtXai
jqx7096pSqdoNgkI/6+UEA8lp77LEonLuKqCz2kq4Aurmu4h7EUhq7/wglciqHXG
IL4gb5xJmjTwwKSNssWOUMTkp6celwakyzh1w+Sgo0qRKu75RtdkBnaLd2i8DI9F
N0v9aMO8zC317DVhTBw2Wl6ZK2P2kdh2BB54NPrRm8edfViz5p7oq/Fs3YHC6+Hn
cJMU87Wkxi/tbs2YKdnQraokLK40EpdDOsokW/IguHanvY55VTllzT9o5lEvsFCA
u0ZOasSBAoGBAPjDGgNkZP8WcmxhRFQRdaNn5/37g0I7mspdELNA8/7hPJGn9BCK
r+Ozf6LSjW6m2XVmluyCJSU/HbETfz1lo5HHUCV6uyIZHuHRF0ORovGTZJFSzYzL
WFs5JLe6dXwS096oxq2knWaVEocNbUOue2Ptui1izNlQ7yDFeS27VJ95AoGBAPYl
Ha7ZbAsY5M7VIzJTie3dt6QnWs8qfd7pV3IUrAuCjYSDOBUWommV1Mbxw2SyYntf
AvXBIbuzsbpFsjKEypyyud0XNj3hNFI1xAJKdAF213zQYs4nZZnI5YST7GGDEGwP
jCBm1MKLzHyUJ2ip1hc5zEM11hA8OsvK0vvyuIYVAoGBAI4sc6Gcr1xbJ+ppbPPf
RqytphmytcIU7tLZfcH1TX5OnJ9irksF+KDa5gfY7pxfH8nJaFijyTcQa5fY3M/q
VyHqGBRToMBMOyo0pmcnxUjsRH4KJRBi54y7jBC1sI/I8u4+5842Vv9aE8y8D8au
4jaql814ujs51nGUaz2H40WBAoGBAO+zM1XLu7CO3HsjCjR/L8mpaaV9AazO92a1
m4en4+cNitzpoBrBQQZLd7sJQrt0D/2Oh+Zk3oHYuxHnv2H8+QZh8igA67yU7AvG
+gs1EAVBAxY0JJQXv5RkFEboeoB3Tu28sjv3h+ewlkEXUc1V3vwdN/KXoc+Lp8I/
0Piz5MgFAoGAJQMFyA6sU03vW9nmmZuT5SYOgDm/JpJ9/fSwCxtmOxlRjzxL0G/l
OhnsGBv1NbtoDZ+YMYY/0BhOhv6yzIJMCDmi5yuCw0FysL4pAaW40NKiMtZSOBdH
ZuATA+uF7kV7K+NbO7FT0knfNjFkk9jVbjq+To3D3/FbVxS9VTbu9nk=
-----END RSA PRIVATE KEY-----
sa: |-
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA6jYQ1LKjd/s7bcgxlw1o
RR91Vb0MnSCUA4OSzJ5Hh0x8gOpllMpbeRdY4X605aOjYwku1Xlc9HFtjxMSDxjR
jDaLQnVy+stNScFuOLn5VfWtgHJ68WlgZSzIjxveDGVFw2YguQMj8vMPNeCq2EAc
/VFBWUXdNUC8/ipn2T4VA7DSjkZheNhHwigPIlS/kumfSXiIshMLM0P+Yx0wp72D
vqp93C4523COw2DTyiv4azUYIGHBkyWtgfIES4gavxp2oFgvxcPvl1Y7XuHJzH0g
ncZJVJS5o0WPFUzRlipyyZa0CxDKFkOy3pLQDEvn2mb5zL1rzd58kQowmLtP1aX7
mQIDAQAB
-----END PUBLIC KEY-----
sa-key: |-
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEA6jYQ1LKjd/s7bcgxlw1oRR91Vb0MnSCUA4OSzJ5Hh0x8gOpl
lMpbeRdY4X605aOjYwku1Xlc9HFtjxMSDxjRjDaLQnVy+stNScFuOLn5VfWtgHJ6
8WlgZSzIjxveDGVFw2YguQMj8vMPNeCq2EAc/VFBWUXdNUC8/ipn2T4VA7DSjkZh
eNhHwigPIlS/kumfSXiIshMLM0P+Yx0wp72Dvqp93C4523COw2DTyiv4azUYIGHB
kyWtgfIES4gavxp2oFgvxcPvl1Y7XuHJzH0gncZJVJS5o0WPFUzRlipyyZa0CxDK
FkOy3pLQDEvn2mb5zL1rzd58kQowmLtP1aX7mQIDAQABAoIBADdEhNo8QVjpvw9b
41/auRU+pCiUUOqvKl5d6QFCBG0H/oVJSqk+yzEa8k1b4gIiiEaxfwy+89F3Brxx
apyHZcNph5kqL/TAjr9t1r2qHQ1MySF7YkmfbTDSzYz/rXlNWJYQfn5KIGyPMLKt
DoOzNWQNjZcsZlPPsAlmJlVcUgcpeiPKEGYBwi/Xfp7kJZjr+jxn3U/VImiDBuA/
ipdqfzUsQc363mSnRCHGptmv3TBJh4TXpuoxAkjEryKhXDTjsDGWt4hqZJBZiF0I
eGAnhvignqle+fkTGwszUrz/8PMAdWUGeTQ/DsWcUUgGzbu7Q1libFo0mj+BA9fM
Y9De4wECgYEA97UDxjZX58RHTedpnUQFgg64ZPmKMYe9nQHvRCw1/9SRoUN/1zid
Zaz+IbNvjpBpwBwhxg1ISG0Wo02iMlbtOXsJnmE9o45FnyH/8uDfxj93pklaopxY
1GwGnR4q8xgUxol7rbL5mHBbcwXxAbU7uCFlTKmXEs5SzvJflMBCaqECgYEA8g1i
QPFSCdqXVRRm/u6js62QYyitXQLrlQWhr2Jd1vxD4ngPRE0mR3qe37RldcEO6x8Y
zeurj5g1pZFZOOcLZvBSE0TxFYMtsxa+42huAgUOs9RKtDfjgcMCRTcuCBQkpGXb
hpVPUTpm/VcAmoUYu1frFoo/0vkS3e/JLCPDJfkCgYB9Q+cSt6ygohvFA7/fLeTz
LmqFdcQy5Ag5fB75hLoSE/dJbA8cUZ8XKfKiLFG/8Lvp0NArjc/+AFywXLQnbNou
dVAZ7ebz7SC8Jr9+ncXMRZBGYVYaYaJyWebGUdk6cfUfqasH3jhmpHs6ociNKo92
wDywFhs2AWzTBrLbUJbFwQKBgAam2YFhYFjG+gurgN0Wn8cLSQGAl6sLrn+s5PGV
6XBEBHWPyROebyPducn6AiPHR2qssxjNlixfCXJgWSxYJRcSGZ9P8LQfo7zdLie/
se46R1onxlnHg2gIfOJ8DrbIHu2pouvC5Kgdy8DAiFK2v6Q+WUaITBK3J46TzVp6
LR25AoGAJF0PwL19DWsJq/lfftgaUqSBwgdJh4ene+lvatdAfFZ1D6LUE+wUXXd+
EyVxLnvg4Yp2j0ZxTPc2Bv/9/H/Rso79kdZgyt/cSA+FpgZRTy/zKl7BsNnJxgQJ
cpNottrjMWgRXrbmTkqmqUtkqc31HMTmZ3U1Fum/uh0sEOv7Rd0=
-----END RSA PRIVATE KEY-----

View File

@ -0,0 +1,46 @@
---
apiVersion: promenade/v1
kind: Cluster
metadata:
name: example
target: none
spec:
nodes:
n0:
ip: 192.168.77.10
roles:
- master
- genesis
additional_labels:
- beta.kubernetes.io/arch=amd64
n1:
ip: 192.168.77.11
roles:
- master
additional_labels:
- beta.kubernetes.io/arch=amd64
n2:
ip: 192.168.77.12
roles:
- master
additional_labels:
- beta.kubernetes.io/arch=amd64
n3:
ip: 192.168.77.13
roles:
- worker
additional_labels:
- beta.kubernetes.io/arch=amd64
---
apiVersion: promenade/v1
kind: Network
metadata:
cluster: example
name: example
target: all
spec:
cluster_domain: cluster.local
cluster_dns: 10.96.0.10
kube_service_ip: 10.96.0.1
pod_ip_cidr: 10.97.0.0/16
service_ip_cidr: 10.96.0.0/16

View File

@ -1,13 +0,0 @@
from promenade import logging
import os
import subprocess
__all__ = ['rsync']
LOG = logging.getLogger(__name__)
def rsync(*, src, dest):
LOG.info('Syncing assets from "%s" to "%s".', src, dest)
subprocess.run(['/usr/bin/rsync', '-r', os.path.join(src, ''), dest], check=True)

View File

@ -1,4 +1,4 @@
from . import logging, operator
from . import generator, logging, operator
import click
__all__ = []
@ -18,9 +18,7 @@ def promenade(*, verbose):
type=click.Path(exists=True, file_okay=False,
dir_okay=True, resolve_path=True),
help='Source path for binaries to deploy.')
@click.option('-c', '--config-path',
type=click.Path(exists=True, file_okay=True,
dir_okay=False, resolve_path=True),
@click.option('-c', '--config-path', type=click.File(),
help='Location of cluster configuration data.')
@click.option('--hostname', help='Current hostname.')
@click.option('-t', '--target-dir', default='/target',
@ -41,9 +39,7 @@ def genesis(*, asset_dir, config_path, hostname, target_dir):
type=click.Path(exists=True, file_okay=False,
dir_okay=True, resolve_path=True),
help='Source path for binaries to deploy.')
@click.option('-c', '--config-path',
type=click.Path(exists=True, file_okay=True,
dir_okay=False, resolve_path=True),
@click.option('-c', '--config-path', type=click.File(),
help='Location of cluster configuration data.')
@click.option('--hostname', help='Current hostname.')
@click.option('-t', '--target-dir', default='/target',
@ -57,3 +53,17 @@ def join(*, asset_dir, config_path, hostname, target_dir):
target_dir=target_dir)
op.join(asset_dir=asset_dir)
@promenade.command(help='Generate certs and keys')
@click.option('-c', '--config-path', type=click.File(),
required=True,
help='Location of cluster configuration data.')
@click.option('-o', '--output-dir', default='.',
type=click.Path(exists=True, file_okay=False, dir_okay=True,
resolve_path=True),
required=True,
help='Location to write complete cluster configuration.')
def generate(*, config_path, output_dir):
g = generator.Generator.from_config(config_path=config_path)
g.generate_all(output_dir)

View File

@ -1,113 +1,120 @@
from . import logging
from operator import itemgetter
from operator import attrgetter, itemgetter
import itertools
import yaml
__all__ = ['load_config_file']
__all__ = ['Configuration', 'Document', 'load']
LOG = logging.getLogger(__name__)
def load_config_file(*, config_path, hostname):
LOG.debug('Loading genesis configuration from "%s"', config_path)
cluster_data = yaml.load(open(config_path))
LOG.debug('Loaded genesis configruation from "%s"', config_path)
node_data = extract_node_data(hostname, cluster_data)
def load(f):
return Configuration(list(map(Document, yaml.load_all(f))))
return {
'cluster_data': cluster_data,
'node_data': node_data,
class Document:
KEYS = {
'apiVersion',
'metadata',
'kind',
'spec',
}
def extract_node_data(hostname, cluster_data):
genesis = _extract_genesis_data(cluster_data['nodes'])
masters = _extract_master_data(cluster_data['nodes'])
return {
'cluster': cluster_data['nodes'],
'current_node': _extract_current_node_data(cluster_data['nodes'],
hostname),
'etcd': _extract_etcd_data(hostname, genesis, masters),
'genesis': genesis,
'masters': masters,
'network': cluster_data['network'],
SUPPORTED_KINDS = {
'Certificate',
'CertificateAuthority',
'CertificateAuthorityKey',
'CertificateKey',
'Cluster',
'Etcd',
'Masters',
'Network',
'Node',
'PrivateKey',
'PublicKey',
}
def __init__(self, data):
if set(data.keys()) != self.KEYS:
LOG.error('data.keys()=%s expected %s', data.keys(), self.KEYS)
raise AssertionError('Did not get expected keys')
assert data['apiVersion'] == 'promenade/v1'
assert data['kind'] in self.SUPPORTED_KINDS
assert data['metadata']['name']
def _extract_etcd_data(hostname, genesis, masters):
LOG.info('hostname=%r genesis=%r masters=%r',
hostname, genesis, masters)
non_genesis_masters = [d for d in masters if d['hostname'] != genesis['hostname']]
boot_order = [genesis] + sorted(non_genesis_masters, key=itemgetter('hostname'))
self.data = data
result = {
'boot_order': boot_order,
'env': {},
}
@property
def kind(self):
return self.data['kind']
peers = [
{
'hostname': 'auxiliary-etcd-%d' % i,
'peer_port': 2380 + (i + 1) * 10000
}
for i in range(2)
]
peers.append({
'hostname': genesis['hostname'],
})
@property
def name(self):
return self.metadata['name']
if hostname == genesis['hostname']:
result['env']['ETCD_INITIAL_CLUSTER_STATE'] = 'new'
else:
result['env']['ETCD_INITIAL_CLUSTER_STATE'] = 'existing'
for host in non_genesis_masters:
peers.append({'hostname': host['hostname']})
@property
def target(self):
return self.metadata.get('target')
result['env']['ETCD_INITIAL_CLUSTER'] = ','.join(
'%s=https://%s:%d' % (p['hostname'], p['hostname'], p.get('peer_port', 2380))
for p in peers)
@property
def metadata(self):
return self.data['metadata']
return result
def __getitem__(self, key):
return self.data['spec'][key]
def _extract_current_node_data(nodes, hostname):
base = nodes[hostname]
return {
'hostname': hostname,
'labels': _extract_node_labels(base),
**base,
}
class Configuration:
def __init__(self, documents):
self.documents = sorted(documents, key=attrgetter('kind', 'target'))
self.validate()
ROLE_LABELS = {
'genesis': [
'promenade=genesis',
],
'master': [
'node-role.kubernetes.io/master=',
],
}
def validate(self):
identifiers = set()
for document in self.documents:
identifier = (document.kind, document.name)
if identifier in identifiers:
LOG.error('Found duplicate document in config: kind=%s name=%s',
document.kind, document.name)
raise RuntimeError('Duplicate document')
else:
identifiers.add(identifier)
def __getitem__(self, key):
results = [d for d in self.documents if d.kind == key]
if len(results) < 1:
raise KeyError
elif len(results) > 1:
raise KeyError('Too many results.')
else:
return results[0]
def _extract_node_labels(data):
labels = set(itertools.chain.from_iterable(
map(lambda k: ROLE_LABELS.get(k, []), ['common'] + data['roles'])))
labels.update(data.get('additional_labels', []))
return sorted(labels)
def get(self, *, kind, name):
for document in self.documents:
if document.kind == kind and document.name == name:
return document
def iterate(self, *, kind=None, target=None):
if target:
docs = self._iterate_with_target(target)
else:
docs = self.documents
def _extract_genesis_data(nodes):
for hostname, node in nodes.items():
if 'genesis' in node['roles']:
return {
'hostname': hostname,
'ip': node['ip'],
}
for document in docs:
if not kind or document.kind == kind:
yield document
def _iterate_with_target(self, target):
for document in self.documents:
if document.target == target or document.target == 'all':
yield document
def _extract_master_data(nodes):
return sorted(({'hostname': hostname, 'ip': node['ip']}
for hostname, node in nodes.items()
if 'master' in node['roles']),
key=itemgetter('hostname'))
def write(self, path):
with open(path, 'w') as f:
yaml.dump_all(map(attrgetter('data'), self.documents),
default_flow_style=False,
explicit_start=True,
indent=2,
stream=f)

341
promenade/generator.py Normal file
View File

@ -0,0 +1,341 @@
from . import config, logging, pki
import os
__all__ = ['Generator']
LOG = logging.getLogger(__name__)
class Generator:
@classmethod
def from_config(cls, *, config_path):
return cls(input_config=(config.load(config_path)))
def __init__(self, *, input_config):
self.input_config = input_config
self.validate()
def validate(self):
required_kinds = ['Cluster', 'Network']
for required_kind in required_kinds:
try:
self.input_config[required_kind]
except KeyError:
LOG.error('Generator requires one "%s" document to function.',
required_kind)
raise
assert self.input_config['Cluster'].metadata['name'] \
== self.input_config['Network'].metadata['cluster']
def generate_all(self, output_dir):
cluster = self.input_config['Cluster']
network = self.input_config['Network']
cluster_name = cluster.metadata['name']
LOG.info('Generating configuration for cluster "%s"', cluster_name)
masters = self.construct_masters(cluster_name)
LOG.info('Generating common PKI for cluster "%s"', cluster_name)
keys = pki.PKI(cluster_name)
cluster_ca, cluster_ca_key = keys.generate_ca(
ca_name='cluster',
cert_target='all',
key_target='masters')
etcd_client_ca, etcd_client_ca_key = keys.generate_ca(
ca_name='etcd-client',
cert_target='all',
key_target='masters')
etcd_peer_ca, etcd_peer_ca_key = keys.generate_ca(
ca_name='etcd-peer',
cert_target='all',
key_target='masters')
admin_cert, admin_cert_key = keys.generate_certificate(
name='admin',
ca_name='cluster',
groups=['system:masters'],
target='masters',
)
sa_pub, sa_priv = keys.generate_keypair(
name='service-account',
target='masters',
)
config.Configuration([
cluster_ca,
cluster_ca_key,
etcd_client_ca,
etcd_client_ca_key,
etcd_peer_ca,
etcd_peer_ca_key,
sa_pub,
sa_priv,
]).write(os.path.join(output_dir, 'admin-bundle.yaml'))
for hostname, data in cluster['nodes'].items():
if 'genesis' in data['roles']:
genesis_hostname = hostname
break
for hostname, data in cluster['nodes'].items():
LOG.debug('Generating configuration & PKI for hostname=%s',
hostname)
node = _construct_node_config(cluster_name, hostname, data)
kubelet_cert, kubelet_cert_key = keys.generate_certificate(
alias='kubelet',
name='system:node:%s' % hostname,
ca_name='cluster',
groups=['system:nodes'],
hosts=[
hostname,
data['ip'],
],
target=hostname)
proxy_cert, proxy_cert_key = keys.generate_certificate(
alias='proxy',
name='system:kube-proxy',
ca_name='cluster',
hosts=[
hostname,
data['ip'],
],
target=hostname)
common_documents = [
cluster_ca,
kubelet_cert,
kubelet_cert_key,
masters,
network,
node,
proxy_cert,
proxy_cert_key,
]
role_specific_documents = []
if 'master' in data['roles']:
role_specific_documents.extend([
admin_cert,
admin_cert_key,
cluster_ca_key,
etcd_client_ca,
etcd_peer_ca,
sa_priv,
sa_pub,
])
if 'genesis' not in data['roles']:
role_specific_documents.append(
_master_etcd_config(cluster_name, genesis_hostname,
hostname, masters)
)
role_specific_documents.extend(_master_config(hostname, data,
masters, network, keys))
if 'genesis' in data['roles']:
role_specific_documents.extend(_genesis_config(hostname, data,
masters, network, keys))
role_specific_documents.append(_genesis_etcd_config(cluster_name, hostname))
node.data['spec']['is_genesis'] = True
c = config.Configuration(common_documents + role_specific_documents)
c.write(os.path.join(output_dir, hostname + '.yaml'))
def construct_masters(self, cluster_name):
masters = []
for hostname, data in self.input_config['Cluster']['nodes'].items():
if 'master' in data['roles'] or 'genesis' in data['roles']:
masters.append({'hostname': hostname, 'ip': data['ip']})
return config.Document({
'apiVersion': 'promenade/v1',
'kind': 'Masters',
'metadata': {
'cluster': cluster_name,
'name': cluster_name,
'target': 'all',
},
'spec': {
'nodes': masters,
},
})
def _master_etcd_config(cluster_name, genesis_hostname, hostname, masters):
initial_cluster = ['%s=https://%s:2380' % (m['hostname'],
m['hostname'])
for m in masters['nodes']]
initial_cluster.extend([
'auxiliary-etcd-0=https://%s:12380' % genesis_hostname,
'auxiliary-etcd-1=https://%s:22380' % genesis_hostname,
])
return _etcd_config(cluster_name, name='master-etcd',
target=hostname,
initial_cluster=initial_cluster,
initial_cluster_state='existing')
def _genesis_etcd_config(cluster_name, hostname):
initial_cluster = [
'%s=https://%s:2380' % (hostname, hostname),
'auxiliary-etcd-0=https://%s:12380' % hostname,
'auxiliary-etcd-1=https://%s:22380' % hostname,
]
return _etcd_config(cluster_name, name='genesis-etcd',
target=hostname,
initial_cluster=initial_cluster,
initial_cluster_state='new')
def _etcd_config(cluster_name, *, name, target,
initial_cluster, initial_cluster_state):
return config.Document({
'apiVersion': 'promenade/v1',
'kind': 'Etcd',
'metadata': {
'cluster': cluster_name,
'name': name,
'target': target,
},
'spec': {
'initial_cluster': initial_cluster,
'initial_cluster_state': initial_cluster_state,
},
})
def _master_config(hostname, host_data, masters, network, keys):
kube_domains = [
'kubernetes',
'kubernetes.default',
'kubernetes.default.svc',
'kubernetes.default.svc.cluster.local',
'127.0.0.1',
]
docs = []
docs.extend(keys.generate_certificate(
alias='etcd-client',
name='etcd:client:%s' % hostname,
ca_name='etcd-client',
hosts=kube_domains + [hostname, host_data['ip']],
target=hostname,
))
docs.extend(keys.generate_certificate(
alias='etcd-apiserver-client',
name='etcd:client:apiserver:%s' % hostname,
ca_name='etcd-client',
hosts=[hostname, host_data['ip']],
target=hostname,
))
docs.extend(keys.generate_certificate(
alias='etcd-peer',
name='etcd:peer:%s' % hostname,
ca_name='etcd-peer',
hosts=kube_domains + [hostname, host_data['ip']],
target=hostname,
))
docs.extend(keys.generate_certificate(
alias='apiserver',
name='apiserver:%s' % hostname,
ca_name='cluster',
hosts=kube_domains + [
network['kube_service_ip'],
hostname,
host_data['ip'],
],
target=hostname,
))
docs.extend(keys.generate_certificate(
alias='controller-manager',
name='system:kube-controller-manager',
ca_name='cluster',
hosts=[
hostname,
host_data['ip'],
],
target=hostname,
))
docs.extend(keys.generate_certificate(
alias='scheduler',
name='system:kube-scheduler',
ca_name='cluster',
hosts=[
hostname,
host_data['ip'],
],
target=hostname,
))
return docs
def _genesis_config(hostname, host_data, masters, network, keys):
docs = []
for i in range(2):
docs.extend(keys.generate_certificate(
name='auxiliary-etcd-%d-client' % i,
ca_name='etcd-client',
hosts=[hostname, host_data['ip']],
target=hostname,
))
docs.extend(keys.generate_certificate(
name='auxiliary-etcd-%d-peer' % i,
ca_name='etcd-peer',
hosts=[hostname, host_data['ip']],
target=hostname,
))
return docs
def _construct_node_config(cluster_name, hostname, data):
spec = {
'hostname': hostname,
'ip': data['ip'],
'labels': _labels(data['roles'], data.get('additional_labels', [])),
'templates': _templates(data['roles']),
}
return config.Document({
'apiVersion': 'promenade/v1',
'kind': 'Node',
'metadata': {
'cluster': cluster_name,
'name': hostname,
'target': hostname,
},
'spec': spec,
})
ROLE_LABELS = {
'genesis': [
'promenade=genesis',
],
'master': [
'node-role.kubernetes.io/master=',
],
}
def _labels(roles, additional_labels):
result = set()
for role in roles:
result.update(ROLE_LABELS.get(role, []))
result.update(additional_labels)
return sorted(result)
def _templates(roles):
return ['common'] + roles

View File

@ -1,4 +1,4 @@
from . import config, logging, pki, renderer
from . import config, logging, renderer
import os
import subprocess
@ -10,18 +10,13 @@ LOG = logging.getLogger(__name__)
class Operator:
@classmethod
def from_config(cls, *,
config_path,
hostname,
target_dir):
def from_config(cls, *, config_path, hostname, target_dir):
return cls(hostname=hostname, target_dir=target_dir,
**config.load_config_file(config_path=config_path,
hostname=hostname))
config_=config.load(config_path))
def __init__(self, *, cluster_data, hostname, node_data, target_dir):
self.cluster_data = cluster_data
def __init__(self, *, config_, hostname, target_dir):
self.config = config_
self.hostname = hostname
self.node_data = node_data
self.target_dir = target_dir
def genesis(self, *, asset_dir=None):
@ -33,7 +28,6 @@ class Operator:
def setup(self, *, asset_dir):
self.rsync_from(asset_dir)
self.render()
self.install_keys()
self.bootstrap()
@ -48,14 +42,10 @@ class Operator:
def render(self):
r = renderer.Renderer(node_data=self.node_data,
r = renderer.Renderer(config=self.config,
target_dir=self.target_dir)
r.render()
def install_keys(self):
pki.generate_keys(initial_pki=self.cluster_data['pki'],
target_dir=self.target_dir)
def bootstrap(self):
LOG.debug('Running genesis script with chroot "%s"', self.target_dir)
subprocess.run([os.path.join(self.target_dir, 'usr/sbin/chroot'),

View File

@ -1,143 +1,154 @@
from promenade import logging
from . import config, logging
import json
import os
import shutil
import subprocess
import tempfile
import yaml
__all__ = ['generate_keys']
__all__ = ['PKI']
LOG = logging.getLogger(__name__)
CA_ONLY_MAP = {
'cluster-ca': [
'kubelet',
],
}
class PKI:
def __init__(self, cluster_name, *, ca_config=None):
self.certificate_authorities = {}
self.cluster_name = cluster_name
self._ca_config_string = None
if ca_config:
self._ca_config_string = json.dumps(ca_config)
@property
def ca_config(self):
if not self._ca_config_string:
self._ca_config_string = json.dumps({
'signing': {
'default': {
'expiry': '8760h',
'usages': ['signing', 'key encipherment', 'server auth', 'client auth'],
},
},
})
return self._ca_config_string
def generate_ca(self, *, ca_name, cert_target, key_target):
result = self._cfssl(['gencert', '-initca', 'csr.json'],
files={
'csr.json': self.csr(
name='Kubernetes',
groups=['Kubernetes']),
})
LOG.debug('ca_cert=%r', result['cert'])
self.certificate_authorities[ca_name] = result
return (self._wrap('CertificateAuthority', result['cert'],
name=ca_name,
target=cert_target),
self._wrap('CertificateAuthorityKey', result['key'],
name=ca_name,
target=key_target))
def generate_keypair(self, *, alias=None, name, target):
priv_result = self._openssl(['genrsa', '-out', 'priv.pem'])
pub_result = self._openssl(['rsa', '-in', 'priv.pem', '-pubout', '-out', 'pub.pem'],
files={
'priv.pem': priv_result['priv.pem'],
})
if not alias:
alias = name
return (self._wrap('PublicKey', pub_result['pub.pem'],
name=alias,
target=target),
self._wrap('PrivateKey', priv_result['priv.pem'],
name=alias,
target=target))
FULL_DISTRIBUTION_MAP = {
'apiserver': [
'apiserver',
],
'apiserver-key': [
'apiserver',
],
'controller-manager': [
'controller-manager',
],
'controller-manager-key': [
'controller-manager',
],
'kubelet': [
'kubelet',
],
'kubelet-key': [
'kubelet',
],
'proxy': [
'proxy',
],
'proxy-key': [
'proxy',
],
'scheduler': [
'scheduler',
],
'scheduler-key': [
'scheduler',
],
def generate_certificate(self, *, alias=None, ca_name, groups=[], hosts=[], name, target):
result = self._cfssl(
['gencert',
'-ca', 'ca.pem',
'-ca-key', 'ca-key.pem',
'-config', 'ca-config.json',
'csr.json'],
files={
'ca-config.json': self.ca_config,
'ca.pem': self.certificate_authorities[ca_name]['cert'],
'ca-key.pem': self.certificate_authorities[ca_name]['key'],
'csr.json': self.csr(name=name, groups=groups, hosts=hosts),
})
'cluster-ca': [
'admin',
'apiserver',
'asset-loader',
'controller-manager',
'etcd',
'genesis',
'kubelet',
'proxy',
'scheduler',
],
'cluster-ca-key': [
'controller-manager',
],
if not alias:
alias = name
'sa': [
'apiserver',
],
'sa-key': [
'controller-manager',
],
return (self._wrap('Certificate', result['cert'],
name=alias,
target=target),
self._wrap('CertificateKey', result['key'],
name=alias,
target=target))
'etcd': [
'etcd',
],
'etcd-key': [
'etcd',
],
def csr(self, *, name, groups=[], hosts=[], key={'algo': 'rsa', 'size': 2048}):
return json.dumps({
'CN': name,
'key': key,
'hosts': hosts,
'names': [{'O': g} for g in groups],
})
'admin': [
'admin',
],
'admin-key': [
'admin',
],
'asset-loader': [
'asset-loader',
],
'asset-loader-key': [
'asset-loader',
],
'genesis': [
'genesis',
],
'genesis-key': [
'genesis',
],
}
def generate_keys(*, initial_pki, target_dir):
if os.path.exists(os.path.join(target_dir, 'etc/kubernetes/cfssl')):
def _cfssl(self, command, *, files=None):
if not files:
files = {}
with tempfile.TemporaryDirectory() as tmp:
_write_initial_pki(tmp, initial_pki)
for filename, data in files.items():
with open(os.path.join(tmp, filename), 'w') as f:
f.write(data)
_generate_certs(tmp, target_dir)
return json.loads(subprocess.check_output(
['cfssl'] + command, cwd=tmp))
_distribute_files(tmp, target_dir, FULL_DISTRIBUTION_MAP)
def _openssl(self, command, *, files=None):
if not files:
files = {}
with tempfile.TemporaryDirectory() as tmp:
for filename, data in files.items():
with open(os.path.join(tmp, filename), 'w') as f:
f.write(data)
subprocess.check_call(['openssl'] + command, cwd=tmp)
result = {}
for filename in os.listdir(tmp):
if filename not in files:
with open(os.path.join(tmp, filename)) as f:
result[filename] = f.read()
return result
def _wrap(self, kind, data, **metadata):
return config.Document({
'apiVersion': 'promenade/v1',
'kind': kind,
'metadata': {
'cluster': self.cluster_name,
**metadata,
},
'spec': {
'data': block_literal(data),
},
})
def _write_initial_pki(tmp, initial_pki):
for filename, data in initial_pki.items():
path = os.path.join(tmp, filename + '.pem')
with open(path, 'w') as f:
LOG.debug('Writing data for "%s" to path "%s"', filename, path)
f.write(data)
class block_literal(str): pass
def _generate_certs(dest, target):
ca_config_path = os.path.join(target, 'etc/kubernetes/cfssl/ca-config.json')
ca_path = os.path.join(dest, 'cluster-ca.pem')
ca_key_path = os.path.join(dest, 'cluster-ca-key.pem')
search_dir = os.path.join(target, 'etc/kubernetes/cfssl/csr-configs')
for filename in os.listdir(search_dir):
name, _ext = os.path.splitext(filename)
LOG.info('Generating cert for %s', name)
path = os.path.join(search_dir, filename)
cfssl_result = subprocess.check_output([
'cfssl', 'gencert', '-ca', ca_path, '-ca-key', ca_key_path,
'-config', ca_config_path, '-profile', 'kubernetes', path])
subprocess.run(['cfssljson', '-bare', name], cwd=dest,
input=cfssl_result, check=True)
def block_literal_representer(dumper, data):
    # Represent block_literal strings using YAML literal block style ('|'),
    # preserving embedded newlines (e.g. multi-line PEM certificate bodies).
    return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')
def _distribute_files(src, dest, distribution_map):
for filename, destinations in distribution_map.items():
src_path = os.path.join(src, filename + '.pem')
if os.path.exists(src_path):
for destination in destinations:
dest_dir = os.path.join(dest, 'etc/kubernetes/%s/pki' % destination)
os.makedirs(dest_dir, exist_ok=True)
shutil.copy(src_path, dest_dir)
# Register the representer so yaml.dump automatically renders
# block_literal values in '|' literal block style.
yaml.add_representer(block_literal, block_literal_representer)

View File

@ -10,19 +10,14 @@ LOG = logging.getLogger(__name__)
class Renderer:
def __init__(self, *, node_data, target_dir):
self.data = node_data
def __init__(self, *, config, target_dir):
self.config = config
self.target_dir = target_dir
@property
def template_paths(self):
return ['common'] + self.data['current_node']['roles']
def render(self):
for template_dir in self.template_paths:
for template_dir in self.config['Node']['templates']:
self.render_template_dir(template_dir)
def render_template_dir(self, template_dir):
source_root = pkg_resources.resource_filename(
'promenade', os.path.join('templates', template_dir))
@ -46,7 +41,7 @@ class Renderer:
with open(path) as f:
template = env.from_string(f.read())
rendered_data = template.render(**self.data)
rendered_data = template.render(config=self.config)
with open(target_path, 'w') as f:
f.write(rendered_data)

View File

@ -1,2 +0,0 @@
host-record=auxiliary-etcd-0,{{ genesis['ip'] }}
host-record=auxiliary-etcd-1,{{ genesis['ip'] }}

View File

@ -1,4 +1,4 @@
{% for master in masters %}
{% for master in config['Masters']['nodes'] %}
host-record=kubernetes,{{ master['ip'] }}
host-record={{ master['hostname'] }},{{ master['ip'] }}
{% endfor %}

View File

@ -1,13 +0,0 @@
{
"signing": {
"default": {
"expiry": "8760h"
},
"profiles": {
"kubernetes": {
"usages": ["signing", "key encipherment", "server auth", "client auth"],
"expiry": "8760h"
}
}
}
}

View File

@ -1,16 +0,0 @@
{
"CN": "system:node:{{ current_node['hostname'] }}",
"hosts": [
"{{ current_node['hostname'] }}",
"{{ current_node['ip'] }}"
],
"names": [
{
"O": "system:nodes"
}
],
"key": {
"algo": "rsa",
"size": 2048
}
}

View File

@ -1,11 +0,0 @@
{
"CN": "system:kube-proxy",
"hosts": [
"{{ current_node['hostname'] }}",
"{{ current_node['ip'] }}"
],
"key": {
"algo": "rsa",
"size": 2048
}
}

View File

@ -12,13 +12,13 @@ metadata:
spec:
containers:
- name: kube-proxy
image: gcr.io/google_containers/hyperkube-amd64:v1.6.2
image: gcr.io/google_containers/hyperkube-amd64:v1.6.4
command:
- /hyperkube
- proxy
- --cluster-cidr={{ network.pod_ip_cidr }}
- --cluster-cidr={{ config['Network']['pod_ip_cidr'] }}
- --hostname-override=$(NODE_NAME)
- --kubeconfig=/etc/kubernetes/config/kubeconfig.yaml
- --kubeconfig=/etc/kubernetes/proxy/kubeconfig.yaml
- --proxy-mode=iptables
- --v=5
env:
@ -30,7 +30,7 @@ spec:
privileged: true
volumeMounts:
- name: config
mountPath: /etc/kubernetes
mountPath: /etc/kubernetes/proxy
readOnly: true
hostNetwork: true
volumes:

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateAuthority', name='cluster')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateKey', name='kubelet')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='Certificate', name='kubelet')['data'] }}

View File

@ -3,7 +3,7 @@ apiVersion: v1
clusters:
- cluster:
server: https://kubernetes
certificate-authority: /etc/kubernetes/pki/cluster-ca.pem
certificate-authority: /etc/kubernetes/proxy/pki/cluster-ca.pem
name: kubernetes
contexts:
- context:
@ -16,5 +16,5 @@ preferences: {}
users:
- name: proxy
user:
client-certificate: /etc/kubernetes/pki/proxy.pem
client-key: /etc/kubernetes/pki/proxy-key.pem
client-certificate: /etc/kubernetes/proxy/pki/proxy.pem
client-key: /etc/kubernetes/proxy/pki/proxy-key.pem

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateAuthority', name='cluster')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateKey', name='proxy')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='Certificate', name='proxy')['data'] }}

View File

@ -5,15 +5,15 @@ Documentation=https://kubernetes.io/docs/admin/kubelet/
[Service]
ExecStart=/usr/local/bin/kubelet \
--allow-privileged=true \
--cluster-dns={{ network.cluster_dns }} \
--cluster-domain={{ network.cluster_domain }} \
--cluster-dns={{ config['Network']['cluster_dns'] }} \
--cluster-domain={{ config['Network']['cluster_domain'] }} \
--cni-bin-dir=/opt/cni/bin \
--cni-conf-dir=/etc/cni/net.d \
--hostname-override={{ current_node.hostname }} \
--hostname-override={{ config['Node']['hostname'] }} \
--kubeconfig=/etc/kubernetes/kubelet/kubeconfig.yaml \
--network-plugin=cni \
--node-ip={{ current_node.ip }} \
--node-labels={{ current_node.labels | join(',') }} \
--node-ip={{ config['Node']['ip'] }} \
--node-labels={{ config['Node']['labels'] | join(',') }} \
--pod-manifest-path=/etc/kubernetes/kubelet/manifests \
--require-kubeconfig=true \
--v=5

View File

@ -67,7 +67,7 @@ data:
}
net-conf.json: |
{
"Network": "{{ network.pod_ip_cidr }}",
"Network": "{{ config['Network']['pod_ip_cidr'] }}",
"Backend": {
"Type": "vxlan"
}

View File

@ -23,7 +23,7 @@ metadata:
spec:
selector:
k8s-app: kube-dns
clusterIP: {{ network.cluster_dns }}
clusterIP: {{ config['Network']['cluster_dns'] }}
ports:
- name: dns
port: 53

View File

@ -3,7 +3,7 @@ apiVersion: v1
clusters:
- cluster:
server: https://kubernetes
certificate-authority: /etc/kubernetes/pki/cluster-ca.pem
certificate-authority: /etc/kubernetes/asset-loader/pki/cluster-ca.pem
name: kubernetes
contexts:
- context:
@ -16,5 +16,5 @@ preferences: {}
users:
- name: asset-loader
user:
client-certificate: /etc/kubernetes/pki/asset-loader.pem
client-key: /etc/kubernetes/pki/asset-loader-key.pem
client-certificate: /etc/kubernetes/asset-loader/pki/asset-loader.pem
client-key: /etc/kubernetes/asset-loader/pki/asset-loader-key.pem

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateKey', name='admin')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='Certificate', name='admin')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateAuthority', name='cluster')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateAuthority', name='etcd-client')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateKey', name='auxiliary-etcd-0-client')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='Certificate', name='auxiliary-etcd-0-client')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateKey', name='auxiliary-etcd-0-peer')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='Certificate', name='auxiliary-etcd-0-peer')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateAuthority', name='etcd-peer')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateAuthority', name='etcd-client')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateKey', name='auxiliary-etcd-1-client')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='Certificate', name='auxiliary-etcd-1-client')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateKey', name='auxiliary-etcd-1-peer')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='Certificate', name='auxiliary-etcd-1-peer')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateAuthority', name='etcd-peer')['data'] }}

View File

@ -1,16 +0,0 @@
{
"CN": "asset-loader",
"hosts": [
"{{ current_node['hostname'] }}",
"{{ current_node['ip'] }}"
],
"names": [
{
"O": "system:masters"
}
],
"key": {
"algo": "rsa",
"size": 2048
}
}

View File

@ -1,19 +0,0 @@
{
"CN": "etcd:{{ current_node['hostname'] }}",
"hosts": [
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster.local",
"127.0.0.1",
"{{ current_node['hostname'] }}",
"auxiliary-etcd-0",
"auxiliary-etcd-1",
"{{ current_node['ip'] }}",
"{{ network.kube_service_ip }}"
],
"key": {
"algo": "rsa",
"size": 2048
}
}

View File

@ -1,16 +0,0 @@
{
"CN": "genesis",
"hosts": [
"{{ current_node['hostname'] }}",
"{{ current_node['ip'] }}"
],
"names": [
{
"O": "system:masters"
}
],
"key": {
"algo": "rsa",
"size": 2048
}
}

View File

@ -3,7 +3,7 @@ apiVersion: v1
clusters:
- cluster:
server: https://127.0.0.1
certificate-authority: /target/etc/kubernetes/genesis/pki/cluster-ca.pem
certificate-authority: /target/etc/kubernetes/admin/pki/cluster-ca.pem
name: kubernetes
contexts:
- context:
@ -16,5 +16,5 @@ preferences: {}
users:
- name: genesis
user:
client-certificate: /target/etc/kubernetes/genesis/pki/genesis.pem
client-key: /target/etc/kubernetes/genesis/pki/genesis-key.pem
client-certificate: /target/etc/kubernetes/admin/pki/admin.pem
client-key: /target/etc/kubernetes/admin/pki/admin-key.pem

View File

@ -12,7 +12,7 @@ spec:
hostNetwork: true
containers:
- name: loader
image: gcr.io/google_containers/hyperkube-amd64:v1.6.2
image: gcr.io/google_containers/hyperkube-amd64:v1.6.4
command:
- /bin/bash
- -c
@ -21,12 +21,12 @@ spec:
while true; do
sleep 60
/kubectl \
--kubeconfig /etc/kubernetes/kubeconfig.yaml \
apply -f /etc/kubernetes/assets
--kubeconfig /etc/kubernetes/asset-loader/kubeconfig.yaml \
apply -f /etc/kubernetes/asset-loader/assets
done
volumeMounts:
- name: config
mountPath: /etc/kubernetes
mountPath: /etc/kubernetes/asset-loader
readOnly: true
volumes:
- name: config

View File

@ -22,31 +22,31 @@ spec:
- name: ETCD_DATA_DIR
value: /var/lib/auxiliary-etcd-0
- name: ETCD_TRUSTED_CA_FILE
value: /etc/etcd-pki/cluster-ca.pem
value: /etc/kubernetes/auxiliary-etcd-0/pki/client-ca.pem
- name: ETCD_CERT_FILE
value: /etc/etcd-pki/etcd.pem
value: /etc/kubernetes/auxiliary-etcd-0/pki/etcd-client.pem
- name: ETCD_KEY_FILE
value: /etc/etcd-pki/etcd-key.pem
value: /etc/kubernetes/auxiliary-etcd-0/pki/etcd-client-key.pem
- name: ETCD_PEER_TRUSTED_CA_FILE
value: /etc/etcd-pki/cluster-ca.pem
value: /etc/kubernetes/auxiliary-etcd-0/pki/peer-ca.pem
- name: ETCD_PEER_CERT_FILE
value: /etc/etcd-pki/etcd.pem
value: /etc/kubernetes/auxiliary-etcd-0/pki/etcd-peer.pem
- name: ETCD_PEER_KEY_FILE
value: /etc/etcd-pki/etcd-key.pem
value: /etc/kubernetes/auxiliary-etcd-0/pki/etcd-peer-key.pem
- name: ETCD_ADVERTISE_CLIENT_URLS
value: https://$(ETCD_NAME):12379
value: https://{{ config['Node']['hostname'] }}:12379
- name: ETCD_INITIAL_ADVERTISE_PEER_URLS
value: https://$(ETCD_NAME):12380
value: https://{{ config['Node']['hostname'] }}:12380
- name: ETCD_INITIAL_CLUSTER_TOKEN
value: promenade-kube-etcd-token
- name: ETCD_LISTEN_CLIENT_URLS
value: https://0.0.0.0:12379
- name: ETCD_LISTEN_PEER_URLS
value: https://0.0.0.0:12380
{%- for env_name, env_value in etcd['env'].items() %}
- name: {{ env_name }}
value: {{ env_value }}
{%- endfor %}
- name: ETCD_INITIAL_CLUSTER_STATE
value: {{ config['Etcd']['initial_cluster_state'] }}
- name: ETCD_INITIAL_CLUSTER
value: {{ config['Etcd']['initial_cluster'] | join(',') }}
ports:
- name: client
containerPort: 12379
@ -60,8 +60,8 @@ spec:
volumeMounts:
- name: data-0
mountPath: /var/lib/auxiliary-etcd-0
- name: pki
mountPath: /etc/etcd-pki
- name: pki-0
mountPath: /etc/kubernetes/auxiliary-etcd-0/pki
readOnly: true
- name: auxiliary-etcd-1
image: quay.io/coreos/etcd:v3.0.17
@ -75,31 +75,31 @@ spec:
- name: ETCD_DATA_DIR
value: /var/lib/auxiliary-etcd-1
- name: ETCD_TRUSTED_CA_FILE
value: /etc/etcd-pki/cluster-ca.pem
value: /etc/kubernetes/auxiliary-etcd-1/pki/client-ca.pem
- name: ETCD_CERT_FILE
value: /etc/etcd-pki/etcd.pem
value: /etc/kubernetes/auxiliary-etcd-1/pki/etcd-client.pem
- name: ETCD_KEY_FILE
value: /etc/etcd-pki/etcd-key.pem
value: /etc/kubernetes/auxiliary-etcd-1/pki/etcd-client-key.pem
- name: ETCD_PEER_TRUSTED_CA_FILE
value: /etc/etcd-pki/cluster-ca.pem
value: /etc/kubernetes/auxiliary-etcd-1/pki/peer-ca.pem
- name: ETCD_PEER_CERT_FILE
value: /etc/etcd-pki/etcd.pem
value: /etc/kubernetes/auxiliary-etcd-1/pki/etcd-peer.pem
- name: ETCD_PEER_KEY_FILE
value: /etc/etcd-pki/etcd-key.pem
value: /etc/kubernetes/auxiliary-etcd-1/pki/etcd-peer-key.pem
- name: ETCD_ADVERTISE_CLIENT_URLS
value: https://$(ETCD_NAME):22379
value: https://{{ config['Node']['hostname'] }}:22379
- name: ETCD_INITIAL_ADVERTISE_PEER_URLS
value: https://$(ETCD_NAME):22380
value: https://{{ config['Node']['hostname'] }}:22380
- name: ETCD_INITIAL_CLUSTER_TOKEN
value: promenade-kube-etcd-token
- name: ETCD_LISTEN_CLIENT_URLS
value: https://0.0.0.0:22379
- name: ETCD_LISTEN_PEER_URLS
value: https://0.0.0.0:22380
{%- for env_name, env_value in etcd['env'].items() %}
- name: {{ env_name }}
value: {{ env_value }}
{%- endfor %}
- name: ETCD_INITIAL_CLUSTER_STATE
value: {{ config['Etcd']['initial_cluster_state'] }}
- name: ETCD_INITIAL_CLUSTER
value: {{ config['Etcd']['initial_cluster'] | join(',') }}
ports:
- name: client
containerPort: 22379
@ -113,8 +113,8 @@ spec:
volumeMounts:
- name: data-1
mountPath: /var/lib/auxiliary-etcd-1
- name: pki
mountPath: /etc/etcd-pki
- name: pki-1
mountPath: /etc/kubernetes/auxiliary-etcd-1/pki
readOnly: true
- name: cluster-monitor
image: quay.io/coreos/etcd:v3.0.17
@ -124,8 +124,8 @@ spec:
- |-
set -x
while true; do
if [ $(etcdctl member list | grep -v unstarted | wc -l || echo 0) -ge {{ masters | length }} ]; then
{%- for master in masters %}
if [ $(etcdctl member list | grep -v unstarted | wc -l || echo 0) -ge {{ config['Masters']['nodes'] | length }} ]; then
{%- for master in config['Masters']['nodes'] %}
etcdctl member add {{ master['hostname'] }} --peer-urls https://{{ master['hostname'] }}:2380
{%- endfor %}
break
@ -133,11 +133,16 @@ spec:
done
while true; do
sleep 5
if [ $(etcdctl member list | grep -v unstarted | wc -l || echo 0) -eq {{ 2 + (masters | length) }} ]; then
if [ $(etcdctl member list | grep -v unstarted | wc -l || echo 0) -eq {{ 2 + (config['Masters']['nodes'] | length) }} ]; then
etcdctl member remove $(etcdctl member list | grep auxiliary-etcd-1 | cut -d , -f 1)
etcdctl member remove $(etcdctl member list | grep auxiliary-etcd-0 | cut -d , -f 1)
sleep 60
rm -rf /var/lib/auxiliary-etcd-0 /var/lib/auxiliary-etcd-1 /etc/kubernetes/kubelet/manifests/auxiliary-etcd.yaml
rm -rf \
/var/lib/auxiliary-etcd-0 \
/var/lib/auxiliary-etcd-1 \
/etc/kubernetes/auxiliary-etcd-0 \
/etc/kubernetes/auxiliary-etcd-1 \
/etc/kubernetes/kubelet/manifests/auxiliary-etcd.yaml
sleep 10000
fi
done
@ -150,16 +155,16 @@ spec:
- name: ETCDCTL_API
value: "3"
- name: ETCDCTL_CACERT
value: /etc/etcd-pki/cluster-ca.pem
value: /etc/kubernetes/etcd/pki/client-ca.pem
- name: ETCDCTL_CERT
value: /etc/etcd-pki/etcd.pem
value: /etc/kubernetes/etcd/pki/etcd-client.pem
- name: ETCDCTL_ENDPOINTS
value: https://127.0.0.1:12379
value: https://{{ config['Node']['ip'] }}:2379
- name: ETCDCTL_KEY
value: /etc/etcd-pki/etcd-key.pem
value: /etc/kubernetes/etcd/pki/etcd-client-key.pem
volumeMounts:
- name: pki
mountPath: /etc/etcd-pki
mountPath: /etc/kubernetes/etcd/pki
readOnly: true
- name: manifests
mountPath: /etc/kubernetes/kubelet/manifests
@ -175,6 +180,12 @@ spec:
- name: pki
hostPath:
path: /etc/kubernetes/etcd/pki
- name: pki-0
hostPath:
path: /etc/kubernetes/auxiliary-etcd-0/pki
- name: pki-1
hostPath:
path: /etc/kubernetes/auxiliary-etcd-1/pki
- name: manifests
hostPath:
path: /etc/kubernetes/kubelet/manifests

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateKey', name='admin')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='Certificate', name='admin')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateAuthority', name='cluster')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateKey', name='apiserver')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='Certificate', name='apiserver')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateAuthority', name='cluster')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateAuthority', name='etcd-client')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateKey', name='etcd-apiserver-client')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='Certificate', name='etcd-apiserver-client')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='PublicKey', name='service-account')['data'] }}

View File

@ -1,16 +0,0 @@
{
"CN": "admin",
"hosts": [
"{{ current_node['hostname'] }}",
"{{ current_node['ip'] }}"
],
"names": [
{
"O": "system:masters"
}
],
"key": {
"algo": "rsa",
"size": 2048
}
}

View File

@ -1,17 +0,0 @@
{
"CN": "system:kube-apiserver",
"hosts": [
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster.local",
"127.0.0.1",
"{{ current_node['hostname'] }}",
"{{ current_node['ip'] }}",
"{{ network.kube_service_ip }}"
],
"key": {
"algo": "rsa",
"size": 2048
}
}

View File

@ -1,11 +0,0 @@
{
"CN": "system:kube-controller-manager",
"hosts": [
"{{ current_node['hostname'] }}",
"{{ current_node['ip'] }}"
],
"key": {
"algo": "rsa",
"size": 2048
}
}

View File

@ -1,17 +0,0 @@
{
"CN": "etcd:{{ current_node['hostname'] }}",
"hosts": [
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster.local",
"127.0.0.1",
"{{ current_node['hostname'] }}",
"{{ current_node['ip'] }}",
"{{ network.kube_service_ip }}"
],
"key": {
"algo": "rsa",
"size": 2048
}
}

View File

@ -1,11 +0,0 @@
{
"CN": "system:kube-scheduler",
"hosts": [
"{{ current_node['hostname'] }}",
"{{ current_node['ip'] }}"
],
"key": {
"algo": "rsa",
"size": 2048
}
}

View File

@ -3,7 +3,7 @@ apiVersion: v1
clusters:
- cluster:
server: https://kubernetes
certificate-authority: /etc/kubernetes/pki/cluster-ca.pem
certificate-authority: /etc/kubernetes/controller-manager/pki/cluster-ca.pem
name: kubernetes
contexts:
- context:
@ -16,5 +16,5 @@ preferences: {}
users:
- name: controller-manager
user:
client-certificate: /etc/kubernetes/pki/controller-manager.pem
client-key: /etc/kubernetes/pki/controller-manager-key.pem
client-certificate: /etc/kubernetes/controller-manager/pki/controller-manager.pem
client-key: /etc/kubernetes/controller-manager/pki/controller-manager-key.pem

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateAuthorityKey', name='cluster')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateAuthority', name='cluster')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateKey', name='controller-manager')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='Certificate', name='controller-manager')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='PrivateKey', name='service-account')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateAuthority', name='etcd-client')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateKey', name='etcd-client')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='Certificate', name='etcd-client')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateKey', name='etcd-peer')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='Certificate', name='etcd-peer')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateAuthority', name='etcd-peer')['data'] }}

View File

@ -13,11 +13,11 @@ spec:
hostNetwork: true
containers:
- name: kube-apiserver
image: gcr.io/google_containers/hyperkube-amd64:v1.6.2
image: gcr.io/google_containers/hyperkube-amd64:v1.6.4
command:
- /hyperkube
- apiserver
- --advertise-address={{ current_node.ip }}
- --advertise-address={{ config['Node']['ip'] }}
- --authorization-mode=RBAC
- --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds
- --anonymous-auth=false
@ -27,12 +27,12 @@ spec:
- --secure-port=443
- --allow-privileged=true
- --etcd-servers=https://kubernetes:2379
- --etcd-cafile=/etc/kubernetes/pki/cluster-ca.pem
- --etcd-certfile=/etc/kubernetes/pki/apiserver.pem
- --etcd-keyfile=/etc/kubernetes/pki/apiserver-key.pem
- --service-cluster-ip-range={{ network.service_ip_cidr }}
- --etcd-cafile=/etc/kubernetes/pki/etcd-client-ca.pem
- --etcd-certfile=/etc/kubernetes/pki/etcd-client.pem
- --etcd-keyfile=/etc/kubernetes/pki/etcd-client-key.pem
- --service-cluster-ip-range={{ config['Network']['service_ip_cidr'] }}
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --service-account-key-file=/etc/kubernetes/pki/sa.pem
- --service-account-key-file=/etc/kubernetes/pki/service-account.pub
- --tls-cert-file=/etc/kubernetes/pki/apiserver.pem
- --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem
- --v=5

View File

@ -14,25 +14,25 @@ spec:
dnsPolicy: Default # Don't use cluster DNS.
containers:
- name: kube-controller-manager
image: gcr.io/google_containers/hyperkube-amd64:v1.6.2
image: gcr.io/google_containers/hyperkube-amd64:v1.6.4
command:
- ./hyperkube
- controller-manager
- --allocate-node-cidrs=true
- --cluster-cidr={{ network.pod_ip_cidr }}
- --cluster-signing-cert-file=/etc/kubernetes/pki/cluster-ca.pem
- --cluster-signing-key-file=/etc/kubernetes/pki/cluster-ca-key.pem
- --cluster-cidr={{ config['Network']['pod_ip_cidr'] }}
- --cluster-signing-cert-file=/etc/kubernetes/controller-manager/pki/cluster-ca.pem
- --cluster-signing-key-file=/etc/kubernetes/controller-manager/pki/cluster-ca-key.pem
- --configure-cloud-routes=false
- --leader-elect=true
- --kubeconfig=/etc/kubernetes/kubeconfig.yaml
- --root-ca-file=/etc/kubernetes/pki/cluster-ca.pem
- --service-account-private-key-file=/etc/kubernetes/pki/sa-key.pem
- --service-cluster-ip-range={{ network.service_ip_cidr }}
- --kubeconfig=/etc/kubernetes/controller-manager/kubeconfig.yaml
- --root-ca-file=/etc/kubernetes/controller-manager/pki/cluster-ca.pem
- --service-account-private-key-file=/etc/kubernetes/controller-manager/pki/service-account.key
- --service-cluster-ip-range={{ config['Network']['service_ip_cidr'] }}
- --use-service-account-credentials=true
- --v=5
volumeMounts:
- name: config
mountPath: /etc/kubernetes
mountPath: /etc/kubernetes/controller-manager
readOnly: true
volumes:
- name: config

View File

@ -24,17 +24,17 @@ spec:
- name: ETCD_DATA_DIR
value: /var/lib/kube-etcd
- name: ETCD_TRUSTED_CA_FILE
value: /etc/etcd-pki/cluster-ca.pem
value: /etc/kubernetes/etcd/pki/client-ca.pem
- name: ETCD_CERT_FILE
value: /etc/etcd-pki/etcd.pem
value: /etc/kubernetes/etcd/pki/etcd-client.pem
- name: ETCD_KEY_FILE
value: /etc/etcd-pki/etcd-key.pem
value: /etc/kubernetes/etcd/pki/etcd-client-key.pem
- name: ETCD_PEER_TRUSTED_CA_FILE
value: /etc/etcd-pki/cluster-ca.pem
value: /etc/kubernetes/etcd/pki/peer-ca.pem
- name: ETCD_PEER_CERT_FILE
value: /etc/etcd-pki/etcd.pem
value: /etc/kubernetes/etcd/pki/etcd-peer.pem
- name: ETCD_PEER_KEY_FILE
value: /etc/etcd-pki/etcd-key.pem
value: /etc/kubernetes/etcd/pki/etcd-peer-key.pem
- name: ETCD_ADVERTISE_CLIENT_URLS
value: https://$(ETCD_NAME):2379
- name: ETCD_INITIAL_ADVERTISE_PEER_URLS
@ -45,10 +45,10 @@ spec:
value: https://0.0.0.0:2379
- name: ETCD_LISTEN_PEER_URLS
value: https://0.0.0.0:2380
{%- for env_name, env_value in etcd['env'].items() %}
- name: {{ env_name }}
value: {{ env_value }}
{%- endfor %}
- name: ETCD_INITIAL_CLUSTER_STATE
value: {{ config['Etcd']['initial_cluster_state'] }}
- name: ETCD_INITIAL_CLUSTER
value: {{ config['Etcd']['initial_cluster'] | join(',') }}
ports:
- name: client
containerPort: 2379
@ -58,7 +58,7 @@ spec:
- name: data
mountPath: /var/lib/kube-etcd
- name: pki
mountPath: /etc/etcd-pki
mountPath: /etc/kubernetes/etcd/pki
volumes:
- name: data
hostPath:

View File

@ -13,16 +13,16 @@ spec:
hostNetwork: true
containers:
- name: kube-scheduler
image: gcr.io/google_containers/hyperkube-amd64:v1.6.2
image: gcr.io/google_containers/hyperkube-amd64:v1.6.4
command:
- ./hyperkube
- scheduler
- --leader-elect=true
- --kubeconfig=/etc/kubernetes/kubeconfig.yaml
- --kubeconfig=/etc/kubernetes/scheduler/kubeconfig.yaml
- --v=5
volumeMounts:
- name: config
mountPath: /etc/kubernetes
mountPath: /etc/kubernetes/scheduler
volumes:
- name: config
hostPath:

View File

@ -3,7 +3,7 @@ apiVersion: v1
clusters:
- cluster:
server: https://kubernetes
certificate-authority: /etc/kubernetes/pki/cluster-ca.pem
certificate-authority: /etc/kubernetes/scheduler/pki/cluster-ca.pem
name: kubernetes
contexts:
- context:
@ -16,5 +16,5 @@ preferences: {}
users:
- name: scheduler
user:
client-certificate: /etc/kubernetes/pki/scheduler.pem
client-key: /etc/kubernetes/pki/scheduler-key.pem
client-certificate: /etc/kubernetes/scheduler/pki/scheduler.pem
client-key: /etc/kubernetes/scheduler/pki/scheduler-key.pem

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateAuthority', name='cluster')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='CertificateKey', name='scheduler')['data'] }}

View File

@ -0,0 +1 @@
{{ config.get(kind='Certificate', name='scheduler')['data'] }}