Merge "Add deploy promenade gate check"

This commit is contained in:
Zuul 2019-06-12 18:13:25 +00:00 committed by Gerrit Code Review
commit fefd664cd8
12 changed files with 1502 additions and 2 deletions

View File

@ -20,6 +20,7 @@
- airship-promenade-chart-build-gate
- airship-promenade-chart-build-latest-htk
- airship-promenade-unit-py35
- airship-promenade-genesis-gate
gate:
jobs:
@ -29,6 +30,7 @@
- airship-promenade-doc-build
- airship-promenade-chart-build-gate
- airship-promenade-unit-py35
- airship-promenade-genesis-gate
post:
jobs:
@ -41,6 +43,14 @@
- name: primary
label: ubuntu-xenial
- job:
name: airship-promenade-genesis-gate
description: |
Deploy airship promenade genesis
run: tools/zuul/playbooks/deploy-promenade.yaml
timeout: 3600
nodeset: airship-promenade-single-node
- job:
name: airship-promenade-lint-ws
description: |

18
examples/gate/Docker.yaml Normal file
View File

@ -0,0 +1,18 @@
---
# Docker daemon configuration for the single-node promenade gate.
schema: promenade/Docker/v1
metadata:
  schema: metadata/Document/v1
  name: docker
  layeringDefinition:
    abstract: false
    layer: site
  storagePolicy: cleartext
data:
  config:
    # Allow pulls from the gate-local registry without TLS.
    insecure-registries:
      - registry:5000
    # Keep containers running across dockerd restarts.
    live-restore: true
    max-concurrent-downloads: 10
    # Strongly protect the docker daemon from the OOM killer.
    oom-score-adjust: -999
    storage-driver: overlay2
...

View File

@ -0,0 +1,19 @@
---
# Encryption-at-rest policy for Kubernetes secrets stored in etcd;
# consumed by the Genesis and kubernetes-apiserver documents via
# Deckhand substitution (path .etcd).
schema: promenade/EncryptionPolicy/v1
metadata:
  schema: metadata/Document/v1
  name: encryption-policy
  layeringDefinition:
    abstract: false
    layer: site
  storagePolicy: cleartext
data:
  etcd:
    - resources:
        - 'secrets'
      providers:
        - secretbox:
            keys:
              - name: key1
                # NOTE(review): gate-only example key, deliberately committed
                # in cleartext; never reuse this value outside the test
                # environment.
                secret: Xw2UcbjILTJM6QiFZ0WPSbUvjtoT8OJC/Nl8qqYWjGk=
...

View File

@ -0,0 +1,76 @@
---
# Genesis (first-node bootstrap) configuration for the single-node gate.
schema: promenade/Genesis/v1
metadata:
  schema: metadata/Document/v1
  name: genesis
  layeringDefinition:
    abstract: false
    layer: site
  storagePolicy: cleartext
  substitutions:
    # Inject the etcd encryption scheme from the EncryptionPolicy document
    # into the apiserver configuration.
    - src:
        schema: promenade/EncryptionPolicy/v1
        name: encryption-policy
        path: .etcd
      dest:
        path: .apiserver.encryption
data:
  # Identity of the genesis node; rewritten by tools/dev-build.sh when
  # 'replace' is requested.
  hostname: n0
  ip: 192.168.77.10
  external_ip: 192.168.77.10
  apiserver:
    arguments:
      - --authorization-mode=Node,RBAC
      # NOTE(review): the original list repeated DefaultStorageClass and
      # ResourceQuota; the duplicates are redundant and have been removed.
      - --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,EventRateLimit,PersistentVolumeLabel
      - --service-cluster-ip-range=10.96.0.0/16
      - --endpoint-reconciler-type=lease
      - --feature-gates=PodShareProcessNamespace=true
      # NOTE(mark-burnett): This flag is removed in Kubernetes 1.11
      - --repair-malformed-updates=false
      - --admission-control-config-file=/etc/kubernetes/apiserver/acconfig.yaml
      - --experimental-encryption-provider-config=/etc/kubernetes/apiserver/encryption_provider.yaml
      - --v=3
  armada:
    target_manifest: cluster-bootstrap
  labels:
    dynamic:
      - calico-etcd=enabled
      - coredns=enabled
      - kubernetes-apiserver=enabled
      - kubernetes-controller-manager=enabled
      - kubernetes-etcd=enabled
      - kubernetes-scheduler=enabled
      - promenade-genesis=enabled
      - ucp-control-plane=enabled
  images:
    armada: quay.io/airshipit/armada:master
    helm:
      tiller: gcr.io/kubernetes-helm/tiller:v2.14.0
    kubernetes:
      apiserver: gcr.io/google_containers/hyperkube-amd64:v1.11.6
      controller-manager: gcr.io/google_containers/hyperkube-amd64:v1.11.6
      etcd: quay.io/coreos/etcd:v3.2.18
      scheduler: gcr.io/google_containers/hyperkube-amd64:v1.11.6
  files:
    - path: /var/lib/anchor/calico-etcd-bootstrap
      content: "# placeholder for triggering calico etcd bootstrapping"
      mode: 0644
    # NOTE(mark-burnett): These are referenced by the apiserver arguments above.
    - path: /etc/genesis/apiserver/acconfig.yaml
      mode: 0444
      content: |
        kind: AdmissionConfiguration
        apiVersion: apiserver.k8s.io/v1alpha1
        plugins:
          - name: EventRateLimit
            path: eventconfig.yaml
    - path: /etc/genesis/apiserver/eventconfig.yaml
      mode: 0444
      content: |
        kind: Configuration
        apiVersion: eventratelimit.admission.k8s.io/v1alpha1
        limits:
          - type: Server
            qps: 1000
            burst: 10000
...

View File

@ -0,0 +1,91 @@
---
# Host-level files, images, and packages laid down before Kubernetes starts.
schema: promenade/HostSystem/v1
metadata:
  schema: metadata/Document/v1
  name: host-system
  layeringDefinition:
    abstract: false
    layer: site
  storagePolicy: cleartext
data:
  files:
    # Extract the hyperkube binary from the container image onto the host,
    # then expose kubelet/kubectl as symlinks to it.
    - path: /opt/kubernetes/bin/hyperkube
      docker_image: gcr.io/google_containers/hyperkube-amd64:v1.11.6
      file_path: /hyperkube
      mode: 0555
    - path: /opt/kubernetes/bin/kubelet
      symlink: /opt/kubernetes/bin/hyperkube
      mode: 0555
    - path: /usr/local/bin/kubectl
      symlink: /opt/kubernetes/bin/hyperkube
      mode: 0555
    # Rotate container JSON logs so the gate node disk does not fill up.
    - path: /etc/logrotate.d/json-logrotate
      mode: 0444
      content: |-
        /var/lib/docker/containers/*/*-json.log
        {
          compress
          copytruncate
          create 0644 root root
          daily
          dateext
          dateformat -%Y%m%d-%s
          maxsize 10M
          missingok
          notifempty
          su root root
          rotate 1
        }
    - path: /etc/profile.d/kubeconfig.sh
      mode: 0744
      content: |-
        export KUBECONFIG=/etc/kubernetes/admin/kubeconfig.yaml
  images:
    monitoring_image: busybox:1.28.3
    haproxy: haproxy:1.8.3
    helm:
      helm: lachlanevenson/k8s-helm:v2.14.0
  packages:
    repositories:
      - deb http://apt.dockerproject.org/repo ubuntu-xenial main
    # NOTE(review): armored Docker release key copied verbatim; confirm a
    # blank line after the BEGIN header was not dropped by the diff renderer.
    keys:
      - |-
        -----BEGIN PGP PUBLIC KEY BLOCK-----
        mQINBFWln24BEADrBl5p99uKh8+rpvqJ48u4eTtjeXAWbslJotmC/CakbNSqOb9o
        ddfzRvGVeJVERt/Q/mlvEqgnyTQy+e6oEYN2Y2kqXceUhXagThnqCoxcEJ3+KM4R
        mYdoe/BJ/J/6rHOjq7Omk24z2qB3RU1uAv57iY5VGw5p45uZB4C4pNNsBJXoCvPn
        TGAs/7IrekFZDDgVraPx/hdiwopQ8NltSfZCyu/jPpWFK28TR8yfVlzYFwibj5WK
        dHM7ZTqlA1tHIG+agyPf3Rae0jPMsHR6q+arXVwMccyOi+ULU0z8mHUJ3iEMIrpT
        X+80KaN/ZjibfsBOCjcfiJSB/acn4nxQQgNZigna32velafhQivsNREFeJpzENiG
        HOoyC6qVeOgKrRiKxzymj0FIMLru/iFF5pSWcBQB7PYlt8J0G80lAcPr6VCiN+4c
        NKv03SdvA69dCOj79PuO9IIvQsJXsSq96HB+TeEmmL+xSdpGtGdCJHHM1fDeCqkZ
        hT+RtBGQL2SEdWjxbF43oQopocT8cHvyX6Zaltn0svoGs+wX3Z/H6/8P5anog43U
        65c0A+64Jj00rNDr8j31izhtQMRo892kGeQAaaxg4Pz6HnS7hRC+cOMHUU4HA7iM
        zHrouAdYeTZeZEQOA7SxtCME9ZnGwe2grxPXh/U/80WJGkzLFNcTKdv+rwARAQAB
        tDdEb2NrZXIgUmVsZWFzZSBUb29sIChyZWxlYXNlZG9ja2VyKSA8ZG9ja2VyQGRv
        Y2tlci5jb20+iQI4BBMBAgAiBQJVpZ9uAhsvBgsJCAcDAgYVCAIJCgsEFgIDAQIe
        AQIXgAAKCRD3YiFXLFJgnbRfEAC9Uai7Rv20QIDlDogRzd+Vebg4ahyoUdj0CH+n
        Ak40RIoq6G26u1e+sdgjpCa8jF6vrx+smpgd1HeJdmpahUX0XN3X9f9qU9oj9A4I
        1WDalRWJh+tP5WNv2ySy6AwcP9QnjuBMRTnTK27pk1sEMg9oJHK5p+ts8hlSC4Sl
        uyMKH5NMVy9c+A9yqq9NF6M6d6/ehKfBFFLG9BX+XLBATvf1ZemGVHQusCQebTGv
        0C0V9yqtdPdRWVIEhHxyNHATaVYOafTj/EF0lDxLl6zDT6trRV5n9F1VCEh4Aal8
        L5MxVPcIZVO7NHT2EkQgn8CvWjV3oKl2GopZF8V4XdJRl90U/WDv/6cmfI08GkzD
        YBHhS8ULWRFwGKobsSTyIvnbk4NtKdnTGyTJCQ8+6i52s+C54PiNgfj2ieNn6oOR
        7d+bNCcG1CdOYY+ZXVOcsjl73UYvtJrO0Rl/NpYERkZ5d/tzw4jZ6FCXgggA/Zxc
        jk6Y1ZvIm8Mt8wLRFH9Nww+FVsCtaCXJLP8DlJLASMD9rl5QS9Ku3u7ZNrr5HWXP
        HXITX660jglyshch6CWeiUATqjIAzkEQom/kEnOrvJAtkypRJ59vYQOedZ1sFVEL
        MXg2UCkD/FwojfnVtjzYaTCeGwFQeqzHmM241iuOmBYPeyTY5veF49aBJA1gEJOQ
        TvBR8Q==
        =Fm3p
        -----END PGP PUBLIC KEY BLOCK-----
    additional:
      - curl
      - jq
    required:
      docker: docker.io
      socat: socat
  validation:
    pod_logs:
      image: busybox:1.28.3
...

View File

@ -0,0 +1,23 @@
---
# Kubelet command-line configuration shared by all nodes in the gate.
schema: promenade/Kubelet/v1
metadata:
  schema: metadata/Document/v1
  name: kubelet
  layeringDefinition:
    abstract: false
    layer: site
  storagePolicy: cleartext
data:
  arguments:
    - --cni-bin-dir=/opt/cni/bin
    - --cni-conf-dir=/etc/cni/net.d
    # -1 removes the cap on the pod grace period during evictions.
    - --eviction-max-pod-grace-period=-1
    - --network-plugin=cni
    - --node-status-update-frequency=5s
    - --serialize-image-pulls=false
    - --anonymous-auth=false
    - --feature-gates=PodShareProcessNamespace=true
    - --v=3
  images:
    pause: gcr.io/google_containers/pause-amd64:3.0
...

View File

@ -0,0 +1,43 @@
---
# Cluster networking layout: DNS, service/pod CIDRs, and haproxy ports.
schema: promenade/KubernetesNetwork/v1
metadata:
  schema: metadata/Document/v1
  name: kubernetes-network
  layeringDefinition:
    abstract: false
    layer: site
  storagePolicy: cleartext
data:
  dns:
    cluster_domain: cluster.local
    service_ip: 10.96.0.10
    # Names that must resolve before bootstrap is considered healthy.
    bootstrap_validation_checks:
      - calico-etcd.kube-system.svc.cluster.local
      - google.com
      - kubernetes-etcd.kube-system.svc.cluster.local
      - kubernetes.default.svc.cluster.local
    upstream_servers:
      - 8.8.8.8
      - 8.8.4.4
  kubernetes:
    apiserver_port: 6443
    # Local haproxy front-end that forwards to the apiserver.
    haproxy_port: 6553
    pod_cidr: 10.97.0.0/16
    service_cidr: 10.96.0.0/16
    service_ip: 10.96.0.1
  etcd:
    container_port: 2379
    haproxy_port: 2378
  hosts_entries:
    - ip: 192.168.77.1
      names:
        - registry
  # proxy:
  #   url: http://proxy.example.com:8080
  #   additional_no_proxy:
  #     - 10.0.1.1
...

View File

@ -0,0 +1,11 @@
---
# Deckhand layering policy: precedence order for document layers,
# most general first.
schema: deckhand/LayeringPolicy/v1
metadata:
  schema: metadata/Control/v1
  name: layering-policy
data:
  layerOrder:
    - global
    - type
    - site
...

View File

@ -0,0 +1,128 @@
---
schema: promenade/PKICatalog/v1
metadata:
schema: metadata/Document/v1
name: cluster-certificates
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
certificate_authorities:
kubernetes:
description: CA for Kubernetes components
certificates:
- document_name: apiserver
description: Service certificate for Kubernetes apiserver
common_name: apiserver
hosts:
- localhost
- 127.0.0.1
- 10.96.0.1
kubernetes_service_names:
- kubernetes.default.svc.cluster.local
- document_name: kubelet-genesis
common_name: system:node:n0
hosts:
- n0
- 192.168.77.10
groups:
- system:nodes
- document_name: kubelet-n0
common_name: system:node:n0
hosts:
- n0
- 192.168.77.10
groups:
- system:nodes
- document_name: scheduler
description: Service certificate for Kubernetes scheduler
common_name: system:kube-scheduler
- document_name: controller-manager
description: certificate for controller-manager
common_name: system:kube-controller-manager
- document_name: admin
common_name: admin
groups:
- system:masters
- document_name: armada
common_name: armada
groups:
- system:masters
kubernetes-etcd:
description: Certificates for Kubernetes's etcd servers
certificates:
- document_name: apiserver-etcd
description: etcd client certificate for use by Kubernetes apiserver
common_name: apiserver
# NOTE(mark-burnett): hosts not required for client certificates
- document_name: kubernetes-etcd-anchor
description: anchor
common_name: anchor
- document_name: kubernetes-etcd-genesis
common_name: kubernetes-etcd-genesis
hosts:
- n0
- 192.168.77.10
- 127.0.0.1
- localhost
- kubernetes-etcd.kube-system.svc.cluster.local
- document_name: kubernetes-etcd-n0
common_name: kubernetes-etcd-n0
hosts:
- n0
- 192.168.77.10
- 127.0.0.1
- localhost
- kubernetes-etcd.kube-system.svc.cluster.local
kubernetes-etcd-peer:
certificates:
- document_name: kubernetes-etcd-genesis-peer
common_name: kubernetes-etcd-genesis-peer
hosts:
- n0
- 192.168.77.10
- 127.0.0.1
- localhost
- kubernetes-etcd.kube-system.svc.cluster.local
- document_name: kubernetes-etcd-n0-peer
common_name: kubernetes-etcd-n0-peer
hosts:
- n0
- 192.168.77.10
- 127.0.0.1
- localhost
- kubernetes-etcd.kube-system.svc.cluster.local
calico-etcd:
description: Certificates for Calico etcd client traffic
certificates:
- document_name: calico-etcd-anchor
description: anchor
common_name: anchor
- document_name: calico-etcd-n0
common_name: calico-etcd-n0
hosts:
- n0
- 192.168.77.10
- 127.0.0.1
- localhost
- 10.96.232.136
- document_name: calico-node
common_name: calcico-node
calico-etcd-peer:
description: Certificates for Calico etcd clients
certificates:
- document_name: calico-etcd-n0-peer
common_name: calico-etcd-n0-peer
hosts:
- n0
- 192.168.77.10
- 127.0.0.1
- localhost
- 10.96.232.136
- document_name: calico-node-peer
common_name: calcico-node-peer
keypairs:
- name: service-account
description: Service account signing key for use by Kubernetes controller-manager.
...

View File

@ -0,0 +1,989 @@
---
# Top-level Armada manifest: chart groups are applied in the order listed;
# Helm releases are named <release_prefix>-<release>.
schema: armada/Manifest/v1
metadata:
  schema: metadata/Document/v1
  name: cluster-bootstrap
  layeringDefinition:
    abstract: false
    layer: site
  storagePolicy: cleartext
data:
  release_prefix: ucp
  chart_groups:
    - kubernetes-proxy
    - container-networking
    - dns
    - kubernetes
    - ucp-services
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-proxy
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
description: Kubernetes proxy
sequenced: true
chart_group:
- kubernetes-proxy
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: container-networking
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
description: Container networking via Calico
sequenced: true
chart_group:
- calico-etcd
- calico
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: dns
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
description: Cluster DNS
chart_group:
- coredns
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: kubernetes
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
description: Kubernetes components
sequenced: true
chart_group:
- haproxy
- kubernetes-etcd
- kubernetes-apiserver
- kubernetes-controller-manager
- kubernetes-scheduler
- tiller
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: ucp-services
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
description: Airship platform components
sequenced: true
chart_group:
- promenade
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: helm-toolkit
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
chart_name: helm-toolkit
release: helm-toolkit
namespace: helm-toolkit
wait:
timeout: 600
upgrade:
no_hooks: true
values: {}
source:
type: git
location: https://opendev.org/openstack/openstack-helm-infra.git
subpath: helm-toolkit
reference: d6996b8004db35acad7d51412b0b3216189e745f
dependencies: []
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: helm-toolkit-tiller
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
chart_name: helm-toolkit
release: helm-toolkit
namespace: helm-toolkit
wait:
timeout: 600
upgrade:
no_hooks: true
values: {}
source:
type: git
location: https://opendev.org/openstack/openstack-helm.git
subpath: helm-toolkit
reference: f902cd14fac7de4c4c9f7d019191268a6b4e9601
dependencies: []
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: infra-helm-toolkit
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
chart_name: infra-helm-toolkit
release: infra-helm-toolkit
namespace: infra-helm-toolkit
wait:
timeout: 600
upgrade:
no_hooks: true
values: {}
source:
type: git
location: https://opendev.org/openstack/openstack-helm-infra.git
subpath: helm-toolkit
reference: 681dee71b7befd199509b17852b3385d359a15a5
dependencies: []
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-proxy
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
chart_name: proxy
release: kubernetes-proxy
namespace: kube-system
wait:
timeout: 600
labels:
release_group: ucp-kubernetes-proxy
upgrade:
no_hooks: true
values:
images:
tags:
proxy: gcr.io/google_containers/hyperkube-amd64:v1.11.6
network:
kubernetes_netloc: 127.0.0.1:6553
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: proxy
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: calico-etcd
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
substitutions:
-
src:
schema: deckhand/CertificateAuthority/v1
name: calico-etcd
path: .
dest:
path: '.values.secrets.tls.client.ca'
-
src:
schema: deckhand/CertificateAuthority/v1
name: calico-etcd-peer
path: .
dest:
path: '.values.secrets.tls.peer.ca'
-
src:
schema: deckhand/Certificate/v1
name: calico-etcd-anchor
path: .
dest:
path: '.values.secrets.anchor.tls.cert'
-
src:
schema: deckhand/CertificateKey/v1
name: calico-etcd-anchor
path: .
dest:
path: '.values.secrets.anchor.tls.key'
-
src:
schema: deckhand/Certificate/v1
name: calico-etcd-n0
path: .
dest:
path: '.values.nodes[0].tls.client.cert'
-
src:
schema: deckhand/CertificateKey/v1
name: calico-etcd-n0
path: .
dest:
path: '.values.nodes[0].tls.client.key'
-
src:
schema: deckhand/Certificate/v1
name: calico-etcd-n0-peer
path: .
dest:
path: '.values.nodes[0].tls.peer.cert'
-
src:
schema: deckhand/CertificateKey/v1
name: calico-etcd-n0-peer
path: .
dest:
path: '.values.nodes[0].tls.peer.key'
data:
chart_name: etcd
release: calico-etcd
namespace: kube-system
test:
enabled: false
wait:
timeout: 600
labels:
release_group: ucp-calico-etcd
upgrade:
no_hooks: true
values:
anchor:
etcdctl_endpoint: 10.96.232.136
labels:
anchor:
node_selector_key: calico-etcd
node_selector_value: enabled
secrets:
anchor:
tls:
cert: placeholder
key: placeholder
tls:
client:
ca: placeholder
peer:
ca: placeholder
etcd:
host_data_path: /var/lib/etcd/calico
host_etc_path: /etc/etcd/calico
bootstrapping:
enabled: true
host_directory: /var/lib/anchor
filename: calico-etcd-bootstrap
images:
tags:
etcd: quay.io/coreos/etcd:v3.2.18
etcdctl: quay.io/coreos/etcd:v3.2.18
nodes:
- name: n0
tls:
client:
cert: placeholder
key: placeholder
peer:
cert: placeholder
key: placeholder
service:
name: calico-etcd
ip: 10.96.232.136
network:
service_client:
name: service_client
port: 6666
target_port: 6666
service_peer:
name: service_peer
port: 6667
target_port: 6667
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: etcd
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: calico
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
substitutions:
-
src:
schema: deckhand/CertificateAuthority/v1
name: calico-etcd
path: .
dest:
path: '.values.endpoints.etcd.auth.client.tls.ca'
-
src:
schema: deckhand/Certificate/v1
name: calico-node
path: .
dest:
path: '.values.endpoints.etcd.auth.client.tls.crt'
-
src:
schema: deckhand/CertificateKey/v1
name: calico-node
path: .
dest:
path: '.values.endpoints.etcd.auth.client.tls.key'
data:
chart_name: calico
release: calico
namespace: kube-system
wait:
timeout: 600
labels:
release_group: ucp-calico
upgrade:
no_hooks: true
values:
conf:
cni_network_config:
name: k8s-pod-network
cniVersion: 0.1.0
type: calico
etcd_endpoints: __ETCD_ENDPOINTS__
etcd_ca_cert_file: /etc/calico/pki/ca
etcd_cert_file: /etc/calico/pki/crt
etcd_key_file: /etc/calico/pki/key
log_level: debug
mtu: 1500
ipam:
type: calico-ipam
policy:
type: k8s
k8s_api_root: https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__
k8s_auth_token: __SERVICEACCOUNT_TOKEN__
policy_controller:
K8S_API: "https://10.96.0.1:443"
node:
CALICO_STARTUP_LOGLEVEL: INFO
CLUSTER_TYPE:
- k8s
- bgp
IP_AUTODETECTION_METHOD: interface=ens3
WAIT_FOR_STORAGE: "true"
endpoints:
etcd:
hosts:
default: calico-etcd
host_fqdn_override:
default: 10.96.232.136
scheme:
default: https
networking:
podSubnet: 10.97.0.0/16
mtu: 1500
images:
tags:
calico_node: quay.io/calico/node:v2.6.5
calico_cni: quay.io/calico/cni:v1.11.2
calico_ctl: quay.io/calico/ctl:v1.6.2
calico_settings: quay.io/calico/ctl:v1.6.2
calico_kube_policy_controller: quay.io/calico/kube-policy-controller:v0.7.0
dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.2.1
manifests:
daemonset_calico_etcd: false
job_image_repo_sync: false
service_calico_etcd: false
source:
type: git
location: https://opendev.org/openstack/openstack-helm-infra.git
reference: 681dee71b7befd199509b17852b3385d359a15a5
subpath: calico
dependencies:
- infra-helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: coredns
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
chart_name: coredns
release: coredns
namespace: kube-system
wait:
timeout: 600
labels:
release_group: ucp-coredns
upgrade:
no_hooks: true
values:
conf:
test:
names_to_resolve:
- att.com
- calico-etcd.kube-system.svc.cluster.local
- google.com
- kubernetes-etcd.kube-system.svc.cluster.local
- kubernetes.default.svc.cluster.local
images:
tags:
coredns: coredns/coredns:1.1.3
test: coredns/coredns:1.1.3
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: coredns
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: haproxy
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
chart_name: haproxy
release: haproxy
namespace: kube-system
wait:
timeout: 600
labels:
release_group: ucp-haproxy
upgrade:
no_hooks: true
values:
conf:
anchor:
enable_cleanup: false
kubernetes_url: https://10.96.0.1:443
services:
kube-system:
kubernetes-apiserver:
server_opts: "check port 6443"
conf_parts:
global:
- timeout connect 5000ms
- timeout client 30s
- timeout server 30s
frontend:
- mode tcp
- bind *:6553
backend:
- mode tcp
- option tcp-check
- option redispatch
kubernetes-etcd:
server_opts: "check port 2379"
conf_parts:
frontend:
- mode tcp
- bind *:2378
backend:
- mode tcp
- option tcp-check
- option redispatch
images:
tags:
anchor: gcr.io/google_containers/hyperkube-amd64:v1.11.6
haproxy: haproxy:1.8.3
test: python:3.6
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: haproxy
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-apiserver
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
substitutions:
-
src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes
path: .
dest:
path: .values.secrets.tls.ca
-
src:
schema: deckhand/Certificate/v1
name: apiserver
path: .
dest:
path: .values.secrets.tls.cert
-
src:
schema: deckhand/CertificateKey/v1
name: apiserver
path: .
dest:
path: .values.secrets.tls.key
-
src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes-etcd
path: .
dest:
path: .values.secrets.etcd.tls.ca
-
src:
schema: deckhand/Certificate/v1
name: apiserver-etcd
path: .
dest:
path: .values.secrets.etcd.tls.cert
-
src:
schema: deckhand/CertificateKey/v1
name: apiserver-etcd
path: .
dest:
path: .values.secrets.etcd.tls.key
-
src:
schema: deckhand/PublicKey/v1
name: service-account
path: .
dest:
path: .values.secrets.service_account.public_key
-
src:
schema: promenade/EncryptionPolicy/v1
name: encryption-policy
path: .etcd
dest:
path: .values.conf.encryption_provider.content.resources
data:
chart_name: apiserver
release: kubernetes-apiserver
namespace: kube-system
wait:
timeout: 600
labels:
release_group: ucp-kubernetes-apiserver
upgrade:
no_hooks: true
values:
conf:
encryption_provider:
file: encryption_provider.yaml
command_options:
- '--experimental-encryption-provider-config=/etc/kubernetes/apiserver/encryption_provider.yaml'
content:
kind: EncryptionConfig
apiVersion: v1
apiserver:
etcd:
endpoints: https://127.0.0.1:2378
images:
tags:
anchor: gcr.io/google_containers/hyperkube-amd64:v1.11.6
apiserver: gcr.io/google_containers/hyperkube-amd64:v1.11.6
network:
kubernetes_service_ip: 10.96.0.1
pod_cidr: 10.97.0.0/16
service_cidr: 10.96.0.0/16
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: apiserver
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-controller-manager
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
substitutions:
-
src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes
path: .
dest:
path: .values.secrets.tls.ca
-
src:
schema: deckhand/Certificate/v1
name: controller-manager
path: .
dest:
path: .values.secrets.tls.cert
-
src:
schema: deckhand/CertificateKey/v1
name: controller-manager
path: .
dest:
path: .values.secrets.tls.key
-
src:
schema: deckhand/PrivateKey/v1
name: service-account
path: .
dest:
path: .values.secrets.service_account.private_key
data:
chart_name: controller_manager
release: kubernetes-controller-manager
namespace: kube-system
wait:
timeout: 600
labels:
release_group: ucp-kubernetes-controller-manager
upgrade:
no_hooks: true
values:
images:
tags:
anchor: gcr.io/google_containers/hyperkube-amd64:v1.11.6
controller_manager: gcr.io/google_containers/hyperkube-amd64:v1.11.6
secrets:
service_account:
private_key: placeholder
tls:
ca: placeholder
cert: placeholder
key: placeholder
network:
kubernetes_netloc: 127.0.0.1:6553
pod_cidr: 10.97.0.0/16
service_cidr: 10.96.0.0/16
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: controller_manager
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-scheduler
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
substitutions:
-
src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes
path: .
dest:
path: .values.secrets.tls.ca
-
src:
schema: deckhand/Certificate/v1
name: scheduler
path: .
dest:
path: .values.secrets.tls.cert
-
src:
schema: deckhand/CertificateKey/v1
name: scheduler
path: .
dest:
path: .values.secrets.tls.key
data:
chart_name: scheduler
release: kubernetes-scheduler
namespace: kube-system
wait:
timeout: 600
labels:
release_group: ucp-kubernetes-scheduler
upgrade:
no_hooks: true
values:
secrets:
tls:
ca: placeholder
cert: placeholder
key: placeholder
network:
kubernetes_netloc: 127.0.0.1:6553
images:
tags:
anchor: gcr.io/google_containers/hyperkube-amd64:v1.11.6
scheduler: gcr.io/google_containers/hyperkube-amd64:v1.11.6
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: scheduler
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-etcd
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
substitutions:
-
src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes-etcd
path: .
dest:
path: '.values.secrets.tls.client.ca'
-
src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes-etcd-peer
path: .
dest:
path: '.values.secrets.tls.peer.ca'
-
src:
schema: deckhand/Certificate/v1
name: kubernetes-etcd-anchor
path: .
dest:
path: '.values.secrets.anchor.tls.cert'
-
src:
schema: deckhand/CertificateKey/v1
name: kubernetes-etcd-anchor
path: .
dest:
path: '.values.secrets.anchor.tls.key'
-
src:
schema: deckhand/Certificate/v1
name: kubernetes-etcd-n0
path: .
dest:
path: '.values.nodes[0].tls.client.cert'
-
src:
schema: deckhand/CertificateKey/v1
name: kubernetes-etcd-n0
path: .
dest:
path: '.values.nodes[0].tls.client.key'
-
src:
schema: deckhand/Certificate/v1
name: kubernetes-etcd-n0-peer
path: .
dest:
path: '.values.nodes[0].tls.peer.cert'
-
src:
schema: deckhand/CertificateKey/v1
name: kubernetes-etcd-n0-peer
path: .
dest:
path: '.values.nodes[0].tls.peer.key'
data:
chart_name: etcd
release: kubernetes-etcd
namespace: kube-system
wait:
timeout: 600
labels:
release_group: ucp-kubernetes-etcd
upgrade:
no_hooks: true
values:
anchor:
etcdctl_endpoint: kubernetes-etcd.kube-system.svc.cluster.local
labels:
anchor:
node_selector_key: kubernetes-etcd
node_selector_value: enabled
secrets:
anchor:
tls:
cert: placeholder
key: placeholder
tls:
client:
ca: placeholder
peer:
ca: placeholder
etcd:
host_data_path: /var/lib/etcd/kubernetes
host_etc_path: /etc/etcd/kubernetes
images:
tags:
etcd: quay.io/coreos/etcd:v3.2.18
etcdctl: quay.io/coreos/etcd:v3.2.18
nodes:
- name: n0
tls:
client:
cert: placeholder
key: placeholder
peer:
cert: placeholder
key: placeholder
service:
name: kubernetes-etcd
network:
service_client:
name: service_client
port: 2379
target_port: 2379
service_peer:
name: service_peer
port: 2380
target_port: 2380
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: etcd
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: tiller
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
chart_name: tiller
release: tiller
namespace: kube-system
install:
no_hooks: false
upgrade:
no_hooks: false
wait:
timeout: 600
values:
images:
tags:
tiller: gcr.io/kubernetes-helm/tiller:v2.14.0
labels:
node_selector_key: ucp-control-plane
node_selector_value: enabled
source:
type: git
location: https://opendev.org/airship/armada.git
subpath: charts/tiller
reference: 8d1521e96c6b3163f7f6598ef15a11af0052cf04
dependencies:
- helm-toolkit-tiller
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: promenade
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
chart_name: promenade
release: promenade
namespace: ucp
wait:
timeout: 600
labels:
release_group: ucp-promenade
values:
pod:
env:
promenade_api:
- name: PROMENADE_DEBUG
value: '1'
conf:
paste:
app:promenade-api:
disable: keystone
pipeline:main:
pipeline: noauth promenade-api
images:
tags:
promenade: quay.io/airshipit/promenade:master
manifests:
job_ks_endpoints: false
job_ks_service: false
job_ks_user: false
secret_keystone: false
upgrade:
no_hooks: true
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: promenade
dependencies:
- helm-toolkit
...

View File

@ -10,10 +10,13 @@ CONFIG_SOURCE=$(realpath ${1:-${SCRIPT_DIR}/../examples/basic})
BUILD_DIR=$(realpath ${2:-${SCRIPT_DIR}/../build})
REPLACE=${3:-false}
HOSTNAME=$(hostname)
HOST_IFACE=$(ip route | grep "^default" | head -1 | awk '{ print $5 }')
# If not provided, interface is set to HOST_IFACE by default
INTERFACE=${4:-$HOST_IFACE}
# If not provided, it takes a guess at the host IP Address
HOSTIP=${4:-$(hostname -I | cut -d' ' -f 1)}
HOSTIP=${5:-$(hostname -I | cut -d' ' -f 1)}
# Ceph CIDR provide like 10.0.0.0\\\/24
HOSTCIDR=${5:-"$(hostname -I | cut -d'.' -f 1,2,3).0\/24"}
HOSTCIDR=${6:-"$(hostname -I | cut -d'.' -f 1,2,3).0\/24"}
echo === Cleaning up old data ===
@ -21,6 +24,14 @@ rm -rf ${BUILD_DIR}
mkdir -p ${BUILD_DIR}
chmod 777 ${BUILD_DIR}
PROMENADE_TMP_LOCAL="$(basename "$PROMENADE_TMP_LOCAL")"
PROMENADE_TMP="${SCRIPT_DIR}/${PROMENADE_TMP_LOCAL}"
mkdir -p "$PROMENADE_TMP"
chmod 777 "$PROMENADE_TMP"
DOCKER_SOCK="/var/run/docker.sock"
sudo chmod o+rw $DOCKER_SOCK
cp "${CONFIG_SOURCE}"/*.yaml ${BUILD_DIR}
if [ ${REPLACE} == 'replace' ]
@ -31,6 +42,7 @@ then
sed -i "s/:n0/:${HOSTNAME}/g" "${BUILD_DIR}"/*.yaml
sed -i "s/192.168.77.10/${HOSTIP}/g" "${BUILD_DIR}"/*.yaml
sed -i "s/192.168.77.0\/24/${HOSTCIDR}/g" "${BUILD_DIR}"/*.yaml
sed -i "s/=ens3/=${INTERFACE}/g" "${BUILD_DIR}"/*.yaml
fi
if [[ -z $1 ]] || [[ $1 = generate-certs ]]; then
@ -54,6 +66,11 @@ docker run --rm -t \
-e http_proxy=${HTTP_PROXY} \
-e https_proxy=${HTTPS_PROXY} \
-e no_proxy=${NO_PROXY} \
-v "${PROMENADE_TMP}:/${PROMENADE_TMP_LOCAL}" \
-v "${DOCKER_SOCK}:${DOCKER_SOCK}" \
-e "DOCKER_HOST=unix:/${DOCKER_SOCK}" \
-e "PROMENADE_TMP=${PROMENADE_TMP}" \
-e "PROMENADE_TMP_LOCAL=/${PROMENADE_TMP_LOCAL}" \
-v ${BUILD_DIR}:/target \
${IMAGE_PROMENADE} \
promenade \
@ -63,4 +80,6 @@ docker run --rm -t \
$(ls ${BUILD_DIR})
fi
sudo chmod o-rw $DOCKER_SOCK
echo === Done ===

View File

@ -0,0 +1,73 @@
---
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Zuul playbook: build promenade gate configuration, run genesis on the
# node, and validate the resulting single-node cluster.
- hosts: primary
  vars:
    env:
      HTTP_PROXY: ""
      HTTPS_PROXY: ""
      NO_PROXY: ""
      PROMENADE_TMP_LOCAL: "tmp_bin"
  become: true
  tasks:
    - name: Install docker
      # NOTE(review): use the apt module instead of a raw `apt-get install`
      # command so the task is idempotent and reports changed/ok correctly.
      apt:
        name:
          - docker.io
          - resolvconf
        state: present
    - name: Generate configuration files
      shell: |
        set -xe;
        ./tools/dev-build.sh examples/gate generate-certs replace {{ ansible_default_ipv4.interface }}
      args:
        chdir: "{{ zuul.project.src_dir }}"
        executable: /bin/bash
      environment: "{{ env }}"
    - name: Generate certificate files
      shell: |
        set -xe;
        ./tools/dev-build.sh generate-certs
      args:
        chdir: "{{ zuul.project.src_dir }}"
        executable: /bin/bash
      environment: "{{ env }}"
    - name: Copy build files and generated certificates to build-all
      command: "mv {{ zuul.project.src_dir }}/build {{ zuul.project.src_dir }}/build-all"
    - name: Build genesis script
      shell: |
        set -xe;
        ./tools/dev-build.sh build-all
      args:
        chdir: "{{ zuul.project.src_dir }}"
        executable: /bin/bash
      environment: "{{ env }}"
    - name: Deploying genesis
      shell: |
        set -xe;
        ./build/genesis.sh
      args:
        chdir: "{{ zuul.project.src_dir }}"
        executable: /bin/bash
    - name: Validating genesis
      # Validation can race with cluster services coming up, so retry.
      shell: |
        set -xe;
        ./build/validate-genesis.sh
      args:
        chdir: "{{ zuul.project.src_dir }}"
        executable: /bin/bash
      register: result
      retries: 3
      delay: 10
      until: result.rc == 0