Definition for virtual site deployment

- Create a site definition to support a full site deployment
  workflow using VMs

Multi-Node Gate

Enable multi-node gate for Airship using core Airship
components.

Deployment will be driven by Shipyard.

We will re-use and adapt the source code from the Promenade
g2 gate [1].

[1] https://github.com/openstack/airship-promenade/tree/master/tools/g2

Change-Id: I41e79f5f26311fa179a2e5c121aa815caa05cfcd
Scott Hussey authored and committed 2018-04-30 14:43:33 -05:00
parent 3dd7150fef
commit 4624804f80
104 changed files with 4412 additions and 0 deletions
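
At a high level, a gate run sources the helper libraries added below and drives the deployment through Shipyard. A rough sketch of such a run (the stage sequence is illustrative; this driver script is not part of the commit, but all.sh, the log helpers, and shipyard_action_wait are):

#!/bin/bash
# Illustrative sketch only; stage ordering is an assumption.
set -e
source multi_nodes_gate/airship_gate/lib/all.sh
log_stage_header "deploy_site"
shipyard_action_wait deploy_site 7200
log_stage_footer "deploy_site"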

View File

@ -78,6 +78,11 @@ data:
- openvswitch=enabled
- promenade-genesis=enabled
- ucp-control-plane=enabled
- ceph-mon=enabled
- ceph-mds=enabled
- ceph-osd=enabled
- ceph-rgw=enabled
- ceph-mgr=enabled
files:
- path: /var/lib/anchor/calico-etcd-bootstrap
content: "# placeholder for triggering calico etcd bootstrapping\n# this file will be deleted"

View File

@ -3,6 +3,8 @@ schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-drydock
labels:
name: ucp-drydock
layeringDefinition:
abstract: false
layer: global

View File

@ -3,6 +3,8 @@ schema: pegleg/SoftwareVersions/v1
metadata:
schema: metadata/Document/v1
name: software-versions
labels:
name: software-versions
layeringDefinition:
abstract: false
layer: global
@ -559,3 +561,4 @@ data:
unnamed:
# nfs-common for the nfs provisioner - dev only.
- nfs-common
- ceph-common

View File

@ -78,6 +78,11 @@ data:
- openvswitch=enabled
- promenade-genesis=enabled
- ucp-control-plane=enabled
- ceph-mgr=enabled
- ceph-mon=enabled
- ceph-mds=enabled
- ceph-rgw=enabled
- ceph-osd=enabled
files:
- path: /var/lib/anchor/calico-etcd-bootstrap
content: "# placeholder for triggering calico etcd bootstrapping\n# this file will be deleted"

View File

@ -3,6 +3,8 @@ schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-deckhand
labels:
name: ucp-deckhand
layeringDefinition:
abstract: false
layer: global

View File

@ -3,6 +3,8 @@ schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-drydock
labels:
name: ucp-drydock
layeringDefinition:
abstract: false
layer: global

View File

@ -3,6 +3,8 @@ schema: pegleg/SoftwareVersions/v1
metadata:
schema: metadata/Document/v1
name: software-versions
labels:
name: software-versions
layeringDefinition:
abstract: false
layer: global
@ -148,11 +150,13 @@ data:
location: https://git.openstack.org/openstack/openstack-helm
subpath: helm-toolkit
reference: f902cd14fac7de4c4c9f7d019191268a6b4e9601
# TODO Update to openstack-helm and split to match split projects
ceph:
type: git
location: https://git.openstack.org/openstack/openstack-helm
subpath: ceph
reference: f902cd14fac7de4c4c9f7d019191268a6b4e9601
# TODO Update to openstack-helm-infra
ceph-htk:
type: git
location: https://git.openstack.org/openstack/openstack-helm
@ -437,3 +441,4 @@ data:
unnamed:
# nfs-common for the nfs provisioner - dev only.
- nfs-common
- ceph-common

View File

@ -0,0 +1,18 @@
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: ucp-storage-provisioner
labels:
name: ucp-storage-provisioner
group: ucp
service: k8_storage_provisioning
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data:
description: Kubernetes Storage Provisioner
sequenced: true
chart_group:
- nfs_provisioner

View File

@ -0,0 +1,77 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: nfs_provisioner
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.ucp.nfs_provisioner
dest:
path: .source
# Images
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.storage.nfs_provisioner
dest:
path: .values.images.tags
data:
chart_name: nfs-provisioner
release: nfs-provisioner
namespace: kube-system
timeout: 900
wait:
timeout: 900
install:
no_hooks: false
upgrade:
no_hooks: true
values:
labels:
nfs:
node_selector_key: ucp-control-plane
node_selector_value: enabled
job:
node_selector_key: ucp-control-plane
node_selector_value: enabled
storage:
hostPath:
path: /var/lib/airship/nfs
persistentVolumeClaim:
size: '30Gi'
storageclass:
name: general
dependencies:
- nfs_htk
...
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: nfs_htk
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.ucp.nfs_htk
dest:
path: .source
data:
chart_name: nfs_htk
release: nfs_htk
namespace: nfs_htk
values: {}
dependencies: []
...

View File

@ -0,0 +1,34 @@
---
schema: 'drydock/BootAction/v1'
metadata:
schema: 'metadata/Document/v1'
name: promjoin
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: site
labels:
application: 'drydock'
data:
signaling: false
assets:
- path: /opt/promjoin.sh
type: file
permissions: '555'
location: promenade+http://promenade-api.ucp.svc.cluster.local/api/v1.0/join-scripts?design_ref={{ action.design_ref | urlencode }}&hostname={{ node.hostname }}&ip={{ node.network.default.ip }}{% for k, v in node.labels.items() %}&labels.dynamic={{ k }}={{ v }}{% endfor %}
location_pipeline:
- template
data_pipeline:
- utf8_decode
- path: /lib/systemd/system/promjoin.service
type: unit
permissions: '600'
data: |-
W1VuaXRdCkRlc2NyaXB0aW9uPVByb21lbmFkZSBJbml0aWFsaXphdGlvbiBTZXJ2aWNlCkFmdGVy
PW5ldHdvcmstb25saW5lLnRhcmdldCBsb2NhbC1mcy50YXJnZXQKQ29uZGl0aW9uUGF0aEV4aXN0
cz0hL3Zhci9saWIvcHJvbS5kb25lCgpbU2VydmljZV0KVHlwZT1zaW1wbGUKRXhlY1N0YXJ0PS9v
cHQvcHJvbWpvaW4uc2gKCltJbnN0YWxsXQpXYW50ZWRCeT1tdWx0aS11c2VyLnRhcmdldAo=
data_pipeline:
- base64_decode
- utf8_decode
...
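
For reference, the base64 payload above decodes to the following systemd unit, which runs the downloaded join script once per node; this can be verified directly:

# Decode the unit payload (copied verbatim from the document above):
base64 -d <<'B64'
W1VuaXRdCkRlc2NyaXB0aW9uPVByb21lbmFkZSBJbml0aWFsaXphdGlvbiBTZXJ2aWNlCkFmdGVy
PW5ldHdvcmstb25saW5lLnRhcmdldCBsb2NhbC1mcy50YXJnZXQKQ29uZGl0aW9uUGF0aEV4aXN0
cz0hL3Zhci9saWIvcHJvbS5kb25lCgpbU2VydmljZV0KVHlwZT1zaW1wbGUKRXhlY1N0YXJ0PS9v
cHQvcHJvbWpvaW4uc2gKCltJbnN0YWxsXQpXYW50ZWRCeT1tdWx0aS11c2VyLnRhcmdldAo=
B64
# Output:
# [Unit]
# Description=Promenade Initialization Service
# After=network-online.target local-fs.target
# ConditionPathExists=!/var/lib/prom.done
#
# [Service]
# Type=simple
# ExecStart=/opt/promjoin.sh
#
# [Install]
# WantedBy=multi-user.target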

View File

@ -0,0 +1,58 @@
---
schema: 'drydock/BaremetalNode/v1'
metadata:
schema: 'metadata/Document/v1'
name: n1
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
host_profile: defaults
addressing:
- network: gp
address: 172.24.1.11
metadata:
boot_mac: '52:54:00:00:a3:31'
rack: rack1
tags:
- 'masters'
---
schema: 'drydock/BaremetalNode/v1'
metadata:
schema: 'metadata/Document/v1'
name: n2
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
host_profile: defaults
addressing:
- network: gp
address: 172.24.1.12
metadata:
boot_mac: '52:54:00:1a:95:0d'
rack: rack1
tags:
- 'masters'
---
schema: 'drydock/BaremetalNode/v1'
metadata:
schema: 'metadata/Document/v1'
name: n3
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
host_profile: defaults
addressing:
- network: gp
address: 172.24.1.13
metadata:
boot_mac: '52:54:00:31:c2:36'
rack: rack1
tags:
- 'masters'
...

View File

@ -0,0 +1,26 @@
---
# The global deployment strategy assumes nodes are marked with node_tags
# of masters and workers.
schema: shipyard/DeploymentStrategy/v1
metadata:
schema: metadata/Document/v1
name: deployment-strategy
layeringDefinition:
abstract: false
layer: global
labels:
name: deployment-strategy-global
storagePolicy: cleartext
data:
groups:
- name: masters
critical: true
depends_on: []
selectors:
- node_names: ['n1','n2','n3']
node_labels: []
node_tags: []
rack_names: []
success_criteria:
percent_successful_nodes: 100
...

View File

@ -0,0 +1,31 @@
---
schema: shipyard/DeploymentConfiguration/v1
metadata:
schema: metadata/Document/v1
name: deployment-configuration
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
physical_provisioner:
deployment_strategy: deployment-strategy
deploy_interval: 30
deploy_timeout: 3600
destroy_interval: 30
destroy_timeout: 900
join_wait: 120
prepare_node_interval: 30
prepare_node_timeout: 1000
prepare_site_interval: 10
prepare_site_timeout: 300
verify_interval: 10
verify_timeout: 60
kubernetes_provisioner:
drain_timeout: 3600
drain_grace_period: 1800
clear_labels_timeout: 1800
remove_etcd_timeout: 1800
etcd_ready_timeout: 600
armada:
manifest: 'full-site'

View File

@ -0,0 +1,13 @@
---
schema: dev/Configurables/v1
metadata:
schema: metadata/Document/v1
name: dev-configurables
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
# Data section is currently hardcoded; it should be injected by the gate scripts later
data:
hostcidr: 172.24.1.0/24
...

View File

@ -0,0 +1,62 @@
---
schema: pegleg/CommonAddresses/v1
metadata:
schema: metadata/Document/v1
name: common-addresses
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
calico:
ip_autodetection_method: 'interface=ens3'
etcd:
service_ip: 10.96.232.136
dns:
cluster_domain: cluster.local
service_ip: 10.96.0.10
upstream_servers:
- 8.8.8.8
- 8.8.4.4
upstream_servers_joined: 8.8.8.8,8.8.4.4
genesis:
hostname: n0
ip: 172.24.1.10
bootstrap:
ip: 172.24.1.10
kubernetes:
api_service_ip: 10.96.0.1
etcd_service_ip: 10.96.0.2
pod_cidr: 10.97.0.0/16
service_cidr: 10.96.0.0/16
apiserver_port: 6443
haproxy_port: 6553
etcd:
container_port: 2379
haproxy_port: 2378
proxy:
http:
https:
no_proxy:
node_ports:
drydock_api: 30000
maas_api: 30001
maas_proxy: 31800
shipyard_api: 30003
airflow_web: 30004
ntp:
servers_joined: ntp.ubuntu.com
storage:
ceph:
public_cidr: 172.24.1.0/24
cluster_cidr: 172.24.1.0/24
...

View File

@ -0,0 +1,42 @@
---
schema: 'drydock/NetworkLink/v1'
metadata:
schema: 'metadata/Document/v1'
name: gp
layeringDefinition:
abstract: false
layer: site
data:
bonding:
mode: disabled
mtu: 1500
linkspeed: auto
trunking:
mode: disabled
default_network: gp
allowed_networks:
- gp
...
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: gp
layeringDefinition:
abstract: false
layer: site
data:
mtu: 1500
cidr: 172.24.1.0/24
ranges:
- type: dhcp
start: 172.24.1.100
end: 172.24.1.200
routes:
- subnet: 0.0.0.0/0
gateway: 172.24.1.1
metric: 10
dns:
domain: gate.local
servers: '8.8.8.8 8.8.4.4'
...

View File

@ -0,0 +1,270 @@
---
schema: promenade/PKICatalog/v1
metadata:
schema: metadata/Document/v1
name: cluster-certificates
layeringDefinition:
abstract: false
layer: site
data:
certificate_authorities:
kubernetes:
description: CA for Kubernetes components
certificates:
- document_name: apiserver
description: Service certificate for Kubernetes apiserver
common_name: apiserver
hosts:
- localhost
- 127.0.0.1
- 10.96.0.1
kubernetes_service_names:
- kubernetes.default.svc.cluster.local
- document_name: kubelet-genesis
common_name: system:node:n0
hosts:
- n0
- 172.24.1.10
groups:
- system:nodes
- document_name: kubelet-n0
common_name: system:node:n0
hosts:
- n0
- 172.24.1.10
groups:
- system:nodes
- document_name: kubelet-n1
common_name: system:node:n1
hosts:
- n1
- 172.24.1.11
groups:
- system:nodes
- document_name: kubelet-n2
common_name: system:node:n2
hosts:
- n2
- 172.24.1.12
groups:
- system:nodes
- document_name: kubelet-n3
common_name: system:node:n3
hosts:
- n3
- 172.24.1.13
groups:
- system:nodes
- document_name: scheduler
description: Service certificate for Kubernetes scheduler
common_name: system:kube-scheduler
- document_name: controller-manager
description: certificate for controller-manager
common_name: system:kube-controller-manager
- document_name: admin
common_name: admin
groups:
- system:masters
- document_name: armada
common_name: armada
groups:
- system:masters
kubernetes-etcd:
description: Certificates for Kubernetes's etcd servers
certificates:
- document_name: apiserver-etcd
description: etcd client certificate for use by Kubernetes apiserver
common_name: apiserver
- document_name: kubernetes-etcd-anchor
description: anchor
common_name: anchor
- document_name: kubernetes-etcd-genesis
common_name: kubernetes-etcd-genesis
hosts:
- n0
- 172.24.1.10
- 127.0.0.1
- localhost
- kubernetes-etcd.kube-system.svc.cluster.local
- 10.96.0.2
- document_name: kubernetes-etcd-n0
common_name: kubernetes-etcd-n0
hosts:
- n0
- 172.24.1.10
- 127.0.0.1
- localhost
- kubernetes-etcd.kube-system.svc.cluster.local
- 10.96.0.2
- document_name: kubernetes-etcd-n1
common_name: kubernetes-etcd-n1
hosts:
- n1
- 172.24.1.11
- 127.0.0.1
- localhost
- kubernetes-etcd.kube-system.svc.cluster.local
- 10.96.0.2
- document_name: kubernetes-etcd-n2
common_name: kubernetes-etcd-n2
hosts:
- n2
- 172.24.1.12
- 127.0.0.1
- localhost
- kubernetes-etcd.kube-system.svc.cluster.local
- 10.96.0.2
- document_name: kubernetes-etcd-n3
common_name: kubernetes-etcd-n3
hosts:
- n3
- 172.24.1.13
- 127.0.0.1
- localhost
- kubernetes-etcd.kube-system.svc.cluster.local
- 10.96.0.2
kubernetes-etcd-peer:
certificates:
- document_name: kubernetes-etcd-genesis-peer
common_name: kubernetes-etcd-genesis-peer
hosts:
- n0
- 172.24.1.10
- 127.0.0.1
- localhost
- kubernetes-etcd.kube-system.svc.cluster.local
- 10.96.0.2
- document_name: kubernetes-etcd-n0-peer
common_name: kubernetes-etcd-n0-peer
hosts:
- n0
- 172.24.1.10
- 127.0.0.1
- localhost
- kubernetes-etcd.kube-system.svc.cluster.local
- 10.96.0.2
- document_name: kubernetes-etcd-n1-peer
common_name: kubernetes-etcd-n1-peer
hosts:
- n1
- 172.24.1.11
- 127.0.0.1
- localhost
- kubernetes-etcd.kube-system.svc.cluster.local
- 10.96.0.2
- document_name: kubernetes-etcd-n2-peer
common_name: kubernetes-etcd-n2-peer
hosts:
- n2
- 172.24.1.12
- 127.0.0.1
- localhost
- kubernetes-etcd.kube-system.svc.cluster.local
- 10.96.0.2
- document_name: kubernetes-etcd-n3-peer
common_name: kubernetes-etcd-n3-peer
hosts:
- n3
- 172.24.1.13
- 127.0.0.1
- localhost
- kubernetes-etcd.kube-system.svc.cluster.local
- 10.96.0.2
calico-etcd:
description: Certificates for Calico etcd client traffic
certificates:
- document_name: calico-etcd-anchor
description: anchor
common_name: anchor
- document_name: calico-etcd-genesis
common_name: calico-etcd-genesis
hosts:
- n0
- 172.24.1.10
- 127.0.0.1
- localhost
- 10.96.232.136
- document_name: calico-etcd-n0
common_name: calico-etcd-n0
hosts:
- n0
- 172.24.1.10
- 127.0.0.1
- localhost
- 10.96.232.136
- document_name: calico-etcd-n1
common_name: calico-etcd-n1
hosts:
- n1
- 172.24.1.11
- 127.0.0.1
- localhost
- 10.96.232.136
- document_name: calico-etcd-n2
common_name: calico-etcd-n2
hosts:
- n2
- 172.24.1.12
- 127.0.0.1
- localhost
- 10.96.232.136
- document_name: calico-etcd-n3
common_name: calico-etcd-n3
hosts:
- n3
- 172.24.1.13
- 127.0.0.1
- localhost
- 10.96.232.136
- document_name: calico-node
common_name: calico-node
calico-etcd-peer:
description: Certificates for Calico etcd clients
certificates:
- document_name: calico-etcd-genesis-peer
common_name: calico-etcd-genesis-peer
hosts:
- n0
- 172.24.1.10
- 127.0.0.1
- localhost
- 10.96.232.136
- document_name: calico-etcd-n0-peer
common_name: calico-etcd-n0-peer
hosts:
- n0
- 172.24.1.10
- 127.0.0.1
- localhost
- 10.96.232.136
- document_name: calico-etcd-n1-peer
common_name: calico-etcd-n1-peer
hosts:
- n1
- 172.24.1.11
- 127.0.0.1
- localhost
- 10.96.232.136
- document_name: calico-etcd-n2-peer
common_name: calico-etcd-n2-peer
hosts:
- n2
- 172.24.1.12
- 127.0.0.1
- localhost
- 10.96.232.136
- document_name: calico-etcd-n3-peer
common_name: calico-etcd-n3-peer
hosts:
- n3
- 172.24.1.13
- 127.0.0.1
- localhost
- 10.96.232.136
- document_name: calico-node-peer
common_name: calico-node-peer
keypairs:
- name: service-account
description: Service account signing key for use by Kubernetes controller-manager.
...
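
Once Promenade generates certificates from this catalog, the per-node SANs can be spot-checked with openssl (the PEM file name below is illustrative; it depends on where the generated secret documents are written out):

openssl x509 -in kubernetes-etcd-n1.pem -noout -text \
  | grep -A1 'Subject Alternative Name'
# Expected to list: DNS:n1, DNS:localhost,
# DNS:kubernetes-etcd.kube-system.svc.cluster.local,
# IP Address:172.24.1.11, IP Address:127.0.0.1, IP Address:10.96.0.2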

View File

@ -0,0 +1,22 @@
---
schema: 'drydock/HardwareProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: GenericVM
layeringDefinition:
abstract: false
layer: site
data:
vendor: 'Dell'
generation: '1'
hw_version: '2'
bios_version: '2.2.3'
boot_mode: 'bios'
bootstrap_protocol: 'pxe'
pxe_interface: 0
device_aliases:
pnic01:
bus_type: 'pci'
dev_type: 'Intel 10Gbps NIC'
address: '0000:00:03.0'
...

View File

@ -0,0 +1,47 @@
---
schema: 'drydock/HostProfile/v1'
metadata:
name: defaults
schema: 'metadata/Document/v1'
layeringDefinition:
abstract: false
layer: site
data:
hardware_profile: 'GenericVM'
primary_network: 'gp'
oob:
type: 'libvirt'
libvirt_uri: 'qemu+ssh://virtmgr@172.24.1.1/system'
storage:
physical_devices:
vda:
labels:
bootdrive: 'true'
partitions:
- name: 'root'
size: '20g'
bootable: true
filesystem:
mountpoint: '/'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'boot'
size: '1g'
filesystem:
mountpoint: '/boot'
fstype: 'ext4'
mount_options: 'defaults'
interfaces:
ens3:
device_link: 'gp'
slaves:
- 'ens3'
networks:
- 'gp'
platform:
image: 'xenial'
kernel: 'hwe-16.04'
metadata:
tags:
- 'foo'
...
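
The libvirt OOB settings above are what let Drydock power-cycle the gate VMs instead of a real BMC. The same URI can be exercised manually to confirm the virtmgr account and storage pool are reachable (a sanity check, not part of this commit; the pool name comes from the VIRSH_POOL default in config.sh below):

virsh --connect 'qemu+ssh://virtmgr@172.24.1.1/system' list --all
virsh --connect 'qemu+ssh://virtmgr@172.24.1.1/system' pool-info airship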

View File

@ -0,0 +1,18 @@
---
schema: 'drydock/Region/v1'
metadata:
schema: 'metadata/Document/v1'
name: 'gate-multinode'
storagePolicy: 'cleartext'
substitutions:
- src:
schema: deckhand/Certificate/v1
name: ubuntu_ssh_key
path: .
dest:
path: .authorized_keys[0]
labels:
application: 'drydock'
data:
authorized_keys: []
...

View File

@ -0,0 +1,11 @@
---
schema: deckhand/Passphrase/v1
metadata:
schema: metadata/Document/v1
name: ceph_swift_keystone_password
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data: password1
...

View File

@ -0,0 +1,11 @@
---
schema: deckhand/Passphrase/v1
metadata:
schema: metadata/Document/v1
name: ipmi_admin_password
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data: calvin
...

View File

@ -0,0 +1,11 @@
---
schema: deckhand/Passphrase/v1
metadata:
schema: metadata/Document/v1
name: maas-region-key
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data: 3858f62230ac3c915f300c664312c63f
...

View File

@ -0,0 +1,11 @@
---
schema: deckhand/Passphrase/v1
metadata:
schema: metadata/Document/v1
name: ucp_airflow_postgres_password
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data: password2
...

View File

@ -0,0 +1,11 @@
---
schema: deckhand/Passphrase/v1
metadata:
schema: metadata/Document/v1
name: ucp_armada_keystone_password
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data: password3
...

View File

@ -0,0 +1,11 @@
---
schema: deckhand/Passphrase/v1
metadata:
schema: metadata/Document/v1
name: ucp_barbican_keystone_password
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data: password4
...

View File

@ -0,0 +1,11 @@
---
schema: deckhand/Passphrase/v1
metadata:
schema: metadata/Document/v1
name: ucp_barbican_oslo_db_password
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data: password5
...

View File

@ -0,0 +1,11 @@
---
schema: deckhand/Passphrase/v1
metadata:
schema: metadata/Document/v1
name: ucp_deckhand_keystone_password
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data: password6
...

View File

@ -0,0 +1,11 @@
---
schema: deckhand/Passphrase/v1
metadata:
schema: metadata/Document/v1
name: ucp_deckhand_postgres_password
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data: password7
...

View File

@ -0,0 +1,11 @@
---
schema: deckhand/Passphrase/v1
metadata:
schema: metadata/Document/v1
name: ucp_drydock_keystone_password
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data: password8
...

View File

@ -0,0 +1,38 @@
---
schema: deckhand/CertificateKey/v1
metadata:
schema: metadata/Document/v1
name: airship_drydock_kvm_ssh_key
layeringDefinition:
layer: site
abstract: false
storagePolicy: cleartext
data: |-
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEA1K4inGpze6uhnMO2Wf74AE5i6rbMBLppZaguwvWx25bgt0Ie
q3WbQiT+rA0897lO1s551mDMKmtbxM5k28ai3uymcv5tgcN5APeO+gfXNQeOZ4s6
kcgDpSQTSmkYLJZCOqPjrPEJvSKo7SAy+kJkAjBAqfCYfwo9t/yqXTRr+ugKTujz
mBn8N2NNNwfi2VIF1Kcav4Nbwn96npYfIRuwGZWf9jykdGmzL/sm2m1G1WxxSIgZ
n4hKkZqHbHClmGUKex7GzJ1vDz/YhVHMDgyjIqSsioIA8GE/TkEL8+LJe/ecWPsI
gReYtrHtbRP7nau7xoa0AGCp+OELiCYeKtnBgQIDAQABAoIBAGsjBYByB5bHEPSd
iqCvVlMhKac/SmsfVLTHEjdX+a66xJzMjBKnffzGEhyt1oacLOD8xld+D5tW1bXA
mwIzgBx4bikDjjqOM8iO7NF7XafzFIOImYqSjXOFRsp3QpxDXT2ZDboy+kryyNIb
JFL5WtPXZtu0vb1RdWHXwCKPo82SZoUJYWFBSZ+6+O1cKFCGxhYOPytFtMkoco7R
52L+wpbYdnrY8hcOzj/sojfiXicyOxJJQ8D/p1LjGRr6arL7hbPnPM+p+zEzX6Zy
zu/tREK7aJ865BXoJ4dzHm1OoE+HUnT2OFfe2+0bGpPLc/50gDQV7zTPpxp8vJDb
gzAnayECgYEA/0dqZhu1W5L1jCVOI4akMf629Novs6f4UreQE4le7U/nhMN6W+kp
RB2hz+d8owgw7wvx0LIhBmtGO1PgLYvrDJdzJ3Q283z72BQfZ6ubzZzzvz3esSYR
ARdRVZ2QBksOuFZRLD0AjjoqrlnCGczimI59tv45N/z2lNxqvbta450CgYEA1Ufq
8FkfFIukMx3gOgV/+rmKD6UNbvKlVncAagB/Ig9eEl0eBlp0bInCFVv1oiFRJCBr
vpq3XZBnlwhkgTAdDx+yYO6MVR3OsDPDiqdr2re1deDfZa6k8JD1Z6S2ATlhWIok
IqifbMewu/l7ekvrf9zQB+PAKKiO/F+VkQLOijUCgYAKxSuTBUZAh9G+SPCkBRE2
XsW/TNlIXiTW8JGukVQk9QMSkeC+sfagvhNOnzid9EGERF++a1WDXNFIvYAuy8Ft
9ZHRL1EGMta5RhgRGqdnJGEQR372wGFlR2OSFiS2b0I+ur2P/T7hwUh9tKHAzukc
gJ6DMvSdDRomK6qBOL9b7QKBgAFFv5IVhgjyWFVxvS+7bqsJRliXjH/39KKSbS80
t411eAM2CQLSsVUgcUb9WHbSKhDEHMBzXdqyDNwq0aavqcy4TLElWzL6zYvhky4V
l6AjbnK4l7yckSFaTAyFQAy70efcUyDdzq2wd053y5PMNih6IceaRUA42TmKPHM0
CBVtAoGBALZFV3fpPU/nEiblaCQTc7fqJ9hxIOr2HSUXCae2yOqQNpOkRA/1KLnM
HaH88P6fibYdE6VtA3HcLgCQyB6wL/GoFGHkTloBRzZeSIFTW0bjwsVUFXkC/jyG
FxVZOaooEhP7Su5IBrDxbTqeKHgsDoU+gAiL0LWjt9lsvbttNbe+
-----END RSA PRIVATE KEY-----
...

View File

@ -0,0 +1,11 @@
---
schema: deckhand/Passphrase/v1
metadata:
schema: metadata/Document/v1
name: ucp_drydock_postgres_password
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data: password9
...

View File

@ -0,0 +1,11 @@
---
schema: deckhand/Passphrase/v1
metadata:
schema: metadata/Document/v1
name: ucp_keystone_admin_password
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data: password10
...

View File

@ -0,0 +1,11 @@
---
schema: deckhand/Passphrase/v1
metadata:
schema: metadata/Document/v1
name: ucp_keystone_oslo_db_password
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data: password11
...

View File

@ -0,0 +1,11 @@
---
schema: deckhand/Passphrase/v1
metadata:
schema: metadata/Document/v1
name: ucp_maas_admin_password
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data: password12
...

View File

@ -0,0 +1,11 @@
---
schema: deckhand/Passphrase/v1
metadata:
schema: metadata/Document/v1
name: ucp_maas_postgres_password
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data: password13
...

View File

@ -0,0 +1,11 @@
---
schema: deckhand/Passphrase/v1
metadata:
schema: metadata/Document/v1
name: ucp_oslo_db_admin_password
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data: password14
...

View File

@ -0,0 +1,11 @@
---
schema: deckhand/Passphrase/v1
metadata:
schema: metadata/Document/v1
name: ucp_oslo_messaging_password
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data: password15
...

View File

@ -0,0 +1,11 @@
---
schema: deckhand/Passphrase/v1
metadata:
schema: metadata/Document/v1
name: ucp_postgres_admin_password
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data: password16
...

View File

@ -0,0 +1,11 @@
---
schema: deckhand/Passphrase/v1
metadata:
schema: metadata/Document/v1
name: ucp_promenade_keystone_password
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data: password17
...

View File

@ -0,0 +1,11 @@
---
schema: deckhand/Passphrase/v1
metadata:
schema: metadata/Document/v1
name: ucp_rabbitmq_erlang_cookie
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data: 111df8c05b0f041d4764
...

View File

@ -0,0 +1,11 @@
---
schema: deckhand/Passphrase/v1
metadata:
schema: metadata/Document/v1
name: ucp_shipyard_keystone_password
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data: password18
...

View File

@ -0,0 +1,11 @@
---
schema: deckhand/Passphrase/v1
metadata:
schema: metadata/Document/v1
name: ucp_shipyard_postgres_password
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data: password19
...

View File

@ -0,0 +1,11 @@
---
data:
revision: v1.0demo
site_type: single-node
metadata:
layeringDefinition: {abstract: false, layer: site}
name: gate-multinode
schema: metadata/Document/v1
storagePolicy: cleartext
schema: pegleg/SiteDefinition/v1
...

View File

@ -0,0 +1,188 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-calico-etcd
layeringDefinition:
abstract: false
layer: site
parentSelector:
name: kubernetes-calico-etcd-global
actions:
- method: merge
path: .
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.kubernetes.calico.etcd
dest:
path: .source
# Image versions
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.calico.etcd
dest:
path: .values.images.tags
# IP addresses
-
src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .calico.etcd.service_ip
dest:
path: .values.service.ip
-
src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .calico.etcd.service_ip
dest:
path: .values.anchor.etcdctl_endpoint
# CAs
-
src:
schema: deckhand/CertificateAuthority/v1
name: calico-etcd
path: .
dest:
path: .values.secrets.tls.client.ca
-
src:
schema: deckhand/CertificateAuthority/v1
name: calico-etcd-peer
path: .
dest:
path: .values.secrets.tls.peer.ca
# Anchor client cert
-
src:
schema: deckhand/Certificate/v1
name: calico-etcd-anchor
path: .
dest:
path: .values.secrets.anchor.tls.cert
-
src:
schema: deckhand/CertificateKey/v1
name: calico-etcd-anchor
path: .
dest:
path: .values.secrets.anchor.tls.key
# Server certs
- src:
schema: deckhand/Certificate/v1
name: calico-etcd-n0
path: .
dest:
path: .values.nodes[0].tls.client.cert
- src:
schema: deckhand/CertificateKey/v1
name: calico-etcd-n0
path: .
dest:
path: .values.nodes[0].tls.client.key
- src:
schema: deckhand/Certificate/v1
name: calico-etcd-n0-peer
path: .
dest:
path: .values.nodes[0].tls.peer.cert
- src:
schema: deckhand/CertificateKey/v1
name: calico-etcd-n0-peer
path: .
dest:
path: .values.nodes[0].tls.peer.key
- src:
schema: deckhand/Certificate/v1
name: calico-etcd-n1
path: .
dest:
path: .values.nodes[1].tls.client.cert
- src:
schema: deckhand/CertificateKey/v1
name: calico-etcd-n1
path: .
dest:
path: .values.nodes[1].tls.client.key
- src:
schema: deckhand/Certificate/v1
name: calico-etcd-n1-peer
path: .
dest:
path: .values.nodes[1].tls.peer.cert
- src:
schema: deckhand/CertificateKey/v1
name: calico-etcd-n1-peer
path: .
dest:
path: .values.nodes[1].tls.peer.key
- src:
schema: deckhand/Certificate/v1
name: calico-etcd-n2
path: .
dest:
path: .values.nodes[2].tls.client.cert
- src:
schema: deckhand/CertificateKey/v1
name: calico-etcd-n2
path: .
dest:
path: .values.nodes[2].tls.client.key
- src:
schema: deckhand/Certificate/v1
name: calico-etcd-n2-peer
path: .
dest:
path: .values.nodes[2].tls.peer.cert
- src:
schema: deckhand/CertificateKey/v1
name: calico-etcd-n2-peer
path: .
dest:
path: .values.nodes[2].tls.peer.key
- src:
schema: deckhand/Certificate/v1
name: calico-etcd-n3
path: .
dest:
path: .values.nodes[3].tls.client.cert
- src:
schema: deckhand/CertificateKey/v1
name: calico-etcd-n3
path: .
dest:
path: .values.nodes[3].tls.client.key
- src:
schema: deckhand/Certificate/v1
name: calico-etcd-n3-peer
path: .
dest:
path: .values.nodes[3].tls.peer.cert
- src:
schema: deckhand/CertificateKey/v1
name: calico-etcd-n3-peer
path: .
dest:
path: .values.nodes[3].tls.peer.key
data:
values:
nodes:
- name: n0
- name: n1
- name: n2
- name: n3
...

View File

@ -0,0 +1,185 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-etcd
layeringDefinition:
abstract: false
layer: site
parentSelector:
name: kubernetes-etcd-global
actions:
- method: merge
path: .
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.kubernetes.etcd
dest:
path: .source
# Images
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.kubernetes.etcd
dest:
path: .values.images.tags
# IP addresses
-
src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.etcd_service_ip
dest:
path: .values.service.ip
-
src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.etcd_service_ip
dest:
path: .values.anchor.etcdctl_endpoint
# CAs
-
src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes-etcd
path: .
dest:
path: .values.secrets.tls.client.ca
-
src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes-etcd-peer
path: .
dest:
path: .values.secrets.tls.peer.ca
-
src:
schema: deckhand/Certificate/v1
name: kubernetes-etcd-anchor
path: .
dest:
path: .values.secrets.anchor.tls.cert
-
src:
schema: deckhand/CertificateKey/v1
name: kubernetes-etcd-anchor
path: .
dest:
path: .values.secrets.anchor.tls.key
# Server certs
- src:
schema: deckhand/Certificate/v1
name: kubernetes-etcd-n0
path: .
dest:
path: '.values.nodes[0].tls.client.cert'
- src:
schema: deckhand/CertificateKey/v1
name: kubernetes-etcd-n0
path: .
dest:
path: '.values.nodes[0].tls.client.key'
- src:
schema: deckhand/Certificate/v1
name: kubernetes-etcd-n0-peer
path: .
dest:
path: '.values.nodes[0].tls.peer.cert'
- src:
schema: deckhand/CertificateKey/v1
name: kubernetes-etcd-n0-peer
path: .
dest:
path: '.values.nodes[0].tls.peer.key'
- src:
schema: deckhand/Certificate/v1
name: kubernetes-etcd-n1
path: .
dest:
path: '.values.nodes[1].tls.client.cert'
- src:
schema: deckhand/CertificateKey/v1
name: kubernetes-etcd-n1
path: .
dest:
path: '.values.nodes[1].tls.client.key'
- src:
schema: deckhand/Certificate/v1
name: kubernetes-etcd-n1-peer
path: .
dest:
path: '.values.nodes[1].tls.peer.cert'
- src:
schema: deckhand/CertificateKey/v1
name: kubernetes-etcd-n1-peer
path: .
dest:
path: '.values.nodes[1].tls.peer.key'
- src:
schema: deckhand/Certificate/v1
name: kubernetes-etcd-n2
path: .
dest:
path: '.values.nodes[2].tls.client.cert'
- src:
schema: deckhand/CertificateKey/v1
name: kubernetes-etcd-n2
path: .
dest:
path: '.values.nodes[2].tls.client.key'
- src:
schema: deckhand/Certificate/v1
name: kubernetes-etcd-n2-peer
path: .
dest:
path: '.values.nodes[2].tls.peer.cert'
- src:
schema: deckhand/CertificateKey/v1
name: kubernetes-etcd-n2-peer
path: .
dest:
path: '.values.nodes[2].tls.peer.key'
- src:
schema: deckhand/Certificate/v1
name: kubernetes-etcd-n3
path: .
dest:
path: '.values.nodes[3].tls.client.cert'
- src:
schema: deckhand/CertificateKey/v1
name: kubernetes-etcd-n3
path: .
dest:
path: '.values.nodes[3].tls.client.key'
- src:
schema: deckhand/Certificate/v1
name: kubernetes-etcd-n3-peer
path: .
dest:
path: '.values.nodes[3].tls.peer.cert'
- src:
schema: deckhand/CertificateKey/v1
name: kubernetes-etcd-n3-peer
path: .
dest:
path: '.values.nodes[3].tls.peer.key'
data:
values:
nodes:
- name: n0
- name: n1
- name: n2
- name: n3
...
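
The sixteen per-node TLS substitutions above are purely mechanical. A sketch of a generator that would emit them (illustrative only; this commit writes them out by hand):

for node in 0 1 2 3; do
  for role in client peer; do
    suffix=""
    [[ "${role}" == "peer" ]] && suffix="-peer"
    for kind in cert key; do
      schema="deckhand/Certificate/v1"
      [[ "${kind}" == "key" ]] && schema="deckhand/CertificateKey/v1"
      # Emit one substitution entry per node/role/kind combination.
      printf -- '- src:\n    schema: %s\n    name: kubernetes-etcd-n%s%s\n    path: .\n  dest:\n    path: .values.nodes[%s].tls.%s.%s\n' \
        "${schema}" "${node}" "${suffix}" "${node}" "${role}" "${kind}"
    done
  done
done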

View File

@ -0,0 +1,33 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-drydock
replacement: true
layeringDefinition:
abstract: false
layer: site
parentSelector:
name: ucp-drydock
actions:
- method: merge
path: .
storagePolicy: cleartext
substitutions:
- src:
schema: deckhand/CertificateKey/v1
name: airship_drydock_kvm_ssh_key
path: .
dest:
path: .values.conf.ssh.private_key
data:
values:
manifests:
secret_ssh_key: true
conf:
drydock:
plugins:
oob_driver:
- 'drydock_provisioner.drivers.oob.pyghmi_driver.driver.PyghmiDriver'
- 'drydock_provisioner.drivers.oob.libvirt_driver.driver.LibvirtDriver'
...

View File

@ -0,0 +1,27 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-maas
replacement: true
layeringDefinition:
abstract: false
layer: site
parentSelector:
name: ucp-maas
actions:
- method: merge
path: .
storagePolicy: cleartext
substitutions:
- src:
schema: deckhand/CertificateKey/v1
name: airship_drydock_kvm_ssh_key
path: .
dest:
path: .values.conf.ssh.private_key
data:
values:
manifests:
secret_ssh_key: true
...

View File

@ -0,0 +1,123 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: os-ceph-config
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.ucp.ceph
dest:
path: .source
# Images
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.ceph.ceph
dest:
path: .values.images.tags
# IP addresses
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .storage.ceph.public_cidr
dest:
path: .values.network.public
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .storage.ceph.cluster_cidr
dest:
path: .values.network.cluster
# Endpoints
- src:
schema: pegleg/EndpointCatalogue/v1
name: ucp_endpoints
path: .ucp.identity
dest:
path: .values.endpoints.identity
- src:
schema: pegleg/EndpointCatalogue/v1
name: ucp_endpoints
path: .ceph.object_store
dest:
path: .values.endpoints.object_store
- src:
schema: pegleg/EndpointCatalogue/v1
name: ucp_endpoints
path: .ceph.ceph_mon
dest:
path: .values.endpoints.ceph_mon
- src:
schema: pegleg/EndpointCatalogue/v1
name: ucp_endpoints
path: .ceph.ceph_mgr
dest:
path: .values.endpoints.ceph_mgr
# Credentials
- src:
schema: pegleg/AccountCatalogue/v1
name: ucp_service_accounts
path: .ucp.keystone.admin
dest:
path: .values.endpoints.identity.auth.admin
- src:
schema: pegleg/AccountCatalogue/v1
name: ucp_service_accounts
path: .ceph.swift.keystone
dest:
path: .values.endpoints.identity.auth.swift
# Secrets
- dest:
path: .values.endpoints.identity.auth.admin.password
src:
schema: deckhand/Passphrase/v1
name: ucp_keystone_admin_password
path: .
- dest:
path: .values.endpoints.identity.auth.swift.password
src:
schema: deckhand/Passphrase/v1
name: ceph_swift_keystone_password
path: .
data:
chart_name: os-ceph-config
release: os-ceph-config
namespace: openstack
timeout: 3600
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
application: ceph
component: namespace-client-key-generator
values:
labels:
job:
node_selector_key: ucp-control-plane
node_selector_value: enabled
provisioner:
node_selector_key: ucp-control-plane
node_selector_value: enabled
ceph:
rgw_keystone_auth: true
deployment:
storage_secrets: false
ceph: false
rbd_provisioner: false
cephfs_provisioner: false
client_secrets: true
rgw_keystone_user_and_endpoints: false
dependencies:
- ceph_htk
...

View File

@ -0,0 +1,123 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-ceph-config
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.ucp.ceph
dest:
path: .source
# Images
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.ceph.ceph
dest:
path: .values.images.tags
# IP addresses
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .storage.ceph.public_cidr
dest:
path: .values.network.public
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .storage.ceph.cluster_cidr
dest:
path: .values.network.cluster
# Endpoints
- src:
schema: pegleg/EndpointCatalogue/v1
name: ucp_endpoints
path: .ucp.identity
dest:
path: .values.endpoints.identity
- src:
schema: pegleg/EndpointCatalogue/v1
name: ucp_endpoints
path: .ceph.object_store
dest:
path: .values.endpoints.object_store
- src:
schema: pegleg/EndpointCatalogue/v1
name: ucp_endpoints
path: .ceph.ceph_mon
dest:
path: .values.endpoints.ceph_mon
- src:
schema: pegleg/EndpointCatalogue/v1
name: ucp_endpoints
path: .ceph.ceph_mgr
dest:
path: .values.endpoints.ceph_mgr
# Credentials
- src:
schema: pegleg/AccountCatalogue/v1
name: ucp_service_accounts
path: .ucp.keystone.admin
dest:
path: .values.endpoints.identity.auth.admin
- src:
schema: pegleg/AccountCatalogue/v1
name: ucp_service_accounts
path: .ceph.swift.keystone
dest:
path: .values.endpoints.identity.auth.swift
# Secrets
- dest:
path: .values.endpoints.identity.auth.admin.password
src:
schema: deckhand/Passphrase/v1
name: ucp_keystone_admin_password
path: .
- dest:
path: .values.endpoints.identity.auth.swift.password
src:
schema: deckhand/Passphrase/v1
name: ceph_swift_keystone_password
path: .
data:
chart_name: ucp-ceph-config
release: ucp-ceph-config
namespace: ucp
timeout: 3600
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
application: ceph
component: namespace-client-key-generator
values:
labels:
job:
node_selector_key: ucp-control-plane
node_selector_value: enabled
provisioner:
node_selector_key: ucp-control-plane
node_selector_value: enabled
ceph:
rgw_keystone_auth: true
deployment:
storage_secrets: false
ceph: false
rbd_provisioner: false
cephfs_provisioner: false
client_secrets: true
rgw_keystone_user_and_endpoints: false
dependencies:
- ceph_htk
...

View File

@ -0,0 +1,277 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-ceph
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.ucp.ceph
dest:
path: .source
# Images
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.ceph.ceph
dest:
path: .values.images.tags
# IP addresses
- src:
schema: dev/Configurables/v1
name: dev-configurables
path: .hostcidr
dest:
path: .values.network.public
- src:
schema: dev/Configurables/v1
name: dev-configurables
path: .hostcidr
dest:
path: .values.network.cluster
- src:
schema: pegleg/EndpointCatalogue/v1
name: ucp_endpoints
path: .ucp.identity
dest:
path: .values.endpoints.identity
- src:
schema: pegleg/EndpointCatalogue/v1
name: ucp_endpoints
path: .ceph.object_store
dest:
path: .values.endpoints.object_store
- src:
schema: pegleg/EndpointCatalogue/v1
name: ucp_endpoints
path: .ceph.ceph_mon
dest:
path: .values.endpoints.ceph_mon
- src:
schema: pegleg/EndpointCatalogue/v1
name: ucp_endpoints
path: .ceph.ceph_mgr
dest:
path: .values.endpoints.ceph_mgr
- src:
schema: pegleg/AccountCatalogue/v1
name: ucp_service_accounts
path: .ucp.keystone.admin
dest:
path: .values.endpoints.identity.auth.admin
- src:
schema: pegleg/AccountCatalogue/v1
name: ucp_service_accounts
path: .ceph.swift.keystone
dest:
path: .values.endpoints.identity.auth.swift
# Secrets
- dest:
path: .values.endpoints.identity.auth.admin.password
src:
schema: deckhand/Passphrase/v1
name: ucp_keystone_admin_password
path: .
- dest:
path: .values.endpoints.identity.auth.swift.password
src:
schema: deckhand/Passphrase/v1
name: ceph_swift_keystone_password
path: .
data:
chart_name: ceph
release: ucp-ceph
namespace: ceph
timeout: 900
wait:
timeout: 900
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
application: ceph
component: bootstrap
- type: job
labels:
application: ceph
component: mds-keyring-generator
- type: job
labels:
application: ceph
component: mon-keyring-generator
- type: job
labels:
application: ceph
component: rgw-keyring-generator
- type: job
labels:
application: ceph
component: storage-keys-generator
- type: job
labels:
application: ceph
component: osd-keyring-generator
values:
labels:
job:
node_selector_key: ucp-control-plane
node_selector_value: enabled
provisioner:
node_selector_key: ucp-control-plane
node_selector_value: enabled
endpoints:
identity:
namespace: openstack
object_store:
namespace: ceph
ceph_mon:
namespace: ceph
ceph:
rgw_keystone_auth: true
deployment:
ceph: true
client_secrets: false
rbd_provisioner: true
cephfs_provisioner: true
rgw_keystone_user_and_endpoints: false
storage_secrets: true
bootstrap:
enabled: true
conf:
storage:
osd:
- data:
type: directory
location: /var/lib/openstack-helm/ceph/osd/osd-one
journal:
type: directory
location: /var/lib/openstack-helm/ceph/osd/journal-one
rgw_ks:
enabled: true
ceph:
global:
fsid: '88904ebb-f6fc-48b1-80ec-e1915cfa84a9'
osd_pool_default_size: 1
osd:
osd_crush_chooseleaf_type: 0
pool:
crush:
tunables: 'hammer'
target:
osd: 1
pg_per_osd: 100
default:
crush_rule: same_host
spec:
# RBD pool
- name: rbd
application: rbd
replication: 1
percent_total_data: 40
# CephFS pools
- name: cephfs_metadata
application: cephfs
replication: 1
percent_total_data: 5
- name: cephfs_data
application: cephfs
replication: 1
percent_total_data: 10
# RadosGW pools
- name: .rgw.root
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.control
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.data.root
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.gc
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.log
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.intent-log
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.meta
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.usage
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.users.keys
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.users.email
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.users.swift
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.users.uid
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.buckets.extra
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.buckets.index
application: rgw
replication: 1
percent_total_data: 3
- name: default.rgw.buckets.data
application: rgw
replication: 1
percent_total_data: 34.8
dependencies:
- ceph_htk
...
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ceph_htk
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.ucp.ceph-htk
dest:
path: .source
data:
chart_name: ceph_htk
release: ceph_htk
namespace: ceph_htk
timeout: 100
values: {}
dependencies: []
...

View File

@ -0,0 +1,26 @@
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: ucp-storage-provisioner
replacement: true
labels:
name: ucp-storage-provisioner
group: ucp
service: k8_storage_provisioning
layeringDefinition:
abstract: false
layer: site
actions:
- method: merge
path: .
parentSelector:
name: ucp-storage-provisioner
storagePolicy: cleartext
data:
description: Kubernetes Storage Provisioner
sequenced: true
chart_group:
- ucp-ceph
- ucp-ceph-config
- os-ceph-config

View File

@ -0,0 +1,235 @@
---
schema: pegleg/EndpointCatalogue/v1
metadata:
schema: metadata/Document/v1
name: ucp_endpoints
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
ucp:
identity:
namespace: ucp
name: keystone
hosts:
default: keystone-api
public: keystone
host_fqdn_override:
default: null
path:
default: /v3
scheme:
default: http
port:
admin:
default: 35357
api:
default: 80
armada:
name: armada
hosts:
default: armada-api
public: armada
port:
api:
default: 8000
path:
default: /api/v1.0
scheme:
default: http
host_fqdn_override:
default: null
deckhand:
name: deckhand
hosts:
default: deckhand-int
public: deckhand-api
port:
api:
default: 9000
path:
default: /api/v1.0
scheme:
default: http
host_fqdn_override:
default: null
postgresql:
name: postgresql
hosts:
default: postgresql
path: /DB_NAME
scheme: postgresql+psycopg2
port:
postgresql:
default: 5432
host_fqdn_override:
default: null
oslo_db:
hosts:
default: mariadb
discovery: mariadb-discovery
host_fqdn_override:
default: null
path: /DB_NAME
scheme: mysql+pymysql
port:
mysql:
default: 3306
wsrep:
default: 4567
key_manager:
name: barbican
hosts:
default: barbican-api
public: barbican
host_fqdn_override:
default: null
path:
default: /v1
scheme:
default: http
port:
api:
default: 9311
public: 80
oslo_messaging:
namespace: null
hosts:
default: rabbitmq
host_fqdn_override:
default: null
path: /openstack
scheme: rabbit
port:
amqp:
default: 5672
oslo_cache:
hosts:
default: memcached
host_fqdn_override:
default: null
port:
memcache:
default: 11211
physicalprovisioner:
name: drydock
hosts:
default: drydock-api
port:
api:
default: 9000
nodeport: 31900
path:
default: /api/v1.0
scheme:
default: http
host_fqdn_override:
default: null
maas_region_ui:
name: maas-region-ui
hosts:
default: maas-region-ui
public: maas
path:
default: /MAAS
scheme:
default: "http"
port:
region_ui:
default: 80
public: 80
host_fqdn_override:
default: null
kubernetesprovisioner:
name: promenade
hosts:
default: promenade-api
port:
api:
default: 80
path:
default: /api/v1.0
scheme:
default: http
host_fqdn_override:
default: null
shipyard:
name: shipyard
hosts:
default: shipyard-int
public: shipyard-api
port:
api:
default: 9000
public: 80
path:
default: /api/v1.0
scheme:
default: http
host_fqdn_override:
default: null
airflow_web:
name: airflow-web
hosts:
default: airflow-web-int
public: airflow-web
port:
airflow_web:
default: 8080
path:
default: /
scheme:
default: http
host_fqdn_override:
default: null
airflow_flower:
name: airflow-flower
hosts:
default: airflow-flower
port:
airflow_flower:
default: 5555
path:
default: /
scheme:
default: http
host_fqdn_override:
default: null
ceph:
object_store:
name: swift
namespace: ceph
hosts:
default: ceph-rgw
host_fqdn_override:
default: null
path:
default: /swift/v1
scheme:
default: http
port:
api:
default: 8088
ceph_mon:
namespace: ceph
hosts:
default: ceph-mon
discovery: ceph-mon-discovery
host_fqdn_override:
default: null
port:
mon:
default: 6789
ceph_mgr:
namespace: ceph
hosts:
default: ceph-mgr
host_fqdn_override:
default: null
port:
mgr:
default: 7000
scheme:
default: http
...
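
As an example of how these entries resolve: Drydock's API is published on nodeport 31900 above, so once the control plane is up it should answer on the genesis IP from common-addresses. The health path below is an assumption based on the common UCP API convention, not something defined in this catalogue:

curl -sf http://172.24.1.10:31900/api/v1.0/health && echo "drydock is up"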

View File

@ -0,0 +1,124 @@
---
schema: pegleg/AccountCatalogue/v1
metadata:
schema: metadata/Document/v1
name: ucp_service_accounts
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
ucp:
postgres:
admin:
username: postgres
oslo_db:
admin:
username: root
oslo_messaging:
admin:
username: rabbitmq
keystone:
admin:
region_name: RegionOne
username: admin
project_name: admin
user_domain_name: default
project_domain_name: default
oslo_messaging:
admin:
username: rabbitmq
keystone:
username: keystone
oslo_db:
username: keystone
database: keystone
promenade:
keystone:
region_name: RegionOne
role: admin
project_name: service
project_domain_name: default
user_domain_name: default
username: promenade
drydock:
keystone:
region_name: RegionOne
role: admin
project_name: service
project_domain_name: default
user_domain_name: default
username: drydock
postgres:
username: drydock
database: drydock
shipyard:
keystone:
region_name: RegionOne
role: admin
project_name: service
project_domain_name: default
user_domain_name: default
username: shipyard
postgres:
username: shipyard
database: shipyard
airflow:
postgres:
username: airflow
database: airflow
oslo_messaging:
username: rabbitmq
maas:
admin:
username: admin
email: none@none
postgres:
username: maas
database: maasdb
barbican:
keystone:
region_name: RegionOne
role: admin
project_name: service
project_domain_name: default
user_domain_name: default
username: barbican
oslo_db:
username: barbican
database: barbican
oslo_messaging:
admin:
username: rabbitmq
keystone:
username: keystone
armada:
keystone:
project_domain_name: default
user_domain_name: default
project_name: service
region_name: RegionOne
role: admin
username: armada
deckhand:
keystone:
region_name: RegionOne
role: admin
project_name: service
project_domain_name: default
user_domain_name: default
username: deckhand
postgres:
username: deckhand
database: deckhand
ceph:
swift:
keystone:
role: admin
region_name: RegionOne
username: swift
project_name: service
user_domain_name: default
project_domain_name: default
...

View File

@ -0,0 +1,47 @@
---
schema: pegleg/SoftwareVersions/v1
metadata:
schema: metadata/Document/v1
name: software-versions
replacement: true
layeringDefinition:
abstract: false
layer: site
parentSelector:
name: software-versions
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
# TODO(sh8121att) - Update all of these when PS are merged
charts:
ucp:
shipyard:
location: https://git.openstack.org/openstack/airship-shipyard
reference: master
shipyard-htk:
location: https://git.openstack.org/openstack/openstack-helm-infra
reference: master
drydock:
location: https://git.openstack.org/openstack/airship-drydock
reference: e35712a57318cb58b7e8c7f55181031d73e89aa5
maas:
location: https://git.openstack.org/openstack/airship-maas
reference: refs/changes/65/577565/9
images:
ucp:
drydock:
drydock: docker.io/sthussey/drydock:fixesv1
drydock_db_sync: docker.io/sthussey/drydock:fixesv1
shipyard:
airflow: quay.io/airshipit/airflow:master
shipyard: quay.io/airshipit/shipyard:master
shipyard_db_sync: quay.io/airshipit/shipyard:master
airflow_db_sync: quay.io/airshipit/airflow:master
maas:
maas_rack: docker.io/sthussey/maas-rack-controller:lvfixv2
deckhand:
deckhand: quay.io/airshipit/deckhand:84ab5c5096826a6382d224271c75adee8d10e742
db_sync: quay.io/airshipit/deckhand:84ab5c5096826a6382d224271c75adee8d10e742
...

View File

@ -0,0 +1 @@
config-ssh

View File

@ -0,0 +1,24 @@
#!/usr/bin/env bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
SCRIPT_DIR=$(realpath "$(dirname "$0")")
WORKSPACE=$(realpath "${SCRIPT_DIR}/../../..")
GATE_UTILS=${WORKSPACE}/multi_nodes_gate/airship_gate/lib/all.sh
source "${GATE_UTILS}"
exec rsync -e "ssh -F ${SSH_CONFIG_DIR}/config" "$@"

View File

@ -0,0 +1,24 @@
#!/usr/bin/env bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
SCRIPT_DIR=$(realpath "$(dirname "$0")")
WORKSPACE=$(realpath "${SCRIPT_DIR}/../../..")
GATE_UTILS=${WORKSPACE}/multi_nodes_gate/airship_gate/lib/all.sh
source "${GATE_UTILS}"
exec scp -F "${SSH_CONFIG_DIR}/config" "$@"

View File

@ -0,0 +1,24 @@
#!/usr/bin/env bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
SCRIPT_DIR=$(realpath "$(dirname "$0")")
WORKSPACE=$(realpath "${SCRIPT_DIR}/../../..")
GATE_UTILS=${WORKSPACE}/multi_nodes_gate/airship_gate/lib/all.sh
source "${GATE_UTILS}"
exec ssh -F "${SSH_CONFIG_DIR}/config" "$@"

View File

@ -0,0 +1,81 @@
#!/bin/bash
shipyard_cmd() {
set -o pipefail
ssh_cmd "${GENESIS_NAME}" docker run -t --network host -v "${GENESIS_WORK_DIR}:/work" -e OS_AUTH_URL=http://keystone.ucp.svc.cluster.local:80/v3 -e OS_USERNAME=shipyard -e OS_USER_DOMAIN_NAME=default -e OS_PASSWORD="${SHIPYARD_PASSWORD}" -e OS_PROJECT_DOMAIN_NAME=default -e OS_PROJECT_NAME=service "${IMAGE_SHIPYARD_CLI}" "$@" 2>&1 | tee -a "${LOG_FILE}"
set +o pipefail
}
# Create a shipyard action
# and poll until completion
shipyard_action_wait() {
action=$1
timeout=${2:-3600}
poll_time=${3:-60}
if [[ $action == "update_site" ]]
then
options="--allow-intermediate-commits"
else
options=""
fi
end_time=$(date -d "+${timeout} seconds" +%s)
log "Starting Shipyard action ${action}, will timeout in ${timeout} seconds."
ACTION_ID=$(shipyard_cmd create action ${options} "${action}")
ACTION_ID=$(echo "${ACTION_ID}" | grep -oE 'action/[0-9A-Z]+')
while true;
do
if [[ $(date +%s) -ge ${end_time} ]]
then
log "Shipyard action ${action} did not complete in ${timeout} seconds."
return 2
fi
RESULT=$(shipyard_cmd --output-format=raw describe "${ACTION_ID}")
ACTION_STATUS=$(echo "${RESULT}" | jq -r '.action_lifecycle')
ACTION_RESULT=$(echo "${RESULT}" | jq -r '.dag_status')
if [[ "${ACTION_STATUS}" == "Complete" ]]
then
if [[ "${ACTION_RESULT}" == "success" ]]
then
log "Shipyard action ${action} success!"
return 0
else
log "Shipyard action ${action} completed with result ${ACTION_RESULT}"
echo "${RESULT}" | jq >> "${LOG_FILE}"
return 1
fi
else
sleep "${poll_time}"
fi
done
}
# Re-use the ssh key from ssh-config
# for MAAS-deployed nodes
collect_ssh_key() {
mkdir -p "${GATE_DEPOT}"
if [[ ! -r ${SSH_CONFIG_DIR}/id_rsa.pub ]]
then
ssh_keypair_declare
fi
# The document body must be indented so the generated file is valid YAML
# that Deckhand can parse.
cat << EOF > "${GATE_DEPOT}/airship_ubuntu_ssh_key.yaml"
---
schema: deckhand/Certificate/v1
metadata:
  schema: metadata/Document/v1
  name: ubuntu_ssh_key
  layeringDefinition:
    layer: site
    abstract: false
  storagePolicy: cleartext
data: |-
EOF
sed -e 's/^/ /' "${SSH_CONFIG_DIR}/id_rsa.pub" >> "${GATE_DEPOT}/airship_ubuntu_ssh_key.yaml"
}
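
Typical usage of shipyard_action_wait above from a gate stage (a sketch; the action name and timeout values are illustrative):

if shipyard_action_wait deploy_site 7200 60; then
  log "deploy_site succeeded"
else
  log "deploy_site failed or timed out"
fi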

View File

@ -0,0 +1,20 @@
set -e
LIB_DIR=$(realpath "$(dirname "${BASH_SOURCE[0]}")")
REPO_ROOT=$(realpath "$(dirname "${BASH_SOURCE[0]}")/../../../..")
source "$LIB_DIR"/config.sh
source "$LIB_DIR"/const.sh
source "$LIB_DIR"/docker.sh
source "$LIB_DIR"/kube.sh
source "$LIB_DIR"/log.sh
source "$LIB_DIR"/nginx.sh
source "$LIB_DIR"/promenade.sh
source "$LIB_DIR"/registry.sh
source "$LIB_DIR"/ssh.sh
source "$LIB_DIR"/virsh.sh
source "$LIB_DIR"/airship.sh
if [[ -v GATE_DEBUG && ${GATE_DEBUG} = "1" ]]; then
set -x
fi

View File

@ -0,0 +1,63 @@
export TEMP_DIR=${TEMP_DIR:-$(mktemp -d)}
export DEFINITION_DEPOT="${TEMP_DIR}/site_yaml/"
export CERT_DEPOT="${TEMP_DIR}/cert_yaml/"
export GATE_DEPOT="${TEMP_DIR}/gate_yaml/"
export SCRIPT_DEPOT="${TEMP_DIR}/scripts/"
export GENESIS_WORK_DIR=${GENESIS_WORK_DIR:-/work/}
export BASE_IMAGE_SIZE=${BASE_IMAGE_SIZE:-68719476736}
export BASE_IMAGE_URL=${BASE_IMAGE_URL:-https://cloud-images.ubuntu.com/releases/xenial/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img}
export IMAGE_PROMENADE_CLI=${IMAGE_PROMENADE_CLI:-quay.io/airshipit/promenade:master}
export IMAGE_PEGLEG_CLI=${IMAGE_PEGLEG_CLI:-quay.io/airshipit/pegleg:master}
export IMAGE_SHIPYARD_CLI=${IMAGE_SHIPYARD_CLI:-quay.io/airshipit/shipyard:master}
export PROMENADE_DEBUG=${PROMENADE_DEBUG:-0}
export SHIPYARD_PASSWORD=${SHIPYARD_OS_PASSWORD:-password18}
export REGISTRY_DATA_DIR=${REGISTRY_DATA_DIR:-/mnt/registry}
export VIRSH_POOL=${VIRSH_POOL:-airship}
export VIRSH_POOL_PATH=${VIRSH_POOL_PATH:-/var/lib/libvirt/airship}
config_vm_memory() {
nodename=${1}
jq -cr ".vm.${nodename}.memory" < "${GATE_MANIFEST}"
}
config_vm_names() {
jq -cr '.vm | keys | join(" ")' < "${GATE_MANIFEST}"
}
config_vm_ip() {
nodename=${1}
jq -cr ".vm.${nodename}.ip" < "${GATE_MANIFEST}"
}
config_vm_mac() {
nodename=${1}
jq -cr ".vm.${nodename}.mac" < "${GATE_MANIFEST}"
}
config_vm_vcpus() {
nodename=${1}
jq -cr ".vm.${nodename}.vcpus" < "${GATE_MANIFEST}"
}
config_vm_bootstrap() {
nodename=${1}
val=$(jq -cr ".vm.${nodename}.bootstrap" < "${GATE_MANIFEST}")
if [[ "${val}" == "true" ]]
then
echo "true"
else
echo "false"
fi
}
config_pegleg_primary_repo() {
jq -cr ".configuration.primary_repo" < "${GATE_MANIFEST}"
}
config_pegleg_sitename() {
jq -cr ".configuration.site" < "${GATE_MANIFEST}"
}
config_pegleg_aux_repos() {
jq -cr '.configuration.aux_repos | join(" ")' < "${GATE_MANIFEST}"
}
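
These helpers assume GATE_MANIFEST points at a JSON manifest shaped like the sketch below. The field names are inferred from the jq paths above; the n1 MAC and the IPs come from the site documents in this commit, while the n0 MAC and the sizing values are made up for illustration:

export GATE_MANIFEST=/tmp/multinode-gate.json
cat << 'EOF' > "${GATE_MANIFEST}"
{
  "configuration": {
    "site": "gate-multinode",
    "primary_repo": "deployment_files",
    "aux_repos": []
  },
  "vm": {
    "n0": {"memory": 16384, "vcpus": 4, "mac": "52:54:00:00:00:10", "ip": "172.24.1.10", "bootstrap": true},
    "n1": {"memory": 4096, "vcpus": 2, "mac": "52:54:00:00:a3:31", "ip": "172.24.1.11", "bootstrap": false}
  }
}
EOF
config_vm_names   # -> n0 n1
config_vm_ip n1   # -> 172.24.1.11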

View File

@ -0,0 +1,4 @@
export GENESIS_NAME=n0
export SSH_CONFIG_DIR=${WORKSPACE}/multi_nodes_gate/airship_gate/config-ssh
export TEMPLATE_DIR=${WORKSPACE}/multi_nodes_gate/airship_gate/templates
export XML_DIR=${WORKSPACE}/multi_nodes_gate/airship_gate/xml

View File

@ -0,0 +1,26 @@
docker_ps() {
VIA="${1}"
ssh_cmd "${VIA}" docker ps -a
}
docker_info() {
VIA="${1}"
ssh_cmd "${VIA}" docker info 2>&1
}
docker_exited_containers() {
VIA="${1}"
ssh_cmd "${VIA}" docker ps -q --filter "status=exited"
}
docker_inspect() {
VIA="${1}"
CONTAINER_ID="${2}"
ssh_cmd "${VIA}" docker inspect "${CONTAINER_ID}"
}
docker_logs() {
VIA="${1}"
CONTAINER_ID="${2}"
ssh_cmd "${VIA}" docker logs "${CONTAINER_ID}"
}
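
A typical diagnostic sweep built from these helpers (illustrative): dump the logs of every exited container on a node into the gate log.

for container in $(docker_exited_containers "${GENESIS_NAME}"); do
  log "Logs for exited container ${container}:"
  docker_logs "${GENESIS_NAME}" "${container}" >> "${LOG_FILE}"
done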

View File

@ -0,0 +1,44 @@
kubectl_apply() {
VIA=${1}
FILE=${2}
ssh_cmd "${VIA}" "cat ${FILE} | kubectl apply -f -"
}
kubectl_cmd() {
VIA=${1}
shift
ssh_cmd "${VIA}" kubectl "${@}"
}
kubectl_wait_for_pod() {
VIA=${1}
NAMESPACE=${2}
POD_NAME=${3}
SEC=${4:-600}
log Waiting "${SEC}" seconds for termination of pod "${POD_NAME}"
POD_PHASE_JSONPATH='{.status.phase}'
end=$(($(date +%s) + SEC))
while true; do
POD_PHASE=$(kubectl_cmd "${VIA}" --request-timeout 10s --namespace "${NAMESPACE}" get -o jsonpath="${POD_PHASE_JSONPATH}" pod "${POD_NAME}")
if [[ ${POD_PHASE} = "Succeeded" ]]; then
log Pod "${POD_NAME}" succeeded.
break
elif [[ $POD_PHASE = "Failed" ]]; then
log Pod "${POD_NAME}" failed.
kubectl_cmd "${VIA}" --request-timeout 10s --namespace "${NAMESPACE}" get -o yaml pod "${POD_NAME}" 1>&2
exit 1
else
now=$(date +%s)
if [[ $now -gt $end ]]; then
log Pod did not terminate before timeout.
kubectl_cmd "${VIA}" --request-timeout 10s --namespace "${NAMESPACE}" get -o yaml pod "${POD_NAME}" 1>&2
exit 1
fi
sleep 1
fi
done
}

View File

@ -0,0 +1,76 @@
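# Logging helpers: GATE_COLOR=1 enables ANSI colors; all messages are also
# appended to LOG_FILE (selected at the bottom of this file).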
if [[ -v GATE_COLOR && ${GATE_COLOR} = "1" ]]; then
C_CLEAR="\e[0m"
C_ERROR="\e[38;5;160m"
C_HEADER="\e[38;5;164m"
C_HILIGHT="\e[38;5;27m"
C_MUTE="\e[38;5;238m"
C_SUCCESS="\e[38;5;46m"
C_TEMP="\e[38;5;226m"
else
C_CLEAR=""
C_ERROR=""
C_HEADER=""
C_HILIGHT=""
C_MUTE=""
C_SUCCESS=""
C_TEMP=""
fi
log() {
d=$(date --utc)
echo -e "${C_MUTE}${d}${C_CLEAR} ${*}" 1>&2
echo -e "${d} ${*}" >> "${LOG_FILE}"
}
log_warn() {
d=$(date --utc)
echo -e "${C_MUTE}${d}${C_CLEAR} ${C_HILIGHT}WARN${C_CLEAR} ${*}" 1>&2
echo -e "${d} ${*}" >> "${LOG_FILE}"
}
log_stage_diagnostic_header() {
echo -e " ${C_ERROR}= Diagnostic Report =${C_CLEAR}"
}
log_color_reset() {
echo -e "${C_CLEAR}"
}
log_huge_success() {
echo -e "${C_SUCCESS}=== HUGE SUCCESS ===${C_CLEAR}"
}
log_note() {
echo -e "${C_HILIGHT}NOTE:${C_CLEAR} ${*}"
}
log_stage_error() {
NAME=${1}
echo -e " ${C_ERROR}== Error in stage ${C_HILIGHT}${NAME}${C_ERROR} ( ${C_TEMP}${LOG_FILE}${C_ERROR} ) ==${C_CLEAR}"
}
log_stage_footer() {
NAME=${1}
echo -e "${C_HEADER}=== Finished stage ${C_HILIGHT}${NAME}${C_HEADER} ===${C_CLEAR}"
}
log_stage_header() {
NAME=${1}
echo -e "${C_HEADER}=== Executing stage ${C_HILIGHT}${NAME}${C_HEADER} ===${C_CLEAR}"
}
log_stage_success() {
echo -e " ${C_SUCCESS}== Stage Success ==${C_CLEAR}"
}
log_temp_dir() {
echo -e "Working in ${C_TEMP}${TEMP_DIR}${C_CLEAR}"
}
if [[ -v GATE_DEBUG && ${GATE_DEBUG} = "1" ]]; then
export LOG_FILE=/dev/stderr
elif [[ -v TEMP_DIR ]]; then
export LOG_FILE=${TEMP_DIR}/gate.log
else
export LOG_FILE=/dev/null
fi

View File

@ -0,0 +1,35 @@
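# Manage a local nginx container that serves cached copies of tar_url
# artifacts to the gate VMs.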
nginx_down() {
REGISTRY_ID=$(docker ps -qa -f name=promenade-nginx)
if [ "x${REGISTRY_ID}" != "x" ]; then
log Removing nginx server
docker rm -fv "${REGISTRY_ID}" &>> "${LOG_FILE}"
fi
}
nginx_up() {
log Starting nginx server to serve configuration files
mkdir -p "${NGINX_DIR}"
docker run -d \
-p 7777:80 \
--restart=always \
--name promenade-nginx \
-v "${TEMP_DIR}/nginx:/usr/share/nginx/html:ro" \
nginx:stable &>> "${LOG_FILE}"
}
nginx_cache_and_replace_tar_urls() {
log "Finding tar_url options to cache..."
TAR_NUM=0
mkdir -p "${NGINX_DIR}"
for file in "$@"; do
# NOTE(mark-burnett): Does not yet ignore repeated files.
# Read via process substitution rather than a pipeline so the TAR_NUM
# increments are not lost in a subshell between files.
while read -r tar_url ; do
DEST_PATH="${NGINX_DIR}/cached-tar-${TAR_NUM}.tgz"
log "Caching ${tar_url} in file: ${DEST_PATH}"
REPLACEMENT_URL="${NGINX_URL}/cached-tar-${TAR_NUM}.tgz"
curl -Lo "${DEST_PATH}" "${tar_url}"
sed -i "s;${tar_url};${REPLACEMENT_URL};" "${file}"
TAR_NUM=$((TAR_NUM + 1))
done < <(grep -Po "^ +tar_url: \K.+$" "${file}")
done
}

View File

@ -0,0 +1,15 @@
promenade_health_check() {
VIA=${1}
log "Checking Promenade API health"
MAX_HEALTH_ATTEMPTS=6
for attempt in $(seq ${MAX_HEALTH_ATTEMPTS}); do
if ssh_cmd "${VIA}" curl -v --fail "${PROMENADE_BASE_URL}/api/v1.0/health"; then
log "Promenade API healthy"
break
elif [[ $attempt == "${MAX_HEALTH_ATTEMPTS}" ]]; then
log "Promenade health check failed, max retries (${MAX_HEALTH_ATTEMPTS}) exceeded."
exit 1
fi
sleep 10
done
}

View File

@ -0,0 +1,74 @@
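# Manage the local Docker registry (localhost:5000) used as an image cache.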
registry_down() {
REGISTRY_ID=$(docker ps -qa -f name=registry)
if [[ ! -z ${REGISTRY_ID} ]]; then
log Removing docker registry
docker rm -fv "${REGISTRY_ID}" &>> "${LOG_FILE}"
fi
}
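# Scan the collected site YAML for image references of the form
# [registry[:port]/]repo[/subrepo...]:tag, skipping comments and anything
# already pointing at the local cache.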
registry_list_images() {
FILES=($(find ${DEFINITION_DEPOT} -type f -name '*.yaml'))
HOSTNAME_REGEX='[a-zA-Z0-9][a-zA-Z0-9_-]{0,62}'
DOMAIN_NAME_REGEX="${HOSTNAME_REGEX}(\.${HOSTNAME_REGEX})*"
PORT_REGEX='[0-9]+'
NETLOC_REGEX="${DOMAIN_NAME_REGEX}(:${PORT_REGEX})?"
REPO_COMPONENT_REGEX='[a-zA-Z0-9][a-zA-Z0-9_-]{0,62}'
REPO_REGEX="${REPO_COMPONENT_REGEX}(/${REPO_COMPONENT_REGEX})*"
TAG_REGEX='[a-zA-Z0-9][a-zA-Z0-9.-]{0,127}'
cat "${FILES[@]}" \
| grep -v '^ *#' \
| tr ' \t' '\n' | tr -s '\n' \
| grep -E "^(${NETLOC_REGEX}/)?${REPO_REGEX}:${TAG_REGEX}$" \
| sort -u \
| grep -v 'registry:5000'
}
registry_populate() {
log Validating local registry is populated
for image in $(registry_list_images); do
if [[ ${image} =~ promenade ]]; then
continue
fi
if [[ ${image} =~ .*:(latest|master) ]] || ! docker pull "localhost:5000/${image}" &> /dev/null; then
log Loading image "${image}" into local registry
{
docker pull "${image}"
docker tag "${image}" "localhost:5000/${image}"
docker push "localhost:5000/${image}"
} &>> "${LOG_FILE}" || echo "Failed to cache ${image}"
fi
done
}
registry_replace_references() {
FILES=(${@})
for image in $(registry_list_images); do
sed -i "s;${image}\$;registry:5000/${image};g" "${FILES[@]}"
done
}
registry_up() {
log Validating local registry is up
REGISTRY_ID=$(docker ps -qa -f name=registry)
RUNNING_REGISTRY_ID=$(docker ps -q -f name=registry)
if [[ -z ${RUNNING_REGISTRY_ID} && ! -z ${REGISTRY_ID} ]]; then
log Removing stopped docker registry
docker rm -fv "${REGISTRY_ID}" &>> "${LOG_FILE}"
fi
if [[ -z ${RUNNING_REGISTRY_ID} ]]; then
log Starting docker registry
docker run -d \
-p 5000:5000 \
-e REGISTRY_HTTP_ADDR=0.0.0.0:5000 \
--restart=always \
--name registry \
-v "${REGISTRY_DATA_DIR}:/var/lib/registry" \
registry:2 &>> "${LOG_FILE}"
fi
}

View File

@ -0,0 +1,6 @@
IdentityFile ${SSH_CONFIG_DIR}/id_rsa
LogLevel QUIET
StrictHostKeyChecking no
User root
UserKnownHostsFile /dev/null

View File

@ -0,0 +1,60 @@
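# SSH and rsync helpers driven by the config generated in ${SSH_CONFIG_DIR}.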
rsync_cmd() {
rsync -e "ssh -F ${SSH_CONFIG_DIR}/config" "${@}"
}
ssh_cmd() {
HOST=${1}
shift
args=$(shell-quote -- "${@}")
if [[ -v GATE_DEBUG && ${GATE_DEBUG} = "1" ]]; then
ssh -F "${SSH_CONFIG_DIR}/config" -v "${HOST}" "${args}"
else
ssh -F "${SSH_CONFIG_DIR}/config" "${HOST}" "${args}"
fi
}
ssh_config_declare() {
log Creating SSH config
env -i \
"SSH_CONFIG_DIR=${SSH_CONFIG_DIR}" \
envsubst < "${TEMPLATE_DIR}/ssh-config-global.sub" > "${SSH_CONFIG_DIR}/config"
for n in $(config_vm_names)
do
env -i \
"SSH_CONFIG_DIR=${SSH_CONFIG_DIR}" \
"SSH_NODE_HOSTNAME=${n}" \
"SSH_NODE_IP=$(config_vm_ip ${n})" \
envsubst < "${TEMPLATE_DIR}/ssh-config-node.sub" >> "${SSH_CONFIG_DIR}/config"
if [[ "$(config_vm_bootstrap ${n})" == "true" ]]
then
echo " User root" >> "${SSH_CONFIG_DIR}/config"
else
echo " User ubuntu" >> "${SSH_CONFIG_DIR}/config"
fi
done
}
ssh_keypair_declare() {
log Validating SSH keypair exists
if [ ! -s "${SSH_CONFIG_DIR}/id_rsa" ]; then
log Generating SSH keypair
ssh-keygen -N '' -f "${SSH_CONFIG_DIR}/id_rsa" &>> "${LOG_FILE}"
fi
}
ssh_load_pubkey() {
cat "${SSH_CONFIG_DIR}/id_rsa.pub"
}
ssh_setup_declare() {
mkdir -p "${SSH_CONFIG_DIR}"
ssh_keypair_declare
ssh_config_declare
}
ssh_wait() {
NAME=${1}
while ! ssh_cmd "${NAME}" /bin/true; do
sleep 0.5
done
}

View File

@ -0,0 +1,300 @@
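# libvirt helpers: base image and cloud-init ISO volumes, the gate network,
# storage pool, and VM lifecycle management.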
img_base_declare() {
log Validating base image exists
if ! virsh vol-key --pool "${VIRSH_POOL}" --vol airship-gate-base.img > /dev/null; then
log Installing base image from "${BASE_IMAGE_URL}"
cd "${TEMP_DIR}"
curl -q -L -o base.img "${BASE_IMAGE_URL}"
{
virsh vol-create-as \
--pool "${VIRSH_POOL}" \
--name airship-gate-base.img \
--format qcow2 \
--capacity "${BASE_IMAGE_SIZE}" \
--prealloc-metadata
virsh vol-upload \
--vol airship-gate-base.img \
--file base.img \
--pool "${VIRSH_POOL}"
} &>> "${LOG_FILE}"
fi
}
iso_gen() {
NAME=${1}
if virsh vol-key --pool "${VIRSH_POOL}" --vol "cloud-init-${NAME}.iso" &> /dev/null; then
log Removing existing cloud-init ISO for "${NAME}"
virsh vol-delete \
--pool "${VIRSH_POOL}" \
--vol "cloud-init-${NAME}.iso" &>> "${LOG_FILE}"
fi
log "Creating cloud-init ISO for ${NAME}"
ISO_DIR=${TEMP_DIR}/iso/${NAME}
mkdir -p "${ISO_DIR}"
cd "${ISO_DIR}"
BR_IP_NODE=$(config_vm_ip "${NAME}")
SSH_PUBLIC_KEY=$(ssh_load_pubkey)
export BR_IP_NODE
export NAME
export SSH_PUBLIC_KEY
envsubst < "${TEMPLATE_DIR}/user-data.sub" > user-data
envsubst < "${TEMPLATE_DIR}/meta-data.sub" > meta-data
envsubst < "${TEMPLATE_DIR}/network-config.sub" > network-config
{
genisoimage \
-V cidata \
-input-charset utf-8 \
-joliet \
-rock \
-o cidata.iso \
meta-data \
network-config \
user-data
virsh vol-create-as \
--pool "${VIRSH_POOL}" \
--name "cloud-init-${NAME}.iso" \
--capacity "$(stat -c %s "${ISO_DIR}/cidata.iso")" \
--format raw
virsh vol-upload \
--pool "${VIRSH_POOL}" \
--vol "cloud-init-${NAME}.iso" \
--file "${ISO_DIR}/cidata.iso"
} &>> "${LOG_FILE}"
}
iso_path() {
NAME=${1}
echo "${TEMP_DIR}/iso/${NAME}/cidata.iso"
}
net_clean() {
if virsh net-list --name | grep ^airship_gate$ > /dev/null; then
log Destroying UCP gate network
virsh net-destroy "${XML_DIR}/network.xml" &>> "${LOG_FILE}"
fi
}
net_declare() {
if ! virsh net-list --name | grep ^airship_gate$ > /dev/null; then
log Creating UCP gate network
virsh net-create "${XML_DIR}/network.xml" &>> "${LOG_FILE}"
fi
}
pool_declare() {
log Validating virsh pool setup
if ! virsh pool-uuid "${VIRSH_POOL}" &> /dev/null; then
log Creating pool "${VIRSH_POOL}"
virsh pool-create-as --name "${VIRSH_POOL}" --type dir --target "${VIRSH_POOL_PATH}" &>> "${LOG_FILE}"
fi
}
vm_clean() {
NAME=${1}
if virsh list --name | grep "${NAME}" &> /dev/null; then
virsh destroy "${NAME}" &>> "${LOG_FILE}"
fi
if virsh list --name --all | grep "${NAME}" &> /dev/null; then
log Removing VM "${NAME}"
virsh undefine --remove-all-storage --domain "${NAME}" &>> "${LOG_FILE}"
fi
}
vm_clean_all() {
log Removing all VMs in parallel
VM_NAMES=($(config_vm_names))
for NAME in ${VM_NAMES[*]}
do
vm_clean "${NAME}" &
done
wait
}
vm_create() {
NAME=${1}
MAC_ADDRESS=$(config_vm_mac "${NAME}")
DISK_OPTS="bus=virtio,cache=directsync,discard=unmap,format=qcow2"
vol_create_root "${NAME}"
wait
if [[ "$(config_vm_bootstrap ${NAME})" == "true" ]]; then
iso_gen "${NAME}"
wait
log Creating VM "${NAME}" and bootstrapping the boot drive
virt-install \
--name "${NAME}" \
--virt-type kvm \
--cpu host \
--graphics vnc,listen=0.0.0.0 \
--noautoconsole \
--network "network=airship_gate,model=virtio" \
--mac="${MAC_ADDRESS}" \
--vcpus "$(config_vm_vcpus ${NAME})" \
--memory "$(config_vm_memory ${NAME})" \
--import \
--disk "vol=${VIRSH_POOL}/airship-gate-${NAME}.img,${DISK_OPTS}" \
--disk "vol=${VIRSH_POOL}/cloud-init-${NAME}.iso,device=cdrom" &>> "${LOG_FILE}"
ssh_wait "${NAME}"
ssh_cmd "${NAME}" sync
else
log Creating VM "${NAME}"
virt-install \
--name "${NAME}" \
--virt-type kvm \
--cpu host \
--graphics vnc,listen=0.0.0.0 \
--noautoconsole \
--network "network=airship_gate,model=virtio" \
--mac="${MAC_ADDRESS}" \
--vcpus "$(config_vm_vcpus ${NAME})" \
--memory "$(config_vm_memory ${NAME})" \
--import \
--disk "vol=${VIRSH_POOL}/airship-gate-${NAME}.img,${DISK_OPTS}" &>> "${LOG_FILE}"
fi
}
vm_create_all() {
log Starting all VMs
VM_NAMES=($(config_vm_names))
for name in ${VM_NAMES[*]}
do
vm_create "${name}"
if [[ "$(config_vm_bootstrap ${name})" == "true" ]]
then
vm_validate "${name}"
fi
done
}
vm_start() {
NAME=${1}
log Starting VM "${NAME}"
virsh start "${NAME}" &>> "${LOG_FILE}"
ssh_wait "${NAME}"
}
vm_stop() {
NAME=${1}
log Stopping VM "${NAME}"
virsh destroy "${NAME}" &>> "${LOG_FILE}"
}
vm_stop_non_genesis() {
log Stopping all non-genesis VMs in parallel
for NAME in $(config_non_genesis_vms); do
vm_stop "${NAME}" &
done
wait
}
vm_restart_all() {
for NAME in $(config_vm_names); do
vm_stop "${NAME}" &
done
wait
for NAME in $(config_vm_names); do
vm_start "${NAME}" &
done
wait
}
vm_validate() {
NAME=${1}
if ! virsh list --name | grep "${NAME}" &> /dev/null; then
log VM "${NAME}" did not start correctly.
exit 1
fi
}
vol_create_root() {
NAME=${1}
if virsh vol-list --pool "${VIRSH_POOL}" | grep "airship-gate-${NAME}.img" &> /dev/null; then
log Deleting previous volume "airship-gate-${NAME}.img"
virsh vol-delete --pool "${VIRSH_POOL}" "airship-gate-${NAME}.img" &>> "${LOG_FILE}"
fi
log Creating root volume for "${NAME}"
if [[ "$(config_vm_bootstrap ${NAME})" == "true" ]]; then
virsh vol-create-as \
--pool "${VIRSH_POOL}" \
--name "airship-gate-${NAME}.img" \
--capacity 64G \
--format qcow2 \
--backing-vol 'airship-gate-base.img' \
--backing-vol-format qcow2 &>> "${LOG_FILE}"
else
virsh vol-create-as \
--pool "${VIRSH_POOL}" \
--name "airship-gate-${NAME}.img" \
--capacity 64G \
--format qcow2 &>> "${LOG_FILE}"
fi
}
#Find the correct group name for libvirt access
get_libvirt_group() {
grep -oE '^libvirtd?:' /etc/group | tr -d ':'
}
# Make a user 'virtmgr' if it does not exist and add it to the libvirt group
make_virtmgr_account() {
for libvirt_group in $(get_libvirt_group)
do
if [[ -z "$(grep -oE '^virtmgr:' /etc/passwd)" ]]
then
sudo useradd -m -s /bin/sh -g "${libvirt_group}" virtmgr
else
sudo usermod -g "${libvirt_group}" virtmgr
fi
done
}
# Generate a new keypair
gen_libvirt_key() {
log Removing any existing virtmgr SSH keys
sudo rm -rf ~virtmgr/.ssh
log Generating new SSH keypair for virtmgr
sudo mkdir -p ~virtmgr/.ssh
sudo ssh-keygen -N '' -b 2048 -t rsa -f ~virtmgr/.ssh/airship_gate &>> "${LOG_FILE}"
}
# Install private key into site definition
install_libvirt_key() {
export PUB_KEY=$(sudo cat ~virtmgr/.ssh/airship_gate.pub)
mkdir -p ${TEMP_DIR}/tmp
envsubst < "${TEMPLATE_DIR}/authorized_keys.sub" > ${TEMP_DIR}/tmp/virtmgr.authorized_keys
sudo cp ${TEMP_DIR}/tmp/virtmgr.authorized_keys ~virtmgr/.ssh/authorized_keys
sudo chown -R virtmgr ~virtmgr/.ssh
sudo chmod 700 ~virtmgr/.ssh
sudo chmod 600 ~virtmgr/.ssh/authorized_keys
mkdir -p "${GATE_DEPOT}"
cat << EOF > ${GATE_DEPOT}/airship_drydock_kvm_ssh_key.yaml
---
schema: deckhand/CertificateKey/v1
metadata:
schema: metadata/Document/v1
name: airship_drydock_kvm_ssh_key
layeringDefinition:
layer: site
abstract: false
storagePolicy: cleartext
data: |-
EOF
sudo cat ~virtmgr/.ssh/airship_gate | sed -e 's/^/ /' >> ${GATE_DEPOT}/airship_drydock_kvm_ssh_key.yaml
}

View File

@ -0,0 +1,117 @@
{
"$schema": "http://json-schema.org/schema#",
"definitions": {
"publish": {
"type": "object",
"properties": {
"junit": {
"type": "array",
"items": {
"$ref": "#/definitions/relativePath"
}
}
},
"additionalProperties": false
},
"relativePath": {
"type": "string",
"pattern": "^[A-Za-z0-9][A-Za-z0-9_\\.-]*(/[A-Za-z0-9_\\.-]+)*[A-Za-z0-9_-]$"
    },
    "vmSpec": {
      "type": "object",
      "properties": {
        "memory": {
          "type": "integer",
          "minimum": 1024
        },
        "vcpus": {
          "type": "integer",
          "minimum": 1,
          "maximum": 8
        },
        "mac": {
          "type": "string"
        },
        "ip": {
          "type": "string"
        },
        "bootstrap": {
          "type": "boolean"
        }
      },
      "required": [
        "memory",
        "vcpus",
        "mac",
        "ip",
        "bootstrap"
      ],
      "additionalProperties": false
    }
  },
"type": "object",
"properties": {
"configuration": {
"type": "array",
"items": {
"$ref": "#/definitions/relativePath"
}
},
"publish": {
"$ref": "#/definitions/publish"
},
"stages": {
"type": "array",
"items": {
"type": "object",
"properties": {
"arguments": {
"type": "array",
"items": {
"type": "string"
}
},
"name": {
"type": "string"
},
"on_error": {
"$ref": "#/definitions/relativePath"
},
"publish": {
"$ref": "#/definitions/publish"
},
"script": {
"$ref": "#/definitions/relativePath"
}
},
"required": [
"name",
"script"
],
"additionalProperties": false
},
"minItems": 1
},
"vm": {
"type": "object",
"properties": {
"memory": {
"type": "integer",
"minimum": 1024
},
"names": {
"type": "array",
"items": {
"type": "string",
"enum": [
"n0",
"n1",
"n2",
"n3"
]
},
"uniqueItems": true
},
"non_genesis": {
"type": "array",
"items": {
"type": "string",
"enum": [
"n1",
"n2",
"n3"
]
},
"uniqueItems": true
},
"vcpus": {
"type": "integer",
"minimum": 1,
"maximum": 8
}
},
"required": [
"memory",
"names",
"vcpus",
"non_genesis"
],
"additionalProperties": false
}
},
"required": [
"stages"
],
"additionalProperties": false
}

View File

@ -0,0 +1,76 @@
{
"configuration": {
"site": "gate-multinode",
"primary_repo": "deployment_files",
"aux_repos": []
},
"stages": [
{
"name": "Gate Setup",
"script": "gate-setup.sh"
},
{
"name": "Pegleg Collection",
"script": "pegleg-collect.sh"
},
{
"name": "Populate Image Cache",
"script": "registry-load.sh"
},
{
"name": "Generate Certificates",
"script": "generate-certificates.sh"
},
{
"name": "Build Scripts",
"script": "build-scripts.sh"
},
{
"name": "Create VMs",
"script": "create-vms.sh"
},
{
"name": "Genesis",
"script": "genesis.sh",
"on_error": "collect_genesis_info.sh"
},
{
"name": "Load Site Design",
"script": "shipyard-load-design.sh"
},
{
"name": "Deploy Site",
"script": "shipyard-deploy-site.sh"
}
],
"vm": {
"n0" : {
"memory": 32768,
"vcpus": 8,
"mac": "52:54:00:00:a4:31",
"ip": "172.24.1.10",
"bootstrap": true
},
"n1" : {
"memory": 3072,
"vcpus": 2,
"mac": "52:54:00:00:a3:31",
"ip": "172.24.1.11",
"bootstrap": false
},
"n2" : {
"memory": 3072,
"vcpus": 2,
"mac": "52:54:00:1a:95:0d",
"ip": "172.24.1.12",
"bootstrap": false
},
"n3" : {
"memory": 3072,
"vcpus": 2,
"mac": "52:54:00:31:c2:36",
"ip": "172.24.1.13",
"bootstrap": false
}
}
}

View File

@ -0,0 +1,68 @@
{
"configuration": {
"site": "gate-multinode",
"primary_repo": "deployment_files",
"aux_repos": []
},
"stages": [
{
"name": "Gate Setup",
"script": "gate-setup.sh"
},
{
"name": "Pegleg Collection",
"script": "pegleg-collect.sh"
},
{
"name": "Populate Image Cache",
"script": "registry-load.sh"
},
{
"name": "Generate Certificates",
"script": "generate-certificates.sh"
},
{
"name": "Build Scripts",
"script": "build-scripts.sh"
},
{
"name": "Create VMs",
"script": "create-vms.sh"
},
{
"name": "Genesis",
"script": "genesis.sh",
"on_error": "collect_genesis_info.sh"
}
],
"vm": {
"n0" : {
"memory": 16384,
"vcpus": 8,
"mac": "52:54:00:00:a4:31",
"ip": "172.24.1.10",
"bootstrap": true
},
"n1" : {
"memory": 3072,
"vcpus": 2,
"mac": "52:54:00:00:a3:31",
"ip": "172.24.1.11",
"bootstrap": false
},
"n2" : {
"memory": 3072,
"vcpus": 2,
"mac": "52:54:00:1a:95:0d",
"ip": "172.24.1.12",
"bootstrap": false
},
"n3" : {
"memory": 3072,
"vcpus": 2,
"mac": "52:54:00:31:c2:36",
"ip": "172.24.1.13",
"bootstrap": false
}
}
}

View File

@ -0,0 +1,81 @@
{
"configuration": {
"site": "gate-multinode",
"primary_repo": "deployment_files",
"aux_repos": []
},
"stages": [
{
"name": "Gate Setup",
"script": "gate-setup.sh"
},
{
"name": "Pegleg Collection",
"script": "pegleg-collect.sh"
},
{
"name": "Populate Image Cache",
"script": "registry-load.sh"
},
{
"name": "Generate Certificates",
"script": "generate-certificates.sh"
},
{
"name": "Build Scripts",
"script": "build-scripts.sh"
},
{
"name": "Create VMs",
"script": "create-vms.sh"
},
{
"name": "Genesis",
"script": "genesis.sh",
"on_error": "collect_genesis_info.sh"
},
{
"name": "Load Site Design",
"script": "shipyard-load-design.sh"
},
{
"name": "Deploy Site",
"script": "shipyard-deploy-site.sh"
},
{
"name": "Update Site",
"script": "shipyard-update-site.sh"
}
],
"vm": {
"n0" : {
"memory": 16384,
"vcpus": 8,
"mac": "52:54:00:00:a4:31",
"ip": "172.24.1.10",
"bootstrap": true
},
"n1" : {
"memory": 3072,
"vcpus": 2,
"mac": "52:54:00:00:a3:31",
"ip": "172.24.1.11",
"bootstrap": false
},
"n2" : {
"memory": 3072,
"vcpus": 2,
"mac": "52:54:00:1a:95:0d",
"ip": "172.24.1.12",
"bootstrap": false
},
"n3" : {
"memory": 3072,
"vcpus": 2,
"mac": "52:54:00:31:c2:36",
"ip": "172.24.1.13",
"bootstrap": false
}
}
}

View File

@ -0,0 +1,42 @@
#!/usr/bin/env bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE(mark-burnett): Keep trying to collect info even if there's an error
set +e
set -x
source "${GATE_UTILS}"
ERROR_DIR="${TEMP_DIR}/errors"
VIA=n0
mkdir -p "${ERROR_DIR}"
log "Gathering info from failed genesis server (n0) in ${ERROR_DIR}"
log "Gathering docker info for exitted containers"
mkdir -p "${ERROR_DIR}/docker"
docker_ps "${VIA}" | tee "${ERROR_DIR}/docker/ps"
docker_info "${VIA}" | tee "${ERROR_DIR}/docker/info"
for container_id in $(docker_exited_containers "${VIA}"); do
docker_inspect "${VIA}" "${container_id}" | tee "${ERROR_DIR}/docker/${container_id}"
echo "=== Begin logs ===" | tee -a "${ERROR_DIR}/docker/${container_id}"
docker_logs "${VIA}" "${container_id}" | tee -a "${ERROR_DIR}/docker/${container_id}"
done
log "Gathering kubectl output"
mkdir -p "${ERROR_DIR}/kube"
kubectl_cmd "${VIA}" describe nodes n0 | tee "${ERROR_DIR}/kube/n0"
kubectl_cmd "${VIA}" get --all-namespaces -o wide pod | tee "${ERROR_DIR}/kube/pods"

View File

@ -0,0 +1,34 @@
#!/usr/bin/env bash
set -e
source "${GATE_UTILS}"
mkdir -p "${SCRIPT_DEPOT}"
chmod 777 "${SCRIPT_DEPOT}"
DOCKER_RUN_OPTS=("-e PROMENADE_DEBUG=${PROMENADE_DEBUG}")
for v in HTTPS_PROXY HTTP_PROXY NO_PROXY https_proxy http_proxy no_proxy
do
if [[ -v "${v}" ]]
then
DOCKER_RUN_OPTS+=(" -e ${v}=${!v}")
fi
done
log Building scripts
docker run --rm -t \
-w /config \
--network host \
-v "${DEFINITION_DEPOT}:/config" \
-v "${GATE_DEPOT}:/gate" \
-v "${CERT_DEPOT}:/certs" \
-v "${SCRIPT_DEPOT}:/scripts" \
${DOCKER_RUN_OPTS[*]} \
"${IMAGE_PROMENADE_CLI}" \
promenade \
build-all \
--validators \
-o /scripts \
/config/*.yaml /certs/*.yaml /gate/*.yaml

View File

@ -0,0 +1,21 @@
#!/usr/bin/env bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
source "${GATE_UTILS}"
vm_clean_all
vm_create_all

View File

@ -0,0 +1,33 @@
#!/usr/bin/env bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
source "${GATE_UTILS}"
# Docker registry (cache) setup
registry_up
# SSH setup
ssh_setup_declare
# Virsh setup
pool_declare
img_base_declare
net_declare
# Make libvirtd available via SSH
make_virtmgr_account
gen_libvirt_key

View File

@ -0,0 +1,44 @@
#!/usr/bin/env bash
set -e
source "${GATE_UTILS}"
IS_UPDATE=0
DO_EXCLUDE=0
EXCLUDE_PATTERNS=()
while getopts "ux:" opt; do
case "${opt}" in
u)
IS_UPDATE=1
;;
x)
DO_EXCLUDE=1
EXCLUDE_PATTERNS+=("${OPTARG}")
;;
*)
echo "Unknown option"
exit 1
;;
esac
done
shift $((OPTIND-1))
DESIGN_FILES=($(find "${DEFINITION_DEPOT}" -name '*.yaml' | xargs -n 1 basename | xargs -n 1 printf "/tmp/design/%s\n"))
GATE_FILES=($(find "${GATE_DEPOT}" -name '*.yaml' | xargs -n 1 basename | xargs -n 1 printf "/tmp/gate/%s\n"))
mkdir -p "${CERT_DEPOT}"
chmod 777 "${CERT_DEPOT}"
log Generating certificates
docker run --rm -t \
-w /tmp \
-v "${DEFINITION_DEPOT}:/tmp/design" \
-v "${GATE_DEPOT}:/tmp/gate" \
-v "${CERT_DEPOT}:/certs" \
-e "PROMENADE_DEBUG=${PROMENADE_DEBUG}" \
"${IMAGE_PROMENADE_CLI}" \
promenade \
generate-certs \
-o /certs \
"${DESIGN_FILES[@]}" "${GATE_FILES[@]}"

View File

@ -0,0 +1,30 @@
#!/usr/bin/env bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
source "${GATE_UTILS}"
# Copies script and virtmgr private key to genesis VM
rsync_cmd "${SCRIPT_DEPOT}/genesis.sh" "${GENESIS_NAME}:/root/airship/"
set -o pipefail
ssh_cmd "${GENESIS_NAME}" /root/airship/genesis.sh 2>&1 | tee -a "${LOG_FILE}"
set +o pipefail
if ssh_cmd "${GENESIS_NAME}" docker images | tail -n +2 | grep -v registry:5000 ; then
log_warn "Using some non-cached docker images. This will slow testing."
ssh_cmd "${GENESIS_NAME}" docker images | tail -n +2 | grep -v registry:5000 | tee -a "${LOG_FILE}"
fi

View File

@ -0,0 +1,65 @@
#!/usr/bin/env bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -xe
source "${GATE_UTILS}"
mkdir -p "${DEFINITION_DEPOT}"
chmod 777 "${DEFINITION_DEPOT}"
render_pegleg_cli() {
cli_string="pegleg -v site"
primary_repo=$(config_pegleg_primary_repo)
if [[ -d "${REPO_ROOT}/${primary_repo}" ]]
then
cli_string="${cli_string} -p /workspace/${primary_repo}"
else
log "${primary_repo} not a valid primary repository"
return 1
fi
aux_repos=($(config_pegleg_aux_repos))
if [[ ${#aux_repos[@]} -gt 0 ]]
then
for r in ${aux_repos[*]}
do
cli_string="${cli_string} -a ${r}"
done
fi
cli_string="${cli_string} collect -s /collect"
cli_string="${cli_string} $(config_pegleg_sitename)"
echo ${cli_string}
}
log "Collecting site definition to ${DEFINITION_DEPOT}"
docker run \
--rm -t \
--network none \
-v "${REPO_ROOT}":/workspace \
-v "${DEFINITION_DEPOT}":/collect \
"${IMAGE_PEGLEG_CLI}" \
$(render_pegleg_cli)
log "Generating virtmgr key documents"
gen_libvirt_key && install_libvirt_key
collect_ssh_key

View File

@ -0,0 +1,21 @@
#!/usr/bin/env bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
source "${GATE_UTILS}"
# Docker registry (cache) setup
registry_populate

View File

@ -0,0 +1,33 @@
#!/usr/bin/env bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eu
source "${GATE_UTILS}"
log Testing disk IO
fio \
--randrepeat=1 \
--ioengine=libaio \
--direct=1 \
--gtod_reduce=1 \
--name=test \
--filename=.fiotest \
--bs=4k \
--iodepth=64 \
--size=1G \
--readwrite=randrw \
--rwmixread=50

View File

@ -0,0 +1,22 @@
#!/usr/bin/env bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
source "${GATE_UTILS}"
cd "${TEMP_DIR}"
shipyard_action_wait deploy_site 3600

View File

@ -0,0 +1,77 @@
#!/usr/bin/env bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
source "${GATE_UTILS}"
cd "${TEMP_DIR}"
# Omit the cert_yaml bundle
OMIT_CERTS=0
# Omit the gate_yaml bundle
OMIT_GATE=0
while getopts "og" opt; do
case "${opt}" in
o)
OMIT_CERTS=1
;;
g)
OMIT_GATE=1
;;
*)
echo "Unknown option"
exit 1
;;
esac
done
shift $((OPTIND-1))
check_configdocs_result(){
RESULT=$1
ERROR_CNT=$(echo "${RESULT}" | grep -oE 'Errors: [0-9]+')
if [[ "${ERROR_CNT}" != "Errors: 0" ]]
then
log "Shipyard create configdocs did not pass validation."
echo "${RESULT}" >> "${LOG_FILE}"
return 1
fi
}
# Copy site design to genesis node
ssh_cmd "${GENESIS_NAME}" mkdir -p "${GENESIS_WORK_DIR}/site"
rsync_cmd "${DEFINITION_DEPOT}"/*.yaml "${GENESIS_NAME}:${GENESIS_WORK_DIR}/site/"
sleep 120
check_configdocs_result "$(shipyard_cmd create configdocs design --directory=/${GENESIS_WORK_DIR}/site --replace)"
if [[ "${OMIT_CERTS}" == "0" ]]
then
ssh_cmd "${GENESIS_NAME}" mkdir -p "${GENESIS_WORK_DIR}/certs"
rsync_cmd "${CERT_DEPOT}"/*.yaml "${GENESIS_NAME}:${GENESIS_WORK_DIR}/certs/"
check_configdocs_result "$(shipyard_cmd create configdocs certs --directory=/${GENESIS_WORK_DIR}/certs --append)"
fi
if [[ "${OMIT_GATE}" == "0" ]]
then
ssh_cmd "${GENESIS_NAME}" mkdir -p "${GENESIS_WORK_DIR}/gate"
rsync_cmd "${GATE_DEPOT}"/*.yaml "${GENESIS_NAME}:${GENESIS_WORK_DIR}/gate/"
check_configdocs_result "$(shipyard_cmd create configdocs gate --directory=/${GENESIS_WORK_DIR}/gate --append)"
fi
check_configdocs_result "$(shipyard_cmd commit configdocs)"

View File

@ -0,0 +1,22 @@
#!/usr/bin/env bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
source "${GATE_UTILS}"
cd "${TEMP_DIR}"
shipyard_action_wait update_site 1800

View File

@ -0,0 +1,20 @@
#!/usr/bin/env bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
source "${GATE_UTILS}"
vm_stop_non_genesis

View File

@ -0,0 +1 @@
from="172.24.1.0/24" ${PUB_KEY}

View File

@ -0,0 +1,3 @@
#cloud-config
instance-id: ucp-${NAME}
local-hostname: ${NAME}

View File

@ -0,0 +1,13 @@
#cloud-config
version: 1
config:
- type: physical
name: ens3
subnets:
- type: static
address: ${BR_IP_NODE}/24
gateway: 172.24.1.1
- type: nameserver
address:
- 8.8.8.8
- 8.8.4.4

View File

@ -0,0 +1,5 @@
IdentityFile ${SSH_CONFIG_DIR}/id_rsa
LogLevel QUIET
StrictHostKeyChecking no
UserKnownHostsFile /dev/null

View File

@ -0,0 +1,2 @@
Host ${SSH_NODE_HOSTNAME}
HostName ${SSH_NODE_IP}

View File

@ -0,0 +1,14 @@
#cloud-config
disable_root: false
hostname: ${NAME}
manage_etc_hosts: false
ssh_authorized_keys:
- ${SSH_PUBLIC_KEY}
chpasswd:
list: |
root:password
expire: false

View File

@ -0,0 +1,8 @@
<network>
<name>airship_gate</name>
<forward mode='nat'/>
<bridge name='airship_gate' stp='on' delay='0'/>
<mac address='52:54:00:e7:94:3f'/>
<ip address='172.24.1.1' netmask='255.255.255.0'>
</ip>
</network>

tools/multi_nodes_gate/gate.sh (74 lines, executable file)
View File

@ -0,0 +1,74 @@
#!/usr/bin/env bash
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
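# Usage: gate.sh [manifest-name]
# Runs the stages listed in multi_nodes_gate/airship_gate/manifests/<manifest-name>.json
# (default: multinode_deploy), e.g.: ./gate.sh multinode_deploy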
SCRIPT_DIR=$(realpath "$(dirname "${0}")")
WORKSPACE=$(realpath "${SCRIPT_DIR}/..")
GATE_UTILS=${WORKSPACE}/multi_nodes_gate/airship_gate/lib/all.sh
GATE_COLOR=${GATE_COLOR:-1}
MANIFEST_ARG=${1:-multinode_deploy}
GATE_MANIFEST=${WORKSPACE}/multi_nodes_gate/airship_gate/manifests/${MANIFEST_ARG}.json
export GATE_COLOR
export GATE_MANIFEST
export GATE_UTILS
export WORKSPACE
source "${GATE_UTILS}"
sudo chmod -R 755 "${TEMP_DIR}"
STAGES_DIR=${WORKSPACE}/multi_nodes_gate/airship_gate/stages
log_temp_dir
echo
STAGES=$(mktemp)
jq -cr '.stages | .[]' "${GATE_MANIFEST}" > "${STAGES}"
# NOTE(mark-burnett): It is necessary to use a non-stdin file descriptor for
# the read below, since we will be calling SSH, which will consume the
# remaining data on STDIN.
exec 3< "$STAGES"
while read -u 3 stage; do
NAME=$(echo "${stage}" | jq -r .name)
STAGE_CMD=${STAGES_DIR}/$(echo "${stage}" | jq -r .script)
log_stage_header "${NAME}"
if echo "${stage}" | jq -r '.arguments | @sh' | xargs "${STAGE_CMD}" ; then
log_stage_success
else
log_color_reset
log_stage_error "${NAME}" "${LOG_FILE}"
if echo "${stage}" | jq -e .on_error > /dev/null; then
log_stage_diagnostic_header
ON_ERROR=${WORKSPACE}/multi_nodes_gate/airship_gate/on_error/$(echo "${stage}" | jq -r .on_error)
set +e
$ON_ERROR
fi
log_stage_error "${NAME}" "${TEMP_DIR}"
exit 1
fi
log_stage_footer "${NAME}"
echo
done
log_note "Site Definition YAMLs found in ${DEFINITION_DEPOT}"
echo
log_huge_success

View File

@ -0,0 +1,13 @@
# source_name, tag, cache_name
coredns/coredns,0.9.9,coredns
gcr.io/google_containers/hyperkube-amd64,v1.10.2,hyperkube
gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64,1.14.4,k8s-dns-dnsmasq-nanny-amd64
gcr.io/google_containers/k8s-dns-kube-dns-amd64,1.14.4,k8s-dns-kube-dns-amd64
gcr.io/google_containers/k8s-dns-sidecar-amd64,1.14.4,k8s-dns-sidecar-amd64
gcr.io/kubernetes-helm/tiller,v2.7.2,tiller
lachlanevenson/k8s-helm,v2.7.2,helm
quay.io/attcomdev/armada,latest,armada
quay.io/calico/cni,v1.11.0,calico-cni
quay.io/calico/ctl,v1.6.1,calico-ctl
quay.io/calico/kube-controllers,v1.0.0,calico-kube-controllers
quay.io/calico/node,v2.6.1,calico-node

Some files were not shown because too many files have changed in this diff.