Add new airskiff type

Create a site type called skiff and make the airskiff site based on
the new type. This will help reduce duplication when creating the
airskiff-suse site in a subsequent commit.

Change-Id: Ie9cd6d0e572a4556ee6ff5c3d37f20349c2d7de3
changes/63/674963/15
James Gu 4 years ago
parent 5fcc70eec9
commit e28d0a1313

@ -1,4 +1,14 @@
---
schema: pegleg/SoftwareVersions/v1
metadata:
schema: metadata/Document/v1
name: software-versions
labels:
name: software-versions-global
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data:
charts:
kubernetes:
@ -737,14 +747,4 @@ data:
url: http://us.archive.ubuntu.com/ubuntu
unnamed:
- ceph-common
metadata:
labels:
name: software-versions-global
layeringDefinition:
abstract: false
layer: global
name: software-versions
schema: metadata/Document/v1
storagePolicy: cleartext
schema: pegleg/SoftwareVersions/v1
...

@ -1,34 +0,0 @@
---
# NOTE: This file is ignored by Airskiff and is copied from the seaworthy site.
# This file defines a boot action which is responsible for fetching the node's
# promjoin script from the promenade API. This is the script responsible for
# installing kubernetes on the node and joining the kubernetes cluster.
# #GLOBAL-CANDIDATE#
schema: 'drydock/BootAction/v1'
metadata:
schema: 'metadata/Document/v1'
name: promjoin
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: site
labels:
application: 'drydock'
data:
signaling: false
# TODO(alanmeadows) move what is global about this document
assets:
- path: /opt/promjoin.sh
type: file
permissions: '555'
# The ip= parameter must match the MaaS network name of the network used
# to contact kubernetes. With a standard, reference Airship deployment where
# L2 networks are shared between all racks, the network name (i.e. calico)
# should be correct.
location: promenade+http://promenade-api.ucp.svc.cluster.local/api/v1.0/join-scripts?design_ref={{ action.design_ref | urlencode }}&hostname={{ node.hostname }}&ip={{ node.network.calico.ip }}{% for k, v in node.labels.items() %}&labels.dynamic={{ k }}={{ v }}{% endfor %}
location_pipeline:
- template
data_pipeline:
- utf8_decode
...

@ -1,256 +0,0 @@
---
# NOTE: This file is ignored by Airskiff and is copied from the seaworthy site.
# Drydock BaremetalNode resources for a specific rack are stored in this file.
#
# NOTE: For new sites, you should complete the networks/physical/networks.yaml
# file before working on this file.
#
# In this file, you should make the number of `drydock/BaremetalNode/v1`
# resources equal the number of bare metal nodes you have, either by deleting
# excess BaremetalNode definitions (if there are too many), or by copying and
# pasting the last BaremetalNode in the file until you have the correct number
# of baremetal nodes (if there are too few).
#
# Then in each file, address all additional NEWSITE-CHANGEME markers to update
# the data in these files with the right values for your new site.
#
# *NOTE: The Genesis node is counted as one of the control plane nodes. Note
# that the Genesis node does not appear on this bare metal list, because the
# procedure to reprovision the Genesis host with MaaS has not yet been
# implemented. Therefore there will be only three bare metal nodes in this file
# with the 'masters' tag, as the genesis roles are assigned in a different
# place (profiles/genesis.yaml).
# NOTE: The host profiles for the control plane are further divided into two
# variants: primary and secondary. The only significance this has is that the
# "primary" nodes are active Ceph nodes, whereas the "secondary" nodes are Ceph
# standby nodes. For Ceph quorum, this means that the control plane split will
# be 3 primary + 1 standby host profile, and the Genesis node counts toward one
# of the 3 primary profiles. Other control plane services are not affected by
# primary vs secondary designation.
#
# TODO: Include the hostname naming convention
#
schema: 'drydock/BaremetalNode/v1'
metadata:
schema: 'metadata/Document/v1'
# NEWSITE-CHANGEME: Replace with the hostname of the first node in the rack,
# after (excluding) genesis.
name: cab23-r720-12
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# NEWSITE-CHANGEME: The IPv4 address assigned to each logical network on this
# node. In the reference Airship deployment, these are all logical Networks defined
# in networks/physical/networks.yaml. IP addresses are manually assigned, by-hand.
# (what could possibly go wrong!) The instructions differ for each logical
# network, which are laid out below.
addressing:
# The iDrac/iLo IP of the node. It's important that this match up with the
# node's hostname above, so that the rack number and node position encoded
# in the hostname are accurate and matching the node that IPMI operations
# will be performed against (for poweron, poweroff, PXE boot to wipe disk or
# reconfigure identity, etc - very important to get right for these reasons).
# These addresses should already be assigned to nodes racked and stacked in
# the environment; these are not addresses which MaaS assigns.
- network: oob
address: 10.23.104.12
# The IP of the node on the PXE network. Refer to the static IP range
# defined for the PXE network in networks/physical/networks.yaml. Begin allocating
# IPs from this network, starting with the second IP (inclusive) from the
# allocation range of this subnet (Genesis node will have the first IP).
# Ex: If the start IP for the PXE "static" network is 10.23.20.11, then
# genesis will have 10.23.20.11, this node will have 10.23.20.12, and
# so on with incrementing IP addresses with each additional node.
- network: pxe
address: 10.23.20.12
# Genesis node gets first IP, all other nodes increment IPs from there
# within the allocation range defined for the network in
# networks/physical/networks.yaml
- network: oam
address: 10.23.21.12
# Genesis node gets first IP, all other nodes increment IPs from there
# within the allocation range defined for the network in
# networks/physical/networks.yaml
- network: storage
address: 10.23.23.12
# Genesis node gets first IP, all other nodes increment IPs from there
# within the allocation range defined for the network in
# networks/physical/networks.yaml
- network: overlay
address: 10.23.24.12
# Genesis node gets first IP, all other nodes increment IPs from there
# within the allocation range defined for the network in
# networks/physical/networks.yaml
- network: calico
address: 10.23.22.12
# NEWSITE-CHANGEME: Set the host profile for the node.
# Note that there are different host profiles depending if this is a control
# plane vs data plane node, and different profiles that map to different types
# hardware. Control plane host profiles are further broken down into "primary"
# and "secondary" profiles (refer to the Notes section at the top of this doc).
# Select the host profile that matches up to your type of
# hardware and function. E.g., the r720 here refers to Dell R720 hardware, the
# 'cp' refers to a control plane profile, and the "primary" means it will be
# an active member in the ceph quorum. Refer to profiles/host/ for the list
# of available host profiles specific to this site (otherwise, you may find
# a general set of host profiles at the "type" or "global" layers/folders).
# If you have hardware that is not on this list of profiles, you may need to
# create a new host profile for that hardware.
# Regarding control plane vs other data plane profiles, refer to the notes at
# the beginning of this file. There should be one control plane node per rack,
# including Genesis. Note Genesis won't actually be listed in this file as a
# BaremetalNode, but the rest are.
# This is the second "primary" control plane node after Genesis.
host_profile: cp_r720-primary
metadata:
tags:
# NEWSITE-CHANGEME: See previous comment. Apply 'masters' tag for control
# plane node, and 'workers' tag for data plane hosts.
- 'masters'
# NEWSITE-CHANGEME: Refer to site engineering package or other supporting
# documentation for the specific rack name. This should be a rack name that
# is meaningful to data center personnel (i.e. a rack they could locate if
# you gave them this rack designation).
rack: cab23
...
---
schema: 'drydock/BaremetalNode/v1'
metadata:
schema: 'metadata/Document/v1'
# NEWSITE-CHANGEME: The next node's hostname
name: cab23-r720-13
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# NEWSITE-CHANGEME: The next node's IPv4 addressing
addressing:
- network: oob
address: 10.23.104.13
- network: pxe
address: 10.23.20.13
- network: oam
address: 10.23.21.13
- network: storage
address: 10.23.23.13
- network: overlay
address: 10.23.24.13
- network: calico
address: 10.23.22.13
# NEWSITE-CHANGEME: The next node's host profile
host_profile: cp_r720-primary
metadata:
# NEWSITE-CHANGEME: The next node's rack designation
rack: cab23
# NEWSITE-CHANGEME: The next node's role designation
tags:
- 'masters'
...
---
schema: 'drydock/BaremetalNode/v1'
metadata:
schema: 'metadata/Document/v1'
# NEWSITE-CHANGEME: The next node's hostname
name: cab23-r720-14
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# NEWSITE-CHANGEME: The next node's IPv4 addressing
addressing:
- network: oob
address: 10.23.104.14
- network: pxe
address: 10.23.20.14
- network: oam
address: 10.23.21.14
- network: storage
address: 10.23.23.14
- network: overlay
address: 10.23.24.14
- network: calico
address: 10.23.22.14
# NEWSITE-CHANGEME: The next node's host profile
# NOTE(review): this node uses the data plane profile (see the 'workers' tag
# below), not a control plane profile — previous comment was stale.
host_profile: dp_r720
metadata:
# NEWSITE-CHANGEME: The next node's rack designation
rack: cab23
# NEWSITE-CHANGEME: The next node's role designation
tags:
- 'workers'
...
---
schema: 'drydock/BaremetalNode/v1'
metadata:
schema: 'metadata/Document/v1'
# NEWSITE-CHANGEME: The next node's hostname
name: cab23-r720-17
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# NEWSITE-CHANGEME: The next node's IPv4 addressing
addressing:
- network: oob
address: 10.23.104.17
- network: pxe
address: 10.23.20.17
- network: oam
address: 10.23.21.17
- network: storage
address: 10.23.23.17
- network: overlay
address: 10.23.24.17
- network: calico
address: 10.23.22.17
# NEWSITE-CHANGEME: The next node's host profile
# NOTE(review): this node uses the data plane profile (see the 'workers' tag
# below), not the "secondary" control plane profile — previous comment was stale.
host_profile: dp_r720
metadata:
# NEWSITE-CHANGEME: The next node's rack designation
rack: cab23
# NEWSITE-CHANGEME: The next node's role designation
tags:
- 'workers'
...
---
schema: 'drydock/BaremetalNode/v1'
metadata:
schema: 'metadata/Document/v1'
# NEWSITE-CHANGEME: The next node's hostname
name: cab23-r720-19
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# NEWSITE-CHANGEME: The next node's IPv4 addressing
addressing:
- network: oob
address: 10.23.104.19
- network: pxe
address: 10.23.20.19
- network: oam
address: 10.23.21.19
- network: storage
address: 10.23.23.19
- network: overlay
address: 10.23.24.19
- network: calico
address: 10.23.22.19
# NEWSITE-CHANGEME: The next node's host profile
host_profile: dp_r720
metadata:
# NEWSITE-CHANGEME: The next node's rack designation
rack: cab23
# NEWSITE-CHANGEME: The next node's role designation
tags:
- 'workers'
...

@ -1,302 +0,0 @@
---
# NOTE: This file is ignored by Airskiff and is copied from the seaworthy site.
# The purpose of this file is to define all of the NetworkLinks (i.e. layer 1
# devices) and Networks (i.e. layer 3 configurations). The following is standard
# for the logical networks in Airship:
#
# +----------+-----------------------------------+----------------+--------------+----------------------------------------------------+-----------------+
# | Network | | Per-rack or | | | VLAN tagged |
# | Name | Purpose | per-site CIDR? | Has gateway? | Bond | or untagged? |
# +----------+-----------------------------------+----------------+--------------+----------------------------------------------------+-----------------+
# | oob | Out of Band devices (iDrac/iLo) | per-site CIDR | Has gateway | No bond, N/A | Untagged/Native |
# | pxe | PXE boot network | per-site CIDR | No gateway | No bond, no LACP fallback. Dedicated PXE interface | Untagged/Native |
# | oam | management network | per-site CIDR | Has gateway | member of bond0 | tagged |
# | storage | storage network | per-site CIDR | No gateway | member of bond0 | tagged |
# | calico | underlay calico net; k8s traffic | per-site CIDR | No gateway | member of bond0 | tagged |
# | overlay | overlay network for openstack SDN | per-site CIDR | No gateway | member of bond0 | tagged |
# +----------+-----------------------------------+----------------+--------------+----------------------------------------------------+-----------------+
#
# For standard Airship deployments, you should not need to modify the number of
# NetworkLinks and Networks in this file. Only the IP addresses and CIDRs should
# need editing.
#
# TODO: Given that we expect all network broadcast domains to span all racks in
# Airship, we should choose network names that do not include the rack number.
#
# TODO: FQDN naming standards for hosts
#
schema: 'drydock/NetworkLink/v1'
metadata:
schema: 'metadata/Document/v1'
name: oob
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# MaaS doesn't own this network like it does the others, so the noconfig label
# is specified.
labels:
noconfig: enabled
bonding:
mode: disabled
mtu: 1500
linkspeed: auto
trunking:
mode: disabled
default_network: oob
allowed_networks:
- oob
...
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: oob
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# NEWSITE-CHANGEME: Update with the site's out-of-band CIDR
cidr: 10.23.104.0/24
routes:
# NEWSITE-CHANGEME: Update with the site's out-of-band gateway IP
- subnet: '0.0.0.0/0'
gateway: 10.23.104.1
metric: 100
# NEWSITE-CHANGEME: Update with the site's out-of-band IP allocation range
# FIXME: Is this IP range actually used/allocated for anything? The HW already
# has its OOB IPs assigned. None of the Ubuntu OS's should need IPs on OOB
# network either, as they should be routable via the default gw on OAM network
ranges:
- type: static
start: 10.23.104.11
end: 10.23.104.21
...
---
schema: 'drydock/NetworkLink/v1'
metadata:
schema: 'metadata/Document/v1'
name: pxe
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
bonding:
mode: disabled
mtu: 1500
linkspeed: auto
trunking:
mode: disabled
default_network: pxe
allowed_networks:
- pxe
...
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: pxe
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# NEWSITE-CHANGEME: Update with the site's PXE network CIDR
# NOTE: The CIDR minimum size = (number of nodes * 2) + 10
cidr: 10.23.20.0/24
routes:
- subnet: 0.0.0.0/0
# NEWSITE-CHANGEME: Set the PXE network gateway IP address
gateway: 10.23.20.1
metric: 100
# NOTE: The first 10 IPs in the subnet are reserved for network infrastructure.
# The remainder of the range is divided between two subnets of equal size:
# one static, and one DHCP.
# The DHCP addresses are used when nodes perform a PXE boot (DHCP address gets
# assigned), and when a node is commissioning in MaaS (also uses DHCP to get
# its IP address). However, when MaaS installs the operating system
# ("Deploying/Deployed" states), it will write a static IP assignment to
# /etc/network/interfaces[.d] with IPs from the "static" subnet defined here.
ranges:
# NEWSITE-CHANGEME: Update to the first 10 IPs in the CIDR
- type: reserved
start: 10.23.20.1
end: 10.23.20.10
# NEWSITE-CHANGEME: Update to the first half of the remaining range after
# excluding the 10 reserved IPs.
- type: static
start: 10.23.20.11
end: 10.23.20.21
# NEWSITE-CHANGEME: Update to the second half of the remaining range after
# excluding the 10 reserved IPs.
- type: dhcp
start: 10.23.20.40
end: 10.23.20.80
dns:
# NEWSITE-CHANGEME: FQDN for bare metal nodes.
# Choose FQDN according to the node FQDN naming conventions at the top of
# this document.
domain: atlantafoundry.com
# List of upstream DNS forwards. Verify you can reach them from your
# environment. If so, you should not need to change them.
# TODO: This should be populated via substitution from common-addresses
servers: '8.8.8.8,8.8.4.4,208.67.222.222'
...
---
schema: 'drydock/NetworkLink/v1'
metadata:
schema: 'metadata/Document/v1'
name: data
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
bonding:
mode: 802.3ad
hash: layer3+4
peer_rate: fast
mon_rate: 100
up_delay: 1000
down_delay: 3000
# NEWSITE-CHANGEME: Ensure the network switches in the environment are
# configured for this MTU or greater. Even if switches are configured for or
# can support a slightly higher MTU, there is no need (and negligible benefit)
# to squeeze every last byte into the MTU (e.g., 9216 vs 9100). Leave MTU at
# 9100 for maximum compatibility.
mtu: 9100
linkspeed: auto
trunking:
mode: 802.1q
allowed_networks:
- oam
- storage
- overlay
- calico
...
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: oam
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# NEWSITE-CHANGEME: Set the VLAN ID which the OAM network is on
vlan: '21'
mtu: 9100
# NEWSITE-CHANGEME: Set the CIDR for the OAM network
# NOTE: The CIDR minimum size = number of nodes + 10
cidr: 10.23.21.0/24
routes:
- subnet: 0.0.0.0/0
# NEWSITE-CHANGEME: Set the OAM network gateway IP address
gateway: 10.23.21.1
metric: 100
ranges:
# NEWSITE-CHANGEME: Update to the first 10 IPs in the CIDR
- type: reserved
start: 10.23.21.1
end: 10.23.21.10
# NEWSITE-CHANGEME: Update to the remaining range after excluding the 10
# reserved IPs.
- type: static
start: 10.23.21.11
end: 10.23.21.21
dns:
# NEWSITE-CHANGEME: FQDN for bare metal nodes.
# Choose FQDN according to the node FQDN naming conventions at the top of
# this document.
domain: atlantafoundry.com
# List of upstream DNS forwards. Verify you can reach them from your
# environment. If so, you should not need to change them.
# TODO: This should be populated via substitution from common-addresses
servers: '8.8.8.8,8.8.4.4,208.67.222.222'
...
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: storage
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# NEWSITE-CHANGEME: Set the VLAN ID which the storage network is on
vlan: '23'
mtu: 9100
# NEWSITE-CHANGEME: Set the CIDR for the storage network
# NOTE: The CIDR minimum size = number of nodes + 10
cidr: 10.23.23.0/24
ranges:
# NEWSITE-CHANGEME: Update to the first 10 IPs in the CIDR
- type: reserved
start: 10.23.23.1
end: 10.23.23.10
# NEWSITE-CHANGEME: Update to the remaining range after excluding the 10
# reserved IPs.
- type: static
start: 10.23.23.11
end: 10.23.23.21
...
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: overlay
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# NEWSITE-CHANGEME: Set the VLAN ID which the overlay network is on
vlan: '24'
mtu: 9100
# NEWSITE-CHANGEME: Set the CIDR for the overlay network
# NOTE: The CIDR minimum size = number of nodes + 10
cidr: 10.23.24.0/24
ranges:
# NEWSITE-CHANGEME: Update to the first 10 IPs in the CIDR
- type: reserved
start: 10.23.24.1
end: 10.23.24.10
# NEWSITE-CHANGEME: Update to the remaining range after excluding the 10
# reserved IPs.
- type: static
start: 10.23.24.11
end: 10.23.24.21
...
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: calico
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# NEWSITE-CHANGEME: Set the VLAN ID which the calico network is on
vlan: '22'
mtu: 9100
# NEWSITE-CHANGEME: Set the CIDR for the calico network
# NOTE: The CIDR minimum size = number of nodes + 10
cidr: 10.23.22.0/24
ranges:
# NEWSITE-CHANGEME: Update to the first 10 IPs in the CIDR
- type: reserved
start: 10.23.22.1
end: 10.23.22.10
# NEWSITE-CHANGEME: Update to the remaining range after excluding the 10
# reserved IPs.
- type: static
start: 10.23.22.11
end: 10.23.22.21
...

@ -1,51 +0,0 @@
---
# NOTE: This file is ignored by Airskiff and is copied from the seaworthy site.
# The purpose of this file is to apply proper labels to Genesis node so the
# proper services are installed and proper configuration applied. This should
# not need to be changed for a new site.
# #GLOBAL-CANDIDATE#
schema: promenade/Genesis/v1
metadata:
schema: metadata/Document/v1
name: genesis-site
layeringDefinition:
abstract: false
layer: site
parentSelector:
name: genesis-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
labels:
dynamic:
- beta.kubernetes.io/fluentd-ds-ready=true
- calico-etcd=enabled
- ceph-mds=enabled
- ceph-mon=enabled
- ceph-osd=enabled
- ceph-rgw=enabled
- ceph-mgr=enabled
- ceph-bootstrap=enabled
- tenant-ceph-control-plane=enabled
- tenant-ceph-mon=enabled
- tenant-ceph-rgw=enabled
- tenant-ceph-mgr=enabled
- kube-dns=enabled
- kube-ingress=enabled
- kubernetes-apiserver=enabled
- kubernetes-controller-manager=enabled
- kubernetes-etcd=enabled
- kubernetes-scheduler=enabled
- promenade-genesis=enabled
- ucp-control-plane=enabled
- maas-rack=enabled
- maas-region=enabled
- ceph-osd-bootstrap=enabled
- openstack-control-plane=enabled
- openvswitch=enabled
- openstack-l3-agent=enabled
- node-exporter=enabled
...

@ -1,78 +0,0 @@
---
# NOTE: This file is ignored by Airskiff and is copied from the seaworthy site.
schema: 'drydock/HardwareProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: dell_r720
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# Vendor of the server chassis
vendor: DELL
# Generation of the chassis model
generation: '8'
# Version of the chassis model within its generation - not version of the hardware definition
hw_version: '3'
# The certified version of the chassis BIOS
bios_version: '2.2.3'
# Mode of the default boot of hardware - bios, uefi
boot_mode: bios
# Protocol of boot of the hardware - pxe, usb, hdd
bootstrap_protocol: pxe
# Which interface to use for network booting within the OOB manager, not OS device
pxe_interface: 0
# Map hardware addresses to aliases/roles to allow a mix of hardware configs
# in a site to result in a consistent configuration
device_aliases:
## network
# eno1
pxe_nic01:
address: '0000:01:00.0'
# type could identify expected hardware - used for hardware manifest validation
dev_type: 'I350 Gigabit Network Connection'
bus_type: 'pci'
# enp67s0f0
data_nic01:
address: '0000:43:00.0'
dev_type: 'Ethernet 10G 2P X520 Adapter'
bus_type: 'pci'
# enp67s0f1
data_nic02:
address: '0000:43:00.1'
dev_type: 'Ethernet 10G 2P X520 Adapter'
bus_type: 'pci'
# enp68s0f0
data_nic03:
address: '0000:44:00.0'
dev_type: 'Ethernet 10G 2P X520 Adapter'
bus_type: 'pci'
# enp68s0f1
data_nic04:
address: '0000:44:00.1'
dev_type: 'Ethernet 10G 2P X520 Adapter'
bus_type: 'pci'
## storage
# /dev/sda
bootdisk:
address: '0:2.0.0'
dev_type: 'PERC H710P'
bus_type: 'scsi'
# /dev/sdb
cephjournal1:
address: '0:2.1.0'
dev_type: 'PERC H710P'
bus_type: 'scsi'
# /dev/sdc
cephjournal2:
address: '0:2.2.0'
dev_type: 'PERC H710P'
bus_type: 'scsi'
# /dev/sdc
ephemeral:
address: '0:2.3.0'
dev_type: 'PERC H710P'
bus_type: 'scsi'
...

@ -1,232 +0,0 @@
---
# NOTE: This file is ignored by Airskiff and is copied from the seaworthy site.
# The primary control plane host profile for Airship for DELL R720s, and
# should not need to be altered if you are using matching HW. The active
# participants in the Ceph cluster run on this profile. Other control plane
# services are not affected by primary vs secondary designation.
schema: drydock/HostProfile/v1
metadata:
schema: metadata/Document/v1
name: cp_r720-primary
storagePolicy: cleartext
layeringDefinition:
abstract: false
layer: site
parentSelector:
hosttype: cp-global
actions:
- method: replace
path: .interfaces
- method: replace
path: .storage
- method: merge
path: .
data:
hardware_profile: dell_r720
primary_network: oam
interfaces:
pxe:
device_link: pxe
slaves:
- pxe_nic01
networks:
- pxe
bond0:
device_link: data
slaves:
- data_nic01
- data_nic02
- data_nic03
- data_nic04
networks:
- oam
- storage
- overlay
- calico
storage:
physical_devices:
bootdisk:
labels:
bootdrive: 'true'
partitions:
- name: 'root'
size: '30g'
bootable: true
filesystem:
mountpoint: '/'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'boot'
size: '1g'
filesystem:
mountpoint: '/boot'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var_log'
size: '100g'
filesystem:
mountpoint: '/var/log'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var'
size: '>100g'
filesystem:
mountpoint: '/var'
fstype: 'ext4'
mount_options: 'defaults'
platform:
kernel: 'hwe-16.04'
kernel_params:
console: 'ttyS1,115200n8'
metadata:
owner_data:
openstack-l3-agent: enabled
...
---
schema: drydock/HostProfile/v1
metadata:
schema: metadata/Document/v1
name: cp_r740-secondary
storagePolicy: cleartext
layeringDefinition:
abstract: false
layer: site
parentSelector:
hosttype: cp-global
actions:
- method: replace
path: .interfaces
- method: replace
path: .storage
- method: replace
path: .metadata.owner_data
- method: merge
path: .
data:
hardware_profile: dell_r720
primary_network: oam
interfaces:
pxe:
device_link: pxe
slaves:
- pxe_nic01
networks:
- pxe
bond0:
device_link: data
slaves:
- data_nic01
- data_nic02
- data_nic03
- data_nic04
networks:
- oam
- storage
- overlay
- calico
storage:
physical_devices:
bootdisk:
labels:
bootdrive: 'true'
partitions:
- name: 'root'
size: '30g'
bootable: true
filesystem:
mountpoint: '/'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'boot'
size: '1g'
filesystem:
mountpoint: '/boot'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var_log'
size: '100g'
filesystem:
mountpoint: '/var/log'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var'
size: '>100g'
filesystem:
mountpoint: '/var'
fstype: 'ext4'
mount_options: 'defaults'
platform:
kernel: 'hwe-16.04'
kernel_params:
console: 'ttyS1,115200n8'
metadata:
owner_data:
control-plane: enabled
ucp-control-plane: enabled
openstack-control-plane: enabled
openstack-heat: enabled
openstack-keystone: enabled
openstack-rabbitmq: enabled
openstack-dns-helper: enabled
openstack-mariadb: enabled
openstack-nova-control: enabled
# openstack-etcd: enabled
openstack-mistral: enabled
openstack-memcached: enabled
openstack-glance: enabled
openstack-horizon: enabled
openstack-cinder-control: enabled
openstack-cinder-volume: control
openstack-neutron: enabled
openvswitch: enabled
ucp-barbican: enabled
# ceph-mon: enabled
ceph-mgr: enabled
ceph-osd: enabled
ceph-mds: enabled
ceph-rgw: enabled
ucp-maas: enabled
kube-dns: enabled
tenant-ceph-control-plane: enabled
# tenant-ceph-mon: enabled
tenant-ceph-rgw: enabled
tenant-ceph-mgr: enabled
kubernetes-apiserver: enabled
kubernetes-controller-manager: enabled
# kubernetes-etcd: enabled
kubernetes-scheduler: enabled
tiller-helm: enabled
# kube-etcd: enabled
calico-policy: enabled
calico-node: enabled
# calico-etcd: enabled
ucp-armada: enabled
ucp-drydock: enabled
ucp-deckhand: enabled
ucp-shipyard: enabled
IAM: enabled
ucp-promenade: enabled
prometheus-server: enabled
prometheus-client: enabled
fluentd: enabled
influxdb: enabled
kibana: enabled
elasticsearch-client: enabled
elasticsearch-master: enabled
elasticsearch-data: enabled
postgresql: enabled
kube-ingress: enabled
beta.kubernetes.io/fluentd-ds-ready: 'true'
node-exporter: enabled
...

@ -1,93 +0,0 @@
---
# NOTE: This file is ignored by Airskiff and is copied from the seaworthy site.
# The data plane host profile for Airship for DELL R720s, and should
# not need to be altered if you are using matching HW. The host profile is setup
# for cpu isolation (for nova pinning), hugepages, and sr-iov.
schema: drydock/HostProfile/v1
metadata:
schema: metadata/Document/v1
name: dp_r720
storagePolicy: cleartext
layeringDefinition:
abstract: false
layer: site
parentSelector:
hosttype: dp-global
actions:
- method: replace
path: .interfaces
- method: replace
path: .storage
- method: merge
path: .
data:
hardware_profile: dell_r720
primary_network: oam
interfaces:
pxe:
device_link: pxe
slaves:
- pxe_nic01
networks:
- pxe
bond0:
device_link: data
slaves:
- data_nic01
- data_nic02
- data_nic03
- data_nic04
networks:
- oam
- storage
- overlay
- calico
storage:
physical_devices:
bootdisk:
labels:
bootdrive: 'true'
partitions:
- name: 'root'
size: '30g'
bootable: true
filesystem:
mountpoint: '/'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'boot'
size: '1g'
filesystem:
mountpoint: '/boot'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var_log'
size: '100g'
filesystem:
mountpoint: '/var/log'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var'
size: '>100g'
filesystem:
mountpoint: '/var'
fstype: 'ext4'
mount_options: 'defaults'
ephemeral:
partitions:
- name: 'nova'
size: '99%'
filesystem:
mountpoint: '/var/lib/nova'
fstype: 'ext4'
mount_options: 'defaults'
platform:
kernel: 'hwe-16.04'
kernel_params:
console: 'ttyS1,115200n8'
...

@ -1,55 +0,0 @@
---
# NOTE: This file is ignored by Airskiff and is copied from the seaworthy site.
# The purpose of this file is to define the drydock Region, which in turn drives
# the MaaS region.
schema: 'drydock/Region/v1'
metadata:
schema: 'metadata/Document/v1'
# NEWSITE-CHANGEME: Replace with the site name
name: airskiff
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
substitutions:
# NEWSITE-CHANGEME: Substitutions from deckhand SSH public keys into the
# list of authorized keys which MaaS will register for the built-in "ubuntu"
# account during the PXE process. Create a substitution rule for each SSH
# key that should have access to the "ubuntu" account (useful for trouble-
# shooting problems before UAM or UAM-lite is operational). SSH keys are
# stored as secrets in site/seaworthy/secrets.
- dest:
# Add/replace the first item in the list
path: .authorized_keys[0]
src:
schema: deckhand/PublicKey/v1
# This should match the "name" metadata of the SSH key which will be
# substituted, located in site/seaworthy/secrets folder.
name: airship_ssh_public_key
path: .
- dest:
path: .repositories.main_archive
src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .packages.repositories.main_archive
# Second key example
#- dest:
# # Increment the list index
# path: .authorized_keys[1]
# src:
# schema: deckhand/PublicKey/v1
# # your ssh key
# name: MY_USER_ssh_public_key
# path: .
data:
tag_definitions: []
# This is the list of SSH keys which MaaS will register for the built-in
# "ubuntu" account during the PXE process. This list is populated by
# substitution, so the same SSH keys do not need to be repeated in multiple
# manifests.
authorized_keys: []
repositories:
remove_unlisted: true
...

@ -8,5 +8,5 @@ metadata:
name: airskiff
storagePolicy: cleartext
data:
site_type: single-node
site_type: skiff
...

@ -1,161 +0,0 @@
---
# NOTE: This file is ignored by Airskiff and is copied from the seaworthy site.
# The purpose of this file is to build the list of calico etcd nodes and the
# calico etcd certs for those nodes in the environment.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: kubernetes-calico-etcd
  layeringDefinition:
    abstract: false
    layer: site
    parentSelector:
      name: kubernetes-calico-etcd-global
    actions:
      - method: merge
        path: .
  storagePolicy: cleartext
  substitutions:
    # Generate a list of control plane nodes (i.e. genesis node + master node
    # list) on which calico etcd will run and will need certs. It is assumed
    # that Airship sites will have 4 control plane nodes, so this should not need to
    # change for a new site.
    - src:
        schema: pegleg/CommonAddresses/v1
        name: common-addresses
        path: .genesis.hostname
      dest:
        path: .values.nodes[0].name
    - src:
        schema: pegleg/CommonAddresses/v1
        name: common-addresses
        path: .masters[0].hostname
      dest:
        path: .values.nodes[1].name
    - src:
        schema: pegleg/CommonAddresses/v1
        name: common-addresses
        path: .masters[1].hostname
      dest:
        path: .values.nodes[2].name
    - src:
        schema: pegleg/CommonAddresses/v1
        name: common-addresses
        path: .masters[2].hostname
      dest:
        path: .values.nodes[3].name
    # Certificate substitutions for the node names assembled on the above list.
    # NEWSITE-CHANGEME: Per above, the number of substitutions should not need
    # to change with a standard Airship deployment. However, the names of each
    # deckhand certificate should be updated with the correct hostnames for your
    # environment. The ordering is important (Genesis is index 0, then master
    # nodes in the order they are specified in common-addresses).
    # Genesis hostname - cab23-r720-11
    - src:
        schema: deckhand/Certificate/v1
        name: calico-etcd-cab23-r720-11
        path: .
      dest:
        path: .values.nodes[0].tls.client.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: calico-etcd-cab23-r720-11
        path: .
      dest:
        path: .values.nodes[0].tls.client.key
    - src:
        schema: deckhand/Certificate/v1
        name: calico-etcd-cab23-r720-11-peer
        path: .
      dest:
        path: .values.nodes[0].tls.peer.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: calico-etcd-cab23-r720-11-peer
        path: .
      dest:
        path: .values.nodes[0].tls.peer.key
    # master node 1 hostname - cab23-r720-12
    - src:
        schema: deckhand/Certificate/v1
        name: calico-etcd-cab23-r720-12
        path: .
      dest:
        path: .values.nodes[1].tls.client.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: calico-etcd-cab23-r720-12
        path: .
      dest:
        path: .values.nodes[1].tls.client.key
    - src:
        schema: deckhand/Certificate/v1
        name: calico-etcd-cab23-r720-12-peer
        path: .
      dest:
        path: .values.nodes[1].tls.peer.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: calico-etcd-cab23-r720-12-peer
        path: .
      dest:
        path: .values.nodes[1].tls.peer.key
    # master node 2 hostname - cab23-r720-13
    - src:
        schema: deckhand/Certificate/v1
        name: calico-etcd-cab23-r720-13
        path: .
      dest:
        path: .values.nodes[2].tls.client.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: calico-etcd-cab23-r720-13
        path: .
      dest:
        path: .values.nodes[2].tls.client.key
    - src:
        schema: deckhand/Certificate/v1
        name: calico-etcd-cab23-r720-13-peer
        path: .
      dest:
        path: .values.nodes[2].tls.peer.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: calico-etcd-cab23-r720-13-peer
        path: .
      dest:
        path: .values.nodes[2].tls.peer.key
    # master node 3 hostname - cab23-r720-14
    - src:
        schema: deckhand/Certificate/v1
        name: calico-etcd-cab23-r720-14
        path: .
      dest:
        path: .values.nodes[3].tls.client.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: calico-etcd-cab23-r720-14
        path: .
      dest:
        path: .values.nodes[3].tls.client.key
    - src:
        schema: deckhand/Certificate/v1
        name: calico-etcd-cab23-r720-14-peer
        path: .
      dest:
        path: .values.nodes[3].tls.peer.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: calico-etcd-cab23-r720-14-peer
        # Use ".", matching every other whole-document substitution in this
        # file (was "$", an inconsistent spelling of the document root).
        path: .
      dest:
        path: .values.nodes[3].tls.peer.key
data: {}
...

@ -1,165 +0,0 @@
---
# NOTE: This file is ignored by Airskiff and is copied from the seaworthy site.
# The purpose of this file is to build the list of k8s etcd nodes and the
# k8s etcd certs for those nodes in the environment.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: kubernetes-etcd
  layeringDefinition:
    abstract: false
    layer: site
    parentSelector:
      name: kubernetes-etcd-global
    actions:
      - method: merge
        path: .
  storagePolicy: cleartext
  substitutions:
    # Generate a list of control plane nodes (i.e. genesis node + master node
    # list) on which k8s etcd will run and will need certs. It is assumed
    # that Airship sites will have 4 control plane nodes, so this should not need to
    # change for a new site.
    - src:
        schema: pegleg/CommonAddresses/v1
        name: common-addresses
        path: .genesis.hostname
      dest:
        path: .values.nodes[0].name
    - src:
        schema: pegleg/CommonAddresses/v1
        name: common-addresses
        path: .masters[0].hostname
      dest:
        path: .values.nodes[1].name
    - src:
        schema: pegleg/CommonAddresses/v1
        name: common-addresses
        path: .masters[1].hostname
      dest:
        path: .values.nodes[2].name
    - src:
        schema: pegleg/CommonAddresses/v1
        name: common-addresses
        path: .masters[2].hostname
      dest:
        path: .values.nodes[3].name
    # Certificate substitutions for the node names assembled on the above list.
    # NEWSITE-CHANGEME: Per above, the number of substitutions should not need
    # to change with a standard Airship deployment. However, the names of each
    # deckhand certificate should be updated with the correct hostnames for your
    # environment. The ordering is important (Genesis is index 0, then master
    # nodes in the order they are specified in common-addresses).
    # Genesis Exception*
    # *NOTE: This is an exception in that `genesis` is not the hostname of the
    # genesis node, but `genesis` is reference here in the certificate names
    # because of certain Promenade assumptions that may be addressed in the
    # future. Therefore `genesis` is used instead of `cab23-r720-11` here.
    - src:
        schema: deckhand/Certificate/v1
        name: kubernetes-etcd-genesis
        path: .
      dest:
        path: .values.nodes[0].tls.client.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: kubernetes-etcd-genesis
        path: .
      dest:
        path: .values.nodes[0].tls.client.key
    - src:
        schema: deckhand/Certificate/v1
        name: kubernetes-etcd-genesis-peer
        path: .
      dest:
        path: .values.nodes[0].tls.peer.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: kubernetes-etcd-genesis-peer
        path: .
      dest:
        path: .values.nodes[0].tls.peer.key
    # master node 1 hostname - cab23-r720-12
    - src:
        schema: deckhand/Certificate/v1
        name: kubernetes-etcd-cab23-r720-12
        path: .
      dest:
        path: .values.nodes[1].tls.client.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: kubernetes-etcd-cab23-r720-12
        path: .
      dest:
        path: .values.nodes[1].tls.client.key
    - src:
        schema: deckhand/Certificate/v1
        name: kubernetes-etcd-cab23-r720-12-peer
        path: .
      dest:
        path: .values.nodes[1].tls.peer.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: kubernetes-etcd-cab23-r720-12-peer
        path: .
      dest:
        path: .values.nodes[1].tls.peer.key
    # master node 2 hostname - cab23-r720-13
    - src:
        schema: deckhand/Certificate/v1
        name: kubernetes-etcd-cab23-r720-13
        path: .
      dest:
        path: .values.nodes[2].tls.client.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: kubernetes-etcd-cab23-r720-13
        path: .
      dest:
        path: .values.nodes[2].tls.client.key
    - src:
        schema: deckhand/Certificate/v1
        name: kubernetes-etcd-cab23-r720-13-peer
        path: .
      dest:
        path: .values.nodes[2].tls.peer.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: kubernetes-etcd-cab23-r720-13-peer
        # Use ".", matching every other whole-document substitution in this
        # file (was "$", an inconsistent spelling of the document root).
        path: .
      dest:
        path: .values.nodes[2].tls.peer.key
    # master node 3 hostname - cab23-r720-14
    - src:
        schema: deckhand/Certificate/v1
        name: kubernetes-etcd-cab23-r720-14
        path: .
      dest:
        path: .values.nodes[3].tls.client.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: kubernetes-etcd-cab23-r720-14
        path: .
      dest:
        path: .values.nodes[3].tls.client.key
    - src:
        schema: deckhand/Certificate/v1
        name: kubernetes-etcd-cab23-r720-14-peer
        path: .
      dest:
        path: .values.nodes[3].tls.peer.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: kubernetes-etcd-cab23-r720-14-peer
        # Use ".", matching every other whole-document substitution in this
        # file (was "$", an inconsistent spelling of the document root).
        path: .
      dest:
        path: .values.nodes[3].tls.peer.key
data: {}
...

@ -1,20 +0,0 @@
---
# NOTE: This file is ignored by Airskiff and is copied from the seaworthy site.
# The purpose of this file is to define the environment-specific public-facing
# VIP for the ingress controller
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: ingress-kube-system
  layeringDefinition:
    abstract: false
    layer: site
    # Select the global-layer chart carrying this label and merge this
    # site-level document on top of it.
    parentSelector:
      ingress: kube-system
    actions:
      - method: merge
        path: .
  storagePolicy: cleartext
# No site-level overrides here; the merged global chart data is used as-is.
data: {}
...

@ -1,18 +0,0 @@
---
# NOTE: This file is ignored by Airskiff and is copied from the seaworthy site.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: elasticsearch
  layeringDefinition:
    abstract: false
    layer: site
    # Select the global-layer elasticsearch chart and merge this site-level
    # document on top of it.
    parentSelector:
      hosttype: elasticsearch-global
    actions:
      - method: merge
        path: .
  storagePolicy: cleartext
# No site-level overrides here; the merged global chart data is used as-is.
data: {}
...

@ -1,18 +0,0 @@
---
# NOTE: This file is ignored by Airskiff and is copied from the seaworthy site.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: fluentbit
  layeringDefinition:
    abstract: false
    layer: site
    # Select the global-layer fluentbit chart and merge this site-level
    # document on top of it.
    parentSelector:
      hosttype: fluentbit-global
    actions:
      - method: merge
        path: .
  storagePolicy: cleartext
# No site-level overrides here; the merged global chart data is used as-is.
data: {}
...