
site: Add Airskiff site

This change introduces Airskiff (see [0]), a development/learning environment
for the software delivery components of Airship, to the Airship-Treasuremap
repository. This change also adds a set of scripts accompanied by documentation
for easy deployment. During deployment, Armada, Deckhand, Pegleg, and Shipyard
are downloaded and built from source. Gate scripts from the OpenStack-Helm
project deploy a KubeADM-administered cluster. Armada deploys Armada, Deckhand,
and Shipyard into the cluster. The Airship components then deploy OpenStack
using the documents provided by the Airskiff site. Airskiff is not safe for
production use and should not be replicated for production deployments.

[0] https://github.com/mattmceuen/airskiff

Depends-On: https://review.openstack.org/#/c/613686/
Depends-On: https://review.openstack.org/#/c/614032/
Change-Id: Iae1efcca0812b98a9ad05aa8b869bdccfdb7e44b
changes/48/608348/51
Drew Walters 3 years ago
parent commit ba0d16dc52
  1. doc/source/airskiff.rst (+307)
  2. doc/source/index.rst (+1)
  3. site/airskiff/baremetal/bootactions/promjoin.yaml (+34)
  4. site/airskiff/baremetal/nodes.yaml (+256)
  5. site/airskiff/deployment/deployment-configuration.yaml (+13)
  6. site/airskiff/networks/common-addresses.yaml (+157)
  7. site/airskiff/networks/physical/networks.yaml (+302)
  8. site/airskiff/profiles/genesis.yaml (+50)
  9. site/airskiff/profiles/hardware/dell_r720.yaml (+78)
  10. site/airskiff/profiles/host/cp_r720.yaml (+272)
  11. site/airskiff/profiles/host/dp_r720.yaml (+105)
  12. site/airskiff/profiles/region.yaml (+55)
  13. site/airskiff/secrets/certificates/certificates.yaml (+2806)
  14. site/airskiff/secrets/certificates/ingress.yaml (+135)
  15. site/airskiff/secrets/passphrases/ceph_fsid.yaml (+12)
  16. site/airskiff/secrets/passphrases/ceph_swift_keystone_password.yaml (+11)
  17. site/airskiff/secrets/passphrases/ipmi_admin_password.yaml (+13)
  18. site/airskiff/secrets/passphrases/maas-region-key.yaml (+12)
  19. site/airskiff/secrets/passphrases/osh_barbican_oslo_db_password.yaml (+11)
  20. site/airskiff/secrets/passphrases/osh_barbican_oslo_messaging_admin_password.yaml (+11)
  21. site/airskiff/secrets/passphrases/osh_barbican_oslo_messaging_password.yaml (+11)
  22. site/airskiff/secrets/passphrases/osh_barbican_password.yaml (+11)
  23. site/airskiff/secrets/passphrases/osh_barbican_rabbitmq_erlang_cookie.yaml (+11)
  24. site/airskiff/secrets/passphrases/osh_cinder_oslo_db_password.yaml (+11)
  25. site/airskiff/secrets/passphrases/osh_cinder_oslo_messaging_admin_password.yaml (+11)
  26. site/airskiff/secrets/passphrases/osh_cinder_oslo_messaging_password.yaml (+11)
  27. site/airskiff/secrets/passphrases/osh_cinder_password.yaml (+11)
  28. site/airskiff/secrets/passphrases/osh_cinder_rabbitmq_erlang_cookie.yaml (+11)
  29. site/airskiff/secrets/passphrases/osh_glance_oslo_db_password.yaml (+11)
  30. site/airskiff/secrets/passphrases/osh_glance_oslo_messaging_admin_password.yaml (+11)
  31. site/airskiff/secrets/passphrases/osh_glance_oslo_messaging_password.yaml (+11)
  32. site/airskiff/secrets/passphrases/osh_glance_password.yaml (+11)
  33. site/airskiff/secrets/passphrases/osh_glance_rabbitmq_erlang_cookie.yaml (+11)
  34. site/airskiff/secrets/passphrases/osh_heat_oslo_db_password.yaml (+11)
  35. site/airskiff/secrets/passphrases/osh_heat_oslo_messaging_admin_password.yaml (+11)
  36. site/airskiff/secrets/passphrases/osh_heat_oslo_messaging_password.yaml (+11)
  37. site/airskiff/secrets/passphrases/osh_heat_password.yaml (+11)
  38. site/airskiff/secrets/passphrases/osh_heat_rabbitmq_erlang_cookie.yaml (+11)
  39. site/airskiff/secrets/passphrases/osh_heat_stack_user_password.yaml (+11)
  40. site/airskiff/secrets/passphrases/osh_heat_trustee_password.yaml (+11)
  41. site/airskiff/secrets/passphrases/osh_horizon_oslo_db_password.yaml (+11)
  42. site/airskiff/secrets/passphrases/osh_infra_elasticsearch_admin_password.yaml (+11)
  43. site/airskiff/secrets/passphrases/osh_infra_grafana_admin_password.yaml (+11)
  44. site/airskiff/secrets/passphrases/osh_infra_grafana_oslo_db_password.yaml (+11)
  45. site/airskiff/secrets/passphrases/osh_infra_grafana_oslo_db_session_password.yaml (+11)
  46. site/airskiff/secrets/passphrases/osh_infra_kibana_admin_password.yaml (+11)
  47. site/airskiff/secrets/passphrases/osh_infra_nagios_admin_password.yaml (+11)
  48. site/airskiff/secrets/passphrases/osh_infra_openstack_exporter_password.yaml (+11)
  49. site/airskiff/secrets/passphrases/osh_infra_oslo_db_admin_password.yaml (+11)
  50. site/airskiff/secrets/passphrases/osh_infra_oslo_db_exporter_password.yaml (+11)
  51. site/airskiff/secrets/passphrases/osh_infra_prometheus_admin_password.yaml (+11)
  52. site/airskiff/secrets/passphrases/osh_infra_rgw_s3_admin_access_key.yaml (+11)
  53. site/airskiff/secrets/passphrases/osh_infra_rgw_s3_admin_secret_key.yaml (+11)
  54. site/airskiff/secrets/passphrases/osh_infra_rgw_s3_elasticsearch_access_key.yaml (+11)
  55. site/airskiff/secrets/passphrases/osh_infra_rgw_s3_elasticsearch_secret_key.yaml (+11)
  56. site/airskiff/secrets/passphrases/osh_keystone_admin_password.yaml (+11)
  57. site/airskiff/secrets/passphrases/osh_keystone_ldap_password.yaml (+11)
  58. site/airskiff/secrets/passphrases/osh_keystone_oslo_db_password.yaml (+11)
  59. site/airskiff/secrets/passphrases/osh_keystone_oslo_messaging_admin_password.yaml (+11)
  60. site/airskiff/secrets/passphrases/osh_keystone_oslo_messaging_password.yaml (+11)
  61. site/airskiff/secrets/passphrases/osh_keystone_rabbitmq_erlang_cookie.yaml (+11)
  62. site/airskiff/secrets/passphrases/osh_neutron_oslo_db_password.yaml (+11)
  63. site/airskiff/secrets/passphrases/osh_neutron_oslo_messaging_admin_password.yaml (+11)
  64. site/airskiff/secrets/passphrases/osh_neutron_oslo_messaging_password.yaml (+11)
  65. site/airskiff/secrets/passphrases/osh_neutron_password.yaml (+11)
  66. site/airskiff/secrets/passphrases/osh_neutron_rabbitmq_erlang_cookie.yaml (+11)
  67. site/airskiff/secrets/passphrases/osh_nova_oslo_db_password.yaml (+11)
  68. site/airskiff/secrets/passphrases/osh_nova_oslo_messaging_admin_password.yaml (+11)
  69. site/airskiff/secrets/passphrases/osh_nova_oslo_messaging_password.yaml (+11)
  70. site/airskiff/secrets/passphrases/osh_nova_password.yaml (+11)
  71. site/airskiff/secrets/passphrases/osh_nova_rabbitmq_erlang_cookie.yaml (+11)
  72. site/airskiff/secrets/passphrases/osh_oslo_cache_secret_key.yaml (+11)
  73. site/airskiff/secrets/passphrases/osh_oslo_db_admin_password.yaml (+11)
  74. site/airskiff/secrets/passphrases/osh_oslo_db_exporter_password.yaml (+11)
  75. site/airskiff/secrets/passphrases/osh_placement_password.yaml (+11)
  76. site/airskiff/secrets/passphrases/tenant_ceph_fsid.yaml (+12)
  77. site/airskiff/secrets/passphrases/ubuntu_crypt_password.yaml (+12)
  78. site/airskiff/secrets/passphrases/ucp_airflow_postgres_password.yaml (+11)
  79. site/airskiff/secrets/passphrases/ucp_armada_keystone_password.yaml (+11)
  80. site/airskiff/secrets/passphrases/ucp_barbican_keystone_password.yaml (+11)
  81. site/airskiff/secrets/passphrases/ucp_barbican_oslo_db_password.yaml (+11)
  82. site/airskiff/secrets/passphrases/ucp_deckhand_keystone_password.yaml (+11)
  83. site/airskiff/secrets/passphrases/ucp_deckhand_postgres_password.yaml (+11)
  84. site/airskiff/secrets/passphrases/ucp_drydock_keystone_password.yaml (+11)
  85. site/airskiff/secrets/passphrases/ucp_drydock_postgres_password.yaml (+11)
  86. site/airskiff/secrets/passphrases/ucp_keystone_admin_password.yaml (+11)
  87. site/airskiff/secrets/passphrases/ucp_keystone_oslo_db_password.yaml (+11)
  88. site/airskiff/secrets/passphrases/ucp_maas_admin_password.yaml (+11)
  89. site/airskiff/secrets/passphrases/ucp_maas_postgres_password.yaml (+11)
  90. site/airskiff/secrets/passphrases/ucp_openstack_exporter_keystone_password.yaml (+11)
  91. site/airskiff/secrets/passphrases/ucp_oslo_db_admin_password.yaml (+11)
  92. site/airskiff/secrets/passphrases/ucp_oslo_messaging_password.yaml (+11)
  93. site/airskiff/secrets/passphrases/ucp_postgres_admin_password.yaml (+11)
  94. site/airskiff/secrets/passphrases/ucp_promenade_keystone_password.yaml (+11)
  95. site/airskiff/secrets/passphrases/ucp_rabbitmq_erlang_cookie.yaml (+11)
  96. site/airskiff/secrets/passphrases/ucp_shipyard_keystone_password.yaml (+11)
  97. site/airskiff/secrets/passphrases/ucp_shipyard_postgres_password.yaml (+11)
  98. site/airskiff/site-definition.yaml (+12)
  99. site/airskiff/software/charts/kubernetes/container-networking/etcd.yaml (+161)
  100. site/airskiff/software/charts/kubernetes/etcd/etcd.yaml (+165)

307
doc/source/airskiff.rst

@@ -0,0 +1,307 @@
Airskiff
========
* Skiff (n): a shallow, flat-bottomed, open boat
* Airskiff (n): a learning, development, and gating environment for Airship
What is Airskiff
----------------
Airskiff is an easy way to get started with the software delivery components
of Airship:
* `Armada`_
* `Deckhand`_
* `Pegleg`_
* `Shipyard`_
Airskiff is packaged with a set of deployment scripts modeled after the
`OpenStack-Helm project`_ for seamless developer setup.
These scripts:
* Download, build, and containerize the Airship components above from source.
* Deploy a Kubernetes cluster using KubeADM.
* Deploy Armada, Deckhand, and Shipyard using the latest `Armada image`_.
* Deploy OpenStack using the Airskiff site and charts from the
  `OpenStack-Helm project`_.
.. warning:: Airskiff is not safe for production use. These scripts are
   only intended to deploy a minimal development environment.
Common Deployment Requirements
------------------------------
This section covers actions that may be required for some deployment scenarios.
Passwordless sudo
~~~~~~~~~~~~~~~~~
Airskiff relies on scripts that utilize the ``sudo`` command. Throughout this
guide, the assumption is that the user is ``ubuntu``. It is advised to add the
following lines to ``/etc/sudoers``:

.. code-block:: bash

    root ALL=(ALL) NOPASSWD: ALL
    ubuntu ALL=(ALL) NOPASSWD: ALL
Proxy Configuration
~~~~~~~~~~~~~~~~~~~
.. note:: This section assumes you have properly defined the standard
   ``http_proxy``, ``https_proxy``, and ``no_proxy`` environment variables and
   have followed the `Docker proxy guide`_ to create a systemd drop-in unit.
In order to deploy Airskiff behind proxy servers, define the following
environment variables:
.. code-block:: shell

    export USE_PROXY=true
    export PROXY=${http_proxy}
    export no_proxy=${no_proxy},172.17.0.1,.svc.cluster.local
    export NO_PROXY=${NO_PROXY},172.17.0.1,.svc.cluster.local
.. note:: The ``.svc.cluster.local`` address is required to allow the OpenStack
   client to communicate without being routed through proxy servers. The IP
   address ``172.17.0.1`` is the advertised IP address for the Kubernetes API
   server. Replace the addresses if your configuration does not match the one
   defined above.
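For reference, a minimal systemd drop-in unit for the Docker daemon can be
created as follows. This is a sketch only: the drop-in path follows the
`Docker proxy guide`_, and it assumes the proxy environment variables above
are already exported in your shell.

.. code-block:: shell

    # Create the drop-in directory and bake the current proxy values into it
    sudo mkdir -p /etc/systemd/system/docker.service.d
    cat <<EOF | sudo tee /etc/systemd/system/docker.service.d/http-proxy.conf
    [Service]
    Environment="HTTP_PROXY=${http_proxy}"
    Environment="HTTPS_PROXY=${https_proxy}"
    Environment="NO_PROXY=${no_proxy}"
    EOF
    # Reload systemd and restart Docker to pick up the new environment
    sudo systemctl daemon-reload
    sudo systemctl restart docker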
Deploy Airskiff
---------------
Deploy Airskiff using the deployment scripts contained in the
``tools/deployment/airskiff`` directory of the `airship-treasuremap`_
repository.
.. note:: Scripts should be run from the root of the ``airship-treasuremap``
   repository.
Install required packages
~~~~~~~~~~~~~~~~~~~~~~~~~
.. literalinclude:: ../../tools/deployment/airskiff/developer/000-install-packages.sh
    :language: shell
    :lines: 1,18-

Alternatively, this step can be performed by running the script directly:

.. code-block:: shell

    ./tools/deployment/airskiff/developer/000-install-packages.sh
Restart your shell session
~~~~~~~~~~~~~~~~~~~~~~~~~~
At this point, restart your shell session to complete adding ``$USER`` to the
``docker`` group.
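You can verify that Docker works without ``sudo`` before continuing
(illustrative commands; ``newgrp docker`` is an alternative to logging out and
back in):

.. code-block:: shell

    # Confirm the docker group is active and the daemon is reachable
    id -nG | grep -w docker
    docker ps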
Build Airship components
~~~~~~~~~~~~~~~~~~~~~~~~
.. literalinclude:: ../../tools/deployment/airskiff/developer/005-make-airship.sh
    :language: shell
    :lines: 1,18-

Alternatively, this step can be performed by running the script directly:

.. code-block:: shell

    ./tools/deployment/airskiff/developer/005-make-airship.sh
Deploy Kubernetes with KubeADM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. literalinclude:: ../../tools/deployment/airskiff/developer/010-deploy-k8s.sh
    :language: shell
    :lines: 1,18-

Alternatively, this step can be performed by running the script directly:

.. code-block:: shell

    ./tools/deployment/airskiff/developer/010-deploy-k8s.sh
Setup OpenStack Client
~~~~~~~~~~~~~~~~~~~~~~
.. literalinclude:: ../../tools/deployment/airskiff/developer/020-setup-client.sh
    :language: shell
    :lines: 1,18-

Alternatively, this step can be performed by running the script directly:

.. code-block:: shell

    ./tools/deployment/airskiff/developer/020-setup-client.sh
Deploy Airship components using Armada
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. literalinclude:: ../../tools/deployment/airskiff/developer/030-armada-bootstrap.sh
    :language: shell
    :lines: 1,18-

Alternatively, this step can be performed by running the script directly:

.. code-block:: shell

    ./tools/deployment/airskiff/developer/030-armada-bootstrap.sh
Deploy OpenStack using Airship
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. literalinclude:: ../../tools/deployment/airskiff/developer/100-deploy-osh.sh
    :language: shell
    :lines: 1,18-

Alternatively, this step can be performed by running the script directly:

.. code-block:: shell

    ./tools/deployment/airskiff/developer/100-deploy-osh.sh
Use Airskiff
------------

The Airskiff deployment scripts install and configure the OpenStack client for
use on your host machine.
Airship Examples
~~~~~~~~~~~~~~~~

To use Airship services, set the ``OS_CLOUD`` environment variable to
``airship``:

.. code-block:: shell

    export OS_CLOUD=airship

List the Airship service endpoints:

.. code-block:: shell

    openstack endpoint list

.. note:: ``${SHIPYARD}`` is the path to a cloned `Shipyard`_ repository.

Run Helm tests for all deployed releases:

.. code-block:: shell

    ${SHIPYARD}/tools/shipyard.sh create action test_site

List all `Shipyard`_ actions:

.. code-block:: shell

    ${SHIPYARD}/tools/shipyard.sh get actions

For more information about Airship operations, see the
`Shipyard actions`_ documentation.
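Individual actions can also be inspected. For example (a sketch; the action ID
below is a placeholder, substitute an ID returned by ``get actions``):

.. code-block:: shell

    ${SHIPYARD}/tools/shipyard.sh describe action/01BTG32JW87G0YKA1K29TKNAFX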
OpenStack Examples
~~~~~~~~~~~~~~~~~~

To use OpenStack services, set the ``OS_CLOUD`` environment variable to
``openstack``:

.. code-block:: shell

    export OS_CLOUD=openstack

List the OpenStack service endpoints:

.. code-block:: shell

    openstack endpoint list

List ``Glance`` images:

.. code-block:: shell

    openstack image list

Issue a new ``Keystone`` token:

.. code-block:: shell

    openstack token issue
.. note:: Airskiff deploys identity, network, cloudformation, placement,
   compute, orchestration, and image services. You can deploy more services
   by adding chart groups to
   ``site/airskiff/software/manifests/full-site.yaml``. For more information,
   refer to the `site authoring and deployment guide`_.
Develop with Airskiff
---------------------

Once you have successfully deployed a running cluster, changes to Airship
and OpenStack components can be deployed using `Shipyard actions`_ or the
Airskiff deployment scripts.

This example demonstrates deploying `Armada`_ changes using the Airskiff
deployment scripts.

.. note:: ``${ARMADA}`` is the path to your cloned Armada repository that
   contains the changes you wish to deploy. ``${TREASUREMAP}`` is the path to
   your cloned Treasuremap repository.

Build Armada:

.. code-block:: shell

    cd ${ARMADA}
    make images

Update Airship components:

.. code-block:: shell

    cd ${TREASUREMAP}
    ./tools/deployment/airskiff/developer/030-armada-bootstrap.sh
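Alternatively, as noted above, changes can be rolled out through `Shipyard
actions`_. For example, a software update might be triggered with the
following (a sketch, assuming updated documents have already been loaded into
the site):

.. code-block:: shell

    ${SHIPYARD}/tools/shipyard.sh create action update_software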
Troubleshooting
---------------
This section is intended to help you through the initial troubleshooting
process. If issues persist after following this guide, please join us on
`IRC`_: #airshipit (freenode)

``Missing value auth-url required for auth plugin password``

If this error message appears when using the OpenStack client, verify your
client is configured for authentication:

.. code-block:: shell

    # For Airship services
    export OS_CLOUD=airship

    # For OpenStack services
    export OS_CLOUD=openstack
.. _Docker proxy guide: https://docs.docker.com/config/daemon/systemd/#httphttps-proxy
.. _OpenStack-Helm project: https://docs.openstack.org/openstack-helm/latest/install/developer/requirements-and-host-config.html
.. _Armada: https://github.com/openstack/airship-armada
.. _Deckhand: https://github.com/openstack/airship-deckhand
.. _Pegleg: https://github.com/openstack/airship-pegleg
.. _Shipyard: https://github.com/openstack/airship-shipyard
.. _Armada image: https://quay.io/repository/airshipit/armada?tab=tags
.. _airship-treasuremap: https://github.com/openstack/airship-treasuremap
.. _Shipyard actions: https://airship-shipyard.readthedocs.io/en/latest/action-commands.html
.. _IRC: irc://chat.freenode.net:6697/airshipit
.. _site authoring and deployment guide: https://airship-treasuremap.readthedocs.io/en/latest/authoring_and_deployment.html

1
doc/source/index.rst

@@ -194,6 +194,7 @@ Process Flows
authoring_and_deployment
seaworthy
airskiff
.. _Barbican: https://docs.openstack.org/barbican/latest/api/
.. _Helm Homepage: https://helm.sh/

34
site/airskiff/baremetal/bootactions/promjoin.yaml

@@ -0,0 +1,34 @@
---
# NOTE: This file is ignored by Airskiff and is copied from the seaworthy site.
# This file defines a boot action which is responsible for fetching the node's
# promjoin script from the promenade API. This is the script responsible for
# installing kubernetes on the node and joining the kubernetes cluster.
# #GLOBAL-CANDIDATE#
schema: 'drydock/BootAction/v1'
metadata:
schema: 'metadata/Document/v1'
name: promjoin
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: site
labels:
application: 'drydock'
data:
signaling: false
# TODO(alanmeadows) move what is global about this document
assets:
- path: /opt/promjoin.sh
type: file
permissions: '555'
# The ip= parameter must match the MaaS network name of the network used
# to contact kubernetes. With a standard, reference Airship deployment where
# L2 networks are shared between all racks, the network name (i.e. calico)
# should be correct.
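# For illustration only, a hypothetical rendered value for node cab23-r720-12
# (with design_ref elided) would look like:
# promenade+http://promenade-api.ucp.svc.cluster.local/api/v1.0/join-scripts?design_ref=...&hostname=cab23-r720-12&ip=10.23.22.12&labels.dynamic=...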
location: promenade+http://promenade-api.ucp.svc.cluster.local/api/v1.0/join-scripts?design_ref={{ action.design_ref | urlencode }}&hostname={{ node.hostname }}&ip={{ node.network.calico.ip }}{% for k, v in node.labels.items() %}&labels.dynamic={{ k }}={{ v }}{% endfor %}
location_pipeline:
- template
data_pipeline:
- utf8_decode
...

256
site/airskiff/baremetal/nodes.yaml

@@ -0,0 +1,256 @@
---
# NOTE: This file is ignored by Airskiff and is copied from the seaworthy site.
# Drydock BaremetalNode resources for a specific rack are stored in this file.
#
# NOTE: For new sites, you should complete the networks/physical/networks.yaml
# file before working on this file.
#
# In this file, you should make the number of `drydock/BaremetalNode/v1`
# resources equal the number of bare metal nodes you have, either by deleting
# excess BaremetalNode definitions (if there are too many), or by copying and
# pasting the last BaremetalNode in the file until you have the correct number
# of baremetal nodes (if there are too few).
#
# Then in each file, address all additional NEWSITE-CHANGEME markers to update
# the data in these files with the right values for your new site.
#
# *NOTE: The Genesis node is counted as one of the control plane nodes. Note
# that the Genesis node does not appear on this bare metal list, because the
# procedure to reprovision the Genesis host with MaaS has not yet been
# implemented. Therefore there will be only three bare metal nodes in this file
# with the 'masters' tag, as the genesis roles are assigned in a different
# place (profiles/genesis.yaml).
# NOTE: The host profiles for the control plane are further divided into two
# variants: primary and secondary. The only significance this has is that the
# "primary" nodes are active Ceph nodes, whereas the "secondary" nodes are Ceph
# standby nodes. For Ceph quorum, this means that the control plane split will
# be 3 primary + 1 standby host profile, and the Genesis node counts toward one
# of the 3 primary profiles. Other control plane services are not affected by
# primary vs secondary designation.
#
# TODO: Include the hostname naming convention
#
schema: 'drydock/BaremetalNode/v1'
metadata:
schema: 'metadata/Document/v1'
# NEWSITE-CHANGEME: Replace with the hostname of the first node in the rack,
# after (excluding) genesis.
name: cab23-r720-12
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# NEWSITE-CHANGEME: The IPv4 address assigned to each logical network on this
# node. In the reference Airship deployment, this covers all logical networks
# defined in networks/physical/networks.yaml. IP addresses are assigned
# manually, by hand (what could possibly go wrong!). The instructions differ
# for each logical network and are laid out below.
addressing:
# The iDrac/iLo IP of the node. It's important that this match up with the
# node's hostname above, so that the rack number and node position encoded
# in the hostname are accurate and matching the node that IPMI operations
# will be performed against (for poweron, poweroff, PXE boot to wipe disk or
# reconfigure identity, etc - very important to get right for these reasons).
# These addresses should already be assigned to nodes racked and stacked in
# the environment; these are not addresses which MaaS assigns.
- network: oob
address: 10.23.104.12
# The IP of the node on the PXE network. Refer to the static IP range
# defined for the PXE network in networks/physical/networks.yaml. Begin allocating
# IPs from this network, starting with the second IP (inclusive) from the
# allocation range of this subnet (Genesis node will have the first IP).
# Ex: If the start IP for the PXE "static" network is 10.23.20.11, then
# genesis will have 10.23.20.11, this node will have 10.23.20.12, and
# so on with incrementing IP addresses with each additional node.
- network: pxe
address: 10.23.20.12
# Genesis node gets first IP, all other nodes increment IPs from there
# within the allocation range defined for the network in
# networks/physical/networks.yaml
- network: oam
address: 10.23.21.12
# Genesis node gets first IP, all other nodes increment IPs from there
# within the allocation range defined for the network in
# networks/physical/networks.yaml
- network: storage
address: 10.23.23.12
# Genesis node gets first IP, all other nodes increment IPs from there
# within the allocation range defined for the network in
# networks/physical/networks.yaml
- network: overlay
address: 10.23.24.12
# Genesis node gets first IP, all other nodes increment IPs from there
# within the allocation range defined for the network in
# networks/physical/networks.yaml
- network: calico
address: 10.23.22.12
# NEWSITE-CHANGEME: Set the host profile for the node.
# Note that there are different host profiles depending on whether this is a
# control plane or data plane node, and different profiles that map to
# different types of hardware. Control plane host profiles are further broken
# down into "primary" and "secondary" profiles (refer to the Notes section at
# the top of this doc). Select the host profile that matches your type of
# hardware and function. E.g., the r720 here refers to Dell R720 hardware, the
# 'cp' refers to a control plane profile, and the "primary" means it will be
# an active member in the ceph quorum. Refer to profiles/host/ for the list
# of available host profiles specific to this site (otherwise, you may find
# a general set of host profiles at the "type" or "global" layers/folders).
# If you have hardware that is not on this list of profiles, you may need to
# create a new host profile for that hardware.
# Regarding control plane vs other data plane profiles, refer to the notes at
# the beginning of this file. There should be one control plane node per rack,
# including Genesis. Note Genesis won't actually be listed in this file as a
# BaremetalNode, but the rest are.
# This is the second "primary" control plane node after Genesis.
host_profile: cp_r720-primary
metadata:
tags:
# NEWSITE-CHANGEME: See previous comment. Apply 'masters' tag for control
# plane node, and 'workers' tag for data plane hosts.
- 'masters'
# NEWSITE-CHANGEME: Refer to site engineering package or other supporting
# documentation for the specific rack name. This should be a rack name that
# is meaningful to data center personnel (i.e. a rack they could locate if
# you gave them this rack designation).
rack: cab23
...
---
schema: 'drydock/BaremetalNode/v1'
metadata:
schema: 'metadata/Document/v1'
# NEWSITE-CHANGEME: The next node's hostname
name: cab23-r720-13
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# NEWSITE-CHANGEME: The next node's IPv4 addressing
addressing:
- network: oob
address: 10.23.104.13
- network: pxe
address: 10.23.20.13
- network: oam
address: 10.23.21.13
- network: storage
address: 10.23.23.13
- network: overlay
address: 10.23.24.13
- network: calico
address: 10.23.22.13
# NEWSITE-CHANGEME: The next node's host profile
host_profile: cp_r720-primary
metadata:
# NEWSITE-CHANGEME: The next node's rack designation
rack: cab23
# NEWSITE-CHANGEME: The next node's role designation
tags:
- 'masters'
...
---
schema: 'drydock/BaremetalNode/v1'
metadata:
schema: 'metadata/Document/v1'
# NEWSITE-CHANGEME: The next node's hostname
name: cab23-r720-14
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# NEWSITE-CHANGEME: The next node's IPv4 addressing
addressing:
- network: oob
address: 10.23.104.14
- network: pxe
address: 10.23.20.14
- network: oam
address: 10.23.21.14
- network: storage
address: 10.23.23.14
- network: overlay
address: 10.23.24.14
- network: calico
address: 10.23.22.14
# NEWSITE-CHANGEME: The next node's host profile
# This is the third "primary" control plane profile after genesis
host_profile: dp_r720
metadata:
# NEWSITE-CHANGEME: The next node's rack designation
rack: cab23
# NEWSITE-CHANGEME: The next node's role designation
tags:
- 'workers'
...
---
schema: 'drydock/BaremetalNode/v1'
metadata:
schema: 'metadata/Document/v1'
# NEWSITE-CHANGEME: The next node's hostname
name: cab23-r720-17
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# NEWSITE-CHANGEME: The next node's IPv4 addressing
addressing:
- network: oob
address: 10.23.104.17
- network: pxe
address: 10.23.20.17
- network: oam
address: 10.23.21.17
- network: storage
address: 10.23.23.17
- network: overlay
address: 10.23.24.17
- network: calico
address: 10.23.22.17
# NEWSITE-CHANGEME: The next node's host profile
# This is the one and only appearance of the "secondary" control plane profile
host_profile: dp_r720
metadata:
# NEWSITE-CHANGEME: The next node's rack designation
rack: cab23
# NEWSITE-CHANGEME: The next node's role designation
tags:
- 'workers'
...
---
schema: 'drydock/BaremetalNode/v1'
metadata:
schema: 'metadata/Document/v1'
# NEWSITE-CHANGEME: The next node's hostname
name: cab23-r720-19
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# NEWSITE-CHANGEME: The next node's IPv4 addressing
addressing:
- network: oob
address: 10.23.104.19
- network: pxe
address: 10.23.20.19
- network: oam
address: 10.23.21.19
- network: storage
address: 10.23.23.19
- network: overlay
address: 10.23.24.19
- network: calico
address: 10.23.22.19
# NEWSITE-CHANGEME: The next node's host profile
host_profile: dp_r720
metadata:
# NEWSITE-CHANGEME: The next node's rack designation
rack: cab23
# NEWSITE-CHANGEME: The next node's role designation
tags:
- 'workers'
...

13
site/airskiff/deployment/deployment-configuration.yaml

@@ -0,0 +1,13 @@
---
schema: shipyard/DeploymentConfiguration/v1
metadata:
schema: metadata/Document/v1
name: deployment-configuration
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
armada:
manifest: full-site
...

157
site/airskiff/networks/common-addresses.yaml

@@ -0,0 +1,157 @@
---
schema: pegleg/CommonAddresses/v1
metadata:
schema: metadata/Document/v1
name: common-addresses
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
calico:
# NEWSITE-CHANGEME: The interface that calico will use. Update if your
# logical bond interface name or calico VLAN have changed from the reference
# site design. This should be the bond and VLAN number specified for the
# Calico network in networks/physical/networks.yaml. E.g., with VLAN 22 for
# the calico network as a member of bond0, you would set "interface=bond0.22"
# as shown here.
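# (For illustration: Calico also supports other autodetection methods, such as
# "can-reach=<destination IP>", which can be useful if interface names vary
# across nodes; the reference sites use the interface method shown below.)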
ip_autodetection_method: interface=bond0.22
etcd:
# etcd service IP address
service_ip: 10.96.232.136
dns:
# Kubernetes cluster domain. Do not change. This is internal to the cluster.
cluster_domain: cluster.local
# DNS service ip
service_ip: 10.96.0.10
# List of upstream DNS forwards. Verify you can reach them from your
# environment. If so, you should not need to change them.
upstream_servers:
- 8.8.8.8
- 8.8.4.4
- 208.67.222.222
# Repeat the same values as above, but formatted as a comma-separated
# string
upstream_servers_joined: 8.8.8.8,8.8.4.4,208.67.222.222
# NEWSITE-CHANGEME: FQDN for ingress (i.e. "publicly facing" access point)
# Choose FQDN according to the ingress/public FQDN naming conventions at
# the top of this document.
ingress_domain: openstack.svc.cluster.local
genesis:
# NEWSITE-CHANGEME: Update with the hostname for the node which will take on
# the Genesis role. Refer to the hostname naming standards in
# networks/physical/networks.yaml
# NOTE: Ensure that the genesis node is manually configured with this
# hostname before running `genesis.sh` on the node.
hostname: cab23-r720-11
# NEWSITE-CHANGEME: Calico IP of the Genesis node. Use the "start" value for
# the calico network defined in networks/physical/networks.yaml for this IP.
ip: 10.23.22.11
bootstrap:
# NEWSITE-CHANGEME: Update with the "start" value/IP of the static range
# defined for the pxe network in networks/physical/networks.yaml
ip: 10.23.20.11
kubernetes:
# K8s API service IP
api_service_ip: 10.96.0.1
# etcd service IP
etcd_service_ip: 10.96.0.2
# k8s pod CIDR (network which pod traffic will traverse)
pod_cidr: 10.97.0.0/16
# k8s service CIDR (network which k8s API traffic will traverse)
service_cidr: 10.96.0.0/16
# misc k8s port settings
apiserver_port: 6443
haproxy_port: 6553
service_node_port_range: 30000-32767
# etcd port settings
etcd:
container_port: 2379
haproxy_port: 2378
# NEWSITE-CHANGEME: A list of nodes (apart from Genesis) which act as the
# control plane servers. Ensure that this matches the nodes with the 'masters'
# tags applied in baremetal/nodes.yaml
masters:
- hostname: cab23-r720-12
- hostname: cab23-r720-13
- hostname: cab23-r720-14
# NEWSITE-CHANGEME: Environment proxy information.
# NOTE: Reference Airship sites do not deploy behind a proxy, so this proxy
# section should be commented out. However, if you are in a lab that requires
# a proxy, ensure that these proxy settings are correct and reachable in your
# environment; otherwise update them with the correct values for your
# environment.
proxy:
http: ""
https: ""
no_proxy: []
node_ports:
drydock_api: 30000
maas_api: 30001
maas_proxy: 31800 # hardcoded in MAAS
shipyard_api: 30003
airflow_web: 30004
ntp:
# Comma-separated NTP server list. Verify that these upstream NTP servers
# are reachable in your environment; otherwise update them with the correct
# values for your environment.
servers_joined: '0.ubuntu.pool.ntp.org,1.ubuntu.pool.ntp.org,2.ubuntu.pool.ntp.org,4.ubuntu.pool.ntp.org'
# NOTE: This will be updated soon
ldap:
# NEWSITE-CHANGEME: FQDN for LDAP. Update to the FQDN that is
# relevant for your type of deployment (test vs prod values, etc).
base_url: 'ldap.example.com'
# NEWSITE-CHANGEME: As above, with the protocol included to create a full
# URI
url: 'ldap://ldap.example.com'
# NEWSITE-CHANGEME: Update to the correct expression relevant for this
# deployment (test vs prod values, etc)
auth_path: DC=test,DC=test,DC=com?sAMAccountName?sub?memberof=CN=test,OU=Application,OU=Groups,DC=test,DC=test,DC=com
# NEWSITE-CHANGEME: Update to the correct AD group that contains the users
# relevant for this deployment (test users vs prod users/values, etc)
common_name: test
# NEWSITE-CHANGEME: Update to the correct subdomain for your type of
# deployment (test vs prod values, etc)
subdomain: test
# NEWSITE-CHANGEME: Update to the correct domain for your type of
# deployment (test vs prod values, etc)
domain: example
storage:
ceph:
# NEWSITE-CHANGEME: CIDRs for Ceph. Update to match the network CIDR
# used for the `storage` network in networks/physical/networks.yaml
public_cidr: '10.23.23.0/24'
cluster_cidr: '10.23.23.0/24'
neutron:
# NEWSITE-CHANGEME: Overlay network for VM traffic. Ensure the bond name and
# VLAN number are consistent with what's defined for the bond and the
# overlay
# network in networks/physical/networks.yaml
tunnel_device: 'docker0'
# bond which the overlay is a member of. Ensure the bond name is consistent
# with the bond assigned to the overlay network in
# networks/physical/networks.yaml
external_iface: 'docker0'
openvswitch:
# bond which the overlay is a member of. Ensure the bond name is consistent
# with the bond assigned to the overlay network in
# networks/physical/networks.yaml
external_iface: 'docker0'
...

302
site/airskiff/networks/physical/networks.yaml

@@ -0,0 +1,302 @@
---
# NOTE: This file is ignored by Airskiff and is copied from the seaworthy site.
# The purpose of this file is to define all of the NetworkLinks (i.e. layer 1
# devices) and Networks (i.e. layer 3 configurations). The following is standard
# for the logical networks in Airship:
#
# +----------+-----------------------------------+----------------+--------------+----------------------------------------------------+-----------------+
# | Network | | Per-rack or | | | VLAN tagged |
# | Name | Purpose | per-site CIDR? | Has gateway? | Bond | or untagged? |
# +----------+-----------------------------------+----------------+--------------+----------------------------------------------------+-----------------+
# | oob | Out of Band devices (iDrac/iLo) | per-site CIDR | Has gateway | No bond, N/A | Untagged/Native |
# | pxe | PXE boot network | per-site CIDR | No gateway | No bond, no LACP fallback. Dedicated PXE interface | Untagged/Native |
# | oam | management network | per-site CIDR | Has gateway | member of bond0 | tagged |
# | storage | storage network | per-site CIDR | No gateway | member of bond0 | tagged |
# | calico | underlay calico net; k8s traffic | per-site CIDR | No gateway | member of bond0 | tagged |
# | overlay | overlay network for openstack SDN | per-site CIDR | No gateway | member of bond0 | tagged |
# +----------+-----------------------------------+----------------+--------------+----------------------------------------------------+-----------------+
#
# For standard Airship deployments, you should not need to modify the number of
# NetworkLinks and Networks in this file. Only the IP addresses and CIDRs should
# need editing.
#
# TODO: Given that we expect all network broadcast domains to span all racks in
# Airship, we should choose network names that do not include the rack number.
#
# TODO: FQDN naming standards for hosts
#
schema: 'drydock/NetworkLink/v1'
metadata:
schema: 'metadata/Document/v1'
name: oob
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# MaaS doesn't own this network like it does the others, so the noconfig label
# is specified.
labels:
noconfig: enabled
bonding:
mode: disabled
mtu: 1500
linkspeed: auto
trunking:
mode: disabled
default_network: oob
allowed_networks:
- oob
...
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: oob
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# NEWSITE-CHANGEME: Update with the site's out-of-band CIDR
cidr: 10.23.104.0/24
routes:
# NEWSITE-CHANGEME: Update with the site's out-of-band gateway IP
- subnet: '0.0.0.0/0'
gateway: 10.23.104.1
metric: 100
# NEWSITE-CHANGEME: Update with the site's out-of-band IP allocation range
# FIXME: Is this IP range actually used/allocated for anything? The HW already
# has its OOB IPs assigned. None of the Ubuntu OS's should need IPs on OOB
# network either, as they should be routable via the default gw on OAM network
ranges:
- type: static
start: 10.23.104.11
end: 10.23.104.21
...
---
schema: 'drydock/NetworkLink/v1'
metadata:
schema: 'metadata/Document/v1'
name: pxe
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
bonding:
mode: disabled
mtu: 1500
linkspeed: auto
trunking:
mode: disabled
default_network: pxe
allowed_networks:
- pxe
...
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: pxe
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# NEWSITE-CHANGEME: Update with the site's PXE network CIDR
# NOTE: The CIDR minimum size = (number of nodes * 2) + 10
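# (Worked example: with 20 nodes, (20 * 2) + 10 = 50 addresses are needed at
# minimum, so a /26 (62 usable hosts) or larger subnet is required; the /24
# used here leaves ample headroom.)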
cidr: 10.23.20.0/24
routes:
- subnet: 0.0.0.0/0
# NEWSITE-CHANGEME: Set the OAM network gateway IP address
gateway: 10.23.20.1
metric: 100
# NOTE: The first 10 IPs in the subnet are reserved for network infrastructure.
# The remainder of the range is divided between two subnets of equal size:
# one static, and one DHCP.
# The DHCP addresses are used when nodes perform a PXE boot (DHCP address gets
# assigned), and when a node is commissioning in MaaS (also uses DHCP to get
# its IP address). However, when MaaS installs the operating system
# ("Deploying/Deployed" states), it will write a static IP assignment to
# /etc/network/interfaces[.d] with IPs from the "static" subnet defined here.
ranges:
# NEWSITE-CHANGEME: Update to the first 10 IPs in the CIDR
- type: reserved
start: 10.23.20.1
end: 10.23.20.10
# NEWSITE-CHANGEME: Update to the first half of the remaining range after
# excluding the 10 reserved IPs.
- type: static
start: 10.23.20.11
end: 10.23.20.21
# NEWSITE-CHANGEME: Update to the second half of the remaining range after
# excluding the 10 reserved IPs.
- type: dhcp
start: 10.23.20.40
end: 10.23.20.80
dns:
# NEWSITE-CHANGEME: FQDN for bare metal nodes.
# Choose FQDN according to the node FQDN naming conventions at the top of
# this document.
domain: atlantafoundry.com
# List of upstream DNS forwards. Verify you can reach them from your
# environment. If so, you should not need to change them.
# TODO: This should be populated via substitution from common-addresses
servers: '8.8.8.8,8.8.4.4,208.67.222.222'
...
---
schema: 'drydock/NetworkLink/v1'
metadata:
schema: 'metadata/Document/v1'
name: data
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
bonding:
mode: 802.3ad
hash: layer3+4
peer_rate: fast
mon_rate: 100
up_delay: 1000
down_delay: 3000
# NEWSITE-CHANGEME: Ensure the network switches in the environment are
# configured for this MTU or greater. Even if switches are configured for or
# can support a slightly higher MTU, there is no need (and negligible benefit)
# to squeeze every last byte into the MTU (e.g., 9216 vs 9100). Leave MTU at
# 9100 for maximum compatibility.
mtu: 9100
linkspeed: auto
trunking:
mode: 802.1q
allowed_networks:
- oam
- storage
- overlay
- calico
...
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: oam
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# NEWSITE-CHANGEME: Set the VLAN ID which the OAM network is on
vlan: '21'
mtu: 9100
# NEWSITE-CHANGEME: Set the CIDR for the OAM network
# NOTE: The CIDR minimum size = number of nodes + 10
cidr: 10.23.21.0/24
routes:
- subnet: 0.0.0.0/0
# NEWSITE-CHANGEME: Set the OAM network gateway IP address
gateway: 10.23.21.1
metric: 100
ranges:
# NEWSITE-CHANGEME: Update to the first 10 IPs in the CIDR
- type: reserved
start: 10.23.21.1
end: 10.23.21.10
# NEWSITE-CHANGEME: Update to the remaining range after excluding the
# 10 reserved IPs.
- type: static
start: 10.23.21.11
end: 10.23.21.21
dns:
# NEWSITE-CHANGEME: FQDN for bare metal nodes.
# Choose FQDN according to the node FQDN naming conventions at the top of
# this document.
domain: atlantafoundry.com
# List of upstream DNS forwards. Verify you can reach them from your
# environment. If so, you should not need to change them.
# TODO: This should be populated via substitution from common-addresses
servers: '8.8.8.8,8.8.4.4,208.67.222.222'
...
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: storage
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# NEWSITE-CHANGEME: Set the VLAN ID which the storage network is on
vlan: '23'
mtu: 9100
# NEWSITE-CHANGEME: Set the CIDR for the storage network
# NOTE: The CIDR minimum size = number of nodes + 10
cidr: 10.23.23.0/24
ranges:
# NEWSITE-CHANGEME: Update to the first 10 IPs in the CIDR
- type: reserved
start: 10.23.23.1
end: 10.23.23.10
# NEWSITE-CHANGEME: Update to the remaining range after excluding the
# 10 reserved IPs.
- type: static
start: 10.23.23.11
end: 10.23.23.21
...
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: overlay
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# NEWSITE-CHANGEME: Set the VLAN ID which the overlay network is on
vlan: '24'
mtu: 9100
# NEWSITE-CHANGEME: Set the CIDR for the overlay network
# NOTE: The CIDR minimum size = number of nodes + 10
cidr: 10.23.24.0/24
ranges:
# NEWSITE-CHANGEME: Update to the first 10 IPs in the CIDR
- type: reserved
start: 10.23.24.1
end: 10.23.24.10
# NEWSITE-CHANGEME: Update to the remaining range after excluding the
# 10 reserved IPs.
- type: static
start: 10.23.24.11
end: 10.23.24.21
...
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: calico
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# NEWSITE-CHANGEME: Set the VLAN ID which the calico network is on
vlan: '22'
mtu: 9100
# NEWSITE-CHANGEME: Set the CIDR for the calico network
# NOTE: The CIDR minimum size = number of nodes + 10
cidr: 10.23.22.0/24
ranges:
# NEWSITE-CHANGEME: Update to the first 10 IPs in the CIDR
- type: reserved
start: 10.23.22.1
end: 10.23.22.10
# NEWSITE-CHANGEME: Update to the remaining range after excluding the
# 10 reserved IPs.
- type: static
start: 10.23.22.11
end: 10.23.22.21
...

50
site/airskiff/profiles/genesis.yaml

@@ -0,0 +1,50 @@
---
# NOTE: This file is ignored by Airskiff and is copied from the seaworthy site.
# The purpose of this file is to apply the proper labels to the Genesis node
# so the proper services are installed and the proper configuration is
# applied. This should not need to be changed for a new site.
# #GLOBAL-CANDIDATE#
schema: promenade/Genesis/v1
metadata:
schema: metadata/Document/v1
name: genesis-site
layeringDefinition:
abstract: false
layer: site
parentSelector:
name: genesis-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
labels:
dynamic:
- beta.kubernetes.io/fluentd-ds-ready=true
- calico-etcd=enabled
- ceph-mds=enabled
- ceph-mon=enabled
- ceph-osd=enabled
- ceph-rgw=enabled
- ceph-mgr=enabled
- ceph-bootstrap=enabled
- tenant-ceph-control-plane=enabled
- tenant-ceph-mon=enabled
- tenant-ceph-rgw=enabled
- tenant-ceph-mgr=enabled
- kube-dns=enabled
- kube-ingress=enabled
- kubernetes-apiserver=enabled
- kubernetes-controller-manager=enabled
- kubernetes-etcd=enabled
- kubernetes-scheduler=enabled
- promenade-genesis=enabled
- ucp-control-plane=enabled
- maas-control-plane=enabled
- ceph-osd-bootstrap=enabled
- openstack-control-plane=enabled
- openvswitch=enabled
- openstack-l3-agent=enabled
- node-exporter=enabled
...

78
site/airskiff/profiles/hardware/dell_r720.yaml

@@ -0,0 +1,78 @@
---
# NOTE: This file is ignored by Airskiff and is copied from the seaworthy site.
schema: 'drydock/HardwareProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: dell_r720
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
# Vendor of the server chassis
vendor: DELL
# Generation of the chassis model
generation: '8'
# Version of the chassis model within its generation - not version of the hardware definition
hw_version: '3'
# The certified version of the chassis BIOS
bios_version: '2.2.3'
# Mode of the default boot of hardware - bios, uefi
boot_mode: bios
# Protocol of boot of the hardware - pxe, usb, hdd
bootstrap_protocol: pxe
# Which interface to use for network booting within the OOB manager, not OS device
pxe_interface: 0
# Map hardware addresses to aliases/roles to allow a mix of hardware configs
# in a site to result in a consistent configuration
device_aliases:
## network
# eno1
pxe_nic01:
address: '0000:01:00.0'
# type could identify expected hardware - used for hardware manifest validation
dev_type: 'I350 Gigabit Network Connection'
bus_type: 'pci'
# enp67s0f0
data_nic01:
address: '0000:43:00.0'
dev_type: 'Ethernet 10G 2P X520 Adapter'
bus_type: 'pci'
# enp67s0f1
data_nic02:
address: '0000:43:00.1'
dev_type: 'Ethernet 10G 2P X520 Adapter'
bus_type: 'pci'
# enp68s0f0
data_nic03:
address: '0000:44:00.0'
dev_type: 'Ethernet 10G 2P X520 Adapter'
bus_type: 'pci'
# enp68s0f1
data_nic04:
address: '0000:44:00.1'
dev_type: 'Ethernet 10G 2P X520 Adapter'
bus_type: 'pci'
## storage
# /dev/sda
bootdisk:
address: '0:2.0.0'
dev_type: 'PERC H710P'
bus_type: 'scsi'
# /dev/sdb
cephjournal1:
address: '0:2.1.0'
dev_type: 'PERC H710P'
bus_type: 'scsi'
# /dev/sdc
cephjournal2:
address: '0:2.2.0'
dev_type: 'PERC H710P'
bus_type: 'scsi'
# /dev/sdd
ephemeral:
address: '0:2.3.0'
dev_type: 'PERC H710P'
bus_type: 'scsi'
...

272
site/airskiff/profiles/host/cp_r720.yaml

@@ -0,0 +1,272 @@
---
# NOTE: This file is ignored by Airskiff and is copied from the seaworthy site.
# This is the primary control plane host profile for Airship for DELL R720s;
# it should not need to be altered if you are using matching HW. The active
# participants in the Ceph cluster run on this profile. Other control plane
# services are not affected by the primary vs secondary designation.
schema: drydock/HostProfile/v1
metadata:
schema: metadata/Document/v1
name: cp_r720-primary
storagePolicy: cleartext
layeringDefinition:
abstract: false
layer: site
parentSelector:
hosttype: cp-global
actions:
- method: replace
path: .interfaces
- method: replace
path: .storage
- method: merge
path: .
data:
hardware_profile: dell_r720
primary_network: oam
interfaces:
pxe:
device_link: pxe
slaves:
- pxe_nic01
networks:
- pxe
bond0:
device_link: data
slaves:
- data_nic01
- data_nic02
- data_nic03
- data_nic04
networks:
- oam
- storage
- overlay
- calico
storage:
physical_devices:
bootdisk:
labels:
bootdrive: 'true'
partitions:
- name: 'root'
size: '30g'
bootable: true
filesystem:
mountpoint: '/'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'boot'
size: '1g'
filesystem:
mountpoint: '/boot'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var_log'
size: '100g'
filesystem:
mountpoint: '/var/log'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var'
size: '>100g'
filesystem:
mountpoint: '/var'
fstype: 'ext4'
mount_options: 'defaults'
cephjournal1:
partitions:
- name: 'ceph-j1'
size: '10g'
- name: 'ceph-j2'
size: '10g'
- name: 'ceph-j3'
size: '10g'
- name: 'ceph-j4'
size: '10g'
cephjournal2:
partitions:
- name: 'ceph-j5'
size: '10g'
- name: 'ceph-j6'
size: '10g'
- name: 'ceph-j7'
size: '10g'
- name: 'ceph-j8'
size: '10g'
platform:
kernel: 'hwe-16.04'
kernel_params:
console: 'ttyS1,115200n8'
metadata:
owner_data:
openstack-l3-agent: enabled
...
---
schema: drydock/HostProfile/v1
metadata:
schema: metadata/Document/v1
name: cp_r740-secondary
storagePolicy: cleartext
layeringDefinition:
abstract: false
layer: site
parentSelector:
hosttype: cp-global
actions:
- method: replace
path: .interfaces
- method: replace
path: .storage
- method: replace
path: .metadata.owner_data
- method: merge
path: .
data:
hardware_profile: dell_r720
primary_network: oam
interfaces:
pxe:
device_link: pxe
slaves:
- pxe_nic01
networks:
- pxe
bond0:
device_link: data
slaves:
- data_nic01
- data_nic02
- data_nic03
- data_nic04
networks:
- oam
- storage
- overlay
- calico
storage:
physical_devices:
bootdisk:
labels:
bootdrive: 'true'
partitions:
- name: 'root'
size: '30g'
bootable: true
filesystem:
mountpoint: '/'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'boot'
size: '1g'
filesystem:
mountpoint: '/boot'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var_log'
size: '100g'
filesystem:
mountpoint: '/var/log'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var'
size: '>100g'
filesystem:
mountpoint: '/var'
fstype: 'ext4'
mount_options: 'defaults'
cephjournal1:
partitions:
- name: 'ceph-j1'
size: '10g'
- name: 'ceph-j2'
size: '10g'
- name: 'ceph-j3'
size: '10g'
- name: 'ceph-j4'
size: '10g'
cephjournal2:
partitions:
- name: 'ceph-j5'
size: '10g'
- name: 'ceph-j6'
size: '10g'
- name: 'ceph-j7'
size: '10g'
- name: 'ceph-j8'
size: '10g'
platform:
kernel: 'hwe-16.04'
kernel_params:
console: 'ttyS1,115200n8'
metadata:
owner_data:
control-plane: enabled
ucp-control-plane: enabled
openstack-control-plane: enabled
openstack-heat: enabled
openstack-keystone: enabled
openstack-rabbitmq: enabled
openstack-dns-helper: enabled
openstack-mariadb: enabled
openstack-nova-control: enabled
# openstack-etcd: enabled
openstack-mistral: enabled
openstack-memcached: enabled
openstack-glance: enabled
openstack-horizon: enabled
openstack-cinder-control: enabled
openstack-cinder-volume: control
openstack-neutron: enabled
openvswitch: enabled
ucp-barbican: enabled
# ceph-mon: enabled
ceph-mgr: enabled
ceph-osd: enabled
ceph-mds: enabled
ceph-rgw: enabled
ucp-maas: enabled
kube-dns: enabled
tenant-ceph-control-plane: enabled
# tenant-ceph-mon: enabled
tenant-ceph-rgw: enabled
tenant-ceph-mgr: enabled
kubernetes-apiserver: enabled
kubernetes-controller-manager: enabled
# kubernetes-etcd: enabled
kubernetes-scheduler: enabled
tiller-helm: enabled
# kube-etcd: enabled
calico-policy: enabled
calico-node: enabled
# calico-etcd: enabled
ucp-armada: enabled
ucp-drydock: enabled
ucp-deckhand: enabled
ucp-shipyard: enabled
IAM: enabled
ucp-promenade: enabled
prometheus-server: enabled
prometheus-client: enabled
fluentd: enabled
influxdb: enabled
kibana: enabled
elasticsearch-client: enabled
elasticsearch-master: enabled
elasticsearch-data: enabled
postgresql: enabled
kube-ingress: enabled
beta.kubernetes.io/fluentd-ds-ready: 'true'
node-exporter: enabled
...

105
site/airskiff/profiles/host/dp_r720.yaml

@@ -0,0 +1,105 @@
---
# NOTE: This file is ignored by Airskiff and is copied from the seaworthy site.
# This is the data plane host profile for Airship for DELL R720s; it should
# not need to be altered if you are using matching HW. The host profile is
# set up for CPU isolation (for nova pinning), hugepages, and SR-IOV.
schema: drydock/HostProfile/v1
metadata:
schema: metadata/Document/v1
name: dp_r720
storagePolicy: cleartext
layeringDefinition:
abstract: false
layer: site
parentSelector:
hosttype: dp-global
actions:
- method: replace
path: .interfaces
- method: replace
path: .storage
- method: merge
path: .
data:
hardware_profile: dell_r720
primary_network: oam
interfaces:
pxe:
device_link: pxe
slaves:
- pxe_nic01
networks:
- pxe
bond0:
device_link: data
slaves:
- data_nic01
- data_nic02
- data_nic03
- data_nic04
networks:
- oam
- storage
- overlay
- calico
storage:
physical_devices:
bootdisk:
labels:
bootdrive: 'true'
partitions:
- name: 'root'
size: '30g'
bootable: true
filesystem:
mountpoint: '/'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'boot'
size: '1g'
filesystem:
mountpoint: '/boot'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var_log'
size: '100g'
filesystem:
mountpoint: '/var/log'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var'
size: '>100g'
filesystem:
mountpoint: '/var'
fstype: 'ext4'
mount_options: 'defaults'
cephjournal1:
partitions:
- name: 'ceph-j1'
size: '10g'
- name: 'ceph-j2'
size: '10g'
cephjournal2:
partitions:
- name: 'ceph-j3'
size: '10g'
- name: 'ceph-j4'
size: '10g'
ephemeral:
partitions:
- name: 'nova'
size: '99%'
filesystem:
mountpoint: '/var/lib/nova'
fstype: 'ext4'
mount_options: 'defaults'
platform:
kernel: 'hwe-16.04'
kernel_params:
console: 'ttyS1,115200n8'
...

55
site/airskiff/profiles/region.yaml

@@ -0,0 +1,55 @@
---
# NOTE: This file is ignored by Airskiff and is copied from the seaworthy site.
# The purpose of this file is to define the drydock Region, which in turn drives
# the MaaS region.
schema: 'drydock/Region/v1'
metadata:
schema: 'metadata/Document/v1'
# NEWSITE-CHANGEME: Replace with the site name
name: airship-seaworthy
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
substitutions:
# NEWSITE-CHANGEME: Substitutions from deckhand SSH public keys into the
# list of authorized keys which MaaS will register for the built-in "ubuntu"
# account during the PXE process. Create a substitution rule for each SSH
# key that should have access to the "ubuntu" account (useful for trouble-
# shooting problems before UAM or UAM-lite is operational). SSH keys are
# stored as secrets in site/airship-seaworthy/secrets.
- dest:
# Add/replace the first item in the list
path: .authorized_keys[0]
src:
schema: deckhand/PublicKey/v1
# This should match the "name" metadata of the SSH key which will be
# substituted, located in site/airship-seaworthy/secrets folder.
name: airship_ssh_public_key
path: .
- dest:
path: .repositories.main_archive
src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .packages.repositories.main_archive
# Second key example
#- dest:
# # Increment the list index
# path: .authorized_keys[1]
# src:
# schema: deckhand/PublicKey/v1
# # your ssh key
# name: MY_USER_ssh_public_key
# path: .
data:
tag_definitions: []
# This is the list of SSH keys which MaaS will register for the built-in
# "ubuntu" account during the PXE process. This list is populated by
# substitution, so the same SSH keys do not need to be repeated in multiple
# manifests.
authorized_keys: []
repositories:
remove_unlisted: true
...

2806
site/airskiff/secrets/certificates/certificates.yaml
File diff suppressed because it is too large

135
site/airskiff/secrets/certificates/ingress.yaml

@ -0,0 +1,135 @@
---
# Example manifest for ingress cert.
# It shall be replaced with a proper/valid set.
# Self-signed certs are not supported.
metadata:
layeringDefinition:
abstract: false
layer: site
name: ingress-crt