Clean up v1 structure

Change-Id: I12feb1db2ef4ffe58be00f0c290b37e7f184efd6
This commit is contained in:
Dmitry Ukov 2019-10-02 09:56:10 +00:00
parent c42bbc7333
commit d247bb6057
845 changed files with 2 additions and 58045 deletions

View File

@ -1,10 +0,0 @@
[style]
based_on_style = pep8
spaces_before_comment = 2
column_limit = 79
blank_line_before_nested_class_or_def = false
blank_line_before_module_docstring = true
split_before_logical_operator = true
split_before_first_argument = true
allow_split_before_dict_value = false
split_before_arithmetic_operator = true

View File

@ -11,29 +11,12 @@
# limitations under the License.
- project:
templates:
- docs-on-readthedocs
vars:
rtd_webhook_id: '47687'
rtd_project_name: 'airship-treasuremap'
check:
jobs:
- treasuremap-seaworthy-site-lint
- treasuremap-seaworthy-virt-site-lint
- treasuremap-airskiff-ubuntu-site-lint
- treasuremap-airskiff-suse-site-lint
- treasuremap-airsloop-site-lint
- treasuremap-aiab-site-lint
- treasuremap-airskiff-deployment-ubuntu
- treasuremap-airskiff-deployment-suse
- noop
gate:
jobs:
- treasuremap-seaworthy-site-lint
- treasuremap-seaworthy-virt-site-lint
- treasuremap-airskiff-ubuntu-site-lint
- treasuremap-airskiff-suse-site-lint
- treasuremap-airsloop-site-lint
- treasuremap-aiab-site-lint
- noop
post:
jobs:
- treasuremap-upload-git-mirror
@ -45,156 +28,6 @@
- name: ubuntu-bionic
label: ubuntu-bionic
- job:
name: treasuremap-site-lint
description:
Lint a site using Pegleg. Default site is seaworthy.
nodeset: treasuremap-single-node
timeout: 900
pre-run:
- tools/gate/playbooks/install-docker.yaml
- tools/gate/playbooks/git-config.yaml
run: tools/gate/playbooks/site-lint.yaml
vars:
site: seaworthy
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- job:
name: treasuremap-seaworthy-site-lint
description: |
Lint the seaworthy site using Pegleg.
parent: treasuremap-site-lint
vars:
site: seaworthy
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- ^site/seaworthy-virt/.*$
- ^site/airskiff/.*$
- ^site/airsloop/.*$
- ^site/aiab/.*$
- job:
name: treasuremap-seaworthy-virt-site-lint
description: |
Lint the seaworthy-virt site using Pegleg.
parent: treasuremap-site-lint
vars:
site: seaworthy-virt
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- ^site/seaworthy/.*$
- ^site/airskiff/.*$
- ^site/airsloop/.*$
- ^site/aiab/.*$
- job:
name: treasuremap-airskiff-ubuntu-site-lint
description: |
Lint the airskiff site using Pegleg.
parent: treasuremap-site-lint
vars:
site: airskiff
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- ^site/seaworthy/.*$
- ^site/seaworthy-virt/.*$
- ^site/airsloop/.*$
- ^site/aiab/.*$
- job:
name: treasuremap-airskiff-suse-site-lint
description: |
Lint the airskiff-suse site using Pegleg.
parent: treasuremap-site-lint
vars:
site: airskiff-suse
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- ^site/seaworthy/.*$
- ^site/seaworthy-virt/.*$
- ^site/airsloop/.*$
- ^site/aiab/.*$
- job:
name: treasuremap-airsloop-site-lint
description: |
Lint the airsloop site using Pegleg.
parent: treasuremap-site-lint
vars:
site: airsloop
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- ^site/seaworthy/.*$
- ^site/seaworthy-virt/.*$
- ^site/airskiff/.*$
- ^site/aiab/.*$
- job:
name: treasuremap-aiab-site-lint
description: |
Lint the aiab site using Pegleg.
parent: treasuremap-site-lint
pre-run:
- tools/gate/playbooks/generate-certs.yaml
vars:
site: aiab
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- ^site/seaworthy/.*$
- ^site/seaworthy-virt/.*$
- ^site/airskiff/.*$
- ^site/airsloop/.*$
- job:
name: treasuremap-airskiff-deployment-ubuntu
nodeset: treasuremap-single-node
description: |
Deploy Memcached using Airskiff and latest Treasuremap changes.
voting: false
timeout: 9600
pre-run:
- tools/gate/playbooks/git-config.yaml
- tools/gate/playbooks/airskiff-reduce-site.yaml
run: tools/gate/playbooks/airskiff-deploy-gate.yaml
post-run: tools/gate/playbooks/debug-report.yaml
vars:
site: airskiff
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- ^site/seaworthy/.*$
- ^site/airsloop/.*$
- ^site/aiab/.*$
- job:
name: treasuremap-airskiff-deployment-suse
nodeset: treasuremap-single-node
description: |
Deploy Memcached using Airskiff-suse and latest Treasuremap changes.
voting: false
timeout: 9600
pre-run:
- tools/gate/playbooks/git-config.yaml
- tools/gate/playbooks/airskiff-reduce-site.yaml
run: tools/gate/playbooks/airskiff-deploy-gate.yaml
vars:
site: airskiff-suse
post-run: tools/gate/playbooks/debug-report.yaml
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- ^site/seaworthy/.*$
- ^site/airsloop/.*$
- ^site/aiab/.*$
- secret:
name: airshipit-github-secret
data:

View File

@ -1,2 +0,0 @@
sphinx>=1.6.2
sphinx_rtd_theme>=0.4.3

View File

@ -1,293 +0,0 @@
Airskiff: Lightweight Airship for Dev
=====================================
* Skiff (n): a shallow, flat-bottomed, open boat
* Airskiff (n): a learning, development, and gating environment for Airship
What is Airskiff
----------------
Airskiff is an easy way to get started with the software delivery components
of Airship:
* `Armada`_
* `Deckhand`_
* `Pegleg`_
* `Shipyard`_
Airskiff is packaged with a set of deployment scripts modeled after the
`OpenStack-Helm project`_ for seamless developer setup.
These scripts:
* Download, build, and containerize the Airship components above from source.
* Deploy a Kubernetes cluster using Minikube.
* Deploy Armada, Deckhand, and Shipyard using the latest `Armada image`_.
* Deploy OpenStack using the Airskiff site and charts from the
`OpenStack-Helm project`_.
.. warning:: Airskiff is not safe for production use. These scripts are
only intended to deploy a minimal development environment.
Common Deployment Requirements
------------------------------
This section covers actions that may be required for some deployment scenarios.
Passwordless sudo
~~~~~~~~~~~~~~~~~
Airskiff relies on scripts that utilize the ``sudo`` command. Throughout this
guide the assumption is that the user is ``ubuntu``. It is advised to add the
following lines to ``/etc/sudoers``:
.. code-block:: bash
root ALL=(ALL) NOPASSWD: ALL
ubuntu ALL=(ALL) NOPASSWD: ALL
Proxy Configuration
~~~~~~~~~~~~~~~~~~~
.. note:: This section assumes you have properly defined the standard
``http_proxy``, ``https_proxy``, and ``no_proxy`` environment variables and
have followed the `Docker proxy guide`_ to create a systemd drop-in unit.
In order to deploy Airskiff behind proxy servers, define the following
environment variables:
.. code-block:: shell
export USE_PROXY=true
export PROXY=${http_proxy}
export no_proxy=${no_proxy},10.0.2.15,.svc.cluster.local
export NO_PROXY=${NO_PROXY},10.0.2.15,.svc.cluster.local
.. note:: The ``.svc.cluster.local`` address is required to allow the OpenStack
client to communicate without being routed through proxy servers. The IP
address ``10.0.2.15`` is the advertised IP address of the minikube Kubernetes
cluster. Replace the addresses if your configuration does not match the one
defined above.
Deploy Airskiff
---------------
Deploy Airskiff using the deployment scripts contained in the
``tools/deployment/airskiff`` directory of the `airship-treasuremap`_
repository.
.. note:: Scripts should be run from the root of ``treasuremap`` repository.
Clone Dependencies
~~~~~~~~~~~~~~~~~~
.. literalinclude:: ../../tools/deployment/airskiff/developer/000-clone-dependencies.sh
:language: shell
:lines: 1,18-
Alternatively, this step can be performed by running the script directly:
.. code-block:: shell
./tools/deployment/airskiff/developer/000-clone-dependencies.sh
Deploy Kubernetes with Minikube
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. literalinclude:: ../../tools/deployment/airskiff/developer/010-deploy-k8s.sh
:language: shell
:lines: 1,18-
Alternatively, this step can be performed by running the script directly:
.. code-block:: shell
./tools/deployment/airskiff/developer/010-deploy-k8s.sh
Restart your shell session
~~~~~~~~~~~~~~~~~~~~~~~~~~
At this point, restart your shell session to complete adding ``$USER`` to the
``docker`` group.
Setup OpenStack Client
~~~~~~~~~~~~~~~~~~~~~~
.. literalinclude:: ../../tools/deployment/airskiff/developer/020-setup-client.sh
:language: shell
:lines: 1,18-
Alternatively, this step can be performed by running the script directly:
.. code-block:: shell
./tools/deployment/airskiff/developer/020-setup-client.sh
Deploy Airship components using Armada
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. literalinclude:: ../../tools/deployment/airskiff/developer/030-armada-bootstrap.sh
:language: shell
:lines: 1,18-
Alternatively, this step can be performed by running the script directly:
.. code-block:: shell
./tools/deployment/airskiff/developer/030-armada-bootstrap.sh
Deploy OpenStack using Airship
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. literalinclude:: ../../tools/deployment/airskiff/developer/100-deploy-osh.sh
:language: shell
:lines: 1,18-
Alternatively, this step can be performed by running the script directly:
.. code-block:: shell
./tools/deployment/airskiff/developer/100-deploy-osh.sh
Use Airskiff
------------
The Airskiff deployment scripts install and configure the OpenStack client for
usage on your host machine.
Airship Examples
~~~~~~~~~~~~~~~~
To use Airship services, set the ``OS_CLOUD`` environment variable to
``airship``.
.. code-block:: shell
export OS_CLOUD=airship
List the Airship service endpoints:
.. code-block:: shell
openstack endpoint list
.. note:: ``${SHIPYARD}`` is the path to a cloned `Shipyard`_ repository.
Run Helm tests for all deployed releases:
.. code-block:: shell
${SHIPYARD}/tools/shipyard.sh create action test_site
List all `Shipyard`_ actions:
.. code-block:: shell
${SHIPYARD}/tools/shipyard.sh get actions
For more information about Airship operations, see the
`Shipyard actions`_ documentation.
OpenStack Examples
~~~~~~~~~~~~~~~~~~
To use OpenStack services, set the ``OS_CLOUD`` environment variable to
``openstack``:
.. code-block:: shell
export OS_CLOUD=openstack
List the OpenStack service endpoints:
.. code-block:: shell
openstack endpoint list
List ``Glance`` images:
.. code-block:: shell
openstack image list
Issue a new ``Keystone`` token:
.. code-block:: shell
openstack token issue
.. note:: Airskiff deploys identity, network, cloudformation, placement,
compute, orchestration, and image services. You can deploy more services
by adding chart groups to
``site/airskiff/software/manifests/full-site.yaml``. For more information,
refer to the `site authoring and deployment guide`_.
Develop with Airskiff
---------------------
Once you have successfully deployed a running cluster, changes to Airship
and OpenStack components can be deployed using `Shipyard actions`_ or the
Airskiff deployment scripts.
This example demonstrates deploying `Armada`_ changes using the Airskiff
deployment scripts.
.. note:: ``${ARMADA}`` is the path to your cloned Armada repository that
contains the changes you wish to deploy. ``${TREASUREMAP}`` is the path to
your cloned Treasuremap repository.
Build Armada:
.. code-block:: shell
cd ${ARMADA}
make images
Update Airship components:
.. code-block:: shell
cd ${TREASUREMAP}
./tools/deployment/airskiff/developer/030-armada-bootstrap.sh
Troubleshooting
---------------
This section is intended to help you through the initial troubleshooting
process. If issues persist after following this guide, please join us on
`IRC`_: #airshipit (freenode)
``Missing value auth-url required for auth plugin password``
If this error message appears when using the OpenStack client, verify your
client is configured for authentication:
.. code-block:: shell
# For Airship services
export OS_CLOUD=airship
# For OpenStack services
export OS_CLOUD=openstack
.. _Docker proxy guide: https://docs.docker.com/config/daemon/systemd/
#httphttps-proxy
.. _OpenStack-Helm project: https://docs.openstack.org/openstack-helm/latest/
install/developer/requirements-and-host-config.html
.. _Armada: https://opendev.org/airship/armada
.. _Deckhand: https://opendev.org/airship/deckhand
.. _Pegleg: https://opendev.org/airship/pegleg
.. _Shipyard: https://opendev.org/airship/shipyard
.. _Armada image: https://quay.io/repository/airshipit/armada?tab=tags
.. _airship-treasuremap: https://opendev.org/airship/treasuremap
.. _Shipyard actions: https://airship-shipyard.readthedocs.io/en/latest/
action-commands.html
.. _IRC: irc://chat.freenode.net:6697/airshipit
.. _site authoring and deployment guide: https://
airship-treasuremap.readthedocs.io/en/latest/authoring_and_deployment.html

View File

@ -1,631 +0,0 @@
Airsloop: Simple Bare-Metal Airship
===================================
Airsloop is a two bare-metal server site deployment reference.
The goal of this site is to be used as a reference for simplified Airship
deployments with one control and one or more compute nodes.
It is recommended to get familiar with the `Site Authoring and Deployment Guide`_
documentation before deploying Airsloop in the lab. Most steps and concepts
including setting up the Genesis node are the same.
.. _Site Authoring and Deployment Guide: https://airship-treasuremap.readthedocs.io/en/latest/authoring_and_deployment.html
.. image:: diagrams/airsloop-architecture.png
Various resiliency and security features are tuned down via configuration.
* Two bare-metal server setup with 1 control, and 1 compute.
Most components are scaled to a single replica and don't carry
any HA as there is only a single control plane host.
* No requirements for DNS/certificates.
HTTP and internal cluster DNS are used.
* Ceph set to use the single disk.
This generally provides minimalistic no-touch Ceph deployment.
No replication of Ceph data (single copy).
* Simplified networking (no bonding).
Two network interfaces are used by default (flat PXE, and DATA network
with VLANs for OAM, Calico, Storage, and OpenStack Overlay).
* Generic hostnames are used (airsloop-control-1, airsloop-compute-1), which
simplifies generation of k8s certificates.
Airsloop site manifests are available at
`site/airsloop <https://opendev.org/airship/treasuremap/src/branch/master/site/airsloop>`__.
Hardware
--------
While HW configuration is flexible, Airsloop reference manifests
reflect a single control and a single compute node. The aim of
this is to create a minimalistic lab/demo reference environment.
Increasing the number of compute nodes will require site overrides
to align parts of the system such as Ceph OSDs, etcd, etc.
See host profiles for the servers
`here <https://opendev.org/airship/treasuremap/src/branch/master/site/airsloop/profiles/host>`__.
+------------+-------------------------+
| Node | Hostnames |
+============+=========================+
| control | airsloop-control-1 |
+------------+-------------------------+
| compute | airsloop-compute-1 |
+------------+-------------------------+
Network
-------
Physical (underlay) networks are described in Drydock site configuration
`here <https://opendev.org/airship/treasuremap/src/branch/master/site/airsloop/networks/physical/networks.yaml>`__.
It defines OOB (iLO/IPMI), untagged PXE, and multiple tagged general use networks.
Also no bonded interfaces are used in Airsloop deployment.
The networking reference is simplified compared to Airship Seaworthy
site. There are only two NICs required (excluding oob), one for PXE
and another one for the rest of the networks separated using VLAN segmentation.
Below is the reference network configuration:
+------------+------------+-----------+---------------+
| NICs | VLANs | Names | CIDRs |
+============+============+===========+===============+
| oob | N/A | oob |10.22.104.0/24 |
+------------+------------+-----------+---------------+
| pxe | N/A | pxe |10.22.70.0/24 |
+------------+------------+-----------+---------------+
| | 71 | oam |10.22.71.0/24 |
| +------------+-----------+---------------+
| | 72 | calico |10.22.72.0/24 |
| data +------------+-----------+---------------+
| | 73 | storage |10.22.73.0/24 |
| +------------+-----------+---------------+
| | 74 | overlay |10.22.74.0/24 |
+------------+------------+-----------+---------------+
Calico overlay for k8s POD networking uses IPIP mesh.
Storage
-------
Because Airsloop is a minimalistic deployment, the required number of disks is just
one per node. That disk is not only used by the OS but also by Ceph Journals and OSDs.
The way that this is achieved is by using directories and not extra
disks for Ceph storage. Ceph OSD configuration can be changed in a `Ceph chart override <https://opendev.org/airship/treasuremap/src/branch/master/type/sloop/charts/ucp/ceph/ceph-osd.yaml>`__.
The following Ceph chart configuration is used:
.. code-block:: yaml
osd:
- data:
type: directory
location: /var/lib/openstack-helm/ceph/osd/osd-one
journal:
type: directory
location: /var/lib/openstack-helm/ceph/osd/journal-one
Host Profiles
-------------
Host profiles in Airship are tightly coupled with the hardware profiles.
That means every disk or interface which is described in host profiles
should have a corresponding reference to the hardware profile which is
being used.
Airship always identifies every NIC or disk by its PCI or
SCSI address and that means that the interfaces and the disks that are
defined in host and hardware profiles should have the correct PCI and
SCSI addresses, respectively.
Let's give an example by following the host profile of Airsloop site.
In this `Host Profile <https://opendev.org/airship/treasuremap/src/branch/master/site/airsloop/profiles/host/compute.yaml>`__
it is defined that the slave interface that will be used for the PXE
boot will be pxe_nic01. That means a corresponding entry should
exist in this `Hardware Profile <https://opendev.org/airship/treasuremap/src/branch/master/site/airsloop/profiles/hardware/dell_r720xd.yaml>`__
which it does. So when drydock and maas try to deploy the node it will
identify the interface by the PCI address that is written in the
Hardware profile.
A simple way to find out which PCI or SCSI address corresponds to which
NIC or Disk is to use the lshw command. More information about that
command can be found `here <https://linux.die.net/man/1/lshw>`__.
Extend Cluster
--------------
This section describes what changes need to be made to the existing
manifests of Airsloop for the addition of an extra compute node to the
cluster.
First and foremost the user should go to the `nodes.yaml <https://opendev.org/airship/treasuremap/src/branch/master/site/airsloop/baremetal/nodes.yaml>`__
file and add an extra section for the new compute node.
The next step is to add a similar section as the existing
airsloop-compute-1 section to the `pki-catalog.yaml <https://opendev.org/airship/treasuremap/src/branch/master/site/airsloop/pki/pki-catalog.yaml>`__.
This is essential for the correct generation of certificates and the
correct communication between the nodes in the cluster.
Also every time the user adds an extra compute node to the cluster then the
number of OSDs that are managed by this manifest `Ceph-client <https://opendev.org/airship/treasuremap/src/branch/master/site/airsloop/software/charts/osh/ceph/ceph-client.yaml>`__
should be increased by one.
Last step is to regenerate the certificates which correspond to this
`certificates.yaml <https://opendev.org/airship/treasuremap/src/branch/master/site/airsloop/secrets/certificates/certificates.yaml>`__
file so that the changes in the pki-catalog.yaml file take effect.
This can be done through the promenade CLI.
Getting Started
---------------
**Update Site Manifests.**
Carefully review site manifests (site/airsloop) and update the configuration
to match the hardware, networking setup and other specifics of the lab.
See more details at `Site Authoring and Deployment Guide`_.
.. note:: Many manifest files (YAMLs) contain documentation in comments
that instruct what changes are required for specific sections.
1. Build Site Documents
.. code-block:: bash
tools/airship pegleg site -r /target collect airsloop -s collect
mkdir certs
tools/airship promenade generate-certs -o /target/certs /target/collect/*.yaml
mkdir bundle
tools/airship promenade build-all -o /target/bundle /target/collect/*.yaml /target/certs/*.yaml
See more details at `Building Site documents`_, use site ``airsloop``.
.. _Building Site documents: https://airship-treasuremap.readthedocs.io/en/latest/authoring_and_deployment.html#building-site-documents
2. Deploy Genesis
Deploy the Genesis node, see more details at `Genesis node`_.
.. _Genesis node: https://airship-treasuremap.readthedocs.io/en/latest/authoring_and_deployment.html#genesis-node
Genesis is the first node in the cluster and serves as a control node.
In Airsloop configuration Genesis is the only control node (airsloop-control-1).
Airsloop is using non-bonded network interfaces:
.. code-block:: bash
auto lo
iface lo inet loopback
auto eno1
iface eno1 inet static
address 10.22.70.21/24
auto enp67s0f0
iface enp67s0f0 inet manual
auto enp67s0f0.71
iface enp67s0f0.71 inet static
address 10.22.71.21/24
gateway 10.22.71.1
dns-nameservers 8.8.8.8 8.8.4.4
vlan-raw-device enp67s0f0
vlan_id 71
auto enp67s0f0.72
iface enp67s0f0.72 inet static
address 10.22.72.21/24
vlan-raw-device enp67s0f0
vlan_id 72
auto enp67s0f0.73
iface enp67s0f0.73 inet static
address 10.22.73.21/24
vlan-raw-device enp67s0f0
vlan_id 73
auto enp67s0f0.74
iface enp67s0f0.74 inet static
address 10.22.74.21/24
vlan-raw-device enp67s0f0
vlan_id 74
Execute Genesis bootstrap script on the Genesis server.
.. code-block:: bash
sudo ./genesis.sh
3. Deploy Site
.. code-block:: bash
tools/airship shipyard create configdocs design --directory=/target/collect
tools/airship shipyard commit configdocs
tools/airship shipyard create action deploy_site
tools/airship shipyard get actions
See more details at `Deploy Site with Shipyard`_.
.. _Deploy Site with Shipyard: https://airship-treasuremap.readthedocs.io/en/latest/authoring_and_deployment.html#deploy-site-with-shipyard
Deploying Behind a Proxy
------------------------
The following documents show the main changes you need to make in order to have
Airsloop run behind a proxy.
.. note::
The "-" sign refers to a line that needs to be omitted (replaced), and the "+" sign refers to a
line replacing the omitted line, or simply a line that needs to be added to your yaml.
Under site/airsloop/software/charts/osh/openstack-glance/ create a glance.yaml file as follows:
.. code-block:: yaml
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: glance
layeringDefinition:
abstract: false
layer: site
parentSelector:
name: glance-type
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
test:
enabled: false
...
Under site/airsloop/software/config/ create a versions.yaml file in the following format:
.. code-block:: yaml
---
data:
charts:
kubernetes:
apiserver:
proxy_server: proxy.example.com:8080
apiserver-htk:
proxy_server: proxy.example.com:8080
calico:
calico:
proxy_server: proxy.example.com:8080
calico-htk:
proxy_server: proxy.example.com:8080
etcd:
proxy_server: proxy.example.com:8080
etcd-htk:
proxy_server: proxy.example.com:8080
controller-manager:
proxy_server: proxy.example.com:8080
controller-manager-htk:
proxy_server: proxy.example.com:8080
coredns:
proxy_server: proxy.example.com:8080
coredns-htk:
proxy_server: proxy.example.com:8080
etcd:
proxy_server: proxy.example.com:8080
etcd-htk:
proxy_server: proxy.example.com:8080
haproxy:
proxy_server: proxy.example.com:8080
haproxy-htk:
proxy_server: proxy.example.com:8080
ingress:
proxy_server: proxy.example.com:8080
ingress-htk:
proxy_server: proxy.example.com:8080
proxy:
proxy_server: proxy.example.com:8080
proxy-htk:
proxy_server: proxy.example.com:8080
scheduler:
proxy_server: proxy.example.com:8080
scheduler-htk:
proxy_server: proxy.example.com:8080
osh:
barbican:
proxy_server: proxy.example.com:8080
cinder:
proxy_server: proxy.example.com:8080
cinder-htk:
proxy_server: proxy.example.com:8080
glance:
proxy_server: proxy.example.com:8080
glance-htk:
proxy_server: proxy.example.com:8080
heat:
proxy_server: proxy.example.com:8080
heat-htk:
proxy_server: proxy.example.com:8080
helm_toolkit:
proxy_server: proxy.example.com:8080
horizon:
proxy_server: proxy.example.com:8080
horizon-htk:
proxy_server: proxy.example.com:8080
ingress:
proxy_server: proxy.example.com:8080
ingress-htk:
proxy_server: proxy.example.com:8080
keystone:
proxy_server: proxy.example.com:8080
keystone-htk:
proxy_server: proxy.example.com:8080
libvirt:
proxy_server: proxy.example.com:8080
libvirt-htk:
proxy_server: proxy.example.com:8080
mariadb:
proxy_server: proxy.example.com:8080
mariadb-htk:
proxy_server: proxy.example.com:8080
memcached:
proxy_server: proxy.example.com:8080
memcached-htk:
proxy_server: proxy.example.com:8080
neutron:
proxy_server: proxy.example.com:8080
neutron-htk:
proxy_server: proxy.example.com:8080
nova:
proxy_server: proxy.example.com:8080
nova-htk:
proxy_server: proxy.example.com:8080
openvswitch:
proxy_server: proxy.example.com:8080
openvswitch-htk:
proxy_server: proxy.example.com:8080
rabbitmq:
proxy_server: proxy.example.com:8080
rabbitmq-htk:
proxy_server: proxy.example.com:8080
tempest:
proxy_server: proxy.example.com:8080
tempest-htk:
proxy_server: proxy.example.com:8080
osh_infra:
elasticsearch:
proxy_server: proxy.example.com:8080
fluentbit:
proxy_server: proxy.example.com:8080
fluentd:
proxy_server: proxy.example.com:8080
grafana:
proxy_server: proxy.example.com:8080
helm_toolkit:
proxy_server: proxy.example.com:8080
kibana:
proxy_server: proxy.example.com:8080
nagios:
proxy_server: proxy.example.com:8080
nfs_provisioner:
proxy_server: proxy.example.com:8080
podsecuritypolicy:
proxy_server: proxy.example.com:8080
prometheus:
proxy_server: proxy.example.com:8080
prometheus_alertmanager:
proxy_server: proxy.example.com:8080
prometheus_kube_state_metrics:
proxy_server: proxy.example.com:8080
prometheus_node_exporter:
proxy_server: proxy.example.com:8080
prometheus_openstack_exporter:
proxy_server: proxy.example.com:8080
prometheus_process_exporter:
proxy_server: proxy.example.com:8080
ucp:
armada:
proxy_server: proxy.example.com:8080
armada-htk:
proxy_server: proxy.example.com:8080
barbican:
proxy_server: proxy.example.com:8080
barbican-htk:
proxy_server: proxy.example.com:8080
ceph-client:
proxy_server: proxy.example.com:8080
ceph-htk:
proxy_server: proxy.example.com:8080
ceph-mon:
proxy_server: proxy.example.com:8080
ceph-osd:
proxy_server: proxy.example.com:8080
ceph-provisioners:
proxy_server: proxy.example.com:8080
ceph-rgw:
proxy_server: proxy.example.com:8080
deckhand:
proxy_server: proxy.example.com:8080
deckhand-htk:
proxy_server: proxy.example.com:8080
divingbell:
proxy_server: proxy.example.com:8080
divingbell-htk:
proxy_server: proxy.example.com:8080
drydock:
proxy_server: proxy.example.com:8080
drydock-htk:
proxy_server: proxy.example.com:8080
ingress:
proxy_server: proxy.example.com:8080
ingress-htk:
proxy_server: proxy.example.com:8080
keystone:
proxy_server: proxy.example.com:8080
keystone-htk:
proxy_server: proxy.example.com:8080
maas:
proxy_server: proxy.example.com:8080
maas-htk:
proxy_server: proxy.example.com:8080
mariadb:
proxy_server: proxy.example.com:8080
mariadb-htk:
proxy_server: proxy.example.com:8080
memcached:
proxy_server: proxy.example.com:8080
memcached-htk:
proxy_server: proxy.example.com:8080
postgresql:
proxy_server: proxy.example.com:8080
postgresql-htk:
proxy_server: proxy.example.com:8080
promenade:
proxy_server: proxy.example.com:8080
promenade-htk:
proxy_server: proxy.example.com:8080
rabbitmq:
proxy_server: proxy.example.com:8080
rabbitmq-htk:
proxy_server: proxy.example.com:8080
shipyard:
proxy_server: proxy.example.com:8080
shipyard-htk:
proxy_server: proxy.example.com:8080
tenant-ceph-client:
proxy_server: proxy.example.com:8080
tenant-ceph-htk:
proxy_server: proxy.example.com:8080
tenant-ceph-mon:
proxy_server: proxy.example.com:8080
tenant-ceph-osd:
proxy_server: proxy.example.com:8080
tenant-ceph-provisioners:
proxy_server: proxy.example.com:8080
tenant-ceph-rgw:
proxy_server: proxy.example.com:8080
tiller:
proxy_server: proxy.example.com:8080
tiller-htk:
proxy_server: proxy.example.com:8080
metadata:
name: software-versions
replacement: true
layeringDefinition:
abstract: false
layer: site
parentSelector:
name: software-versions-global
actions:
- method: merge
path: .
storagePolicy: cleartext
schema: metadata/Document/v1
schema: pegleg/SoftwareVersions/v1
...
Update site/airsloop/networks/common-addresses.yaml to add the proxy information as follows:
.. code-block:: diff
# settings are correct and reachable in your environment; otherwise update
# them with the correct values for your environment.
proxy:
- http: ""
- https: ""
- no_proxy: []
+ http: "proxy.example.com:8080"
+ https: "proxy.example.com:8080"
+ no_proxy:
+ - 127.0.0.1
Under site/airsloop/software/charts/ucp/ create the file maas.yaml with the following format:
.. code-block:: yaml
---
# This file defines site-specific deviations for MaaS.
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: ucp-maas
layeringDefinition:
abstract: false
layer: site
parentSelector:
name: ucp-maas-type
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
values:
conf:
maas:
proxy:
proxy_enabled: true
peer_proxy_enabled: true
proxy_server: 'http://proxy.example.com:8080'
...
Under site/airsloop/software/charts/ucp/ create a promenade.yaml file in the following format:
.. code-block:: yaml
---
# This file defines site-specific deviations for Promenade.
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: ucp-promenade
layeringDefinition:
abstract: false
layer: site
parentSelector:
name: ucp-promenade-type
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
values:
pod:
env:
promenade_api:
- name: http_proxy
value: http://proxy.example.com:8080
- name: https_proxy
value: http://proxy.example.com:8080
- name: no_proxy
value: "127.0.0.1,localhost,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster.local,.cluster.local"
- name: HTTP_PROXY
value: http://proxy.example.com:8080
- name: HTTPS_PROXY
value: http://proxy.example.com:8080
- name: NO_PROXY
value: "127.0.0.1,localhost,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster.local,.cluster.local"
...

View File

@ -1,770 +0,0 @@
Site Authoring and Deployment Guide
===================================
The document contains the instructions for standing up a greenfield
Airship site. This can be broken down into two high-level pieces:
1. **Site authoring guide(s)**: Describes how to craft site manifests
and configs required to perform a deployment. The primary site
authoring guide is for deploying Airship sites, where OpenStack
is the target platform deployed on top of Airship.
2. **Deployment guide(s)**: Describes how to apply site manifests for a
given site.
This document is an "all in one" site authoring guide + deployment guide
for a standard Airship deployment. For the most part, the site
authoring guidance lives within ``seaworthy`` reference site in the
form of YAML comments.
Support
-------
Bugs may be viewed and reported at the following locations, depending on
the component:
- OpenStack Helm: `OpenStack Storyboard group
<https://storyboard.openstack.org/#!/project_group/64>`__
- Airship: Bugs may be filed using OpenStack Storyboard for specific
projects in `Airship
group <https://storyboard.openstack.org/#!/project_group/85>`__:
- `Airship Armada <https://storyboard.openstack.org/#!/project/1002>`__
- `Airship
Deckhand <https://storyboard.openstack.org/#!/project/1004>`__
- `Airship
Divingbell <https://storyboard.openstack.org/#!/project/1001>`__
- `Airship
Drydock <https://storyboard.openstack.org/#!/project/1005>`__
- `Airship MaaS <https://storyboard.openstack.org/#!/project/1007>`__
- `Airship Pegleg <https://storyboard.openstack.org/#!/project/1008>`__
- `Airship
Promenade <https://storyboard.openstack.org/#!/project/1009>`__
- `Airship
Shipyard <https://storyboard.openstack.org/#!/project/1010>`__
- `Airship Treasuremap
<https://storyboard.openstack.org/#!/project/airship/treasuremap>`__
Terminology
-----------
**Cloud**: A platform that provides a standard set of interfaces for
`IaaS <https://en.wikipedia.org/wiki/Infrastructure_as_a_service>`__
consumers.
**OSH**: (`OpenStack Helm <https://docs.openstack.org/openstack-helm/latest/>`__) is a
collection of Helm charts used to deploy OpenStack on Kubernetes.
**Helm**: (`Helm <https://helm.sh/>`__) is a package manager for Kubernetes.
Helm Charts help you define, install, and upgrade Kubernetes applications.
**Undercloud/Overcloud**: Terms used to distinguish which cloud is
deployed on top of the other. In Airship sites, OpenStack (overcloud)
is deployed on top of Kubernetes (undercloud).
**Airship**: A specific implementation of OpenStack Helm charts that deploy
Kubernetes. This deployment is the primary focus of this document.
**Control Plane**: From the point of view of the cloud service provider,
the control plane refers to the set of resources (hardware, network,
storage, etc.) configured to provide cloud services for customers.
**Data Plane**: From the point of view of the cloud service provider,
the data plane is the set of resources (hardware, network, storage,
etc.) configured to run consumer workloads. When used in this document,
"data plane" refers to the data plane of the overcloud (OSH).
**Host Profile**: A host profile is a standard way of configuring a bare
metal host. It encompasses items such as the number of bonds, bond slaves,
physical storage mapping and partitioning, and kernel parameters.
Versioning
----------
Airship reference manifests are delivered monthly as release tags in the
`Treasuremap <https://github.com/airshipit/treasuremap/releases>`__.
The releases are verified by `Seaworthy
<https://airship-treasuremap.readthedocs.io/en/latest/seaworthy.html>`__,
`Airsloop
<https://airship-treasuremap.readthedocs.io/en/latest/airsloop.html>`__,
and `Airship-in-a-Bottle
<https://github.com/airshipit/treasuremap/blob/master/tools/deployment/aiab/README.rst>`__
pipelines before delivery and are recommended for deployments instead of using
the master branch directly.
Component Overview
------------------
.. image:: diagrams/component_list.png
Node Overview
-------------
This document refers to several types of nodes, which vary in their
purpose, and to some degree in their orchestration / setup:
- **Build node**: This refers to the environment where configuration
documents are built for your environment (e.g., your laptop)
- **Genesis node**: The "genesis" or "seed node" refers to a node used
to get a new deployment off the ground, and is the first node built
in a new deployment environment
- **Control / Master nodes**: The nodes that make up the control
plane. (Note that the genesis node will be one of the controller
nodes)
- **Compute / Worker Nodes**: The nodes that make up the data
plane
Hardware Preparation
--------------------
The Seaworthy site reference shows a production-worthy deployment that includes
multiple disks, as well as redundant/bonded network configuration.
Airship hardware requirements are flexible, and the system can be deployed
with very minimal requirements if needed (e.g., single disk, single network).
For simplified non-bonded, and single disk examples, see
`Airsloop <https://airship-treasuremap.readthedocs.io/en/latest/airsloop.html>`__.
BIOS and IPMI
~~~~~~~~~~~~~
1. Virtualization enabled in BIOS
2. IPMI enabled in server BIOS (e.g., IPMI over LAN option enabled)
3. IPMI IPs assigned, and routed to the environment you will deploy into
Note: Firmware bugs related to IPMI are common. Ensure you are running the
latest firmware version for your hardware. Otherwise, it is recommended to
perform an iLo/iDrac reset, as IPMI bugs with long-running firmware are not
uncommon.
4. Set PXE as first boot device and ensure the correct NIC is selected for PXE.
Disk
~~~~
1. For servers that are in the control plane (including genesis):
- Two-disk RAID-1: Operating System
- Two disks JBOD: Ceph Journal/Meta for control plane
- Remaining disks JBOD: Ceph OSD for control plane
2. For servers that are in the tenant data plane (compute nodes):
- Two-disk RAID-1: Operating System
- Two disks JBOD: Ceph Journal/Meta for tenant-ceph
- Two disks JBOD: Ceph OSD for tenant-ceph
- Remaining disks configured according to the host profile target
for each given server (e.g., RAID-10 for OpenStack ephemeral).
Network
~~~~~~~
1. You have a dedicated PXE interface on untagged/native VLAN,
1x1G interface (eno1)
2. You have VLAN segmented networks,
2x10G bonded interfaces (enp67s0f0 and enp68s0f1)
- Management network (routed/OAM)
- Calico network (Kubernetes control channel)
- Storage network
- Overlay network
- Public network
See detailed network configuration in the
``site/${NEW_SITE}/networks/physical/networks.yaml`` configuration file.
Hardware sizing and minimum requirements
----------------------------------------
+-----------------+----------+----------+----------+
| Node | Disk | Memory | CPU |
+=================+==========+==========+==========+
| Build (laptop) | 10 GB | 4 GB | 1 |
+-----------------+----------+----------+----------+
| Genesis/Control | 500 GB | 64 GB | 24 |
+-----------------+----------+----------+----------+
| Compute | N/A* | N/A* | N/A* |
+-----------------+----------+----------+----------+
* Workload driven (determined by host profile)
See detailed hardware configuration in the
``site/${NEW_SITE}/networks/profiles`` folder.
Establishing build node environment
-----------------------------------
1. On the machine you wish to use to generate deployment files, install required
tooling
.. code-block:: bash
sudo apt -y install docker.io git
2. Clone the ``treasuremap`` git repo as follows
.. code-block:: bash
git clone https://opendev.org/airship/treasuremap.git
cd treasuremap && git checkout <release-tag>
Building site documents
-----------------------
This section goes over how to put together site documents according to
your specific environment and generate the initial Promenade bundle
needed to start the site deployment.
Preparing deployment documents
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In its current form, Pegleg provides an organized structure for YAML
elements that separates common site elements (i.e., ``global``
folder) from unique site elements (i.e., ``site`` folder).
To gain a full understanding of the Pegleg structure, it is highly
recommended to read the Pegleg documentation on this topic
`here <https://airship-pegleg.readthedocs.io/>`__.
The ``seaworthy`` site may be used as reference site. It is the
principal pipeline for integration and continuous deployment testing of Airship.
Change directory to the ``site`` folder and copy the
``seaworthy`` site as follows:
.. code-block:: bash
NEW_SITE=mySite # replace with the name of your site
cd treasuremap/site
cp -r seaworthy $NEW_SITE
Remove ``seaworthy`` specific certificates.
.. code-block:: bash
rm -f site/${NEW_SITE}/secrets/certificates/certificates.yaml
You will then need to manually make changes to these files. These site
manifests are heavily commented to explain parameters, and more importantly
identify all of the parameters that need to change when authoring a new
site.
These areas which must be updated for a new site are flagged with the
label ``NEWSITE-CHANGEME`` in YAML comments. Search for all instances
of ``NEWSITE-CHANGEME`` in your new site definition. Then follow the
instructions that accompany the tag in order to make all needed changes
to author your new Airship site.
Because some files depend on (or will repeat) information from others,
the order in which you should build your site files is as follows:
1. site/$NEW\_SITE/networks/physical/networks.yaml
2. site/$NEW\_SITE/baremetal/nodes.yaml
3. site/$NEW\_SITE/networks/common-addresses.yaml
4. site/$NEW\_SITE/pki/pki-catalog.yaml
5. All other site files
Register DNS names
~~~~~~~~~~~~~~~~~~
Airship has two virtual IPs.
See ``data.vip`` in section of
``site/${NEW_SITE}/networks/common-addresses.yaml`` configuration file.
Both are implemented via Kubernetes ingress controller and require FQDNs/DNS.
Register the following list of DNS names:
::
+---+---------------------------+-------------+
| A | iam-sw.DOMAIN | ingress-vip |
| A | shipyard-sw.DOMAIN | ingress-vip |
+---+---------------------------+-------------+
| A | cloudformation-sw.DOMAIN | ingress-vip |
| A | compute-sw.DOMAIN | ingress-vip |
| A | dashboard-sw.DOMAIN | ingress-vip |
| A | grafana-sw.DOMAIN | ingress-vip |
+---+---------------------------+-------------+
| A | identity-sw.DOMAIN | ingress-vip |
| A | image-sw.DOMAIN | ingress-vip |
| A | kibana-sw.DOMAIN | ingress-vip |
| A | nagios-sw.DOMAIN | ingress-vip |
| A | network-sw.DOMAIN | ingress-vip |
| A | nova-novncproxy-sw.DOMAIN | ingress-vip |
| A | object-store-sw.DOMAIN | ingress-vip |
| A | orchestration-sw.DOMAIN | ingress-vip |
| A | placement-sw.DOMAIN | ingress-vip |
| A | volume-sw.DOMAIN | ingress-vip |
+---+---------------------------+-------------+
| A | maas-sw.DOMAIN | maas-vip |
| A | drydock-sw.DOMAIN | maas-vip |
+---+---------------------------+-------------+
Here ``DOMAIN`` is a name of ingress domain, you can find it in the
``data.dns.ingress_domain`` section of
``site/${NEW_SITE}/secrets/certificates/ingress.yaml`` configuration file.
Run the following command to get an up-to-date list of required DNS names:
.. code-block:: bash
grep -E 'host: .+DOMAIN' site/${NEW_SITE}/software/config/endpoints.yaml | \
sort -u | awk '{print $2}'
Update Secrets
~~~~~~~~~~~~~~
Replace passphrases under ``site/${NEW_SITE}/secrets/passphrases/``
with random generated ones:
- Passphrases generation ``openssl rand -hex 10``
- UUID generation ``uuidgen`` (e.g., for Ceph filesystem ID)
- Update ``secrets/passphrases/ipmi_admin_password.yaml`` with IPMI password
- Update ``secrets/passphrases/ubuntu_crypt_password.yaml`` with password hash:
.. code-block:: python
python3 -c "from crypt import *; print(crypt('<YOUR_PASSWORD>', METHOD_SHA512))"
Configure certificates in ``site/${NEW_SITE}/secrets/certificates/ingress.yaml``,
they need to be issued for the domains configured in the ``Register DNS names`` section.
.. caution::
It is required to configure valid certificates. Self-signed certificates
are not supported.
Control Plane & Tenant Ceph Cluster Notes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Configuration variables for ceph control plane are located in:
- ``site/${NEW_SITE}/software/charts/ucp/ceph/ceph-osd.yaml``
- ``site/${NEW_SITE}/software/charts/ucp/ceph/ceph-client.yaml``
Configuration variables for tenant ceph are located in:
- ``site/${NEW_SITE}/software/charts/osh/openstack-tenant-ceph/ceph-osd.yaml``
- ``site/${NEW_SITE}/software/charts/osh/openstack-tenant-ceph/ceph-client.yaml``
Configuration summary:
- data/values/conf/storage/osd[\*]/data/location: The block device that
will be formatted by the Ceph chart and used as a Ceph OSD disk
- data/values/conf/storage/osd[\*]/journal/location: The block device
backing the ceph journal used by this OSD. Refer to the journal
paradigm below.
- data/values/conf/pool/target/osd: Number of OSD disks on each node
Assumptions:
1. Ceph OSD disks are not configured for any type of RAID. Instead, they
are configured as JBOD when connected through a RAID controller.
If the RAID controller does not support JBOD, put each disk in its
own RAID-0 and enable RAID cache and write-back cache if the
RAID controller supports it.
2. Ceph disk mapping, disk layout, journal and OSD setup is the same
across Ceph nodes, with only their role differing. Out of the 4
control plane nodes, we expect to have 3 actively participating in
the Ceph quorum, and the remaining 1 node designated as a standby
Ceph node which uses a different control plane profile
(cp\_*-secondary) than the other three (cp\_*-primary).
3. If performing a fresh install, disks are unlabeled or not labeled from a
previous Ceph install, so that Ceph chart will not fail disk
initialization.
.. important::
It is highly recommended to use SSD devices for Ceph Journal partitions.
If you have an operating system available on the target hardware, you
can determine HDD and SSD devices with:
.. code-block:: bash
lsblk -d -o name,rota
where a ``rota`` (rotational) value of ``1`` indicates a spinning HDD,
and where a value of ``0`` indicates non-spinning disk (i.e., SSD). (Note:
Some SSDs still report a value of ``1``, so it is best to go by your
server specifications).
For OSDs, pass in the whole block device (e.g., ``/dev/sdd``), and the
Ceph chart will take care of disk partitioning, formatting, mounting,
etc.
For Ceph Journals, you can pass in a specific partition (e.g., ``/dev/sdb1``).
Note that it's not required to pre-create these partitions. The Ceph chart
will create journal partitions automatically if they don't exist.
By default the size of every journal partition is 10G. Make sure
there is enough space available to allocate all journal partitions.
Consider the following example where:
- /dev/sda is an operating system RAID-1 device (SSDs for OS root)
- /dev/sd[bc] are SSDs for ceph journals
- /dev/sd[efgh] are HDDs for OSDs
The data section of this file would look like:
.. code-block:: yaml
data:
values:
conf:
storage:
osd:
- data:
type: block-logical
location: /dev/sde
journal:
type: block-logical
location: /dev/sdb1
- data:
type: block-logical
location: /dev/sdf
journal:
type: block-logical
location: /dev/sdb2
- data:
type: block-logical
location: /dev/sdg
journal:
type: block-logical
location: /dev/sdc1
- data:
type: block-logical
location: /dev/sdh
journal:
type: block-logical
location: /dev/sdc2
Manifest linting and combining layers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
After constituent YAML configurations are finalized, use Pegleg to lint
your manifests. Resolve any issues that result from linting before
proceeding:
.. code-block:: bash
sudo tools/airship pegleg site -r /target lint $NEW_SITE
Note: ``P001`` and ``P005`` linting errors are expected for missing
certificates, as they are not generated until the next section. You may
suppress these warnings by appending ``-x P001 -x P005`` to the lint
command.
Next, use Pegleg to perform the merge that will yield the combined
global + site type + site YAML:
.. code-block:: bash
sudo tools/airship pegleg site -r /target collect $NEW_SITE
Perform a visual inspection of the output. If any errors are discovered,
you may fix your manifests and re-run the ``lint`` and ``collect``
commands.
Once you have error-free output, save the resulting YAML as follows:
.. code-block:: bash
sudo tools/airship pegleg site -r /target collect $NEW_SITE \
-s ${NEW_SITE}_collected
This output is required for subsequent steps.
Lastly, you should also perform a ``render`` on the documents. The
resulting render from Pegleg will not be used as input in subsequent
steps, but is useful for understanding what the document will look like
once Deckhand has performed all substitutions, replacements, etc. This
is also useful for troubleshooting and addressing any Deckhand errors
prior to submitting via Shipyard:
.. code-block:: bash
sudo tools/airship pegleg site -r /target render $NEW_SITE
Inspect the rendered document for any errors. If there are errors,
address them in your manifests and re-run this section of the document.
Building the Promenade bundle
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Create an output directory for Promenade certs and run
.. code-block:: bash
mkdir ${NEW_SITE}_certs
sudo tools/airship promenade generate-certs \
-o /target/${NEW_SITE}_certs /target/${NEW_SITE}_collected/*.yaml
Estimated runtime: About **1 minute**
After the certificates have been successfully created, copy the generated
certificates into the security folder. Example:
.. code-block:: bash
mkdir -p site/${NEW_SITE}/secrets/certificates
sudo cp ${NEW_SITE}_certs/certificates.yaml \
site/${NEW_SITE}/secrets/certificates/certificates.yaml
Regenerate collected YAML files to include copied certificates:
.. code-block:: bash
sudo rm -rf ${NEW_SITE}_collected ${NEW_SITE}_certs
sudo tools/airship pegleg site -r /target collect $NEW_SITE \
-s ${NEW_SITE}_collected
Finally, create the Promenade bundle:
.. code-block:: bash
mkdir ${NEW_SITE}_bundle
sudo tools/airship promenade build-all --validators \
-o /target/${NEW_SITE}_bundle /target/${NEW_SITE}_collected/*.yaml
Genesis node
------------
Initial setup
~~~~~~~~~~~~~
Before starting, ensure that the BIOS and IPMI settings match those
stated previously in this document. Also ensure that the hardware RAID
is setup for this node per the control plane disk configuration stated
previously in this document.
Then, start with a manual install of Ubuntu 16.04 on the genesis node, the node
you will use to seed the rest of your environment. Use standard `Ubuntu
ISO <http://releases.ubuntu.com/16.04>`__.
Ensure to select the following:
- UTC timezone
- Hostname that matches the genesis hostname given in
``data.genesis.hostname`` in
``site/${NEW_SITE}/networks/common-addresses.yaml``.
- At the ``Partition Disks`` screen, select ``Manual`` so that you can
setup the same disk partitioning scheme used on the other control
plane nodes that will be deployed by MaaS. Select the first logical
device that corresponds to one of the RAID-1 arrays already setup in
the hardware controller. On this device, setup partitions matching
those defined for the ``bootdisk`` in your control plane host profile
found in ``site/${NEW_SITE}/profiles/host``.
(e.g., 30G for /, 1G for /boot, 100G for /var/log, and all remaining
storage for /var). Note that the volume size syntax looking like
``>300g`` in Drydock means that all remaining disk space is allocated
to this volume, and that volume needs to be at least 300G in
size.
- When you get to the prompt, "How do you want to manage upgrades on
this system?", choose "No automatic updates" so that packages are
only updated at the time of our choosing (e.g., maintenance windows).
- Ensure the grub bootloader is also installed to the same logical
device as in the previous step (this should be default behavior).
After installation, ensure the host has outbound internet access and can
resolve public DNS entries (e.g., ``nslookup google.com``,
``curl https://www.google.com``).
Ensure that the deployed genesis hostname matches the hostname in
``data.genesis.hostname`` in
``site/${NEW_SITE}/networks/common-addresses.yaml``.
If it does not match, then either change the hostname of the node to
match the configuration documents, or re-generate the configuration with
the correct hostname.
To change the hostname of the deployed node, you may run the following:
.. code-block:: bash
sudo hostname $NEW_HOSTNAME
sudo sh -c "echo $NEW_HOSTNAME > /etc/hostname"
sudo vi /etc/hosts # Anywhere the old hostname appears in the file, replace
# with the new hostname
Or, as an alternative, update the genesis hostname
in the site definition and then repeat the steps in the previous two sections,
"Manifest linting and combining layers" and "Building the Promenade bundle".
Installing matching kernel version
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Install the same kernel version on the genesis host that MaaS will use
to deploy new baremetal nodes.
To do this, first you must determine the kernel version that
will be deployed to those nodes. Start by looking at the host profile
definition used to deploy other control plane nodes by searching for
``control-plane: enabled``. Most likely this will be a file under
``global/profiles/host``. In this file, find the kernel info. Example:
.. code-block:: bash
platform:
image: 'xenial'
kernel: 'hwe-16.04'
kernel_params:
kernel_package: 'linux-image-4.15.0-46-generic'
It is recommended to install matching (and previously tested) kernel
.. code-block:: bash
sudo apt-get install linux-image-4.15.0-46-generic
Check the installed packages on the genesis host with ``dpkg --list``.
If there are any later kernel versions installed, remove them with
``sudo apt remove``, so that the newly installed kernel is the latest
available. Boot the genesis node using the installed kernel.
Install ntpdate/ntp
~~~~~~~~~~~~~~~~~~~
Install and run ntpdate, to ensure a reasonably sane time on genesis
host before proceeding:
.. code-block:: bash
sudo apt -y install ntpdate
sudo ntpdate ntp.ubuntu.com
If your network policy does not allow time sync with external time
sources, specify a local NTP server instead of using ``ntp.ubuntu.com``.
Then, install the NTP client:
.. code-block:: bash
sudo apt -y install ntp
Add the list of NTP servers specified in ``data.ntp.servers_joined`` in
file
``site/${NEW_SITE}/networks/common-addresses.yaml``
to ``/etc/ntp.conf`` as follows:
::
pool NTP_SERVER1 iburst
pool NTP_SERVER2 iburst
(repeat for each NTP server with correct NTP IP or FQDN)
Then, restart the NTP service:
.. code-block:: bash
sudo service ntp restart
If you cannot get good time to your selected time servers,
consider using alternate time sources for your deployment.
Disable the apparmor profile for ntpd:
.. code-block:: bash
sudo ln -s /etc/apparmor.d/usr.sbin.ntpd /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.ntpd
This prevents an issue with the MaaS containers, which otherwise get
permission denied errors from apparmor when the MaaS container tries to
leverage libc6 for /bin/sh when MaaS container ntpd is forcefully
disabled.
Promenade bootstrap
~~~~~~~~~~~~~~~~~~~
Copy the ``${NEW_SITE}_bundle`` directory from the build node to the genesis
node, into the home directory of the user there (e.g., ``/home/ubuntu``).
Then, run the following script as sudo on the genesis node:
.. code-block:: bash
cd ${NEW_SITE}_bundle
sudo ./genesis.sh
Estimated runtime: **1h**
Following completion, run the ``validate-genesis.sh`` script to ensure
correct provisioning of the genesis node:
.. code-block:: bash
cd ${NEW_SITE}_bundle
sudo ./validate-genesis.sh
Estimated runtime: **2m**
Deploy Site with Shipyard
-------------------------
Export valid login credentials for one of the Airship Keystone users defined
for the site. Currently there are no authorization checks in place, so
the credentials for any of the site-defined users will work. For
example, we can use the ``shipyard`` user, with the password that was
defined in
``site/${NEW_SITE}/secrets/passphrases/ucp_shipyard_keystone_password.yaml``.
Example:
.. code-block:: bash
export OS_AUTH_URL="https://iam-sw.DOMAIN:443/v3"
export OS_USERNAME=shipyard
export OS_PASSWORD=password123
Next, load collected site manifests to Shipyard
.. code-block:: bash
sudo -E tools/airship shipyard create configdocs ${NEW_SITE} \
--directory=/target/${NEW_SITE}_collected
sudo tools/airship shipyard commit configdocs
Estimated runtime: **3m**
Now deploy the site with shipyard:
.. code-block:: bash
tools/airship shipyard create action deploy_site
Estimated runtime: **3h**
Check periodically for successful deployment:
.. code-block:: bash
tools/airship shipyard get actions
tools/airship shipyard describe action/<ACTION>
Disable password-based login on genesis
---------------------------------------
Before proceeding, verify that your SSH access to the genesis node is
working with your SSH key (i.e., not using password-based
authentication).
Then, disable password-based SSH authentication on genesis in
``/etc/ssh/sshd_config`` by uncommenting the ``PasswordAuthentication``
and setting its value to ``no``. Example:
::
PasswordAuthentication no
Then, restart the ssh service:
::
sudo systemctl restart ssh

View File

@ -1,160 +0,0 @@
# -*- coding: utf-8 -*-
#
# shipyard documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 16 03:40:50 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

# Third-party Read the Docs HTML theme; must be installed in the docs build
# environment (e.g. via pip) before Sphinx can build these docs.
import sphinx_rtd_theme

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
]

# Add any paths that contain templates here, relative to this directory.
# templates_path = []

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Airship Integration'
copyright = u'2018 AT&T Intellectual Property.'
author = u'Airship Authors'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1.0'
# The full version, including alpha/beta/rc tags.
release = u'0.1.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Locate the theme's templates from the installed sphinx_rtd_theme package.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'ucpintdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'airshipint.tex', u'Airship Integration Documentation',
     u'Airship Authors', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'AirshipIntegration', u'Airship Integration Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Airship Integration', u'Airship Integration Documentation',
     author, 'Airship Integration',
     'Airship documentation',
     'Miscellaneous'),
]

View File

@ -1,187 +0,0 @@
Configuration Update Guide
==========================
The guide contains the instructions for updating the configuration of
a deployed Airship environment. Please refer to
`Site Authoring and Deployment Guide <https://airship-treasuremap.readthedocs.io/en/latest/authoring_and_deployment.html>`__
if you do not have an Airship environment already deployed.
Update of an Airship environment consists of the following stages:
1. **Prepare the configuration**: before deploying any changes, a user
should prepare and validate the manifests on a build node using
`Airship Pegleg <https://airship-pegleg.readthedocs.io/en/latest/>`__.
2. **Deploy the changes**: during this stage, a user uploads the
configuration to the Airship environment and starts the deployment using
`Airship Shipyard <https://airship-shipyard.readthedocs.io/en/latest/>`__.
.. note::
This guide assumes you have
`Airship Pegleg <https://airship-pegleg.readthedocs.io/en/latest/>`__ and
`Airship Shipyard <https://airship-shipyard.readthedocs.io/en/latest/>`__
tools installed and configured; please refer to
`Site Authoring and Deployment Guide <https://airship-treasuremap.readthedocs.io/en/latest/authoring_and_deployment.html>`__
for the details.
Configuring Airship CLI
-----------------------
Clone the Airship Treasuremap repository and switch to correct version.
::
git clone https://opendev.org/airship/treasuremap
cd treasuremap/
# List available tags.
git tag --list
# Switch to the version your site is using.
git checkout {your-tag}
# Go back to a previous directory.
cd ..
Configure environment variables with the name of your site, and specify a path
to the directory where site configuration is stored; for this example, we use
`Airship Seaworthy <https://airship-treasuremap.readthedocs.io/en/latest/seaworthy.html>`__
site:
::
export SITE=seaworthy
export SITE_PATH=treasuremap/site/seaworthy
Updating the manifests
----------------------
Changing the configuration consists of the following steps:
1. Change site manifests.
2. Lint the manifests.
3. Collect the manifests.
4. Copy the manifests to the Airship environment.
Linting and collecting the manifests is done using
`Airship Pegleg <https://airship-pegleg.readthedocs.io/en/latest/>`__.
For this example, we are going to update a debug level for keystone logs
in a site layer.
.. note::
It is also possible to update the configuration in a global layer;
for more details on Airship layering mechanism see
`Pegleg Definition Artifact Layout <https://airship-pegleg.readthedocs.io/en/latest/artifacts.html>`__
documentation.
Create an override file
``${SITE_PATH}/software/charts/osh/openstack-keystone/keystone.yaml``
with the following content:
::
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: keystone
replacement: true
layeringDefinition:
abstract: false
layer: site
parentSelector:
name: keystone-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
values:
conf:
logging:
logger_keystone:
level: DEBUG
...
Check that the configuration is valid:
::
sudo ./treasuremap/tools/airship pegleg site -r treasuremap/ \
lint ${SITE}
Collect the configuration:
::
sudo ./treasuremap/tools/airship pegleg site \
-r treasuremap/ collect $SITE -s ${SITE}_collected
Copy the configuration to a node that has the access to the site's
Shipyard API, if the current node does not; this node can be one
of your controllers:
::
scp -r ${SITE}_collected {genesis-ip}:/home/{user-name}/${SITE}_collected
Deploying the changes
---------------------
After you copied the manifests, there are just a few steps needed to start
the deployment:
1. Upload the changes to
`Airship Deckhand <https://airship-deckhand.readthedocs.io/en/latest/>`__.
2. Start the deployment using
`Airship Shipyard <https://airship-shipyard.readthedocs.io/en/latest/>`__.
Install Airship CLI as described in `Configuring Airship CLI`_ section.
Set the name of your site:
::
export SITE=seaworthy
Configure credentials for accessing Shipyard; the password is stored
in ``ucp_shipyard_keystone_password`` secret, you can find it in
``site/seaworthy/secrets/passphrases/ucp_shipyard_keystone_password.yaml``
configuration file of your site.
::
export OS_USERNAME=shipyard
export OS_PASSWORD={shipyard_password}
Upload the changes to `Airship Deckhand <https://airship-deckhand.readthedocs.io/en/latest/>`__:
::
# Upload the configuration.
sudo -E ./treasuremap/tools/airship shipyard \
create configdocs ${SITE} --replace --directory=${SITE}_collected
# Commit the configuration.
sudo -E ./treasuremap/tools/airship shipyard commit configdocs
Run the deployment:
::
sudo -E ./treasuremap/tools/airship shipyard create action update_site
You can also run ``update_software`` instead of ``update_site`` which skips
hardware configuration and only applies the changes to services that are running
on top of Kubernetes.
Now you can track the deployment progress using the following commands:
::
# Get all actions that were executed on your environment.
sudo -E ./treasuremap/tools/airship shipyard get actions
# Show all the steps within the action.
sudo -E ./treasuremap/tools/airship shipyard describe action/{action_id}
All steps will have status ``success`` when the update finishes.

View File

@ -1,242 +0,0 @@
=================
Development Guide
=================
Welcome
-------
Thank you for your interest in Airship. Our community is eager to help you
contribute to the success of our project and welcome you as a member of our
community!
We invite you to reach out to us at any time via the `Airship mailing list`_ or
`#airshipit IRC channel`_ on freenode.
Welcome aboard!
.. _Airship mailing list: http://lists.airshipit.org
.. _#airshipit IRC channel: irc://chat.freenode.net:6667
Getting Started
---------------
Airship is a collection of open source tools for automating cloud provisioning
and management. Airship provides a declarative framework for defining and
managing the life cycle of open infrastructure tools and the underlying
hardware. These tools include OpenStack for virtual machines, Kubernetes for
container orchestration, and MaaS for bare metal, with planned support for
OpenStack Ironic.
We recommend that new contributors begin by reading the high-level architecture
overview included in our `treasuremap`_ documentation. The architectural
overview introduces each Airship component, their core responsibilities, and
their integration points.
.. _treasuremap: https://airship-treasuremap.readthedocs.io/en/latest
Deep Dive
---------
Each Airship component is accompanied by its own documentation that provides an
extensive overview of the component. With so many components, it can be
challenging to find a starting point.
We recommend the following:
Try an Airship environment
~~~~~~~~~~~~~~~~~~~~~~~~~~
Airship provides two single-node environments for demo and development purpose.
`Airship-in-a-Bottle`_ is a set of reference documents and shell scripts that
stand up a full Airship environment with the execution of a script.
`Airskiff`_ is a light-weight development environment bundled with a set of
deployment scripts that provides a single-node Airship environment. Airskiff
uses minikube to bootstrap Kubernetes, so it does not include Drydock, MaaS, or
Promenade.
Additionally, we provide a reference architecture for easily deploying a
smaller, demo site.
`Airsloop`_ is a fully-authored Airship site that can be quickly deployed as a
baremetal, demo lab.
.. _Airship-in-a-Bottle: https://opendev.org/airship/in-a-bottle
.. _Airskiff: https://airship-treasuremap.readthedocs.io/en/latest/airskiff.html
.. _Airsloop: https://airship-treasuremap.readthedocs.io/en/latest/airsloop.html
Focus on a component
~~~~~~~~~~~~~~~~~~~~
When starting out, focusing on one Airship component allows you to become
intricately familiar with the responsibilities of that component and understand
its function in the Airship integration. Because the components are modeled
after each other, you will also become familiar with the same patterns and
conventions that all Airship components use.
Airship source code lives in the `OpenDev Airship namespace`_. To clone an
Airship project, execute the following, replacing `<component>` with the name
of the Airship component you want to clone.
.. code-block:: bash
git clone https://opendev.org/airship/<component>.git
Refer to the component's documentation to get started. A list of each
component's documentation is listed below for reference:
* `Armada`_
* `Deckhand`_
* `Divingbell`_
* `Drydock`_
* `Pegleg`_
* `Promenade`_
* `Shipyard`_
.. _OpenDev Airship namespace: https://opendev.org/airship
.. _Armada: https://airship-armada.readthedocs.io
.. _Deckhand: https://airship-deckhand.readthedocs.io
.. _Divingbell: https://airship-divingbell.readthedocs.io
.. _Drydock: https://airship-drydock.readthedocs.io
.. _Pegleg: https://airship-pegleg.readthedocs.io
.. _Promenade: https://airship-promenade.readthedocs.io
.. _Shipyard: https://airship-shipyard.readthedocs.io
Find a Storyboard task or story
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Airship work items are tracked using Storyboard. A board of items can be found
`here`_.
Once you find an item to work on, simply assign the item to yourself or leave a
comment that you plan to provide implementation for the item.
.. _here: https://storyboard.openstack.org/#!/project_group/85
Testing Changes
---------------
Testing of Airship changes can be accomplished several ways:
#. Standalone, single component testing
#. Integration testing
#. Linting, unit, and functional tests/linting
.. note:: Testing changes to charts in Airship repositories is best
accomplished using the integration method describe below.
Standalone Testing
~~~~~~~~~~~~~~~~~~
Standalone testing of Airship components, i.e. using an Airship component as a
Python project, provides the quickest feedback loop of the three methods and
allows developers to make changes on the fly. We recommend testing initial code
changes using this method to see results in real-time.
Each Airship component written in Python has pre-requisites and guides for
running the project in a standalone capacity. Refer to the documentation listed
below.
* `Armada`_
* `Deckhand`_
* `Drydock`_
* `Pegleg`_
* `Promenade`_
* `Shipyard`_
Integration Testing
~~~~~~~~~~~~~~~~~~~
While each Airship component supports individual usage, Airship components
have several integration points that should be exercised after modifying
functionality.
We maintain several environments that encompass these integration points:
#. `Airskiff`_: Integration of Armada, Deckhand, Shipyard, and Pegleg
#. `Airship-in-a-Bottle Multinode`: Full Airship integration
For changes that merely impact software delivery components, exercising a full
Airskiff deployment is often sufficient. Otherwise, we recommend using the
Airship-in-a-Bottle Multinode environment.
Each environment's documentation covers the process required to build and test
component images.
.. _Airskiff: https://airship-treasuremap.readthedocs.io/en/latest/
airskiff.html
.. _Airship-in-a-Bottle Multinode: http://git.openstack.org/cgit/openstack/
airship-in-a-bottle/tree/tools/multi_nodes_gate/README.rst
Final Checks
~~~~~~~~~~~~
Airship projects provide Makefiles to run unit, integration, and functional
tests as well as lint Python code for PEP8 compliance and Helm charts for
successful template rendering. All checks are gated by Zuul before a change can
be merged. For more information on executing these checks, refer to
project-specific documentation.
Third party CI tools, such as Jenkins, report results on Airship-in-a-Bottle
patches. These can be exposed using the "Toggle CI" button in the bottom
left-hand page of any gerrit change.
Pushing code
------------
Airship uses the `OpenDev gerrit`_ for code review. Refer to the `OpenStack
Contributing Guide`_ for a tutorial on submitting changes to Gerrit code
review.
.. _OpenDev gerrit: https://review.opendev.org
.. _OpenStack Contributing Guide: https://docs.openstack.org/horizon/latest/contributor/contributing.html
Next steps
----------
Upon pushing a change to gerrit, Zuul continuous integration will post job
results on your patch. Refer to the job output by clicking on the job itself to
determine if further action is required. If it's not clear why a job failed,
please reach out to a team member in IRC. We are happy to assist!
Assuming all continuous integration jobs succeed, Airship community members and
core developers will review your patch and provide feedback. Many patches are
submitted to Airship projects each day. If your patch does not receive feedback
for several days, please reach out using IRC or the Airship mailing list.
Merging code
------------
Like most OpenDev projects, Airship patches require two +2 code review votes
from core members to merge. Once you have addressed all outstanding feedback,
your change will be merged.
Beyond
------
Congratulations! After your first change merges, please keep up-to-date with
the team. We hold two weekly meetings for project and design discussion:
Our weekly #airshipit IRC meeting provides an opportunity to discuss project
operations.
Our weekly design call provides an opportunity for in-depth discussion of new
and existing Airship features.
For more information on the times of each meeting, refer to the `Airship
wiki`_.
.. _Airship wiki: https://wiki.openstack.org/wiki/Airship

Binary file not shown.

Before

Width:  |  Height:  |  Size: 234 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 375 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 70 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 177 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 39 KiB

View File

@ -1,210 +0,0 @@
Airship Treasuremap
===================
Airship is a collection of components that coordinate to form means of
configuring and deploying and maintaining
a `Kubernetes <https://kubernetes.io/>`__ environment using a
declarative set of `yaml <http://yaml.org/>`__ documents.
More specifically, the current focus of this project is the implementation of
OpenStack on Kubernetes (OOK).
.. image:: diagrams/architecture.png
Component Projects
==================
Pegleg
------
`Pegleg <https://opendev.org/airship/pegleg>`_ is a document aggregator
that provides early linting and validations via Deckhand, a document management
micro-service within Airship.
Shipyard
--------
`Shipyard <https://opendev.org/airship/shipyard>`_ is the directed acyclic
graph controller for Kubernetes and OpenStack control plane life cycle
management.
Shipyard provides the entrypoint for the following aspects of the control plane:
Designs and Secrets
^^^^^^^^^^^^^^^^^^^
Site designs, including the configuration of bare metal host nodes, network
design, operating systems, Kubernetes nodes, Armada manifests, Helm charts,
and any other descriptors that define the build out of a group of servers enter
Airship via Shipyard. Secrets, such as passwords and certificates, use the same
mechanism.
The designs and secrets are stored in Airship's Deckhand, providing for version
history and secure storage among other document-based conveniences.
Actions
^^^^^^^
Interaction with the site's control plane is done via invocation of actions in
Shipyard. Each action is backed by a workflow implemented as a directed acyclic
graph (DAG) that runs using Apache Airflow. Shipyard provides a mechanism to
monitor and control the execution of the workflow.
Drydock
-------
`Drydock <https://opendev.org/airship/drydock>`_ is a provisioning orchestrator
for baremetal servers that translates a YAML-based declarative site topology into a
physical undercloud that can be used for building out an enterprise Kubernetes cluster.
It uses plugins to leverage existing provisioning systems to build the servers allowing
integration with the provisioning system that best fits the goals and environment of a site.
Capabilities
^^^^^^^^^^^^
* Initial IPMI configuration for PXE booting new servers.
* Support for Canonical MAAS provisioning.
* Configuration of complex network topologies including bonding,
tagged VLANs and static routes
* Support for running behind a corporate proxy
* Extensible boot action system for placing files and SystemD
units on nodes for post-deployment execution
* Supports Keystone-based authentication and authorization
Deckhand
--------
`Deckhand <https://opendev.org/airship/deckhand>`_ is a document-based
configuration storage service built with auditability and validation in mind.
Core Responsibilities
^^^^^^^^^^^^^^^^^^^^^
* layering - helps reduce duplication in configuration by applying the notion
of inheritance to documents
* substitution - provides separation between secret data and other
configuration data for security purposes and reduces data duplication by
allowing common data to be defined once and substituted elsewhere dynamically
* revision history - maintains well-defined collections of documents within
immutable revisions that are meant to operate together, while providing the
ability to rollback to previous revisions
* validation - allows services to implement and register different kinds of
validations and report errors
* secret management - leverages existing OpenStack APIs -- namely
`Barbican`_ -- to reliably and securely store sensitive data
Armada
------
`Armada <https://opendev.org/airship/armada>`_ is a tool for managing multiple
Helm charts with dependencies by centralizing all configurations in a single
Armada YAML and providing life-cycle hooks for all Helm releases.
Core Responsibilities
^^^^^^^^^^^^^^^^^^^^^
* Multiple Chart Deployments and Upgrades driven by Armada Manifests
* Manage multiple chart dependencies using Chart Groups
* Enhancing base Helm functionality
* Supports Keystone-based authentication and authorization
Kubernetes
----------
`Kubernetes <https://github.com/kubernetes/kubernetes>`_ is an open source
system for managing containerized applications across multiple hosts, providing
basic mechanisms for deployment, maintenance, and scaling of applications.
Promenade
---------
`Promenade <https://opendev.org/airship/promenade>`_ is a tool for
bootstrapping a resilient, self-hosted Kubernetes cluster and managing its
life-cycle.
Bootstrapping begins by provisioning a single-node cluster with a complete,
configurable Airship infrastructure. After hosts are added to the cluster, the
original bootstrapping node can be re-provisioned to avoid subtle differences
that could result in future issues.
Promenade provides cluster resiliency against both node failures and full
cluster restarts. It does so by leveraging
`Helm <https://github.com/kubernetes/helm>`_ charts to manage core Kubernetes
assets directly on each host, to ensure their availability.
Helm
----
`Helm <https://github.com/kubernetes/helm>`_ is a package manager for Kubernetes.
It helps you define, install, and upgrade even the most complex Kubernetes
applications using Helm charts.
A chart is a collection of files that describe a related set of Kubernetes
resources. Helm wraps up each chart's deployment into a concrete release,
a tidy little box that is a collection of all the Kubernetes resources that
compose that service, and so you can interact with a collection of Kubernetes
resources that compose a release as a single unit, either to install, upgrade,
or remove.
At its core, the value that Helm brings to the table -- at least for us -- is
allowing us to templatize our experience with Kubernetes resources, providing
a standard interface for operators or high-level software orchestrators to
control the installation and life cycle of Kubernetes applications.
OpenStack-Helm
--------------
The `OpenStack-Helm <https://github.com/openstack/openstack-helm>`_ project
provides a framework to enable the deployment, maintenance, and upgrading of
loosely coupled OpenStack services and their dependencies individually or as
part of complex environments.
OpenStack-Helm is essentially a marriage of Kubernetes, Helm, and OpenStack,
and seeks to create Helm charts for each OpenStack service. These Helm charts
provide complete life cycle management for these OpenStack services.
Users of OpenStack-Helm either deploy all or individual OpenStack components
along with their required dependencies. It heavily borrows concepts from
Stackanetes and complex Helm application deployments. Ideally, at the end of
the day, this project is meant to be a collaborative project that brings
OpenStack applications into a cloud-native model.
Divingbell
----------
`Divingbell <https://opendev.org/airship/divingbell>`_ is a lightweight
solution for:
1. Bare metal configuration management for a few very targeted use cases
2. Bare metal package manager orchestration
What problems does it solve?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The needs identified for Divingbell were:
1. To plug gaps in day 1 tools (e.g., Drydock) for node configuration
2. To provide a day 2 solution for managing these configurations going forward
3. [Future] To provide a day 2 solution for system level host patching
Berth
-----
`Berth <https://opendev.org/airship/berth>`_ is a deliberately minimalist VM
runner for Kubernetes.
Process Flows
=============
.. image:: diagrams/genesis.png
.. image:: diagrams/deploy_site.png
.. toctree::
:maxdepth: 2
authoring_and_deployment
config_update_guide
troubleshooting_guide
seaworthy
airsloop
airskiff
development_guide
.. _Barbican: https://docs.openstack.org/barbican/latest/api/
.. _Helm Homepage: https://helm.sh/
.. _Kubernetes Homepage: https://kubernetes.io/
.. _Openstack: https://www.openstack.org/
.. _Openstack Helm: https://github.com/openstack/openstack-helm
.. _Treasuremap: https://opendev.org/airship/treasuremap
.. _yaml: http://yaml.org/

View File

@ -1,69 +0,0 @@
Seaworthy: Production-grade Airship
===================================
Airship Seaworthy is a multi-node site deployment reference
and continuous integration pipeline.
The site manifests are available at
`site/seaworthy <https://opendev.org/airship/treasuremap/src/branch/master/site/seaworthy>`__.
Pipeline
--------
Airship Seaworthy pipeline automates deployment flow documented in
`Site Authoring and Deployment Guide <https://airship-treasuremap.readthedocs.io/en/latest/authoring_and_deployment.html>`__.
The pipeline is implemented as Jenkins Pipeline (Groovy), see code for the pipeline at
`Jenkinsfile <https://opendev.org/airship/treasuremap/src/branch/master/tools/gate/seaworthy/Jenkinsfile>`__.
Versions
--------
The manifest overrides (`versions.yaml <https://opendev.org/airship/treasuremap/src/branch/master/global/software/config/versions.yaml>`__)
are setup to deploy OpenStack Ocata.
The versions are kept up to date via `updater.py <https://opendev.org/airship/treasuremap/src/branch/master/tools/updater.py>`__,
a utility that updates versions.yaml latest charts and (selected) images.
Due to the limited capacity of a test environment, only Ubuntu-based images are used at the moment.
The pipeline attempts to uplift and deploy the latest versions on a daily basis.
Hardware
--------
While HW configuration is flexible, Airship Seaworthy reference manifests
reflect full HA deployment, similar to what might be expected in production.
Reducing number of control/compute nodes will require site overrides
to align parts of the system such as Ceph replication, etcd, etc.
Airship Seaworthy site has 6 DELL R720xd bare-metal servers:
3 control, and 3 compute nodes.
See host profiles for the servers `here <https://opendev.org/airship/treasuremap/src/branch/master/site/seaworthy/profiles/host>`__.
Control (masters)
- cab23-r720-11
- cab23-r720-12
- cab23-r720-13
Compute (workers)
- cab23-r720-14
- cab23-r720-17
- cab23-r720-19
Network
-------
Physical (underlay) networks are described in Drydock site configuration
`here <https://opendev.org/airship/treasuremap/src/branch/master/site/seaworthy/networks/physical/networks.yaml>`__.
It defines OOB (iLO/IPMI), untagged PXE, and multiple tagged general use networks.
Calico overlay for k8s POD networking uses IPIP mesh.
BGP peering is supported but not enabled in this setup, see
`Calico chart <https://github.com/openstack/openstack-helm-infra/blob/master/calico>`__.

View File

@ -1,177 +0,0 @@
Troubleshooting Guide
=====================
This guide provides information on troubleshooting of an Airship
environment. Debugging of any software component starts with gathering
more information about the failure, so the intention of the document
is not to describe specific issues that one can encounter, but to provide
a generic set of instructions that a user can follow to find the
root cause of the problem.
For additional support you can contact the Airship team via
`IRC or mailing list <https://www.airshipit.org/community/>`__,
use `Airship bug tracker <https://storyboard.openstack.org/#!/project_group/Airship>`__
to search and create issues.
Configuring Airship CLI
-----------------------
Many commands from this guide use Airship CLI, this section describes
how to get it configured on your environment.
::
git clone https://opendev.org/airship/treasuremap
cd treasuremap/
# List available tags.
git tag --list
# Switch to the version your site is using.
git checkout {your-tag}
# Go back to a previous directory.
cd ..
# Run it without arguments to get a help message.
sudo ./treasuremap/tools/airship
Manifests Preparation
---------------------
When you do any configuration changes to the manifests, there are a few
commands that you can use to validate the changes without uploading them
to the Airship environment.
Run ``lint`` command for your site; it helps to catch the errors related
to documents duplication, broken references, etc.
Example:
::
sudo ./treasuremap/tools/airship pegleg site -r airship-treasuremap/ \
lint {site-name}
If you create configuration overrides or do changes to substitutions,
it is recommended to run the ``render`` command; this command merges the layers
and renders all substitutions. This allows finding what parameters are
passed to Helm as overrides for Charts' defaults.
Example:
::
# Saves the result into rendered.txt file.
sudo ./treasuremap/tools/airship pegleg site -r treasuremap/ \
render -o rendered.txt ${SITE}
Deployment Failure
------------------
During the deployment, it is important to identify a specific step
where it fails, there are two major deployment steps:
1. **Drydock build**: deploys Operating System.
2. **Armada build**: deploys Helm Charts.
After `Configuring Airship CLI`_, setup credentials for accessing
Shipyard; the password is stored in ``ucp_shipyard_keystone_password``
secret, you can find it in
``site/seaworthy/secrets/passphrases/ucp_shipyard_keystone_password.yaml``
configuration file of your site.
::
export OS_USERNAME=shipyard
export OS_PASSWORD={shipyard_password}
Now you can use the following commands to access Shipyard:
::
# Get all actions that were executed on your environment.
sudo ./treasuremap/tools/airship shipyard get actions
# Show all the steps within the action.
sudo ./treasuremap/tools/airship shipyard describe action/{action_id}
# Get a bit more details on the step.
sudo ./treasuremap/tools/airship shipyard describe step/{action_id}/armada_build
# Print the logs from the step.
sudo ./treasuremap/tools/airship shipyard logs step/{action_id}/armada_build
After the failed step is determined, you can access the logs of a specific
service (e.g., drydock-api/maas or armada-api) to get more information
on the failure, note that there may be multiple pods of a single service
running, you need to check all of them to find where the most recent
logs are available.
Example of accessing Armada API logs:
::
# Get all pods running on the cluster and find a name of the pod you are
# interested in.
kubectl get pods -o wide --all-namespaces
# See the logs of specific pod.
kubectl logs -n ucp -f --tail 200 armada-api-d5f757d5-6z6nv
In some cases you want to restart your pod, there is no dedicated command for
that in Kubernetes. However, you can delete the pod, it will be restarted
by Kubernetes to satisfy replication factor.
::
# Restart Armada API service.
kubectl delete pod -n ucp armada-api-d5f757d5-6z6nv
Ceph
----
Many stateful services in Airship rely on Ceph to function correctly.
For more information on Ceph debugging follow an official
`Ceph debugging guide <http://docs.ceph.com/docs/mimic/rados/troubleshooting/log-and-debug/>`__.
Although Ceph tolerates failures of multiple OSDs, it is important
to make sure that your Ceph cluster is healthy.
Example:
::
# Get a name of Ceph Monitor pod.
CEPH_MON=$(sudo kubectl get pods --all-namespaces -o=name | \
grep ceph-mon | sed -n 1p | sed 's|pod/||')
# Get the status of the Ceph cluster.
sudo kubectl exec -it -n ceph ${CEPH_MON} -- ceph -s
The cluster is in a healthy state when the ``health`` parameter is set to ``HEALTH_OK``.
When the cluster is unhealthy, and some Placement Groups are reported to be in
degraded or down states, determine the problem by inspecting the logs of
Ceph OSD that is down using ``kubectl``.
::
# Get a name of Ceph Monitor pod.
CEPH_MON=$(sudo kubectl get pods --all-namespaces -o=name | \
grep ceph-mon | sed -n 1p | sed 's|pod/||')
# List a hierarchy of OSDs in the cluster to see what OSDs are down.
sudo kubectl exec -it -n ceph ${CEPH_MON} -- ceph osd tree
There are a few other commands that may be useful during the debugging:
::
# Get a name of Ceph Monitor pod.
CEPH_MON=$(sudo kubectl get pods --all-namespaces -o=name | \
grep ceph-mon | sed -n 1p | sed 's|pod/||')
# Get a detailed information on the status of every Placement Group.
sudo kubectl exec -it -n ceph ${CEPH_MON} -- ceph pg dump
# List allocated block devices.
sudo kubectl exec -it -n ceph ${CEPH_MON} -- rbd ls
# See what client uses the device.
sudo kubectl exec -it -n ceph ${CEPH_MON} -- rbd status \
kubernetes-dynamic-pvc-e71e65a9-3b99-11e9-bf31-e65b6238af01
# List all Ceph block devices mounted on a specific host.
mount | grep rbd

View File

@ -1,26 +0,0 @@
---
# Drydock boot action that installs the `airship.target` systemd target on
# every provisioned node. Other Airship units (e.g. promjoin.service) order
# themselves against this target.
schema: 'drydock/BootAction/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: airship-target
  storagePolicy: 'cleartext'
  layeringDefinition:
    abstract: false
    layer: global
data:
  # signaling: false — Drydock does not wait for a completion signal from
  # this boot action.
  signaling: false
  assets:
    - path: /etc/systemd/system/airship.target
      type: unit
      # Read-only for all users; unit files need no execute bit.
      permissions: '444'
      data: |
        [Unit]
        Description=Airship bootaction target
        After=multi-user.target cloud-init.target
        [Install]
        WantedBy=graphical.target
      data_pipeline:
        - utf8_decode
...

View File

@ -1,47 +0,0 @@
---
# Drydock boot action that writes AppArmor profile files onto provisioned
# nodes. The file paths and contents come from pegleg/AppArmorProfile/v1
# documents via Deckhand substitution.
schema: 'drydock/BootAction/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: apparmor-profiles
  storagePolicy: 'cleartext'
  layeringDefinition:
    abstract: false
    layer: global
  substitutions:
    # Substitutions are pairwise and order-sensitive: each profile document
    # contributes its on-node path (.savePath) and file body (.content) to a
    # fixed slot in .assets below.
    # assets[0] <- airship-default profile
    - src:
        schema: pegleg/AppArmorProfile/v1
        name: airship-default
        path: .savePath
      dest:
        path: .assets[0].path
    - src:
        schema: pegleg/AppArmorProfile/v1
        name: airship-default
        path: .content
      dest:
        path: .assets[0].data
    # assets[1] <- airship-apparmor-loader profile
    - src:
        schema: pegleg/AppArmorProfile/v1
        name: airship-apparmor-loader
        path: .savePath
      dest:
        path: .assets[1].path
    - src:
        schema: pegleg/AppArmorProfile/v1
        name: airship-apparmor-loader
        path: .content
      dest:
        path: .assets[1].data
data:
  # signaling: false — Drydock does not wait for a completion signal.
  signaling: false
  assets:
    # path/data for both assets are filled in by the substitutions above.
    - type: file
      permissions: '600'
      data_pipeline:
        - utf8_decode
    - type: file
      permissions: '600'
      data_pipeline:
        - utf8_decode
...

View File

@ -1,23 +0,0 @@
---
# Drydock boot action that enables nested virtualization for the kvm-intel
# kernel module on provisioned nodes (takes effect when the module loads).
schema: 'drydock/BootAction/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: nested-virt
  labels:
    name: nested-virt-global
    application: 'drydock'
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: 'cleartext'
data:
  # signaling: false — Drydock does not wait for a completion signal.
  signaling: false
  assets:
    - path: /etc/modprobe.d/nested-virt.conf
      type: file
      permissions: '644'
      data_pipeline:
        - utf8_decode
      data: |
        options kvm-intel nested=y
...

View File

@ -1,34 +0,0 @@
---
# Drydock boot action that installs the one-shot promjoin.service systemd
# unit. On first boot it runs /opt/promjoin.sh to join the node to the
# Kubernetes cluster via Promenade; the ConditionPathExists guard prevents
# re-running once /var/lib/prom.done exists.
schema: 'drydock/BootAction/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: promjoin-systemd-unit
  storagePolicy: 'cleartext'
  layeringDefinition:
    abstract: false
    layer: global
  labels:
    name: promjoin-systemd-unit
    application: 'drydock'
data:
  # signaling: false — Drydock does not wait for a completion signal.
  signaling: false
  assets:
    - path: /etc/systemd/system/promjoin.service
      type: unit
      # Read-only for all users; unit files need no execute bit.
      permissions: '444'
      data: |
        [Unit]
        Description=Promenade Initialization Service
        After=network-online.target local-fs.target cloud-init.target
        ConditionPathExists=!/var/lib/prom.done
        [Service]
        Type=oneshot
        ExecStart=/opt/promjoin.sh
        [Install]
        WantedBy=airship.target
      data_pipeline:
        - utf8_decode
...

View File

@ -1,31 +0,0 @@
---
# Drydock boot action that writes the default seccomp profile onto
# provisioned nodes. The file path and contents come from the
# pegleg/SeccompProfile/v1 document via Deckhand substitution.
schema: 'drydock/BootAction/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: seccomp-profiles
  storagePolicy: 'cleartext'
  layeringDefinition:
    abstract: false
    layer: global
  substitutions:
    # assets[0].path <- on-node destination path of the profile
    - src:
        schema: pegleg/SeccompProfile/v1
        name: seccomp-default
        path: .savePath
      dest:
        path: .assets[0].path
    # assets[0].data <- profile file body
    - src:
        schema: pegleg/SeccompProfile/v1
        name: seccomp-default
        path: .content
      dest:
        path: .assets[0].data
data:
  # signaling: false — Drydock does not wait for a completion signal.
  signaling: false
  assets:
    # path/data are filled in by the substitutions above.
    - type: file
      permissions: '600'
      data_pipeline:
        - utf8_decode
...

View File

@ -1,39 +0,0 @@
---
# The global deployment strategy assumes nodes are marked with node_tags
# of masters and workers.
# Shipyard deploys the "masters" group first; "workers" only starts after
# masters meets its success criteria (see depends_on below).
schema: shipyard/DeploymentStrategy/v1
metadata:
  schema: metadata/Document/v1
  name: deployment-strategy
  layeringDefinition:
    abstract: false
    layer: global
  labels:
    name: deployment-strategy-global
  storagePolicy: cleartext
data:
  groups:
    - name: masters
      # critical: true — failure of this group halts the deployment.
      critical: true
      depends_on: []
      selectors:
        # Empty lists act as wildcards; only the node_tags filter applies.
        - node_names: []
          node_labels: []
          node_tags:
            - masters
          rack_names: []
      success_criteria:
        # Every master node must deploy successfully.
        percent_successful_nodes: 100
    - name: workers
      critical: true
      depends_on:
        - masters
      selectors:
        - node_names: []
          node_labels: []
          node_tags:
            - workers
          rack_names: []
      success_criteria:
        # A partial worker deployment (>= 60% of nodes) is accepted.
        percent_successful_nodes: 60
...

View File

@ -1,12 +0,0 @@
---
# Deckhand layering policy: defines the precedence of document layers.
# Layers later in the list override earlier ones (site overrides type,
# type overrides global, etc.).
schema: deckhand/LayeringPolicy/v1
metadata:
  schema: metadata/Control/v1
  name: layering-policy
data:
  layerOrder:
    - global
    - type
    - site
    - cicd  # overrides for pipeline automation
...

View File

@ -1,150 +0,0 @@
---
schema: promenade/Genesis/v1
metadata:
schema: metadata/Document/v1
name: genesis-global
layeringDefinition:
abstract: true
layer: global
labels:
name: genesis-global
storagePolicy: cleartext
substitutions:
# Software versions for bootstrapping phase
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.ucp.armada.api
dest:
path: .images.armada
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.ucp.armada.tiller
dest:
path: .images.helm.tiller
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.kubernetes.apiserver.apiserver
dest:
path: .images.kubernetes.apiserver
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.kubernetes.controller-manager.controller_manager
dest:
path: .images.kubernetes.controller-manager
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.kubernetes.etcd.etcd
dest:
path: .images.kubernetes.etcd
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.kubernetes.scheduler.scheduler
dest:
path: .images.kubernetes.scheduler
# Site-specific configuration
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .genesis.hostname
dest:
path: .hostname
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .genesis.ip
dest:
path: .ip
# Command prefix
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.service_cidr
dest:
path: .apiserver.arguments[2]
pattern: SERVICE_CIDR
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.service_node_port_range
dest:
path: .apiserver.arguments[3]
pattern: SERVICE_NODE_PORT_RANGE
# Set etcd encryption policy
- src:
schema: promenade/EncryptionPolicy/v1
name: encryption-policy
path: .etcd
dest:
path: .apiserver.encryption
data:
apiserver:
arguments:
- --authorization-mode=Node,RBAC
- --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,NodeRestriction,EventRateLimit
- --service-cluster-ip-range=SERVICE_CIDR
- --service-node-port-range=SERVICE_NODE_PORT_RANGE
- --endpoint-reconciler-type=lease
- --feature-gates=PodShareProcessNamespace=true
- --v=3
- --admission-control-config-file=/etc/kubernetes/apiserver/acconfig.yaml
- --experimental-encryption-provider-config=/etc/kubernetes/apiserver/encryption_provider.yaml
- --requestheader-allowed-names='aggregator'
armada:
target_manifest: cluster-bootstrap
haproxy:
run_as_user: 65534
labels:
dynamic:
- beta.kubernetes.io/fluentd-ds-ready=true
- calico-etcd=enabled
- ceph-mds=enabled
- ceph-mon=enabled
- ceph-osd=enabled
- ceph-rgw=enabled
- ceph-mgr=enabled
- tenant-ceph-control-plane=enabled
- tenant-ceph-mon=enabled
- tenant-ceph-rgw=enabled
- tenant-ceph-mgr=enabled
- kube-dns=enabled
- kube-ingress=enabled
- kubernetes-apiserver=enabled
- kubernetes-controller-manager=enabled
- kubernetes-etcd=enabled
- kubernetes-scheduler=enabled
- promenade-genesis=enabled
- ucp-control-plane=enabled
- maas-rack=enabled
- maas-region=enabled
- node-exporter=enabled
files:
- path: /var/lib/anchor/calico-etcd-bootstrap
content: "# placeholder for triggering calico etcd bootstrapping\n# this file will be deleted"
mode: 0644
- path: /etc/genesis/apiserver/acconfig.yaml
mode: 0444
content: |
kind: AdmissionConfiguration
apiVersion: apiserver.k8s.io/v1alpha1
plugins:
- name: EventRateLimit
path: eventconfig.yaml
- path: /etc/genesis/apiserver/eventconfig.yaml
mode: 0444
content: |
kind: Configuration
apiVersion: eventratelimit.admission.k8s.io/v1alpha1
limits:
- type: Server
qps: 1000
burst: 10000

View File

@ -1,19 +0,0 @@
---
schema: 'drydock/HardwareProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: DELL_HP_Generic
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data:
vendor: Dell
generation: '8'
hw_version: '3'
bios_version: '2.2.3'
boot_mode: bios
bootstrap_protocol: pxe
pxe_interface: 0
device_aliases: {}
...

View File

@ -1,116 +0,0 @@
---
schema: drydock/HostProfile/v1
metadata:
schema: metadata/Document/v1
name: cp-global
storagePolicy: cleartext
labels:
hosttype: cp-global
layeringDefinition:
abstract: true
layer: global
substitutions:
- dest:
path: .oob.credential
src:
schema: deckhand/Passphrase/v1
name: ipmi_admin_password
path: .
data:
oob:
type: 'ipmi'
network: 'oob'
account: 'root'
storage:
physical_devices:
sda:
labels:
bootdrive: 'true'
partitions:
- name: 'root'
size: '30g'
bootable: true
filesystem:
mountpoint: '/'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'boot'
size: '1g'
filesystem:
mountpoint: '/boot'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var'
size: '>100g'
filesystem:
mountpoint: '/var'
fstype: 'ext4'
mount_options: 'defaults'
platform:
image: 'xenial'
kernel: 'hwe-16.04'
kernel_params:
kernel_package: 'linux-image-4.15.0-46-generic'
metadata:
owner_data:
control-plane: enabled
ucp-control-plane: enabled
openstack-control-plane: enabled
openstack-heat: enabled
openstack-keystone: enabled
openstack-rabbitmq: enabled
openstack-dns-helper: enabled
openstack-mariadb: enabled
openstack-nova-control: enabled
openstack-etcd: enabled
openstack-mistral: enabled
openstack-memcached: enabled
openstack-glance: enabled
openstack-horizon: enabled
openstack-cinder-control: enabled
openstack-cinder-volume: control
openstack-neutron: enabled
openvswitch: enabled
ucp-barbican: enabled
ceph-mon: enabled
ceph-mgr: enabled
ceph-osd: enabled
ceph-mds: enabled
ceph-rgw: enabled
tenant-ceph-control-plane: enabled
tenant-ceph-mon: enabled
tenant-ceph-rgw: enabled
tenant-ceph-mgr: enabled
maas-rack: enabled
maas-region: enabled
kube-dns: enabled
kubernetes-apiserver: enabled
kubernetes-controller-manager: enabled
kubernetes-etcd: enabled
kubernetes-scheduler: enabled
tiller-helm: enabled
kube-etcd: enabled
calico-policy: enabled
calico-node: enabled
calico-etcd: enabled
ucp-armada: enabled
ucp-drydock: enabled
ucp-deckhand: enabled
ucp-shipyard: enabled
IAM: enabled
ucp-promenade: enabled
prometheus-server: enabled
prometheus-client: enabled
fluentd: enabled
fluentbit: enabled
influxdb: enabled
kibana: enabled
elasticsearch-client: enabled
elasticsearch-master: enabled
elasticsearch-data: enabled
postgresql: enabled
kube-ingress: enabled
beta.kubernetes.io/fluentd-ds-ready: 'true'
node-exporter: enabled
...

View File

@ -1,65 +0,0 @@
---
schema: drydock/HostProfile/v1
metadata:
schema: metadata/Document/v1
name: dp-global
labels:
hosttype: dp-global
layeringDefinition:
abstract: true
layer: global
storagePolicy: cleartext
substitutions:
- dest:
path: .oob.credential
src:
schema: deckhand/Passphrase/v1
name: ipmi_admin_password
path: .
data:
oob:
type: 'ipmi'
network: 'oob'
account: 'root'
storage:
physical_devices:
sda:
labels:
bootdrive: 'true'
partitions:
- name: 'root'
size: '30g'
bootable: true
filesystem:
mountpoint: '/'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'boot'
size: '1g'
filesystem:
mountpoint: '/boot'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var'
size: '>100g'
filesystem:
mountpoint: '/var'
fstype: 'ext4'
mount_options: 'defaults'
platform:
image: 'xenial'
kernel: 'hwe-16.04'
kernel_params:
kernel_package: 'linux-image-4.15.0-46-generic'
metadata:
owner_data:
openstack-nova-compute: enabled
tenant-ceph-osd: enabled
openvswitch: enabled
contrail-vrouter: kernel
openstack-libvirt: kernel
beta.kubernetes.io/fluentd-ds-ready: 'true'
node-exporter: enabled
fluentbit: enabled
...

View File

@ -1,200 +0,0 @@
---
schema: promenade/HostSystem/v1
metadata:
schema: metadata/Document/v1
name: host-system
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.kubernetes.hyperkube
dest:
path: .files[0].docker_image
# Initial CoreDNS image (used during node Genesis and node join)
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.kubernetes.coredns.coredns
dest:
path: .images.coredns
# Initial CoreDNS image (used during node Genesis and node join)
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.kubernetes.haproxy.haproxy
dest:
path: .images.haproxy
# Operational tools
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.ucp.armada.helm
dest:
path: .images.helm.helm
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.kubernetes.hyperkube
dest:
path: .images.kubernetes.hyperkube
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.ucp.promenade.monitoring_image
dest:
path: .images.monitoring_image
# System packages
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .packages.named.docker
dest:
path: .packages.common.required.docker
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .packages.named.socat
dest:
path: .packages.common.required.socat
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .packages.unnamed
dest:
path: .packages.common.additional
# Docker authorization
- src:
schema: deckhand/Passphrase/v1
path: .
name: private_docker_key
dest:
path: .files[4].content
pattern: DH_SUB_PRIVATE_DOCKER_KEY
data:
files:
- path: /opt/kubernetes/bin/hyperkube
file_path: /hyperkube
mode: 0555
- path: /opt/kubernetes/bin/kubelet
symlink: /opt/kubernetes/bin/hyperkube
mode: 0555
- path: /usr/local/bin/kubectl
symlink: /opt/kubernetes/bin/hyperkube
mode: 0555
- path: /etc/logrotate.d/json-logrotate
mode: 0444
content: |-
/var/lib/docker/containers/*/*-json.log
{
compress
copytruncate
create 0644 root root
weekly
dateext
dateformat -%Y%m%d-%s
maxsize 100M
missingok
notifempty
su root root
rotate 1
}
- path: /var/lib/kubelet/.dockercfg
mode: 0400
# NOTE: Sample key, this repo does not exist
content: |-
{
"https://private.registry.com": {
"auth": "DH_SUB_PRIVATE_DOCKER_KEY"
}
}
# Make sure that promjoin script does not run on every boot,
# otherwise it may downgrade current versions of Docker & Kubelet.
- path: /var/lib/prom.done
mode: 0444
content: ""
- path: /etc/profile.d/kubeconfig.sh
mode: 0744
content: |-
export KUBECONFIG=/etc/kubernetes/admin/kubeconfig.yaml
packages:
common:
repositories:
- deb https://download.docker.com/linux/ubuntu/ xenial stable
keys:
- |-
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQINBFit2ioBEADhWpZ8/wvZ6hUTiXOwQHXMAlaFHcPH9hAtr4F1y2+OYdbtMuth
lqqwp028AqyY+PRfVMtSYMbjuQuu5byyKR01BbqYhuS3jtqQmljZ/bJvXqnmiVXh
38UuLa+z077PxyxQhu5BbqntTPQMfiyqEiU+BKbq2WmANUKQf+1AmZY/IruOXbnq
L4C1+gJ8vfmXQt99npCaxEjaNRVYfOS8QcixNzHUYnb6emjlANyEVlZzeqo7XKl7
UrwV5inawTSzWNvtjEjj4nJL8NsLwscpLPQUhTQ+7BbQXAwAmeHCUTQIvvWXqw0N
cmhh4HgeQscQHYgOJjjDVfoY5MucvglbIgCqfzAHW9jxmRL4qbMZj+b1XoePEtht
ku4bIQN1X5P07fNWzlgaRL5Z4POXDDZTlIQ/El58j9kp4bnWRCJW0lya+f8ocodo
vZZ+Doi+fy4D5ZGrL4XEcIQP/Lv5uFyf+kQtl/94VFYVJOleAv8W92KdgDkhTcTD
G7c0tIkVEKNUq48b3aQ64NOZQW7fVjfoKwEZdOqPE72Pa45jrZzvUFxSpdiNk2tZ
XYukHjlxxEgBdC/J3cMMNRE1F4NCA3ApfV1Y7/hTeOnmDuDYwr9/obA8t016Yljj
q5rdkywPf4JF8mXUW5eCN1vAFHxeg9ZWemhBtQmGxXnw9M+z6hWwc6ahmwARAQAB
tCtEb2NrZXIgUmVsZWFzZSAoQ0UgZGViKSA8ZG9ja2VyQGRvY2tlci5jb20+iQI3
BBMBCgAhBQJYrefAAhsvBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEI2BgDwO
v82IsskP/iQZo68flDQmNvn8X5XTd6RRaUH33kXYXquT6NkHJciS7E2gTJmqvMqd
tI4mNYHCSEYxI5qrcYV5YqX9P6+Ko+vozo4nseUQLPH/ATQ4qL0Zok+1jkag3Lgk
jonyUf9bwtWxFp05HC3GMHPhhcUSexCxQLQvnFWXD2sWLKivHp2fT8QbRGeZ+d3m
6fqcd5Fu7pxsqm0EUDK5NL+nPIgYhN+auTrhgzhK1CShfGccM/wfRlei9Utz6p9P
XRKIlWnXtT4qNGZNTN0tR+NLG/6Bqd8OYBaFAUcue/w1VW6JQ2VGYZHnZu9S8LMc
FYBa5Ig9PxwGQOgq6RDKDbV+PqTQT5EFMeR1mrjckk4DQJjbxeMZbiNMG5kGECA8
g383P3elhn03WGbEEa4MNc3Z4+7c236QI3xWJfNPdUbXRaAwhy/6rTSFbzwKB0Jm
ebwzQfwjQY6f55MiI/RqDCyuPj3r3jyVRkK86pQKBAJwFHyqj9KaKXMZjfVnowLh
9svIGfNbGHpucATqREvUHuQbNnqkCx8VVhtYkhDb9fEP2xBu5VvHbR+3nfVhMut5
G34Ct5RS7Jt6LIfFdtcn8CaSas/l1HbiGeRgc70X/9aYx/V/CEJv0lIe8gP6uDoW
FPIZ7d6vH+Vro6xuWEGiuMaiznap2KhZmpkgfupyFmplh0s6knymuQINBFit2ioB
EADneL9S9m4vhU3blaRjVUUyJ7b/qTjcSylvCH5XUE6R2k+ckEZjfAMZPLpO+/tF
M2JIJMD4SifKuS3xck9KtZGCufGmcwiLQRzeHF7vJUKrLD5RTkNi23ydvWZgPjtx
Q+DTT1Zcn7BrQFY6FgnRoUVIxwtdw1bMY/89rsFgS5wwuMESd3Q2RYgb7EOFOpnu
w6da7WakWf4IhnF5nsNYGDVaIHzpiqCl+uTbf1epCjrOlIzkZ3Z3Yk5CM/TiFzPk
z2lLz89cpD8U+NtCsfagWWfjd2U3jDapgH+7nQnCEWpROtzaKHG6lA3pXdix5zG8
eRc6/0IbUSWvfjKxLLPfNeCS2pCL3IeEI5nothEEYdQH6szpLog79xB9dVnJyKJb
VfxXnseoYqVrRz2VVbUI5Blwm6B40E3eGVfUQWiux54DspyVMMk41Mx7QJ3iynIa
1N4ZAqVMAEruyXTRTxc9XW0tYhDMA/1GYvz0EmFpm8LzTHA6sFVtPm/ZlNCX6P1X
zJwrv7DSQKD6GGlBQUX+OeEJ8tTkkf8QTJSPUdh8P8YxDFS5EOGAvhhpMBYD42kQ
pqXjEC+XcycTvGI7impgv9PDY1RCC1zkBjKPa120rNhv/hkVk/YhuGoajoHyy4h7
ZQopdcMtpN2dgmhEegny9JCSwxfQmQ0zK0g7m6SHiKMwjwARAQABiQQ+BBgBCAAJ
BQJYrdoqAhsCAikJEI2BgDwOv82IwV0gBBkBCAAGBQJYrdoqAAoJEH6gqcPyc/zY
1WAP/2wJ+R0gE6qsce3rjaIz58PJmc8goKrir5hnElWhPgbq7cYIsW5qiFyLhkdp
YcMmhD9mRiPpQn6Ya2w3e3B8zfIVKipbMBnke/ytZ9M7qHmDCcjoiSmwEXN3wKYI
mD9VHONsl/CG1rU9Isw1jtB5g1YxuBA7M/m36XN6x2u+NtNMDB9P56yc4gfsZVES
KA9v+yY2/l45L8d/WUkUi0YXomn6hyBGI7JrBLq0CX37GEYP6O9rrKipfz73XfO7
JIGzOKZlljb/D9RX/g7nRbCn+3EtH7xnk+TK/50euEKw8SMUg147sJTcpQmv6UzZ
cM4JgL0HbHVCojV4C/plELwMddALOFeYQzTif6sMRPf+3DSj8frbInjChC3yOLy0
6br92KFom17EIj2CAcoeq7UPhi2oouYBwPxh5ytdehJkoo+sN7RIWua6P2WSmon5
U888cSylXC0+ADFdgLX9K2zrDVYUG1vo8CX0vzxFBaHwN6Px26fhIT1/hYUHQR1z
VfNDcyQmXqkOnZvvoMfz/Q0s9BhFJ/zU6AgQbIZE/hm1spsfgvtsD1frZfygXJ9f
irP+MSAI80xHSf91qSRZOj4Pl3ZJNbq4yYxv0b1pkMqeGdjdCYhLU+LZ4wbQmpCk
SVe2prlLureigXtmZfkqevRz7FrIZiu9ky8wnCAPwC7/zmS18rgP/17bOtL4/iIz
QhxAAoAMWVrGyJivSkjhSGx1uCojsWfsTAm11P7jsruIL61ZzMUVE2aM3Pmj5G+W
9AcZ58Em+1WsVnAXdUR//bMmhyr8wL/G1YO1V3JEJTRdxsSxdYa4deGBBY/Adpsw
24jxhOJR+lsJpqIUeb999+R8euDhRHG9eFO7DRu6weatUJ6suupoDTRWtr/4yGqe
dKxV3qQhNLSnaAzqW/1nA3iUB4k7kCaKZxhdhDbClf9P37qaRW467BLCVO/coL3y
Vm50dwdrNtKpMBh3ZpbB1uJvgi9mXtyBOMJ3v8RZeDzFiG8HdCtg9RvIt/AIFoHR
H3S+U79NT6i0KPzLImDfs8T7RlpyuMc4Ufs8ggyg9v3Ae6cN3eQyxcK3w0cbBwsh
/nQNfsA6uu+9H7NhbehBMhYnpNZyrHzCmzyXkauwRAqoCbGCNykTRwsur9gS41TQ
M8ssD1jFheOJf3hODnkKU+HKjvMROl1DK7zdmLdNzA1cvtZH/nCC9KPj1z8QC47S
xx+dTZSx4ONAhwbS/LN3PoKtn8LPjY9NP9uDWI+TWYquS2U+KHDrBDlsgozDbs/O
jCxcpDzNmXpWQHEtHU7649OXHP7UeNST1mCUCH5qdank0V1iejF6/CfTFU4MfcrG
YT90qFF93M3v01BbxP+EIY2/9tiIPbrd
=0YYh
-----END PGP PUBLIC KEY BLOCK-----
...

View File

@ -1,80 +0,0 @@
---
schema: 'pegleg/AppArmorProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: airship-apparmor-loader
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: global
data:
savePath: /etc/apparmor.d/profile_airship_loader
content: |
#include <tunables/global>
profile airship-apparmor-loader flags=(attach_disconnected,mediate_deleted) {
#include <abstractions/base>
network inet tcp,
network inet udp,
network inet icmp,
deny network raw,
deny network packet,
file,
umount,
deny /bin/** wl,
deny /boot/** wl,
deny /dev/** wl,
deny /etc/** wl,
deny /home/** wl,
deny /lib/** wl,
deny /lib64/** wl,
deny /media/** wl,
deny /mnt/** wl,
deny /opt/** wl,
deny /proc/** wl,
deny /root/** wl,
deny /sbin/** wl,
deny /srv/** wl,
deny /tmp/** wl,
deny /sys/** wl,
deny /usr/** wl,
audit /etc/apparmor.d/airship_* rwl,
audit /** w,
deny /bin/dash mrwklx,
deny /bin/sh mrwklx,
deny /usr/bin/top mrwklx,
capability chown,
# Allow Apparmor profiles to be loaded
capability mac_admin,
capability dac_override,
capability setuid,
capability setgid,
deny @{PROC}/* w, # deny write for all files directly in /proc (not in a subdir)
# deny write to files not in /proc/<number>/** or /proc/sys/**
deny @{PROC}/{[^1-9],[^1-9][^0-9],[^1-9s][^0-9y][^0-9s],[^1-9][^0-9][^0-9][^0-9]*}/** w,
deny @{PROC}/sys/[^k]** w, # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel)
deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w, # deny everything except shm* in /proc/sys/kernel/
deny @{PROC}/sysrq-trigger rwklx,
deny @{PROC}/mem rwklx,
deny @{PROC}/kmem rwklx,
deny @{PROC}/kcore rwklx,
deny mount,
deny /sys/[^f]*/** wklx,
deny /sys/f[^s]*/** wklx,
deny /sys/fs/[^c]*/** wklx,
deny /sys/fs/c[^g]*/** wklx,
deny /sys/fs/cg[^r]*/** wklx,
deny /sys/firmware/** rwklx,
deny /sys/kernel/security/** rwklx,
}

View File

@ -1,78 +0,0 @@
---
schema: 'pegleg/AppArmorProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: airship-default
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: global
data:
savePath: /etc/apparmor.d/profile_airship_default
content: |
#include <tunables/global>
profile airship-default flags=(attach_disconnected,mediate_deleted) {
#include <abstractions/base>
network inet tcp,
network inet udp,
network inet icmp,
deny network raw,
deny network packet,
file,
umount,
deny /bin/** wl,
deny /boot/** wl,
deny /dev/** wl,
deny /etc/** wl,
deny /home/** wl,
deny /lib/** wl,
deny /lib64/** wl,
deny /media/** wl,
deny /mnt/** wl,
deny /opt/** wl,
deny /proc/** wl,
deny /root/** wl,
deny /sbin/** wl,
deny /srv/** wl,
deny /tmp/** wl,
deny /sys/** wl,
deny /usr/** wl,
audit /** w,
deny /bin/dash mrwklx,
deny /bin/sh mrwklx,
deny /usr/bin/top mrwklx,
capability chown,
capability dac_override,
capability setuid,
capability setgid,
capability net_bind_service,
deny @{PROC}/* w, # deny write for all files directly in /proc (not in a subdir)
# deny write to files not in /proc/<number>/** or /proc/sys/**
deny @{PROC}/{[^1-9],[^1-9][^0-9],[^1-9s][^0-9y][^0-9s],[^1-9][^0-9][^0-9][^0-9]*}/** w,
deny @{PROC}/sys/[^k]** w, # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel)
deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w, # deny everything except shm* in /proc/sys/kernel/
deny @{PROC}/sysrq-trigger rwklx,
deny @{PROC}/mem rwklx,
deny @{PROC}/kmem rwklx,
deny @{PROC}/kcore rwklx,
deny mount,
deny /sys/[^f]*/** wklx,
deny /sys/f[^s]*/** wklx,
deny /sys/fs/[^c]*/** wklx,
deny /sys/fs/c[^g]*/** wklx,
deny /sys/fs/cg[^r]*/** wklx,
deny /sys/firmware/** rwklx,
deny /sys/kernel/security/** rwklx,
}

View File

@ -1,787 +0,0 @@
---
# The data content of this file is referred from the Moby project as
# mentioned in the link below:
# https://github.com/moby/moby/blob/master/profiles/seccomp/default.json
schema: 'pegleg/SeccompProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: seccomp-default
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: global
data:
# Path for seccomp profile root directory.
seccompDirPath: /var/lib/kubelet/seccomp
# Path to save seccomp profile as file.
# This should be same as seccompDirPath with file name.
savePath: /var/lib/kubelet/seccomp/seccomp_default
# Content of default seccomp profile file.
content: |
{
"defaultAction": "SCMP_ACT_ERRNO",
"archMap": [
{
"architecture": "SCMP_ARCH_X86_64",
"subArchitectures": [
"SCMP_ARCH_X86",
"SCMP_ARCH_X32"
]
},
{
"architecture": "SCMP_ARCH_AARCH64",
"subArchitectures": [
"SCMP_ARCH_ARM"
]
},
{
"architecture": "SCMP_ARCH_MIPS64",
"subArchitectures": [
"SCMP_ARCH_MIPS",
"SCMP_ARCH_MIPS64N32"
]
},
{
"architecture": "SCMP_ARCH_MIPS64N32",
"subArchitectures": [
"SCMP_ARCH_MIPS",
"SCMP_ARCH_MIPS64"
]
},
{
"architecture": "SCMP_ARCH_MIPSEL64",
"subArchitectures": [
"SCMP_ARCH_MIPSEL",
"SCMP_ARCH_MIPSEL64N32"
]
},
{
"architecture": "SCMP_ARCH_MIPSEL64N32",
"subArchitectures": [
"SCMP_ARCH_MIPSEL",
"SCMP_ARCH_MIPSEL64"
]
},
{
"architecture": "SCMP_ARCH_S390X",
"subArchitectures": [
"SCMP_ARCH_S390"
]
}
],
"syscalls": [
{
"names": [
"accept",
"accept4",
"access",
"adjtimex",
"alarm",
"bind",
"brk",
"capget",
"capset",
"chdir",
"chmod",
"chown",
"chown32",
"clock_getres",
"clock_gettime",
"clock_nanosleep",
"close",
"connect",
"copy_file_range",
"creat",
"dup",
"dup2",
"dup3",
"epoll_create",
"epoll_create1",
"epoll_ctl",
"epoll_ctl_old",
"epoll_pwait",
"epoll_wait",
"epoll_wait_old",
"eventfd",
"eventfd2",
"execve",
"execveat",
"exit",
"exit_group",
"faccessat",
"fadvise64",
"fadvise64_64",
"fallocate",
"fanotify_mark",
"fchdir",
"fchmod",
"fchmodat",
"fchown",
"fchown32",
"fchownat",
"fcntl",
"fcntl64",
"fdatasync",
"fgetxattr",
"flistxattr",
"flock",
"fork",
"fremovexattr",
"fsetxattr",
"fstat",
"fstat64",
"fstatat64",
"fstatfs",
"fstatfs64",
"fsync",
"ftruncate",
"ftruncate64",
"futex",
"futimesat",
"getcpu",
"getcwd",
"getdents",
"getdents64",
"getegid",
"getegid32",
"geteuid",
"geteuid32",
"getgid",
"getgid32",
"getgroups",
"getgroups32",
"getitimer",
"getpeername",
"getpgid",
"getpgrp",
"getpid",
"getppid",
"getpriority",
"getrandom",
"getresgid",
"getresgid32",
"getresuid",
"getresuid32",
"getrlimit",
"get_robust_list",
"getrusage",
"getsid",
"getsockname",
"getsockopt",
"get_thread_area",
"gettid",
"gettimeofday",
"getuid",
"getuid32",
"getxattr",
"inotify_add_watch",
"inotify_init",
"inotify_init1",
"inotify_rm_watch",
"io_cancel",
"ioctl",
"io_destroy",
"io_getevents",
"ioprio_get",
"ioprio_set",
"io_setup",
"io_submit",
"ipc",
"kill",
"lchown",
"lchown32",
"lgetxattr",
"link",
"linkat",
"listen",
"listxattr",
"llistxattr",
"_llseek",
"lremovexattr",
"lseek",
"lsetxattr",
"lstat",
"lstat64",
"madvise",
"memfd_create",
"mincore",
"mkdir",
"mkdirat",
"mknod",
"mknodat",
"mlock",
"mlock2",
"mlockall",
"mmap",
"mmap2",
"mprotect",
"mq_getsetattr",
"mq_notify",
"mq_open",
"mq_timedreceive",
"mq_timedsend",
"mq_unlink",
"mremap",
"msgctl",
"msgget",
"msgrcv",
"msgsnd",
"msync",
"munlock",
"munlockall",
"munmap",
"nanosleep",
"newfstatat",
"_newselect",
"open",
"openat",
"pause",
"pipe",
"pipe2",
"poll",
"ppoll",
"prctl",
"pread64",
"preadv",
"preadv2",
"prlimit64",
"pselect6",
"pwrite64",
"pwritev",
"pwritev2",
"read",
"readahead",
"readlink",
"readlinkat",
"readv",
"recv",
"recvfrom",
"recvmmsg",
"recvmsg",
"remap_file_pages",
"removexattr",
"rename",
"renameat",
"renameat2",
"restart_syscall",
"rmdir",
"rt_sigaction",
"rt_sigpending",
"rt_sigprocmask",
"rt_sigqueueinfo",
"rt_sigreturn",
"rt_sigsuspend",
"rt_sigtimedwait",
"rt_tgsigqueueinfo",
"sched_getaffinity",
"sched_getattr",
"sched_getparam",
"sched_get_priority_max",
"sched_get_priority_min",
"sched_getscheduler",
"sched_rr_get_interval",
"sched_setaffinity",
"sched_setattr",
"sched_setparam",
"sched_setscheduler",
"sched_yield",
"seccomp",
"select",
"semctl",
"semget",
"semop",
"semtimedop",
"send",
"sendfile",
"sendfile64",
"sendmmsg",
"sendmsg",
"sendto",
"setfsgid",
"setfsgid32",
"setfsuid",
"setfsuid32",
"setgid",
"setgid32",
"setgroups",
"setgroups32",
"setitimer",
"setpgid",
"setpriority",
"setregid",
"setregid32",
"setresgid",
"setresgid32",
"setresuid",
"setresuid32",
"setreuid",
"setreuid32",
"setrlimit",
"set_robust_list",
"setsid",
"setsockopt",
"set_thread_area",
"set_tid_address",
"setuid",
"setuid32",
"setxattr",
"shmat",
"shmctl",
"shmdt",
"shmget",
"shutdown",
"sigaltstack",
"signalfd",
"signalfd4",
"sigreturn",
"socket",
"socketcall",
"socketpair",
"splice",
"stat",
"stat64",
"statfs",
"statfs64",
"statx",
"symlink",
"symlinkat",
"sync",
"sync_file_range",
"syncfs",
"sysinfo",
"syslog",
"tee",
"tgkill",
"time",
"timer_create",
"timer_delete",
"timerfd_create",
"timerfd_gettime",
"timerfd_settime",
"timer_getoverrun",
"timer_gettime",
"timer_settime",
"times",
"tkill",
"truncate",
"truncate64",
"ugetrlimit",
"umask",
"uname",
"unlink",
"unlinkat",
"utime",
"utimensat",
"utimes",
"vfork",
"vmsplice",
"wait4",
"waitid",
"waitpid",
"write",
"writev"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {},
"excludes": {}
},
{
"names": [
"personality"
],
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 0,
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
}
],
"comment": "",
"includes": {},
"excludes": {}
},
{
"names": [
"personality"
],
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 8,
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
}
],
"comment": "",
"includes": {},
"excludes": {}
},
{
"names": [
"personality"
],
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 131072,
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
}
],
"comment": "",
"includes": {},
"excludes": {}
},
{
"names": [
"personality"
],
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 131080,
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
}
],
"comment": "",
"includes": {},
"excludes": {}
},
{
"names": [
"personality"
],
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 4294967295,
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
}
],
"comment": "",
"includes": {},
"excludes": {}
},
{
"names": [
"sync_file_range2"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"arches": [
"ppc64le"
]
},
"excludes": {}
},
{
"names": [
"arm_fadvise64_64",
"arm_sync_file_range",
"sync_file_range2",
"breakpoint",
"cacheflush",
"set_tls"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"arches": [
"arm",
"arm64"
]
},
"excludes": {}
},
{
"names": [
"arch_prctl"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"arches": [
"amd64",
"x32"
]
},
"excludes": {}
},
{
"names": [
"modify_ldt"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"arches": [
"amd64",
"x32",
"x86"
]
},
"excludes": {}
},
{
"names": [
"s390_pci_mmio_read",
"s390_pci_mmio_write",
"s390_runtime_instr"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"arches": [
"s390",
"s390x"
]
},
"excludes": {}
},
{
"names": [
"open_by_handle_at"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"caps": [
"CAP_DAC_READ_SEARCH"
]
},
"excludes": {}
},
{
"names": [
"bpf",
"clone",
"fanotify_init",
"lookup_dcookie",
"mount",
"name_to_handle_at",
"perf_event_open",
"quotactl",
"setdomainname",
"sethostname",
"setns",
"umount",
"umount2",
"unshare"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"caps": [
"CAP_SYS_ADMIN"
]
},
"excludes": {}
},
{
"names": [
"clone"
],
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 2080505856,
"valueTwo": 0,
"op": "SCMP_CMP_MASKED_EQ"
}
],
"comment": "",
"includes": {},
"excludes": {
"caps": [
"CAP_SYS_ADMIN"
],
"arches": [
"s390",
"s390x"
]
}
},
{
"names": [
"clone"
],
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 1,
"value": 2080505856,
"valueTwo": 0,
"op": "SCMP_CMP_MASKED_EQ"
}
],
"comment": "s390 parameter ordering for clone is different",
"includes": {
"arches": [
"s390",
"s390x"
]
},
"excludes": {
"caps": [
"CAP_SYS_ADMIN"
]
}
},
{
"names": [
"reboot"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"caps": [
"CAP_SYS_BOOT"
]
},
"excludes": {}
},
{
"names": [
"chroot"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"caps": [
"CAP_SYS_CHROOT"
]
},
"excludes": {}
},
{
"names": [
"delete_module",
"init_module",
"finit_module",
"query_module"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"caps": [
"CAP_SYS_MODULE"
]
},
"excludes": {}
},
{
"names": [
"acct"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"caps": [
"CAP_SYS_PACCT"
]
},
"excludes": {}
},
{
"names": [
"kcmp",
"process_vm_readv",
"process_vm_writev",
"ptrace"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"caps": [
"CAP_SYS_PTRACE"
]
},
"excludes": {}
},
{
"names": [
"iopl",
"ioperm"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"caps": [
"CAP_SYS_RAWIO"
]
},
"excludes": {}
},
{
"names": [
"settimeofday",
"stime",
"clock_settime"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"caps": [
"CAP_SYS_TIME"
]
},
"excludes": {}
},
{
"names": [
"vhangup"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"caps": [
"CAP_SYS_TTY_CONFIG"
]
},
"excludes": {}
},
{
"names": [
"get_mempolicy",
"mbind",
"set_mempolicy"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"caps": [
"CAP_SYS_NICE"
]
},
"excludes": {}
}
]
}

View File

@ -1,12 +0,0 @@
---
schema: 'deckhand/DataSchema/v1'
metadata:
schema: metadata/Control/v1
name: armada/Chart/v1
labels:
application: armada
data:
$schema: 'http://json-schema.org/schema#'
type: 'object'
additionalProperties: true
...

View File

@ -1,12 +0,0 @@
---
schema: 'deckhand/DataSchema/v1'
metadata:
schema: metadata/Control/v1
name: armada/ChartGroup/v1
labels:
application: armada
data:
$schema: 'http://json-schema.org/schema#'
type: 'object'
additionalProperties: true
...

View File

@ -1,12 +0,0 @@
---
schema: 'deckhand/DataSchema/v1'
metadata:
schema: metadata/Control/v1
name: armada/Manifest/v1
labels:
application: armada
data:
$schema: 'http://json-schema.org/schema#'
type: 'object'
additionalProperties: true
...

View File

@ -1,161 +0,0 @@
---
schema: 'deckhand/DataSchema/v1'
metadata:
schema: metadata/Control/v1
name: drydock/BaremetalNode/v1
labels:
application: drydock
data:
$schema: 'http://json-schema.org/schema#'
type: 'object'
properties:
addressing:
type: 'array'
items:
type: 'object'
properties:
address:
type: 'string'
network:
type: 'string'
oob:
type: 'object'
properties:
type:
type: 'string'
network:
type: 'string'
account:
type: 'string'
          credential:
type: 'string'
additionalProperties: true
storage:
type: 'object'
properties:
physical_devices:
type: 'object'
additionalProperties:
type: 'object'
properties:
labels:
type: 'object'
additionalProperties:
type: 'string'
volume_group:
type: 'string'
partitions:
type: 'array'
items:
type: 'object'
properties:
name:
type: 'string'
size:
type: 'string'
part_uuid:
type: 'string'
volume_group:
type: 'string'
labels:
type: 'object'
additionalProperties:
type: 'string'
bootable:
type: 'boolean'
filesystem:
type: 'object'
properties:
mountpoint:
type: 'string'
fstype:
type: 'string'
mount_options:
type: 'string'
fs_uuid:
type: 'string'
fs_label:
type: 'string'
additionalProperties: false
additionalProperties: false
volume_groups:
type: 'object'
additionalProperties:
type: 'object'
properties:
vg_uuid:
type: 'string'
logical_volumes:
type: 'array'
items:
type: 'object'
properties:
name:
type: 'string'
lv_uuid:
type: 'string'
size:
type: 'string'
filesystem:
type: 'object'
properties:
mountpoint:
type: 'string'
fstype:
type: 'string'
mount_options:
type: 'string'
fs_uuid:
type: 'string'
fs_label:
type: 'string'
platform:
type: 'object'
properties:
image:
type: 'string'
kernel:
type: 'string'
kernel_params:
type: 'object'
additionalProperties: true
additionalProperties: false
metadata:
type: 'object'
properties:
tags:
type: 'array'
items:
type: 'string'
owner_data:
type: 'object'
additionalProperties:
type: 'string'
rack:
type: 'string'
boot_mac:
type: 'string'
additionalProperties: false
host_profile:
type: 'string'
hardware_profile:
type: 'string'
primary_network:
type: 'string'
interfaces:
type: 'object'
additionalProperties:
type: 'object'
properties:
device_link:
type: 'string'
slaves:
type: 'array'
items:
type: 'string'
networks:
type: 'array'
items:
type: 'string'
additionalProperties: false
...

View File

@ -1,93 +0,0 @@
---
schema: 'deckhand/DataSchema/v1'
metadata:
schema: metadata/Control/v1
name: drydock/BootAction/v1
labels:
application: drydock
data:
$schema: 'http://json-schema.org/schema#'
type: 'object'
additionalProperties: false
properties:
signaling:
type: 'boolean'
assets:
type: 'array'
items:
type: 'object'
additionalProperties: false
properties:
path:
type: 'string'
pattern: '^/.+'
location:
type: 'string'
type:
type: 'string'
enum:
- 'unit'
- 'file'
- 'pkg_list'
data:
type: 'string'
location_pipeline:
type: 'array'
items:
type: 'string'
enum:
- 'template'
data_pipeline:
type: 'array'
items:
type: 'string'
enum:
- 'base64_encode'
- 'template'
- 'base64_decode'
- 'utf8_encode'
- 'utf8_decode'
permissions:
type: 'string'
pattern: '\d{3}'
required:
- 'type'
node_filter:
type: 'object'
additionalProperties: false
properties:
filter_set_type:
type: 'string'
enum:
- 'intersection'
- 'union'
filter_set:
type: 'array'
items:
type: 'object'
additionalProperties: false
properties:
filter_type:
type: 'string'
enum:
- 'intersection'
- 'union'
node_names:
type: 'array'
items:
type: 'string'
node_tags:
type: 'array'
items:
type: 'string'
node_labels:
type: 'object'
additionalProperties: true
rack_names:
type: 'array'
items:
type: 'string'
rack_labels:
type: 'object'
additionalProperties: true
...

View File

@ -1,49 +0,0 @@
---
schema: 'deckhand/DataSchema/v1'
metadata:
schema: metadata/Control/v1
name: drydock/HardwareProfile/v1
labels:
application: drydock
data:
$schema: 'http://json-schema.org/schema#'
type: 'object'
properties:
vendor:
type: 'string'
generation:
type: 'string'
hw_version:
type: 'string'
bios_version:
type: 'string'
boot_mode:
type: 'string'
enum:
- 'bios'
- 'uefi'
bootstrap_protocol:
type: 'string'
enum:
- 'pxe'
- 'usb'
- 'hdd'
pxe_interface:
type: 'number'
device_aliases:
type: 'object'
additionalProperties: true
cpu_sets:
type: 'object'
additionalProperties:
type: 'string'
hugepages:
type: 'object'
additionalProperties:
type: 'object'
propertes:
size:
type: 'string'
count:
type: 'number'
additionalProperties: false

View File

@ -1,159 +0,0 @@
---
schema: 'deckhand/DataSchema/v1'
metadata:
schema: metadata/Control/v1
name: drydock/HostProfile/v1
labels:
application: drydock
data:
$schema: 'http://json-schema.org/schema#'
type: 'object'
properties:
oob:
type: 'object'
properties:
type:
type: 'string'
network:
type: 'string'
account:
type: 'string'
credetial:
type: 'string'
additionalProperties: true
storage:
type: 'object'
properties:
physical_devices:
type: 'object'
additionalProperties:
type: 'object'
properties:
labels:
type: 'object'
additionalProperties:
type: 'string'
volume_group:
type: 'string'
partitions:
type: 'array'
items:
type: 'object'
properties:
name:
type: 'string'
size:
type: 'string'
part_uuid:
type: 'string'
volume_group:
type: 'string'
labels:
type: 'object'
additionalProperties:
type: 'string'
bootable:
type: 'boolean'
filesystem:
type: 'object'
properties:
mountpoint:
type: 'string'
fstype:
type: 'string'
mount_options:
type: 'string'
fs_uuid:
type: 'string'
fs_label:
type: 'string'
additionalProperties: false
additionalProperties: false
volume_groups:
type: 'object'
additionalProperties:
type: 'object'
properties:
vg_uuid:
type: 'string'
logical_volumes:
type: 'array'
items:
type: 'object'
properties:
name:
type: 'string'
lv_uuid:
type: 'string'
size:
type: 'string'
filesystem:
type: 'object'
properties:
mountpoint:
type: 'string'
fstype:
type: 'string'
mount_options:
type: 'string'
fs_uuid:
type: 'string'
fs_label:
type: 'string'
platform:
type: 'object'
properties:
image:
type: 'string'
kernel:
type: 'string'
kernel_params:
type: 'object'
additionalProperties: true
additionalProperties: false
metadata:
type: 'object'
properties:
tags:
type: 'array'
items:
type: 'string'
owner_data:
type: 'object'
additionalProperties:
type: 'string'
rack:
type: 'string'
boot_mac:
type: 'string'
additionalProperties: false
host_profile:
type: 'string'
hardware_profile:
type: 'string'
primary_network:
type: 'string'
interfaces:
type: 'object'
additionalProperties:
type: 'object'
properties:
device_link:
type: 'string'
slaves:
type: 'array'
items:
type: 'string'
networks:
type: 'array'
items:
type: 'string'
sriov:
type: 'object'
properties:
vf_count:
type: 'number'
trustmode:
type: 'boolean'
additionalProperties: false
...

View File

@ -1,70 +0,0 @@
---
schema: 'deckhand/DataSchema/v1'
metadata:
schema: metadata/Control/v1
name: drydock/Network/v1
labels:
application: drydock
data:
$schema: 'http://json-schema.org/schema#'
type: 'object'
properties:
cidr:
type: 'string'
ranges:
type: 'array'
items:
type: 'object'
properties:
type:
type: 'string'
start:
type: 'string'
format: 'ipv4'
end:
type: 'string'
format: 'ipv4'
additionalProperties: false
dns:
type: 'object'
properties:
domain:
type: 'string'
servers:
type: 'string'
additionalProperties: false
dhcp_relay:
type: 'object'
properties:
self_ip:
type: 'string'
format: 'ipv4'
upstream_target:
type: 'string'
format: 'ipv4'
additionalProperties: false
mtu:
type: 'number'
vlan:
type: 'string'
routedomain:
type: 'string'
routes:
type: 'array'
items:
type: 'object'
properties:
subnet:
type: 'string'
gateway:
type: 'string'
format: 'ipv4'
metric:
type: 'number'
routedomain:
type: 'string'
additionalProperties: false
labels:
type: 'object'
additionalProperties: true
additionalProperties: false

View File

@ -1,47 +0,0 @@
---
schema: 'deckhand/DataSchema/v1'
metadata:
schema: metadata/Control/v1
name: drydock/NetworkLink/v1
labels:
application: drydock
data:
$schema: 'http://json-schema.org/schema#'
type: 'object'
properties:
bonding:
type: 'object'
properties:
mode:
type: 'string'
hash:
type: 'string'
peer_rate:
type: 'string'
mon_rate:
type: 'number'
up_delay:
type: 'number'
down_delay:
type: 'number'
additionalProperties: false
mtu:
type: 'number'
linkspeed:
type: 'string'
trunking:
type: 'object'
properties:
mode:
type: 'string'
default_network:
type: 'string'
additionalProperties: false
allowed_networks:
type: 'array'
items:
type: 'string'
labels:
type: 'object'
additionalProperties: true
additionalProperties: false

View File

@ -1,35 +0,0 @@
---
schema: 'deckhand/DataSchema/v1'
metadata:
schema: metadata/Control/v1
name: drydock/Rack/v1
labels:
application: drydock
data:
$schema: 'http://json-schema.org/schema#'
type: 'object'
properties:
tor_switches:
type: 'object'
properties:
mgmt_ip:
type: 'string'
format: 'ipv4'
sdn_api_uri:
type: 'string'
format: 'uri'
location:
type: 'object'
properties:
clli:
type: 'string'
grid:
type: 'string'
local_networks:
type: 'array'
items:
type: 'string'
labels:
type: 'object'
additionalProperties: true
additionalProperties: false

View File

@ -1,71 +0,0 @@
---
schema: 'deckhand/DataSchema/v1'
metadata:
schema: metadata/Control/v1
name: drydock/Region/v1
labels:
application: drydock
data:
$schema: 'http://json-schema.org/schema#'
type: 'object'
properties:
tag_definitions:
type: 'array'
items:
type: 'object'
properties:
tag:
type: 'string'
definition_type:
type: 'string'
enum:
- 'lshw_xpath'
definition:
type: 'string'
additionalProperties: false
authorized_keys:
type: 'array'
items:
type: 'string'
repositories:
# top level is class (e.g. apt, rpm)
type: 'object'
properties:
remove_unlisted:
type: 'boolean'
additionalPropties:
type: 'object'
properties:
repo_type:
type: 'string'
pattern: 'apt|rpm'
url:
type: 'string'
distributions:
type: 'array'
items:
type: 'string'
subrepos:
type: 'array'
items:
type: 'string'
components:
type: 'array'
items:
type: 'string'
gpgkey:
type: 'string'
arches:
type: 'array'
items:
type: 'string'
options:
type: 'object'
additionalProperties:
type: 'string'
additionalProperties: false
required:
- 'repo_type'
- 'url'
- 'arches'
additionalProperties: false

View File

@ -1,645 +0,0 @@
---
schema: 'deckhand/DataSchema/v1'
metadata:
schema: metadata/Control/v1
name: pegleg/AccountCatalogue/v1
data:
$schema: 'http://json-schema.org/schema#'
type: object
properties:
ucp:
type: object
properties:
postgres:
type: object
properties:
admin:
type: object
properties:
username:
type: string
oslo_db:
type: object
properties:
admin:
type: object
properties:
username:
type: string
oslo_messaging:
type: object
properties:
admin:
type: object
properties:
username:
type: string
keystone:
type: object
properties:
admin:
type: object
properties:
region_name:
type: string
username:
type: string
project_name:
type: string
user_domain_name:
type: string
project_domain_name:
type: string
oslo_messaging:
type: object
properties:
username:
type: string
oslo_db:
type: object
properties:
username:
type: string
database:
type: string
promenade:
type: object
properties:
keystone:
type: object
properties:
region_name:
type: string
role:
type: string
project_name:
type: string
project_domain_name:
type: string
user_domain_name:
type: string
username:
type: string
drydock:
type: object
properties:
keystone:
type: object
properties:
region_name:
type: string
role:
type: string
project_name:
type: string
project_domain_name:
type: string
user_domain_name:
type: string
username:
type: string
postgres:
type: object
properties:
username:
type: string
database:
type: string
shipyard:
type: object
properties:
keystone:
type: object
properties:
region_name:
type: string
role:
type: string
project_name:
type: string
project_domain_name:
type: string
user_domain_name:
type: string
username:
type: string
postgres:
type: object
properties:
username:
type: string
database:
type: string
airflow:
type: object
properties:
postgres:
type: object
properties:
username:
type: string
database:
type: string
oslo_messaging:
type: object
properties:
username:
type: string
maas:
type: object
properties:
admin:
type: object
properties:
username:
type: string
email:
type: string
postgres:
type: object
properties:
username:
type: string
database:
type: string
barbican:
type: object
properties:
keystone:
type: object
properties:
region_name:
type: string
role:
type: string
project_name:
type: string
project_domain_name:
type: string
user_domain_name:
type: string
username:
type: string
oslo_db:
type: object
properties:
username:
type: string
database:
type: string
oslo_messaging:
type: object
properties:
username:
type: string
armada:
type: object
properties:
keystone:
type: object
properties:
project_domain_name:
type: string
project_name:
type: string
region_name:
type: string
role:
type: string
user_domain_name:
type: string
username:
type: string
deckhand:
type: object
properties:
keystone:
type: object
properties:
region_name:
type: string
role:
type: string
project_name:
type: string
project_domain_name:
type: string
user_domain_name:
type: string
username:
type: string
postgres:
type: object
properties:
username:
type: string
database:
type: string
ceph:
type: object
properties:
swift:
type: object
properties:
keystone:
type: object
properties:
role:
type: string
region_name:
type: string
username:
type: string
project_name:
type: string
user_domain_name:
type: string
project_domain_name:
type: string
osh:
type: object
properties:
keystone:
type: object
properties:
admin:
type: object
properties:
region_name:
type: string
username:
type: string
project_name:
type: string
user_domain_name:
type: string
project_domain_name:
type: string
oslo_messaging:
type: object
properties:
admin:
type: object
properties:
username:
type: string
keystone:
type: object
properties:
username:
type: string
oslo_db:
type: object
properties:
username:
type: string
database:
type: string
cinder:
type: object
properties:
cinder:
type: object
properties:
role:
type: string
region_name:
type: string
username:
type: string
project_name:
type: string
user_domain_name:
type: string
project_domain_name:
type: string
oslo_messaging:
type: object
properties:
admin:
type: object
properties:
username:
type: string
cinder:
type: object
properties:
username:
type: string
oslo_db:
type: object
properties:
username:
type: string
database:
type: string
glance:
type: object
properties:
glance:
type: object
properties:
role:
type: string
region_name:
type: string
username:
type: string
project_name:
type: string
user_domain_name:
type: string
project_domain_name:
type: string
oslo_messaging:
type: object
properties:
admin:
type: object
properties:
username:
type: string
glance:
type: object
properties:
username:
type: string
oslo_db:
type: object
properties:
username:
type: string
database:
type: string
ceph_object_store:
type: object
properties:
username:
type: string
heat:
type: object
properties:
heat:
type: object
properties:
role:
type: string
region_name:
type: string
username:
type: string
project_name:
type: string
user_domain_name:
type: string
project_domain_name:
type: string
heat_trustee:
type: object
properties:
role:
type: string
region_name:
type: string
username:
type: string
project_name:
type: string
user_domain_name:
type: string
project_domain_name:
type: string
heat_stack_user:
type: object
properties:
role:
type: string
region_name:
type: string
username:
type: string
project_name:
type: string
user_domain_name:
type: string
project_domain_name:
type: string
oslo_db:
type: object
properties:
username:
type: string
database:
type: string
oslo_messaging:
type: object
properties:
admin:
type: object
properties:
username:
type: string
heat:
type: object
properties:
username:
type: string
swift:
type: object
properties:
swift:
type: object
properties:
role:
type: string
region_name:
type: string
username:
type: string
project_name:
type: string
user_domain_name:
type: string
project_domain_name:
type: string
oslo_db:
type: object
properties:
admin:
type: object
properties:
username:
type: string
neutron:
type: object
properties:
neutron:
type: object
properties:
role:
type: string
region_name:
type: string
username:
type: string
project_name:
type: string
user_domain_name:
type: string
project_domain_name:
type: string
oslo_messaging:
type: object
properties:
admin:
type: object
properties:
username:
type: string
neutron:
type: object
properties:
username:
type: string
oslo_db:
type: object
properties:
username:
type: string
database:
type: string
nova:
type: object
properties:
nova:
type: object
properties:
role:
type: string
region_name:
type: string
username:
type: string
project_name:
type: string
user_domain_name:
type: string
project_domain_name:
type: string
placement:
type: object
properties:
role:
type: string
region_name:
type: string
username:
type: string
project_name:
type: string
user_domain_name:
type: string
project_domain_name:
type: string
oslo_messaging:
type: object
properties:
admin:
type: object
properties:
username:
type: string
nova:
type: object
properties:
username:
type: string
oslo_db:
type: object
properties:
username:
type: string
database:
type: string
oslo_db_api:
type: object
properties:
username:
type: string
database:
type: string
oslo_db_cell0:
type: object
properties:
username:
type: string
database:
type: string
horizon:
type: object
properties:
oslo_db:
type: object
properties:
username:
type: string
database:
type: string
osh_infra:
type: object
properties:
grafana:
type: object
properties:
admin:
type: object
properties:
username:
type: string
oslo_db:
type: object
properties:
username:
type: string
database:
type: string
oslo_db_session:
type: object
properties:
username:
type: string
database:
type: string
elasticsearch:
type: object
properties:
admin:
type: object
properties:
username:
type: string
oslo_db:
type: object
properties:
admin:
type: object
properties:
username:
type: string
prometheus_openstack_exporter:
type: object
properties:
user:
type: object
properties:
username:
type: string
nagios:
type: object
properties:
admin:
type: object
properties:
username:
type: string
...

View File

@ -1,17 +0,0 @@
---
schema: 'deckhand/DataSchema/v1'
metadata:
schema: metadata/Control/v1
name: pegleg/AppArmorProfile/v1
labels:
application: pegleg
data:
$schema: 'http://json-schema.org/schema#'
type: 'object'
additionalProperties: false
properties:
savePath:
type: 'string'
content:
type: 'string'
required: ['savePath', 'content']

View File

@ -1,116 +0,0 @@
---
schema: 'deckhand/DataSchema/v1'
metadata:
schema: metadata/Control/v1
name: pegleg/CommonAddresses/v1
data:
$schema: 'http://json-schema.org/schema#'
type: object
properties:
calico:
type: object
properties:
ip_autodetection_method:
type: string
etcd:
type: object
properties:
service_ip:
type: string
dns:
type: object
properties:
cluster_domain:
type: string
service_ip:
type: string
upstream_servers:
type: array
items:
type: string
upstream_servers_joined:
type: string
genesis:
type: object
properties:
hostname:
type: string
ip:
type: string
bootstrap:
type: object
properties:
ip:
type: string
kubernetes:
type: object
properties:
api_service_ip:
type: string
etcd_service_ip:
type: string
pod_cidr:
type: string
service_cidr:
type: string
apiserver_port:
type: number
haproxy_port:
type: number
service_node_port_range:
type: string
etcd:
type: object
properties:
container_port:
type: number
haproxy_port:
type: number
masters:
type: array
items:
type: object
properties:
hostname:
type: string
node_ports:
type: object
properties:
drydock_api:
type: number
maas_api:
type: number
maas_proxy:
type: number
shipyard_api:
type: number
airflow_web:
type: number
ntp:
type: object
properties:
servers_joined:
type: string
storage:
type: object
properties:
ceph:
type: object
properties:
public_cidr:
type: string
cluster_cidr:
type: string
openvswitch:
type: object
properties:
external_iface:
type: string
neutron:
type: object
properties:
tunnel_device:
type: string
external_iface:
type: string
...

View File

@ -1,15 +0,0 @@
---
schema: 'deckhand/DataSchema/v1'
metadata:
schema: metadata/Control/v1
name: pegleg/CommonSoftwareConfig/v1
data:
$schema: 'http://json-schema.org/schema#'
type: object
properties:
osh:
type: object
properties:
region_name:
type: string
...

View File

@ -1,169 +0,0 @@
---
schema: 'deckhand/DataSchema/v1'
metadata:
schema: metadata/Control/v1
name: pegleg/EndpointCatalogue/v1
data:
$schema: 'http://json-schema.org/schema#'
type: 'object'
# Namespace the list of endpoints
additionalProperties:
type: 'object'
additionalProperties:
type: 'object'
properties:
namespace:
oneOf:
- type: string
- type: "null"
name:
type: string
statefulset:
type: object
properties:
replicas:
type: number
name:
type: string
auth:
type: object
hosts:
type: object
properties:
data:
type: string
default:
type: string
discovery:
type: string
public:
type: string
internal:
type: string
additionalProperties:
type: string
host_fqdn_override:
oneOf:
- type: object
properties:
default:
oneOf:
- type: string
- type: "null"
- type: object
properties:
host:
type: string
tls:
type: object
properties:
crt:
type: string
ca:
type: string
key:
type: string
additionalProperties:
type: string
public:
oneOf:
- type: string
- type: "null"
- type: object
properties:
host:
type: string
tls:
type: object
properties:
crt:
type: string
ca:
type: string
key:
type: string
additionalProperties:
type: string
admin:
oneOf:
- type: string
- type: "null"
- type: object
properties:
host:
type: string
tls:
type: object
properties:
crt:
type: string
ca:
type: string
key:
type: string
additionalProperties:
type: string
internal:
oneOf:
- type: string
- type: "null"
- type: object
properties:
host:
type: string
tls:
type: object
properties:
crt:
type: string
ca:
type: string
key:
type: string
additionalProperties:
type: string
additionalProperties:
type: string
- type: "null"
path:
oneOf:
- type: object
properties:
default:
oneOf:
- type: string
- type: "null"
public:
type: string
internal:
type: string
additionalProperties:
type: string
- type: string
scheme:
oneOf:
- type: object
properties:
default:
type: string
public:
type: string
internal:
type: string
additionalProperties:
type: string
- type: string
port:
type: object
additionalProperties:
type: object
properties:
default:
type: number
public:
type: number
internal:
type: number
additionalProperties:
type: number
...

View File

@ -1,8 +0,0 @@
---
schema: deckhand/DataSchema/v1
metadata:
schema: metadata/Control/v1
name: pegleg/Script/v1
data:
$schema: http://json-schema.org/schema#
type: string

View File

@ -1,19 +0,0 @@
---
schema: 'deckhand/DataSchema/v1'
metadata:
schema: metadata/Control/v1
name: pegleg/SeccompProfile/v1
labels:
application: pegleg
data:
$schema: 'http://json-schema.org/schema#'
type: 'object'
additionalProperties: false
properties:
seccompDirPath:
type: 'string'
savePath:
type: 'string'
content:
type: 'string'
required: ['seccompDirPath', 'savePath', 'content']

View File

@ -1,29 +0,0 @@
---
schema: deckhand/DataSchema/v1
metadata:
schema: metadata/Control/v1
name: pegleg/SiteDefinition/v1
data:
$schema: http://json-schema.org/schema#
type: object
properties:
repositories:
type: object
additionalProperties:
type: object
properties:
revision:
type: string
url:
type: string
required:
- revision
- url
site_type:
type: string
required:
- site_type
additionalProperties: false
...

File diff suppressed because it is too large Load Diff

View File

@ -1,16 +0,0 @@
---
schema: deckhand/DataSchema/v1
metadata:
schema: metadata/Control/v1
name: promenade/Docker/v1
labels:
application: promenade
data:
$schema: http://json-schema.org/schema#
type: object
properties:
config:
type: object
required:
- config
additionalProperties: false

View File

@ -1,50 +0,0 @@
---
schema: deckhand/DataSchema/v1
metadata:
schema: metadata/Control/v1
name: promenade/EncryptionPolicy/v1
labels:
application: promenade
data:
$schema: http://json-schema.org/schema#
definitions:
script_encryption:
oneof:
- { $ref: '#/definitions/encryption_method_gpg' }
etcd_encryption:
type: array
items:
type: object
additionalProperties: false
properties:
resources:
type: array
items:
type: string
providers:
type: array
items:
type: object
additionalProperties: true
encryption_method_gpg:
properties:
gpg:
type: object
additionalProperties: false
required:
- gpg
additionalProperties: false
properties:
etcd:
$ref: '#/definitions/etcd_encryption'
scripts:
properties:
genesis:
$ref: '#/definitions/script_encryption'
join:
$ref: '#/definitions/script_encryption'
additionalProperties: false
...

View File

@ -1,165 +0,0 @@
---
schema: deckhand/DataSchema/v1
metadata:
schema: metadata/Control/v1
name: promenade/Genesis/v1
labels:
application: promenade
data:
$schema: http://json-schema.org/schema#
definitions:
abs_path:
type: string
pattern: '^/.+$'
hostname:
type: string
pattern: '^[a-z][a-z0-9-]+$'
file:
properties:
path:
$ref: '#/definitions/abs_path'
content:
type: string
mode:
type: integer
minimum: 0
tar_url:
$ref: '#/definitions/url'
tar_path:
$ref: '#/definitions/rel_path'
required:
- mode
- path
oneOf:
- type: object
required:
- content
- type: object
allOf:
- type: object
required:
- tar_url
- tar_path
additionalProperties: false
image:
type: string
# XXX add regex
ip_address:
type: string
pattern: '^(\d|[1-9]\d|1\d\d|2([0-4]\d|5[0-5]))\.(\d|[1-9]\d|1\d\d|2([0-4]\d|5[0-5]))\.(\d|[1-9]\d|1\d\d|2([0-4]\d|5[0-5]))\.(\d|[1-9]\d|1\d\d|2([0-4]\d|5[0-5]))$'
kubernetes_label:
type: string
# XXX add regex
rel_path:
type: string
# XXX add regex
type: object
properties:
armada:
type: object
properties:
target_manifest:
type: string
additionalProperties: false
apiserver:
type: object
properties:
arguments:
type: array
items:
type: string
encryption:
type: array
items:
type: object
properties:
resources:
type: array
items:
type: string
providers:
type: array
items:
type: object
additionalProperties: true
additionalProperties: false
files:
type: array
items:
$ref: '#/definitions/file'
haproxy:
type: object
properties:
run_as_user:
type: integer
additionalProperties: false
hostname:
$ref: '#/definitions/hostname'
domain:
type: string
ip:
$ref: '#/definitions/ip_address'
labels:
properties:
static:
type: array
items:
$ref: '#/definitions/kubernetes_label'
dynamic:
type: array
items:
$ref: '#/definitions/kubernetes_label'
additionalProperties: false
images:
type: object
properties:
armada:
$ref: '#/definitions/image'
helm:
type: object
properties:
tiller:
$ref: '#/definitions/image'
required:
- tiller
additionalProperties: false
kubernetes:
type: object
properties:
apiserver:
$ref: '#/definitions/image'
controller-manager:
$ref: '#/definitions/image'
etcd:
$ref: '#/definitions/image'
scheduler:
$ref: '#/definitions/image'
required:
- apiserver
- controller-manager
- etcd
- scheduler
additionalProperties: false
required:
- armada
- helm
- kubernetes
additionalProperties: false
required:
- hostname
- ip
- images
- labels
additionalProperties: false
...

View File

@ -1,245 +0,0 @@
---
schema: deckhand/DataSchema/v1
metadata:
schema: metadata/Control/v1
name: promenade/HostSystem/v1
labels:
application: promenade
data:
$schema: http://json-schema.org/schema#
definitions:
abs_path:
type: string
pattern: '^/.+$'
systemd_unit:
type: object
properties:
enable:
type: boolean
disable:
type: boolean
start:
type: boolean
stop:
type: boolean
additionalProperties: false
apt_source_line:
type: string
# XXX add regex
file:
properties:
path:
$ref: '#/definitions/abs_path'
content:
type: string
mode:
type: integer
minimum: 0
tar_url:
$ref: '#/definitions/url'
tar_path:
$ref: '#/definitions/rel_path'
docker_image:
$ref: '#/definitions/url'
file_path:
$ref: '#/definitions/abs_path'
symlink:
$ref: '#/definitions/abs_path'
required:
- mode
- path
oneOf:
- type: object
required:
- content
- type: object
required:
- symlink
- type: object
allOf:
- type: object
required:
- tar_url
- tar_path
- type: object
allOf:
- type: object
required:
- docker_image
- file_path
additionalProperties: false
image:
type: string
# XXX add regex
package:
type: string
# XXX add regex
public_key:
type: string
# XXX add regex
rel_path:
type: string
# XXX add regex
url:
type: string
# XXX add regex
type: object
properties:
files:
type: array
items:
type: object
items:
$ref: '#/definitions/file'
systemd_units:
type: object
additionalProperties:
$ref: '#/definitions/systemd_unit'
images:
type: object
properties:
coredns:
$ref: '#/definitions/image'
haproxy:
$ref: '#/definitions/image'
helm:
type: object
properties:
helm:
$ref: '#/definitions/image'
required:
- helm
additionalProperties: false
kubernetes:
type: object
properties:
hyperkube:
$ref: '#/definitions/image'
monitoring_image:
$ref: '#/definitions/image'
required:
- haproxy
- helm
- kubernetes
- monitoring_image
additionalProperties: false
packages:
type: object
common:
type: object
properties:
additional:
type: array
items:
$ref: '#/definitions/package'
keys:
type: array
items:
$ref: '#/definitions/public_key'
required:
type: object
properties:
docker:
$ref: '#/definitions/package'
socat:
$ref: '#/definitions/package'
required:
- docker
- socat
additionalProperties: false
repositories:
type: array
items:
$ref: '#/definitions/apt_source_line'
required:
- required
additionalProperties: false
genesis:
type: object
properties:
additional:
type: array
items:
$ref: '#/definitions/package'
keys:
type: array
items:
$ref: '#/definitions/public_key'
required:
type: object
properties:
docker:
$ref: '#/definitions/package'
socat:
$ref: '#/definitions/package'
required:
- docker
- socat
additionalProperties: false
repositories:
type: array
items:
$ref: '#/definitions/apt_source_line'
required:
- required
additionalProperties: false
join:
type: object
properties:
additional:
type: array
items:
$ref: '#/definitions/package'
keys:
type: array
items:
$ref: '#/definitions/public_key'
required:
type: object
properties:
docker:
$ref: '#/definitions/package'
socat:
$ref: '#/definitions/package'
required:
- docker
- socat
additionalProperties: false
repositories:
type: array
items:
$ref: '#/definitions/apt_source_line'
required:
- required
additionalProperties: false
validation:
type: object
properties:
pod_logs:
type: object
properties:
image:
type: string
additionalProperties: false
additionalProperties: false
required:
- images
- packages
additionalProperties: false

View File

@ -1,31 +0,0 @@
---
schema: deckhand/DataSchema/v1
metadata:
schema: metadata/Control/v1
name: promenade/Kubelet/v1
labels:
application: promenade
data:
$schema: http://json-schema.org/schema#
type: object
definitions:
image:
type: string
# XXX add regex
properties:
images:
type: object
properties:
pause:
$ref: '#/definitions/image'
required:
- pause
additionalProperties: false
arguments:
type: array
items:
type: string
required:
- images
additionalProperties: false

View File

@ -1,121 +0,0 @@
---
schema: deckhand/DataSchema/v1
metadata:
schema: metadata/Control/v1
name: promenade/KubernetesNetwork/v1
labels:
application: promenade
data:
$schema: http://json-schema.org/schema#
definitions:
cidr:
type: string
pattern: '^(\d|[1-9]\d|1\d\d|2([0-4]\d|5[0-5]))\.(\d|[1-9]\d|1\d\d|2([0-4]\d|5[0-5]))\.(\d|[1-9]\d|1\d\d|2([0-4]\d|5[0-5]))\.(\d|[1-9]\d|1\d\d|2([0-4]\d|5[0-5]))\/([0-9]|[1-2][0-9]|3[0-2])$'
domain_name:
type: string
format: hostname
domain_suffix:
type: string
pattern: '^\.[a-z0-9][a-z0-9-\.]*$'
hostname:
type: string
format: hostname
hostname_or_ip_address:
anyOf:
- $ref: '#/definitions/hostname'
- $ref: '#/definitions/ip_address'
- $ref: '#/definitions/domain_suffix'
ip_address:
type: string
format: ipv4
url:
type: string
format: uri
type: object
properties:
dns:
type: object
properties:
bootstrap_validation_checks:
type: array
items:
$ref: '#/definitions/domain_name'
cluster_domain:
$ref: '#/definitions/domain_name'
service_ip:
$ref: '#/definitions/ip_address'
upstream_servers:
type: array
items:
$ref: '#/definitions/ip_address'
required:
- cluster_domain
- service_ip
additionalProperties: false
etcd:
type: object
properties:
container_port:
type: integer
haproxy_port:
type: integer
# NOTE(mark-burnett): No longer used.
service_ip:
$ref: '#/definitions/ip_address'
required:
- container_port
- haproxy_port
additionalProperties: false
kubernetes:
type: object
properties:
pod_cidr:
$ref: '#/definitions/cidr'
service_ip:
$ref: '#/definitions/ip_address'
service_cidr:
$ref: '#/definitions/cidr'
apiserver_port:
type: integer
haproxy_port:
type: integer
required:
- pod_cidr
- service_cidr
- service_ip
- apiserver_port
- haproxy_port
additionalProperties: false
hosts_entries:
type: array
items:
type: object
properties:
ip:
$ref: '#/definitions/ip_address'
names:
type: array
items:
$ref: '#/definitions/hostname'
proxy:
type: object
properties:
additional_no_proxy:
type: array
items:
$ref: '#/definitions/hostname_or_ip_address'
url:
$ref: '#/definitions/url'
required:
- url
additionalFields: false
required:
- dns
- kubernetes
additionalProperties: false
...

View File

@ -1,47 +0,0 @@
---
schema: deckhand/DataSchema/v1
metadata:
schema: metadata/Control/v1
name: promenade/KubernetesNode/v1
labels:
application: promenade
data:
$schema: http://json-schema.org/schema#
definitions:
hostname:
type: string
pattern: '^[a-z][a-z0-9-]+$'
ip_address:
type: string
pattern: '^(\d|[1-9]\d|1\d\d|2([0-4]\d|5[0-5]))\.(\d|[1-9]\d|1\d\d|2([0-4]\d|5[0-5]))\.(\d|[1-9]\d|1\d\d|2([0-4]\d|5[0-5]))\.(\d|[1-9]\d|1\d\d|2([0-4]\d|5[0-5]))$'
kubernetes_label:
type: string
# XXX add regex
type: object
properties:
hostname:
$ref: '#/definitions/hostname'
ip:
$ref: '#/definitions/ip_address'
join_ip:
$ref: '#/definitions/ip_address'
labels:
properties:
static:
type: array
items:
$ref: '#/definitions/kubernetes_label'
dynamic:
type: array
items:
$ref: '#/definitions/kubernetes_label'
additionalProperties: false
required:
- ip
- join_ip
additionalProperties: false

View File

@ -1,43 +0,0 @@
---
schema: deckhand/DataSchema/v1
metadata:
schema: metadata/Control/v1
name: promenade/PKICatalog/v1
labels:
application: promenade
data:
$schema: http://json-schema.org/schema#
certificate_authorities:
type: array
items:
type: object
properties:
description:
type: string
certificates:
type: array
items:
type: object
properties:
document_name:
type: string
description:
type: string
common_name:
type: string
hosts:
type: array
items: string
groups:
type: array
items: string
keypairs:
type: array
items:
type: object
properties:
name:
type: string
description:
type: string
...

View File

@ -1,80 +0,0 @@
---
schema: 'deckhand/DataSchema/v1'
metadata:
schema: metadata/Control/v1
name: shipyard/DeploymentConfiguration/v1
labels:
application: shipyard
data:
$schema: 'http://json-schema.org/schema#'
type: 'object'
properties:
physical_provisioner:
type: 'object'
properties:
deployment_strategy:
type: 'string'
deploy_interval:
type: 'integer'
deploy_timeout:
type: 'integer'
destroy_interval:
type: 'integer'
destroy_timeout:
type: 'integer'
join_wait:
type: 'integer'
prepare_node_interval:
type: 'integer'
prepare_node_timeout:
type: 'integer'
prepare_site_interval:
type: 'integer'
prepare_site_timeout:
type: 'integer'
verify_interval:
type: 'integer'
verify_timeout:
type: 'integer'
additionalProperties: false
kubernetes:
type: 'object'
properties:
node_status_interval:
type: 'integer'
node_status_timeout:
type: 'integer'
additionalProperties: false
kubernetes_provisioner:
type: 'object'
properties:
drain_timeout:
type: 'integer'
drain_grace_period:
type: 'integer'
clear_labels_timeout:
type: 'integer'
remove_etcd_timeout:
type: 'integer'
etcd_ready_timeout:
type: 'integer'
additionalProperties: false
armada:
type: 'object'
properties:
get_releases_timeout:
type: 'integer'
get_status_timeout:
type: 'integer'
manifest:
type: 'string'
post_apply_timeout:
type: 'integer'
validate_design_timeout:
type: 'integer'
additionalProperties: false
required:
- manifest
additionalProperties: false
required:
- armada

View File

@ -1,73 +0,0 @@
---
schema: 'deckhand/DataSchema/v1'
metadata:
schema: metadata/Control/v1
name: shipyard/DeploymentStrategy/v1
labels:
application: shipyard
data:
$schema: 'http://json-schema.org/schema#'
type: 'object'
required:
- groups
properties:
groups:
type: 'array'
minItems: 0
items:
type: 'object'
required:
- name
- critical
- depends_on
- selectors
properties:
name:
type: 'string'
minLength: 1
critical:
type: 'boolean'
depends_on:
type: 'array'
minItems: 0
items:
type: 'string'
selectors:
type: 'array'
minItems: 0
items:
type: 'object'
minProperties: 1
properties:
node_names:
type: 'array'
items:
type: 'string'
node_labels:
type: 'array'
items:
type: 'string'
node_tags:
type: 'array'
items:
type: 'string'
rack_names:
type: 'array'
items:
type: 'string'
additionalProperties: false
success_criteria:
type: 'object'
minProperties: 1
properties:
percent_successful_nodes:
type: 'integer'
minimum: 0
maximum: 100
minimum_successful_nodes:
type: 'integer'
minimum: 0
maximum_failed_nodes:
type: 'integer'
minimum: 0
additionalProperties: false

View File

@ -1,128 +0,0 @@
---
schema: pegleg/Script/v1
metadata:
schema: metadata/Document/v1
name: configure-ip-rules
storagePolicy: cleartext
layeringDefinition:
abstract: false
layer: global
data: |-
#!/bin/bash
set -ex
function usage() {
cat <<EOU
Options are:
-c POD_CIDR The pod CIDR for the Kubernetes cluster, e.g. 10.97.0.0/16
-i INTERFACE (optional) The interface for internal pod traffic, e.g.
bond0.22. Used to auto-detect the service gateway.
Exclusive with -g.
-g SERVICE_GW (optional) The service gateway/VRR IP for routing pod
traffic. Exclusive with -i.
-o OVERLAP_CIDR (optional) This CIDR will be routed via the VRRP IP on
INTERFACE. It is used to provide a work around when
complete Calico routes cannot be received via BGP.
e.g. 10.96.0.0/15. NOTE: This must include the POD_CIDR.
-s SERVICE_CIDR (optional) A routable CIDR to configure for ingress, maas,
e.g. 10.23.22.192/29
EOU
}
SERVICE_CIDR=
OVERLAP_CIDR=
while getopts ":c:g:hi:o:s:" o; do
case "${o}" in
c)
POD_CIDR=${OPTARG}
;;
g)
SERVICE_GW=${OPTARG}
;;
h)
usage
exit 0
;;
i)
INTERFACE=${OPTARG}
;;
o)
OVERLAP_CIDR=${OPTARG}
;;
s)
SERVICE_CIDR=${OPTARG}
;;
\?)
echo "Unknown option: -${OPTARG}" >&2
exit 1
;;
:)
echo "Missing argument for option: -${OPTARG}" >&2
exit 1
;;
*)
echo "Unimplemented option: -${OPTARG}" >&2
exit 1
;;
esac
done
shift $((OPTIND-1))
if [ "x$POD_CIDR" == "x" ]; then
echo "Missing pod CIDR, e.g -c 10.97.0.0/16" >&2
usage
exit 1
fi
if [ "x$INTERFACE" != "x" ]; then
while ! ip route list dev "${INTERFACE}" > /dev/null; do
echo Waiting for device "${INTERFACE}" to be ready. >&2
sleep 5
done
fi
intra_vrrp_ip=
if [ "x${SERVICE_GW}" == "x" ]; then
intra_vrrp_ip=$(ip route list dev "${INTERFACE}" | awk '($2~/via/){print $3}' | head -n 1)
else
intra_vrrp_ip=${SERVICE_GW}
fi
TABLE="1500"
if [ "x${intra_vrrp_ip}" == "x" ]; then
echo "Either INTERFACE or SERVICE_GW is required: e.g. either -i bond0.22 or -g 10.23.22.1"
usage
exit 1
fi
# Setup a routing table for traffic from service IPs
ip route flush table "${TABLE}"
ip route add default via "${intra_vrrp_ip}" table "${TABLE}"
# Setup arp_announce adjustment on interface facing gateway
arp_intf=$(ip route get ${intra_vrrp_ip} | grep dev | awk '{print $3}')
echo 2 > /proc/sys/net/ipv4/conf/${arp_intf}/arp_announce
if [ "x$OVERLAP_CIDR" != "x" ]; then
# NOTE: This is a work-around for nodes not receiving complete
# routes via BGP.
ip route add "${OVERLAP_CIDR}" via "${intra_vrrp_ip}"
fi
if [ "x$SERVICE_CIDR" != "x" ]; then
# Traffic from the service IPs to pods should use the pod network.
ip rule add \
from "${SERVICE_CIDR}" \
to "${POD_CIDR}" \
lookup main \
pref 10000
# Other traffic from service IPs should only use the VRRP IP
ip rule add \
from "${SERVICE_CIDR}" \
lookup "${TABLE}" \
pref 10100
fi

View File

@ -1,26 +0,0 @@
---
schema: pegleg/Script/v1
metadata:
schema: metadata/Document/v1
name: hanging-cgroup-release
storagePolicy: cleartext
layeringDefinition:
abstract: false
layer: global
data: |-
#!/bin/bash
set -ex
cgroup_count() {
echo "Current cgroup count: $(find /sys/fs/cgroup/*/system.slice -name tasks | wc -l)"
}
DATE=$(date)
echo "$(cgroup_count)"
echo # Stop systemd mount unit that isn't actually mounted
echo "Stopping Kubernetes systemd mount units that are not mounted to the system."
systemctl list-units --state=running| \
sed -rn '/Kubernetes.transient.mount/s,(run-\S+).+(/var/lib/kubelet/pods/.+),\1 \2,p' | \
xargs -r -l1 sh -c 'test -d $2 || echo $1' -- | \
xargs -r -tl1 systemctl stop |& wc -l
echo "$(cgroup_count)"

View File

@ -1,32 +0,0 @@
---
schema: pegleg/Script/v1
metadata:
schema: metadata/Document/v1
name: rbd-roomba-scanner
storagePolicy: cleartext
layeringDefinition:
abstract: false
layer: global
data: |-
#!/bin/bash
set -ex
# don't put it in /tmp where it can be p0wned (???)
lsblk | awk '/^rbd/ {if($7==""){print $0}}' | awk '{ printf "/dev/%s\n",$1 }' > /var/run/rbd_list
# wait a while, so we don't catch rbd devices the kubelet is working on mounting
sleep 60
# finally, examine rbd devices again and if any were seen previously (60s ago) we will
# forcefully unmount them if they have no fs mounts
DATE=$(date)
for rbd in `lsblk | awk '/^rbd/ {if($7==""){print $0}}' | awk '{ printf "/dev/%s\n",$1 }'`; do
if grep -q $rbd /var/run/rbd_list; then
echo "[${DATE}] Unmapping stale RBD $rbd"
/usr/bin/rbd unmap -o force $rbd
# NOTE(supamatt): rbd unmap -o force will only succeed if there are NO pending I/O
else
echo "[${DATE}] Skipping RBD $rbd as it hasn't been stale for at least 60 seconds"
fi
done
rm -rf /var/run/rbd_list

View File

@ -1,14 +0,0 @@
---
schema: deckhand/Passphrase/v1
metadata:
schema: metadata/Document/v1
name: private_docker_key
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
# sample key for potential private docker registry
# see Docker documentation for info on how to generate the key
# base64 of password123
data: cGFzc3dvcmQxMjM=
...

View File

@ -1,11 +0,0 @@
---
schema: deckhand/PublicKey/v1
metadata:
schema: metadata/Document/v1
name: airship_ssh_public_key
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyb6CDrai3VcFW1ew5ikf7IDSpqfFyrJNLI1DPyd28vcy6D1oFXdELYK7DsXzVCgV7YNDiKpneXMBTJ/Mr/aZi9K3eVvtRp1HAK3y6ycx9KRfyfMVAU0aT3xMOpE5xS/xTH8HNRbOSszp0woVYKhncpkumHweji7wbLKm/WxsggIoGDjn29KIoRhpo96tWz/DBsoU1pIHTMoZNyHW2aYWEx6kOzTEmhxL0LkKv7+A/2HJuLnqcXoQH9jl3kRQDyikNlSw2T3gQV3I8m0od/lEf98MZb1Yv9GrlDCmnUPXAJ2HQaWaVaPPpGcBW7veOZlLfeulwD4zlo6P6JW1SZaat airship@seaworthy
...

View File

@ -1,173 +0,0 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-calico
layeringDefinition:
abstract: false
layer: global
labels:
name: kubernetes-calico-global
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.kubernetes.calico.calico
dest:
path: .source
# Image versions
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.calico.calico
dest:
path: .values.images.tags
# IP addresses
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .calico.etcd.service_ip
dest:
path: .values.endpoints.etcd.host_fqdn_override.default
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.pod_cidr
dest:
path: .values.networking.podSubnet
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.api_service_ip
dest:
path: .values.conf.controllers.K8S_API
pattern: SUB_KUBERNETES_IP
# Other site-specific configuration
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .calico.ip_autodetection_method
dest:
path: .values.conf.node.IP_AUTODETECTION_METHOD
# Certificates
- src:
schema: deckhand/CertificateAuthority/v1
name: calico-etcd
path: .
dest:
path: .values.endpoints.etcd.auth.client.tls.ca
- src:
schema: deckhand/Certificate/v1
name: calico-node
path: .
dest:
path: .values.endpoints.etcd.auth.client.tls.crt
- src:
schema: deckhand/CertificateKey/v1
name: calico-node
path: .
dest:
path: .values.endpoints.etcd.auth.client.tls.key
data:
chart_name: calico
release: kubernetes-calico
namespace: kube-system
protected:
continue_processing: true
wait:
timeout: 600
labels:
release_group: airship-kubernetes-calico
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: airship-kubernetes-calico
values:
conf:
cni_network_config:
name: k8s-pod-network
cniVersion: 0.3.0
plugins:
- type: calico
etcd_endpoints: __ETCD_ENDPOINTS__
etcd_ca_cert_file: /etc/calico/pki/ca
etcd_cert_file: /etc/calico/pki/crt
etcd_key_file: /etc/calico/pki/key
log_level: info
ipam:
type: calico-ipam
policy:
type: k8s
kubernetes:
kubeconfig: __KUBECONFIG_FILEPATH__
- type: portmap
snat: true
capabilities:
portMappings: true
controllers:
K8S_API: "https://SUB_KUBERNETES_IP:443"
node:
CALICO_STARTUP_LOGLEVEL: INFO
CLUSTER_TYPE: "k8s,bgp"
ETCD_CA_CERT_FILE: /etc/calico/pki/ca
ETCD_CERT_FILE: /etc/calico/pki/crt
ETCD_KEY_FILE: /etc/calico/pki/key
WAIT_FOR_STORAGE: "true"
endpoints:
etcd:
hosts:
default: calico-etcd
scheme:
default: https
networking:
mtu: 1500
settings:
mesh: "on"
ippool:
ipip:
enabled: "true"
mode: "Always"
nat_outgoing: "true"
disabled: "false"
manifests:
daemonset_calico_etcd: false
job_image_repo_sync: false
service_calico_etcd: false
dependencies:
- calico-htk
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: calico-htk
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.kubernetes.calico.calico-htk
dest:
path: .source
data:
chart_name: calico-htk
release: calico-htk
namespace: calico-htk
values: {}
dependencies: []
...

View File

@ -1,15 +0,0 @@
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-container-networking
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data:
description: Container networking via Calico
sequenced: true
chart_group:
- kubernetes-calico-etcd
- kubernetes-calico

View File

@ -1,136 +0,0 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-calico-etcd-global
layeringDefinition:
abstract: true
layer: global
labels:
name: kubernetes-calico-etcd-global
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.kubernetes.calico.etcd
dest:
path: .source
# Image versions
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.calico.etcd
dest:
path: .values.images.tags
# IP addresses
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .calico.etcd.service_ip
dest:
path: .values.service.ip
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .calico.etcd.service_ip
dest:
path: .values.anchor.etcdctl_endpoint
# CAs
- src:
schema: deckhand/CertificateAuthority/v1
name: calico-etcd
path: .
dest:
path: .values.secrets.tls.client.ca
- src:
schema: deckhand/CertificateAuthority/v1
name: calico-etcd-peer
path: .
dest:
path: .values.secrets.tls.peer.ca
# Anchor client cert
- src:
schema: deckhand/Certificate/v1
name: calico-etcd-anchor
path: .
dest:
path: .values.secrets.anchor.tls.cert
- src:
schema: deckhand/CertificateKey/v1
name: calico-etcd-anchor
path: .
dest:
path: .values.secrets.anchor.tls.key
data:
chart_name: etcd
release: kubernetes-calico-etcd
namespace: kube-system
protected:
continue_processing: true
wait:
timeout: 600
labels:
release_group: airship-kubernetes-calico-etcd
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: airship-kubernetes-calico-etcd
values:
labels:
anchor:
node_selector_key: calico-etcd
node_selector_value: enabled
etcd:
host_data_path: /var/lib/etcd/calico
host_etc_path: /etc/etcd/calico
bootstrapping:
enabled: true
host_directory: /var/lib/anchor
filename: calico-etcd-bootstrap
service:
name: calico-etcd
network:
service_client:
name: service_client
port: 6666
target_port: 6666
service_peer:
name: service_peer
port: 6667
target_port: 6667
dependencies:
- kubernetes-calico-etcd-htk
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-calico-etcd-htk
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.kubernetes.calico.etcd-htk
dest:
path: .source
data:
chart_name: kubernetes-calico-etcd-htk
release: kubernetes-calico-etcd-htk
namespace: kubernetes-calico-etcd-htk
values: {}
dependencies: []
...

View File

@ -1,198 +0,0 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-apiserver
labels:
name: kubernetes-apiserver-global
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.kubernetes.apiserver
dest:
path: .source
# Images
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.kubernetes.apiserver
dest:
path: .values.images.tags
# IP addresses
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.api_service_ip
dest:
path: .values.network.kubernetes_service_ip
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.pod_cidr
dest:
path: .values.network.pod_cidr
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.service_cidr
dest:
path: .values.apiserver.arguments[1]
pattern: SERVICE_CIDR
# Kubernetes Port Range
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.service_node_port_range
dest:
path: .values.apiserver.arguments[2]
pattern: SERVICE_NODE_PORT_RANGE
# CA
- src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes
path: .
dest:
path: .values.secrets.tls.ca
# Certificates
- src:
schema: deckhand/Certificate/v1
name: apiserver
path: .
dest:
path: .values.secrets.tls.cert
- src:
schema: deckhand/CertificateKey/v1
name: apiserver
path: .
dest:
path: .values.secrets.tls.key
- src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes-etcd
path: .
dest:
path: .values.secrets.etcd.tls.ca
- src:
schema: deckhand/Certificate/v1
name: apiserver-etcd
path: .
dest:
path: .values.secrets.etcd.tls.cert
- src:
schema: deckhand/CertificateKey/v1
name: apiserver-etcd
path: .
dest:
path: .values.secrets.etcd.tls.key
- src:
schema: deckhand/PublicKey/v1
name: service-account
path: .
dest:
path: .values.secrets.service_account.public_key
# Encryption policy
- src:
schema: promenade/EncryptionPolicy/v1
name: encryption-policy
path: .etcd
dest:
path: .values.conf.encryption_provider.content.resources
data:
chart_name: apiserver
release: kubernetes-apiserver
namespace: kube-system
protected:
continue_processing: true
wait:
timeout: 600
labels:
release_group: airship-kubernetes-apiserver
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: airship-kubernetes-apiserver
values:
apiserver:
etcd:
endpoints: https://127.0.0.1:2378
tls:
tls-cipher-suites: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA"
# https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/
# Possible values: VersionTLS10, VersionTLS11, VersionTLS12
tls-min-version: 'VersionTLS12'
arguments:
- --authorization-mode=Node,RBAC
- --service-cluster-ip-range=SERVICE_CIDR
- --service-node-port-range=SERVICE_NODE_PORT_RANGE
- --endpoint-reconciler-type=lease
- --feature-gates=PodShareProcessNamespace=true
- --v=3
conf:
encryption_provider:
file: encryption_provider.yaml
command_options:
- '--experimental-encryption-provider-config=/etc/kubernetes/apiserver/encryption_provider.yaml'
content:
kind: EncryptionConfig
apiVersion: v1
eventconfig:
file: eventconfig.yaml
content:
kind: Configuration
apiVersion: eventratelimit.admission.k8s.io/v1alpha1
limits:
- type: Server
qps: 100
burst: 1000
acconfig:
file: acconfig.yaml
command_options:
- '--enable-admission-plugins=PodSecurityPolicy,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,NodeRestriction,EventRateLimit'
- '--admission-control-config-file=/etc/kubernetes/apiserver/acconfig.yaml'
content:
kind: AdmissionConfiguration
apiVersion: apiserver.k8s.io/v1alpha1
plugins:
- name: EventRateLimit
path: eventconfig.yaml
dependencies:
- kubernetes-apiserver-htk
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-apiserver-htk
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.kubernetes.apiserver-htk
dest:
path: .source
data:
chart_name: kubernetes-apiserver-htk
release: kubernetes-apiserver-htk
namespace: kubernetes-apiserver-htk
values: {}
dependencies: []
...

View File

@ -1,15 +0,0 @@
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-core
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data:
description: Kubernetes components
chart_group:
- kubernetes-apiserver
- kubernetes-controller-manager
- kubernetes-scheduler

View File

@ -1,138 +0,0 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-controller-manager
labels:
name: kubernetes-controller-manager-global
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.kubernetes.controller-manager
dest:
path: .source
# Images
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.kubernetes.controller-manager
dest:
path: .values.images.tags
# IP addresses
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.pod_cidr
dest:
path: .values.network.pod_cidr
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.service_cidr
dest:
path: .values.network.service_cidr
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.pod_cidr
dest:
path: .values.command_prefix[1]
pattern: SUB_POD_CIDR
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.service_cidr
dest:
path: .values.command_prefix[2]
pattern: SUB_SERVICE_CIDR
# CA
- src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes
path: .
dest:
path: .values.secrets.tls.ca
# Certificates
- src:
schema: deckhand/Certificate/v1
name: controller-manager
path: .
dest:
path: .values.secrets.tls.cert
- src:
schema: deckhand/CertificateKey/v1
name: controller-manager
path: .
dest:
path: .values.secrets.tls.key
# Private key for Kubernetes service account token signing
- src:
schema: deckhand/PrivateKey/v1
name: service-account
path: .
dest:
path: .values.secrets.service_account.private_key
data:
chart_name: controller-manager
release: kubernetes-controller-manager
namespace: kube-system
protected:
continue_processing: true
wait:
timeout: 600
labels:
release_group: airship-kubernetes-controller-manager
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: airship-kubernetes-controller-manager
values:
command_prefix:
- /controller-manager
- --cluster-cidr=SUB_POD_CIDR
- --service-cluster-ip-range=SUB_SERVICE_CIDR
- --node-monitor-period=5s
- --node-monitor-grace-period=20s
- --pod-eviction-timeout=60s
network:
kubernetes_netloc: 127.0.0.1:6553
dependencies:
- kubernetes-controller-manager-htk
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-controller-manager-htk
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.kubernetes.controller-manager-htk
dest:
path: .source
data:
chart_name: kubernetes-controller-manager-htk
release: kubernetes-controller-manager-htk
namespace: kubernetes-controller-manager-htk
values: {}
dependencies: []
...

View File

@ -1,95 +0,0 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-scheduler
labels:
name: kubernetes-scheduler-global
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.kubernetes.scheduler
dest:
path: .source
# Images
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.kubernetes.scheduler
dest:
path: .values.images.tags
# CA
- src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes
path: .
dest:
path: .values.secrets.tls.ca
# Certificates
- src:
schema: deckhand/Certificate/v1
name: scheduler
path: .
dest:
path: .values.secrets.tls.cert
- src:
schema: deckhand/CertificateKey/v1
name: scheduler
path: .
dest:
path: .values.secrets.tls.key
data:
chart_name: scheduler
release: kubernetes-scheduler
namespace: kube-system
protected:
continue_processing: true
wait:
timeout: 600
labels:
release_group: airship-kubernetes-scheduler
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: airship-kubernetes-scheduler
values:
network:
kubernetes_netloc: 127.0.0.1:6553
dependencies:
- kubernetes-scheduler-htk
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-scheduler-htk
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.kubernetes.scheduler-htk
dest:
path: .source
data:
chart_name: kubernetes-scheduler-htk
release: kubernetes-scheduler-htk
namespace: kubernetes-scheduler-htk
values: {}
dependencies: []
...

View File

@ -1,13 +0,0 @@
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-dns
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data:
description: Cluster DNS
chart_group:
- coredns

View File

@ -1,149 +0,0 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: coredns
layeringDefinition:
abstract: false
layer: global
labels:
name: coredns-global
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.kubernetes.coredns
dest:
path: .source
# Images
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.kubernetes.coredns
dest:
path: .values.images.tags
# IP Addresses
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .dns.service_ip
dest:
path: .values.service.ip
# Zones
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .dns.cluster_domain
dest:
path: .values.conf.coredns.corefile
pattern: '(CLUSTER_DOMAIN)'
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.service_cidr
dest:
path: .values.conf.coredns.corefile
pattern: '(SERVICE_CIDR)'
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.pod_cidr
dest:
path: .values.conf.coredns.corefile
pattern: '(POD_CIDR)'
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .dns.upstream_servers[0]
dest:
path: .values.conf.coredns.corefile
pattern: '(UPSTREAM1)'
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .dns.upstream_servers[1]
dest:
path: .values.conf.coredns.corefile
pattern: '(UPSTREAM2)'
data:
chart_name: coredns
release: coredns
namespace: kube-system
wait:
timeout: 600
labels:
release_group: airship-coredns
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: airship-coredns
values:
conf:
coredns:
corefile: |
.:53 {
errors
health
autopath @kubernetes
kubernetes CLUSTER_DOMAIN SERVICE_CIDR POD_CIDR {
pods insecure
fallthrough in-addr.arpa ip6.arpa
upstream UPSTREAM1
upstream UPSTREAM2
}
prometheus :9153
forward . UPSTREAM1 UPSTREAM2
cache 30
}
labels:
coredns:
node_selector_key: kube-dns
node_selector_value: enabled
dependencies:
- coredns-htk
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: coredns-htk
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.kubernetes.coredns-htk
dest:
path: .source
data:
chart_name: coredns-htk
release: coredns-htk
namespace: coredns-htk
values:
pod:
# TODO: replicas can be removed once we switch coredns to
# DaemonSet-only. It will be deployed with both DaemonSet
# and Deployment-managed pods as we transition to DaemonSet.
replicas:
coredns: 2
manifests:
daemonset: true
# TODO: `deployment` can be set to false once we switch coredns to
# DaemonSet-only. It will be deployed with both DaemonSet
# and Deployment-managed pods as we transition to DaemonSet.
deployment: true
dependencies: []
...

View File

@ -1,13 +0,0 @@
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-etcd
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data:
description: Kubernetes etcd
chart_group:
- kubernetes-etcd

View File

@ -1,137 +0,0 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-etcd-global
layeringDefinition:
abstract: true
layer: global
labels:
name: kubernetes-etcd-global
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.kubernetes.etcd
dest:
path: .source
# Images
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.kubernetes.etcd
dest:
path: .values.images.tags
# IP addresses
-
src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.etcd_service_ip
dest:
path: .values.service.ip
-
src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.etcd_service_ip
dest:
path: .values.anchor.etcdctl_endpoint
# CAs
-
src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes-etcd
path: .
dest:
path: .values.secrets.tls.client.ca
-
src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes-etcd-peer
path: .
dest:
path: .values.secrets.tls.peer.ca
-
src:
schema: deckhand/Certificate/v1
name: kubernetes-etcd-anchor
path: .
dest:
path: .values.secrets.anchor.tls.cert
-
src:
schema: deckhand/CertificateKey/v1
name: kubernetes-etcd-anchor
path: .
dest:
path: .values.secrets.anchor.tls.key
data:
chart_name: etcd
release: kubernetes-etcd
namespace: kube-system
protected:
continue_processing: true
wait:
timeout: 600
labels:
release_group: airship-kubernetes-etcd
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: airship-kubernetes-etcd
values:
labels:
anchor:
node_selector_key: kubernetes-etcd
node_selector_value: enabled
etcd:
host_data_path: /var/lib/etcd/kubernetes
host_etc_path: /etc/etcd/kubernetes
service:
name: kubernetes-etcd
network:
service_client:
name: service_client
port: 2379
target_port: 2379
service_peer:
name: service_peer
port: 2380
target_port: 2380
dependencies:
- kubernetes-etcd-htk
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-etcd-htk
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.kubernetes.etcd-htk
dest:
path: .source
data:
chart_name: kubernetes-etcd-htk
release: kubernetes-etcd-htk
namespace: kubernetes-etcd-htk
values: {}
dependencies: []
...

View File

@ -1,13 +0,0 @@
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-haproxy
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data:
description: HAProxy for Kubernetes
chart_group:
- haproxy

View File

@ -1,111 +0,0 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: haproxy
labels:
name: haproxy-global
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.kubernetes.haproxy
dest:
path: .source
# Images
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.kubernetes.haproxy
dest:
path: .values.images.tags
# Kubernetes configuration
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.api_service_ip
dest:
path: .values.conf.anchor.kubernetes_url
pattern: KUBERNETES_IP
data:
chart_name: haproxy
release: haproxy
namespace: kube-system
protected:
continue_processing: true
wait:
timeout: 600
labels:
release_group: airship-haproxy
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: airship-haproxy
values:
conf:
anchor:
kubernetes_url: https://KUBERNETES_IP:443
services:
default:
kubernetes:
server_opts: "check port 6443"
conf_parts:
frontend:
- mode tcp
- option tcpka
- bind *:6553
backend:
- mode tcp
- option tcpka
- option tcp-check
- option redispatch
kube-system:
kubernetes-etcd:
server_opts: "check port 2379"
conf_parts:
frontend:
- mode tcp
- option tcpka
- bind *:2378
backend:
- mode tcp
- option tcpka
- option tcp-check
- option redispatch
dependencies:
- haproxy-htk
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: haproxy-htk
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.kubernetes.haproxy-htk
dest:
path: .source
data:
chart_name: haproxy-htk
release: haproxy-htk
namespace: haproxy-htk
values: {}
dependencies: []
...

View File

@ -1,13 +0,0 @@
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: ingress-kube-system
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data:
description: Ingress for the site
chart_group:
- ingress-kube-system

View File

@ -1,88 +0,0 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: global-ingress-kube-system
labels:
ingress: kube-system
name: ingress-kube-system-global
layeringDefinition:
abstract: true
layer: global
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.kubernetes.ingress
dest:
path: .source
# Images
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.kubernetes.ingress
dest:
path: .values.images.tags
data:
chart_name: ingress-kube-system
release: ingress-kube-system
namespace: kube-system
wait:
timeout: 300
labels:
release_group: airship-ingress-kube-system
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: airship-ingress-kube-system
values:
labels:
server:
node_selector_key: kube-ingress
node_selector_value: enabled
error_server:
node_selector_key: kube-ingress
node_selector_value: enabled
deployment:
mode: cluster
type: Deployment
network:
host_namespace: true
ingress:
annotations:
nginx.ingress.kubernetes.io/proxy-read-timeout: "603"
pod:
replicas:
ingress: 1
error_page: 1
dependencies:
- ingress-kube-system-htk
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ingress-kube-system-htk
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.kubernetes.ingress-htk
dest:
path: .source
data:
chart_name: ingress-kube-system-htk
release: ingress-kube-system-htk
namespace: ingress-kube-system-htk
values: {}
dependencies: []

View File

@ -1,14 +0,0 @@
---
# Grouping document for the kube-proxy chart; sequenced so the proxy is
# fully deployed before any later group runs.
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: kubernetes-proxy
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
data:
  description: Kubernetes proxy
  sequenced: true
  chart_group:
    - kubernetes-proxy

View File

@ -1,94 +0,0 @@
---
# Deploys kube-proxy (iptables mode) into kube-system. The POD_CIDR
# placeholder in command_prefix and the TLS CA are filled in by the
# substitutions below.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: kubernetes-proxy
  labels:
    name: kubernetes-proxy-global
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    # Chart source
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.kubernetes.proxy
      dest:
        path: .source
    # Images
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .images.kubernetes.proxy
      dest:
        path: .values.images.tags
    # IP Addresses
    - src:
        schema: pegleg/CommonAddresses/v1
        name: common-addresses
        path: .kubernetes.pod_cidr
      dest:
        # Replaces the POD_CIDR token in the second command_prefix entry.
        path: .values.command_prefix[1]
        pattern: POD_CIDR
    # Secrets
    - src:
        schema: deckhand/CertificateAuthority/v1
        name: kubernetes
        path: .
      dest:
        path: .values.secrets.tls.ca
data:
  chart_name: proxy
  release: kubernetes-proxy
  namespace: kube-system
  wait:
    timeout: 600
    labels:
      release_group: airship-kubernetes-proxy
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: airship-kubernetes-proxy
  values:
    command_prefix:
      - /proxy
      - --cluster-cidr=POD_CIDR
      - --proxy-mode=iptables
    kube_service:
      # NOTE(review): assumes a local API endpoint on 127.0.0.1:6553
      # (site-provided proxy/VIP) — confirm against the site layer.
      host: 127.0.0.1
      port: 6553
    livenessProbe:
      whitelist:
        - tiller-deploy
  dependencies:
    - kubernetes-proxy-htk
---
# helm-toolkit companion chart for kubernetes-proxy; dependency-only,
# renders no cluster resources.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: kubernetes-proxy-htk
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.kubernetes.proxy-htk
      dest:
        path: .source
data:
  chart_name: kubernetes-proxy-htk
  release: kubernetes-proxy-htk
  namespace: kubernetes-proxy-htk
  dependencies: []
...

View File

@ -1,28 +0,0 @@
---
# helm-toolkit chart for the osh-infra charts to depend on; carries no
# values of its own (values: {}).
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: osh-infra-helm-toolkit
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.osh_infra.helm_toolkit
      dest:
        path: .source
data:
  chart_name: helm-toolkit
  release: osh-infra-helm-toolkit
  namespace: osh-infra-helm-toolkit
  wait:
    timeout: 600
    labels:
      release_group: airship-osh-infra-helm-toolkit
  upgrade:
    no_hooks: true
  values: {}
  dependencies: []

View File

@ -1,92 +0,0 @@
---
# Lays down Ceph client configuration and secrets in the osh-infra
# namespace by reusing the UCP ceph-provisioners chart source. No Ceph
# daemons or provisioners are deployed here (deployment.ceph: false,
# rbd/cephfs provisioners disabled); only client_secrets is enabled.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: osh-infra-ceph-config
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    # Chart source
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.ucp.ceph-provisioners
      dest:
        path: .source
    # Images
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .images.ceph.ceph-provisioners
      dest:
        path: .values.images.tags
    # IP addresses
    - src:
        schema: pegleg/CommonAddresses/v1
        name: common-addresses
        path: .storage.ceph.public_cidr
      dest:
        path: .values.network.public
    - src:
        schema: pegleg/CommonAddresses/v1
        name: common-addresses
        path: .storage.ceph.cluster_cidr
      dest:
        path: .values.network.cluster
    # Endpoints
    - src:
        schema: pegleg/EndpointCatalogue/v1
        name: ucp_endpoints
        path: .ceph.ceph_mon
      dest:
        path: .values.endpoints.ceph_mon
data:
  chart_name: osh-infra-ceph-config
  release: osh-infra-ceph-config
  namespace: osh-infra
  wait:
    timeout: 900
    labels:
      release_group: airship-osh-infra-ceph-config
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: airship-osh-infra-ceph-config
  values:
    labels:
      job:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      provisioner:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    deployment:
      ceph: false
      client_secrets: true
      rbd_provisioner: false
      cephfs_provisioner: false
      rgw_keystone_user_and_endpoints: false
    bootstrap:
      enabled: false
    storageclass:
      rbd:
        ceph_configmap_name: ceph-etc
        parameters:
          userSecretName: pvc-ceph-client-key
      cephfs:
        provision_storage_class: false
  dependencies:
    - ceph-htk
...

View File

@ -1,13 +0,0 @@
---
# Grouping document for the osh-infra Ceph client-config chart.
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: osh-infra-ceph-config
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
data:
  description: Ceph config for OpenStack-Infra namespace(s)
  chart_group:
    - osh-infra-ceph-config

View File

@ -1,14 +0,0 @@
---
# Grouping document for the dashboard charts (Kibana for logs, Grafana
# for metrics). Not sequenced: the two deploy in parallel.
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: osh-infra-dashboards
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
data:
  description: OSH Infra Dashboards
  chart_group:
    - kibana
    - grafana

View File

@ -1,269 +0,0 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: grafana
layeringDefinition:
abstract: false
layer: global
labels:
name: grafana-global
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.osh_infra.grafana
dest:
path: .source
# Images
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.osh_infra.grafana
dest:
path: .values.images.tags
# Endpoints
- src:
schema: pegleg/EndpointCatalogue/v1
name: osh_infra_endpoints
path: .osh_infra.oslo_db
dest:
path: .values.endpoints.oslo_db
- src:
schema: pegleg/EndpointCatalogue/v1
name: osh_infra_endpoints
path: .osh_infra.oslo_db
dest:
path: .values.endpoints.oslo_db_session
- src:
schema: pegleg/EndpointCatalogue/v1
name: osh_infra_endpoints
path: .osh_infra.grafana
dest:
path: .values.endpoints.grafana
- src:
schema: pegleg/EndpointCatalogue/v1
name: osh_infra_endpoints
path: .osh_infra.monitoring
dest:
path: .values.endpoints.monitoring
- src:
schema: pegleg/EndpointCatalogue/v1
name: osh_infra_endpoints
path: .osh_infra.ldap
dest:
path: .values.endpoints.ldap
# Accounts
- src:
schema: pegleg/AccountCatalogue/v1
name: osh_infra_service_accounts
path: .osh_infra.grafana.admin
dest:
path: .values.endpoints.grafana.auth.admin
- src:
schema: pegleg/AccountCatalogue/v1
name: osh_infra_service_accounts
path: .osh_infra.grafana.oslo_db
dest:
path: .values.endpoints.oslo_db.auth.user
- src:
schema: pegleg/AccountCatalogue/v1
name: osh_infra_service_accounts
path: .osh_infra.grafana.oslo_db.database
dest:
path: .values.endpoints.oslo_db.path
pattern: DB_NAME
- src:
schema: pegleg/AccountCatalogue/v1
name: osh_infra_service_accounts
path: .osh_infra.grafana.oslo_db_session
dest:
path: .values.endpoints.oslo_db_session.auth.user
- src:
schema: pegleg/AccountCatalogue/v1
name: osh_infra_service_accounts
path: .osh_infra.grafana.oslo_db_session.database
dest:
path: .values.endpoints.oslo_db_session.path
pattern: DB_NAME
- src:
schema: pegleg/AccountCatalogue/v1
name: osh_infra_service_accounts
path: .osh_infra.prometheus.admin
dest:
path: .values.endpoints.monitoring.auth.user
# Secrets
- dest:
path: .values.endpoints.grafana.auth.admin.password
src:
schema: deckhand/Passphrase/v1
name: osh_infra_grafana_admin_password
path: .
- dest:
path: .values.endpoints.oslo_db.auth.user.password
src:
schema: deckhand/Passphrase/v1
name: osh_infra_grafana_oslo_db_password
path: .
- dest:
path: .values.endpoints.oslo_db_session.auth.user.password
src:
schema: deckhand/Passphrase/v1
name: osh_infra_grafana_oslo_db_session_password
path: .
- dest:
path: .values.endpoints.oslo_db.auth.admin.password
src:
schema: deckhand/Passphrase/v1
name: osh_infra_oslo_db_admin_password
path: .
- dest:
path: .values.endpoints.oslo_db_session.auth.admin.password
src:
schema: deckhand/Passphrase/v1
name: osh_infra_oslo_db_admin_password
path: .
- dest:
path: .values.endpoints.monitoring.auth.user.password
src:
schema: deckhand/Passphrase/v1
name: osh_infra_prometheus_admin_password
path: .
# LDAP Configuration Details
- src:
schema: pegleg/AccountCatalogue/v1
name: osh_infra_service_accounts
path: .osh_infra.ldap.admin.bind
dest:
path: .values.endpoints.ldap.auth.admin.bind_dn
- dest:
path: .values.endpoints.ldap.auth.admin.password
src:
schema: deckhand/Passphrase/v1
name: osh_keystone_ldap_password
path: .
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .ldap.subdomain
dest:
path: .values.conf.ldap.config.base_dns.search
pattern: SUBDOMAIN
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .ldap.domain
dest:
path: .values.conf.ldap.config.base_dns.search
pattern: DOMAIN
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .ldap.subdomain
dest:
path: .values.conf.ldap.config.base_dns.group_search
pattern: SUBDOMAIN
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .ldap.domain
dest:
path: .values.conf.ldap.config.base_dns.group_search
pattern: DOMAIN
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .ldap.common_name
dest:
path: .values.conf.ldap.config.filters.group_search
pattern: COMMON_NAME
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .ldap.subdomain
dest:
path: .values.conf.ldap.config.filters.group_search
pattern: SUBDOMAIN
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .ldap.domain
dest:
path: .values.conf.ldap.config.filters.group_search
pattern: DOMAIN
data:
chart_name: grafana
release: grafana
namespace: osh-infra
wait:
timeout: 900
labels:
release_group: airship-grafana
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: airship-grafana
post:
create: []
values:
labels:
grafana:
node_selector_key: openstack-control-plane
node_selector_value: enabled
job:
node_selector_key: openstack-control-plane
node_selector_value: enabled
conf:
provisioning:
datasources:
monitoring:
url: http://prom-metrics.osh-infra.svc.cluster.local:80/
ldap:
config:
base_dns:
search: "DC=SUBDOMAIN,DC=DOMAIN,DC=com"
group_search: "OU=Groups,DC=SUBDOMAIN,DC=DOMAIN,DC=com"
filters:
search: "(sAMAccountName=%s)"
group_search: "(memberof=CN=COMMON_NAME,OU=Application,OU=Groups,DC=SUBDOMAIN,DC=DOMAIN,DC=com)"
template: |
verbose_logging = true
[[servers]]
host = "{{ tuple "ldap" "public" . | include "helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup" }}"
port = {{ tuple "ldap" "public" "ldap" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
use_ssl = false
start_tls = false
ssl_skip_verify = false
bind_dn = "{{ .Values.endpoints.ldap.auth.admin.bind_dn }}"
bind_password = '{{ .Values.endpoints.ldap.auth.admin.password }}'
search_filter = "{{ .Values.conf.ldap.config.filters.search }}"
search_base_dns = ["{{ .Values.conf.ldap.config.base_dns.search }}"]
group_search_base_dns = ["{{ .Values.conf.ldap.config.base_dns.group_search }}"]
[servers.attributes]
username = "sAMAccountName"
surname = "sn"
member_of = "memberof"
email = "mail"
[[servers.group_mappings]]
group_dn = "{{.Values.endpoints.ldap.auth.admin.bind_dn }}"
org_role = "Admin"
[[servers.group_mappings]]
group_dn = "*"
org_role = "Viewer"
pod:
replicas:
grafana: 2
dependencies:
- osh-infra-helm-toolkit
...

View File

@ -1,126 +0,0 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kibana
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.osh_infra.kibana
dest:
path: .source
# Images
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.osh_infra.kibana
dest:
path: .values.images.tags
# Endpoints
- src:
schema: pegleg/EndpointCatalogue/v1
name: osh_infra_endpoints
path: .osh_infra.elasticsearch
dest:
path: .values.endpoints.elasticsearch
- src:
schema: pegleg/EndpointCatalogue/v1
name: osh_infra_endpoints
path: .osh_infra.kibana
dest:
path: .values.endpoints.kibana
- src:
schema: pegleg/EndpointCatalogue/v1
name: osh_infra_endpoints
path: .osh_infra.ldap
dest:
path: .values.endpoints.ldap
# Accounts
- src:
schema: pegleg/AccountCatalogue/v1
name: osh_infra_service_accounts
path: .osh_infra.elasticsearch.admin
dest:
path: .values.endpoints.elasticsearch.auth.admin
# Secrets
- dest:
path: .values.endpoints.elasticsearch.auth.admin.password
src:
schema: deckhand/Passphrase/v1
name: osh_infra_elasticsearch_admin_password
path: .
# LDAP Details
- src:
schema: pegleg/AccountCatalogue/v1
name: osh_infra_service_accounts
path: .osh_infra.ldap.admin
dest:
path: .values.endpoints.ldap.auth.admin
- dest:
path: .values.endpoints.ldap.auth.admin.password
src:
schema: deckhand/Passphrase/v1
name: osh_keystone_ldap_password
path: .
data:
chart_name: kibana
release: kibana
namespace: osh-infra
wait:
timeout: 900
labels:
release_group: airship-kibana
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: airship-kibana
create: []
post:
create: []
values:
conf:
apache:
host: |
<VirtualHost *:80>
ProxyRequests off
ProxyPreserveHost On
<Location />
ProxyPass http://localhost:{{ tuple "kibana" "internal" "kibana" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}/
ProxyPassReverse http://localhost:{{ tuple "kibana" "internal" "kibana" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}/
</Location>
<Proxy *>
AuthName "Kibana"
AuthType Basic
AuthBasicProvider file ldap
AuthUserFile /usr/local/apache2/conf/.htpasswd
AuthLDAPBindDN {{ .Values.endpoints.ldap.auth.admin.bind }}
AuthLDAPBindPassword {{ .Values.endpoints.ldap.auth.admin.password }}
AuthLDAPURL {{ tuple "ldap" "public" "ldap" . | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup" }}
Require valid-user
</Proxy>
</VirtualHost>
labels:
kibana:
node_selector_key: openstack-control-plane
node_selector_value: enabled
job:
node_selector_key: openstack-control-plane
node_selector_value: enabled
dependencies:
- osh-infra-helm-toolkit
...

View File

@ -1,13 +0,0 @@
---
# Grouping document for the osh-infra namespace ingress controller.
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: osh-infra-ingress-controller
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
data:
  description: OpenStack Namespace Ingress
  chart_group:
    - osh-infra-ingress-controller

View File

@ -1,57 +0,0 @@
---
# Namespace-scoped ingress controller for osh-infra. Sources its chart
# and images from the .charts.osh / .images.osh catalogue entries.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: osh-infra-ingress-controller
  labels:
    name: osh-infra-ingress-controller-global
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    # Chart source
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.osh.ingress
      dest:
        path: .source
    # Images
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .images.osh.ingress
      dest:
        path: .values.images.tags
data:
  chart_name: osh-infra-ingress-controller
  release: osh-infra-ingress-controller
  namespace: osh-infra
  wait:
    timeout: 900
    labels:
      release_group: airship-osh-infra-ingress-controller
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: airship-osh-infra-ingress-controller
  values:
    labels:
      server:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      error_server:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    pod:
      replicas:
        ingress: 2
        error_page: 2
  # NOTE(review): sibling osh-infra charts depend on osh-infra-helm-toolkit,
  # while this one references osh-helm-toolkit (consistent with its
  # .charts.osh.* source). Confirm that dependency target exists in this
  # layer rather than being a stale name.
  dependencies:
    - osh-helm-toolkit

View File

@ -1,16 +0,0 @@
---
# Grouping document for the logging stack. Sequenced so charts deploy one
# at a time in order: Elasticsearch must be up before the Fluentbit and
# Fluentd collectors that forward logs to it.
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: osh-infra-logging
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
data:
  description: OSH Infra Logging
  # Canonical lowercase boolean (was "True"; yamllint truthy rule, and
  # consistent with "sequenced: true" in the kubernetes-proxy group).
  sequenced: true
  chart_group:
    - elasticsearch
    - fluentbit
    - fluentd

View File

@ -1,364 +0,0 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: elasticsearch-global
labels:
hosttype: elasticsearch-global
layeringDefinition:
abstract: true
layer: global
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.osh_infra.elasticsearch
dest:
path: .source
# Images
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.osh_infra.elasticsearch
dest:
path: .values.images.tags
# Endpoints
- src:
schema: pegleg/EndpointCatalogue/v1
name: osh_infra_endpoints
path: .osh_infra.elasticsearch
dest:
path: .values.endpoints.elasticsearch
- src:
schema: pegleg/EndpointCatalogue/v1
name: osh_infra_endpoints
path: .osh_infra.prometheus_elasticsearch_exporter
dest:
path: .values.endpoints.prometheus_elasticsearch_exporter
- src:
schema: pegleg/EndpointCatalogue/v1
name: osh_infra_endpoints
path: .osh_infra.ldap
dest:
path: .values.endpoints.ldap
# Accounts
- src:
schema: pegleg/AccountCatalogue/v1
name: osh_infra_service_accounts
path: .osh_infra.elasticsearch.admin
dest:
path: .values.endpoints.elasticsearch.auth.admin
- src:
schema: pegleg/AccountCatalogue/v1
name: osh_infra_service_accounts
path: .osh_infra.ceph_object_store.admin
dest:
path: .values.endpoints.ceph_object_store.auth.admin
- src:
schema: pegleg/AccountCatalogue/v1
name: osh_infra_service_accounts
path: .osh_infra.ceph_object_store.elasticsearch
dest:
path: .values.endpoints.ceph_object_store.auth.elasticsearch
# Secrets
- dest:
path: .values.endpoints.elasticsearch.auth.admin.password
src:
schema: deckhand/Passphrase/v1
name: osh_infra_elasticsearch_admin_password
path: .
- dest:
path: .values.endpoints.ceph_object_store.auth.admin.access_key
src:
schema: deckhand/Passphrase/v1
name: osh_infra_rgw_s3_admin_access_key
path: .
- dest:
path: .values.endpoints.ceph_object_store.auth.admin.secret_key
src:
schema: deckhand/Passphrase/v1
name: osh_infra_rgw_s3_admin_secret_key
path: .
- dest:
path: .values.endpoints.ceph_object_store.auth.elasticsearch.access_key
src:
schema: deckhand/Passphrase/v1
name: osh_infra_rgw_s3_elasticsearch_access_key
path: .
- dest:
path: .values.endpoints.ceph_object_store.auth.elasticsearch.secret_key
src:
schema: deckhand/Passphrase/v1
name: osh_infra_rgw_s3_elasticsearch_secret_key
path: .
# LDAP Details
- src:
schema: pegleg/AccountCatalogue/v1
name: osh_infra_service_accounts
path: .osh_infra.ldap.admin
dest:
path: .values.endpoints.ldap.auth.admin
- dest:
path: .values.endpoints.ldap.auth.admin.password
src:
schema: deckhand/Passphrase/v1
name: osh_keystone_ldap_password
path: .
data:
chart_name: elasticsearch
release: elasticsearch
namespace: osh-infra
wait:
timeout: 900
labels:
release_group: airship-elasticsearch
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: airship-elasticsearch
create: []
post:
create: []
values:
pod:
replicas:
client: 5
resources:
enabled: true
apache_proxy:
limits:
memory: "1024Mi"
cpu: "2000m"
requests:
memory: "0"
cpu: "0"
client:
requests:
memory: "8Gi"
cpu: "1000m"
limits:
memory: "16Gi"
cpu: "2000m"
master:
requests:
memory: "8Gi"
cpu: "1000m"
limits:
memory: "16Gi"
cpu: "2000m"
data:
requests:
memory: "8Gi"
cpu: "1000m"
limits:
memory: "16Gi"
cpu: "2000m"
prometheus_elasticsearch_exporter:
requests:
memory: "0"
cpu: "0"
limits:
memory: "1024Mi"
cpu: "2000m"
jobs:
curator:
requests:
memory: "0"
cpu: "0"
limits:
memory: "1024Mi"
cpu: "2000m"
image_repo_sync:
requests:
memory: "0"
cpu: "0"
limits:
memory: "1024Mi"
cpu: "2000m"
snapshot_repository:
requests:
memory: "0"
cpu: "0"
limits:
memory: "1024Mi"
cpu: "2000m"
tests:
requests:
memory: "0"
cpu: "0"
limits:
memory: "1024Mi"
cpu: "2000m"
labels:
elasticsearch:
node_selector_key: openstack-control-plane
node_selector_value: enabled
job:
node_selector_key: openstack-control-plane
node_selector_value: enabled
test:
node_selector_key: openstack-control-plane
node_selector_value: enabled
monitoring:
prometheus:
enabled: true
conf:
httpd: |
ServerRoot "/usr/local/apache2"
Listen 80
LoadModule mpm_event_module modules/mod_mpm_event.so
LoadModule authn_file_module modules/mod_authn_file.so
LoadModule authn_core_module modules/mod_authn_core.so
LoadModule authz_host_module modules/mod_authz_host.so
LoadModule authz_groupfile_module modules/mod_authz_groupfile.so
LoadModule authz_user_module modules/mod_authz_user.so
LoadModule authz_core_module modules/mod_authz_core.so
LoadModule access_compat_module modules/mod_access_compat.so
LoadModule auth_basic_module modules/mod_auth_basic.so
LoadModule ldap_module modules/mod_ldap.so
LoadModule authnz_ldap_module modules/mod_authnz_ldap.so
LoadModule reqtimeout_module modules/mod_reqtimeout.so
LoadModule filter_module modules/mod_filter.so
LoadModule proxy_html_module modules/mod_proxy_html.so
LoadModule log_config_module modules/mod_log_config.so
LoadModule env_module modules/mod_env.so
LoadModule headers_module modules/mod_headers.so
LoadModule setenvif_module modules/mod_setenvif.so
LoadModule version_module modules/mod_version.so
LoadModule proxy_module modules/mod_proxy.so
LoadModule proxy_connect_module modules/mod_proxy_connect.so
LoadModule proxy_http_module modules/mod_proxy_http.so
LoadModule proxy_balancer_module modules/mod_proxy_balancer.so
LoadModule slotmem_shm_module modules/mod_slotmem_shm.so
LoadModule slotmem_plain_module modules/mod_slotmem_plain.so
LoadModule unixd_module modules/mod_unixd.so
LoadModule status_module modules/mod_status.so
LoadModule autoindex_module modules/mod_autoindex.so
<IfModule unixd_module>
User daemon
Group daemon
</IfModule>
<Directory />
AllowOverride none
Require all denied
</Directory>
<Files ".ht*">
Require all denied
</Files>
ErrorLog /dev/stderr
LogLevel warn
<IfModule log_config_module>
LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
LogFormat "%h %l %u %t \"%r\" %>s %b" common
<IfModule logio_module>
LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio
</IfModule>
CustomLog /dev/stdout common
CustomLog /dev/stdout combined
</IfModule>
<Directory "/usr/local/apache2/cgi-bin">
AllowOverride None
Options None
Require all granted
</Directory>
<IfModule headers_module>
RequestHeader unset Proxy early
</IfModule>
<IfModule proxy_html_module>
Include conf/extra/proxy-html.conf
</IfModule>
<VirtualHost *:80>
<Location />
ProxyPass http://localhost:{{ tuple "elasticsearch" "internal" "client" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}/
ProxyPassReverse http://localhost:{{ tuple "elasticsearch" "internal" "client" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}/
</Location>
<Proxy *>
AuthName "Elasticsearch"
AuthType Basic
AuthBasicProvider file ldap
AuthUserFile /usr/local/apache2/conf/.htpasswd
AuthLDAPBindDN {{ .Values.endpoints.ldap.auth.admin.bind }}
AuthLDAPBindPassword {{ .Values.endpoints.ldap.auth.admin.password }}
AuthLDAPURL {{ tuple "ldap" "public" "ldap" . | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup" | quote }}
Require valid-user
</Proxy>
</VirtualHost>
elasticsearch:
config:
http:
max_content_length: 2gb
pipelining: false
env:
java_opts:
client: "-Xms8g -Xmx8g"
data: "-Xms8g -Xmx8g"
master: "-Xms8g -Xmx8g"
snapshots:
enabled: true
# Curator housekeeping for the logstash-* indices. Booleans canonicalized
# to lowercase true/false (were "True"/"False"; yamllint truthy rule —
# parsed identically, so no behavior change).
curator:
  # run every 6th hour
  schedule: "0 */6 * * *"
  action_file:
    # Remember, leave a key empty if there is no value. None will be a string,
    # not a Python "NoneType"
    #
    # Also remember that all examples have 'disable_action' set to True. If you
    # want to use this action as a template, be sure to set this to False after
    # copying it.
    actions:
      1:
        action: delete_indices
        description: >-
          "Delete indices older than 7 days"
        options:
          # Intentionally empty: per the note above, an empty key means
          # "no value" (use curator's default timeout).
          timeout_override:
          continue_if_exception: false
          ignore_empty_list: true
          disable_action: false
        filters:
          - filtertype: pattern
            kind: prefix
            value: logstash-
          - filtertype: age
            source: name
            direction: older
            timestring: '%Y.%m.%d'
            unit: days
            unit_count: 7
      2:
        action: delete_indices
        # NOTE(review): the description says "80% total disk" but
        # disk_space is an absolute threshold in GB (1200) — confirm it
        # still matches the provisioned data-node storage.
        description: >-
          "Delete indices by age if available disk space is
          less than 80% total disk"
        options:
          timeout_override: 600
          continue_if_exception: false
          ignore_empty_list: true
          disable_action: false
        filters:
          - filtertype: pattern
            kind: prefix
            value: logstash-
          - filtertype: space
            source: creation_date
            use_age: true
            disk_space: 1200
storage:
requests:
storage: 500Gi
dependencies:
- osh-infra-helm-toolkit
...

View File

@ -1,255 +0,0 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: fluentbit-global
layeringDefinition:
abstract: true
layer: global
labels:
hosttype: fluentbit-global
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.osh_infra.fluentbit
dest:
path: .source
# Images
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.osh_infra.fluentbit
dest:
path: .values.images.tags
# Endpoints
- src:
schema: pegleg/EndpointCatalogue/v1
name: osh_infra_endpoints
path: .osh_infra.fluentd # TODO change it in OSH repo
dest:
path: .values.endpoints.fluentbit
data:
chart_name: fluentbit
release: fluentbit
namespace: osh-infra
wait:
timeout: 900
labels:
release_group: airship-fluentbit
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: airship-fluentbit
create: []
post:
create: []
values:
monitoring:
prometheus:
enabled: true
pod:
resources:
enabled: true
fluentbit:
limits:
memory: '4Gi'
cpu: '2000m'
requests:
memory: '2Gi'
cpu: '1000m'
jobs:
image_repo_sync:
requests:
memory: '0'
cpu: '0'
limits:
memory: '1024Mi'
cpu: '2000m'
tests:
requests:
memory: '0'
cpu: '0'
limits:
memory: '1024Mi'
cpu: '2000m'
labels:
fluentbit:
node_selector_key: fluentbit
node_selector_value: enabled
job:
node_selector_key: openstack-control-plane
node_selector_value: enabled
conf:
fluentbit:
template: |
[SERVICE]
Daemon false
Flush 5
Log_Level info
Parsers_File parsers.conf
[INPUT]
Buffer_Chunk_Size 1M
Buffer_Max_Size 1M
Mem_Buf_Limit 5MB
Name tail
Path /var/log/kern.log
Tag kernel
[INPUT]
Buffer_Chunk_Size 1M
Buffer_Max_Size 1M
Mem_Buf_Limit 5MB
Name tail
Parser docker
Path /var/log/containers/*.log
Tag kube.*
[INPUT]
Buffer_Chunk_Size 1M
Buffer_Max_Size 1M
Mem_Buf_Limit 5MB
Name tail
Path /var/log/libvirt/libvirtd.log
Tag libvirt
[INPUT]
Buffer_Chunk_Size 1M
Buffer_Max_Size 1M
Mem_Buf_Limit 5MB
Name tail
Path /var/log/libvirt/qemu/*.log
Tag qemu
[INPUT]
Buffer_Chunk_Size 1M
Buffer_Max_Size 1M
Mem_Buf_Limit 5MB
Name systemd
Path ${JOURNAL_PATH}
Systemd_Filter _SYSTEMD_UNIT=kubelet.service
Tag journal.*
[INPUT]
Buffer_Chunk_Size 1M
Buffer_Max_Size 1M
Mem_Buf_Limit 5MB
Name systemd
Path ${JOURNAL_PATH}
Systemd_Filter _SYSTEMD_UNIT=docker.service
Tag journal.*
[INPUT]
Buffer_Chunk_Size 1M
Buffer_Max_Size 1M
Mem_Buf_Limit 5MB
Name tail
Parsers syslog
Path /var/log/ceph/airship-ucp-ceph-mon/ceph.log
Tag ceph.cluster.*
[INPUT]
Buffer_Chunk_Size 1M
Buffer_Max_Size 1M
Mem_Buf_Limit 5MB
Name tail
Parsers syslog
Path /var/log/ceph/airship-ucp-ceph-mon/ceph.audit.log
Tag ceph.audit.*
[INPUT]
Buffer_Chunk_Size 1M
Buffer_Max_Size 1M
Mem_Buf_Limit 5MB
Name tail
Parsers syslog
Path /var/log/ceph/airship-ucp-ceph-mon/ceph-mon**.log
Tag ceph.mon.*
[INPUT]
Buffer_Chunk_Size 1M
Buffer_Max_Size 1M
Mem_Buf_Limit 5MB
Name tail
Parsers syslog
Path /var/log/ceph/airship-ucp-ceph-osd/ceph-osd**.log
Tag ceph.osd.*
[FILTER]
Interval 1s
Match **
Name throttle
Rate 1000
Window 300
[FILTER]
Match libvirt
Name record_modifier
Record hostname ${HOSTNAME}
[FILTER]
Match qemu
Name record_modifier
Record hostname ${HOSTNAME}
[FILTER]
Match kernel
Name record_modifier
Record hostname ${HOSTNAME}
[FILTER]
Match journal.**
Name modify
Rename _BOOT_ID BOOT_ID
Rename _CAP_EFFECTIVE CAP_EFFECTIVE
Rename _CMDLINE CMDLINE
Rename _COMM COMM
Rename _EXE EXE
Rename _GID GID
Rename _HOSTNAME HOSTNAME
Rename _MACHINE_ID MACHINE_ID
Rename _PID PID
Rename _SYSTEMD_CGROUP SYSTEMD_CGROUP
Rename _SYSTEMD_SLICE SYSTEMD_SLICE
Rename _SYSTEMD_UNIT SYSTEMD_UNIT
Rename _TRANSPORT TRANSPORT
Rename _UID UID
[OUTPUT]
Match **.fluentd**
Name null
[FILTER]
Match kube.*
Merge_JSON_Log true
Name kubernetes
[OUTPUT]
Host ${FLUENTD_HOST}
Match *
Name forward
Port ${FLUENTD_PORT}
parsers:
template: |
[PARSER]
Name syslog
Format regex
Regex '^(?<time>.*[0-9]{2}:[0-9]{2}:[0-9]{2}) (?<host>[^ ]*) (?<app>[a-zA-Z0-9_\/\.\-]*)(?:\[(?<pid>[0-9]+)\])?(?:[^\:]*\:)? (?<log>.+)$'
Time_Key time
Time_Format "%Y-%m-%dT%H:%M:%S.%L"
Time_Keep On
Types "pid:integer"
dependencies:
- osh-infra-helm-toolkit
...

View File

@ -1,375 +0,0 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: fluentd-global
layeringDefinition:
abstract: true
layer: global
labels:
hosttype: fluentd-global
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.osh_infra.fluentd
dest:
path: .source
# Images
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.osh_infra.fluentd
dest:
path: .values.images.tags
# Endpoints
- src:
schema: pegleg/EndpointCatalogue/v1
name: osh_infra_endpoints
path: .osh_infra.fluentd
dest:
path: .values.endpoints.fluentd
- src:
schema: pegleg/EndpointCatalogue/v1
name: osh_infra_endpoints
path: .osh_infra.prometheus_fluentd_exporter
dest:
path: .values.endpoints.prometheus_fluentd_exporter
# Accounts
- src:
schema: pegleg/AccountCatalogue/v1
name: osh_infra_service_accounts
path: .osh_infra.elasticsearch.admin
dest:
path: .values.endpoints.elasticsearch.auth.admin
# Secrets
- dest:
path: .values.endpoints.elasticsearch.auth.admin.password
src:
schema: deckhand/Passphrase/v1
name: osh_infra_elasticsearch_admin_password
path: .
data:
chart_name: fluentd
release: fluentd
namespace: osh-infra
wait:
timeout: 900
labels:
release_group: airship-fluentd
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: airship-fluentd
create: []
post:
create: []
values:
monitoring:
prometheus:
enabled: true
pod:
resources:
enabled: true
fluentd:
limits:
memory: '4Gi'
cpu: '2000m'
requests:
memory: '2Gi'
cpu: '1000m'
prometheus_fluentd_exporter:
limits:
memory: '1024Mi'
cpu: '2000m'
requests:
memory: '0'
cpu: '0'
jobs:
image_repo_sync:
requests:
memory: '0'
cpu: '0'
limits:
memory: '1024Mi'
cpu: '2000m'
tests:
requests:
memory: '0'
cpu: '0'
limits:
memory: '1024Mi'
cpu: '2000m'
labels:
fluentd:
node_selector_key: fluentd
node_selector_value: enabled
prometheus_fluentd_exporter:
node_selector_key: openstack-control-plane
node_selector_value: enabled
job:
node_selector_key: openstack-control-plane
node_selector_value: enabled
conf:
fluentd:
template: |
<source>
bind 0.0.0.0
port 24220
@type monitor_agent
</source>
<source>
bind 0.0.0.0
port "#{ENV['FLUENTD_PORT']}"
@type forward
</source>
<match fluent.**>
@type null
</match>
<match kube.var.log.containers.**.log>
<rule>
key log
pattern /info/i
tag info.${tag}
</rule>
<rule>
key log
pattern /warn/i
tag warn.${tag}
</rule>
<rule>
key log
pattern /error/i
tag error.${tag}
</rule>
<rule>
key log
pattern /critical/i
tag critical.${tag}
</rule>
<rule>
key log
pattern (.+)
tag info.${tag}
</rule>
@type rewrite_tag_filter
</match>
<filter **.kube.var.log.containers.**.log>
enable_ruby true
<record>
application ${record["kubernetes"]["labels"]["application"]}
level ${tag_parts[0]}
</record>
@type record_transformer
</filter>
<filter openstack.**>
<record>
application ${tag_parts[1]}
</record>
@type record_transformer
</filter>
<match openstack.**>
<rule>
key level
pattern INFO
tag info.${tag}
</rule>
<rule>
key level
pattern WARN
tag warn.${tag}
</rule>
<rule>
key level
pattern ERROR
tag error.${tag}
</rule>
<rule>
key level
pattern CRITICAL
tag critical.${tag}
</rule>
@type rewrite_tag_filter
</match>
<match *.openstack.**>
<rule>
key application
pattern keystone
tag auth.${tag}
</rule>
<rule>
key application
pattern horizon
tag auth.${tag}
</rule>
<rule>
key application
pattern mariadb
tag auth.${tag}
</rule>
<rule>
key application
pattern memcached
tag auth.${tag}
</rule>
<rule>
key application
pattern rabbitmq
tag auth.${tag}
</rule>
@type rewrite_tag_filter
</match>
<match libvirt>
<buffer>
chunk_limit_size 8MB
flush_interval 15s
flush_thread_count 8
queue_limit_length 256
retry_forever false
retry_max_interval 30
</buffer>
host "#{ENV['ELASTICSEARCH_HOST']}"
reload_connections false
reconnect_on_error true
reload_on_failure true
include_tag_key true
logstash_format true
logstash_prefix libvirt
password "#{ENV['ELASTICSEARCH_PASSWORD']}"
port "#{ENV['ELASTICSEARCH_PORT']}"
@type elasticsearch
user "#{ENV['ELASTICSEARCH_USERNAME']}"
</match>
<match qemu>
<buffer>
chunk_limit_size 8MB
flush_interval 15s
flush_thread_count 8
queue_limit_length 256
retry_forever false
retry_max_interval 30
</buffer>
host "#{ENV['ELASTICSEARCH_HOST']}"
reload_connections false
reconnect_on_error true
reload_on_failure true
include_tag_key true
logstash_format true
logstash_prefix qemu
password "#{ENV['ELASTICSEARCH_PASSWORD']}"
port "#{ENV['ELASTICSEARCH_PORT']}"
@type elasticsearch
user "#{ENV['ELASTICSEARCH_USERNAME']}"
</match>
<match journal.**>
<buffer>
chunk_limit_size 8MB
flush_interval 15s
flush_thread_count 8
queue_limit_length 256
retry_forever false
retry_max_interval 30
</buffer>
host "#{ENV['ELASTICSEARCH_HOST']}"
reload_connections false
reconnect_on_error true
reload_on_failure true
include_tag_key true
logstash_format true
logstash_prefix journal
password "#{ENV['ELASTICSEARCH_PASSWORD']}"
port "#{ENV['ELASTICSEARCH_PORT']}"
@type elasticsearch
user "#{ENV['ELASTICSEARCH_USERNAME']}"
</match>
<match kernel>
<buffer>
chunk_limit_size 8MB
flush_interval 15s
flush_thread_count 8
queue_limit_length 256
retry_forever false
retry_max_interval 30
</buffer>
host "#{ENV['ELASTICSEARCH_HOST']}"
reload_connections false
reconnect_on_error true
reload_on_failure true
include_tag_key true
logstash_format true
logstash_prefix kernel
password "#{ENV['ELASTICSEARCH_PASSWORD']}"
port "#{ENV['ELASTICSEARCH_PORT']}"
@type elasticsearch
user "#{ENV['ELASTICSEARCH_USERNAME']}"
</match>
<match **>
<buffer>
chunk_limit_size 8MB
flush_interval 15s
flush_thread_count 8
queue_limit_length 256
retry_forever false
retry_max_interval 30
</buffer>
host "#{ENV['ELASTICSEARCH_HOST']}"
reload_connections false
reconnect_on_error true
reload_on_failure true
include_tag_key true
logstash_format true
password "#{ENV['ELASTICSEARCH_PASSWORD']}"
port "#{ENV['ELASTICSEARCH_PORT']}"
@type elasticsearch
type_name fluent
user "#{ENV['ELASTICSEARCH_USERNAME']}"
</match>
<match *ceph-**.log>
<buffer>
chunk_limit_size 8MB
flush_interval 15s
flush_thread_count 8
queue_limit_length 256
retry_forever false
retry_max_interval 30
</buffer>
host "#{ENV['ELASTICSEARCH_HOST']}"
reload_connections false
reconnect_on_error true
reload_on_failure true
include_tag_key true
logstash_format true
logstash_prefix ceph
password "#{ENV['ELASTICSEARCH_PASSWORD']}"
port "#{ENV['ELASTICSEARCH_PORT']}"
@type elasticsearch
user "#{ENV['ELASTICSEARCH_USERNAME']}"
</match>
dependencies:
- osh-infra-helm-toolkit
...

View File

@ -1,13 +0,0 @@
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: osh-infra-mariadb
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data:
description: OpenStack-Infra MariaDB
chart_group:
- osh-infra-mariadb

View File

@ -1,100 +0,0 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: osh-infra-mariadb
labels:
name: osh-infra-mariadb-global
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.osh.mariadb
dest:
path: .source
# Images
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.osh.mariadb
dest:
path: .values.images.tags
# Endpoints
- src:
schema: pegleg/EndpointCatalogue/v1
name: osh_infra_endpoints
path: .osh_infra.oslo_db
dest:
path: .values.endpoints.oslo_db
- src:
schema: pegleg/EndpointCatalogue/v1
name: osh_infra_endpoints
path: .osh_infra.prometheus_mysql_exporter
dest:
path: .values.endpoints.prometheus_mysql_exporter
# Accounts
- src:
schema: pegleg/AccountCatalogue/v1
name: osh_infra_service_accounts
path: .osh_infra.oslo_db.admin
dest:
path: .values.endpoints.oslo_db.auth.admin
- src:
schema: pegleg/AccountCatalogue/v1
name: osh_infra_service_accounts
path: .osh_infra.prometheus_mysql_exporter.user
dest:
path: .values.endpoints.prometheus_mysql_exporter.auth.user
# Secrets
- dest:
path: .values.endpoints.oslo_db.auth.admin.password
src:
schema: deckhand/Passphrase/v1
name: osh_infra_oslo_db_admin_password
path: .
- dest:
path: .values.endpoints.oslo_db.auth.exporter.password
src:
schema: deckhand/Passphrase/v1
name: osh_infra_oslo_db_exporter_password
path: .
data:
chart_name: osh-infra-mariadb
release: osh-infra-mariadb
namespace: osh-infra
wait:
timeout: 900
labels:
release_group: airship-osh-infra-mariadb
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: airship-osh-infra-mariadb
values:
labels:
server:
node_selector_key: openstack-control-plane
node_selector_value: enabled
prometheus_mysql_exporter:
node_selector_key: openstack-control-plane
node_selector_value: enabled
monitoring:
prometheus:
enabled: true
dependencies:
- osh-helm-toolkit
...

View File

@ -1,19 +0,0 @@
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: osh-infra-monitoring
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data:
description: OSH Infra Monitoring
chart_group:
- prometheus
- prometheus-alertmanager
- prometheus-node-exporter
- prometheus-process-exporter
- prometheus-kube-state-metrics
- nagios
...

View File

@ -1,159 +0,0 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: nagios
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.osh_infra.nagios
dest:
path: .source
# Images
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.osh_infra.nagios
dest:
path: .values.images.tags
# Endpoints
- src:
schema: pegleg/EndpointCatalogue/v1
name: osh_infra_endpoints
path: .osh_infra.nagios
dest:
path: .values.endpoints.nagios
- src:
schema: pegleg/EndpointCatalogue/v1
name: osh_infra_endpoints
path: .osh_infra.monitoring
dest:
path: .values.endpoints.monitoring
- src:
schema: pegleg/EndpointCatalogue/v1
name: osh_infra_endpoints
path: .osh_infra.elasticsearch
dest:
path: .values.endpoints.elasticsearch
- src:
schema: pegleg/EndpointCatalogue/v1
name: osh_infra_endpoints
path: .osh_infra.ldap
dest:
path: .values.endpoints.ldap
# Accounts
- src:
schema: pegleg/AccountCatalogue/v1
name: osh_infra_service_accounts
path: .osh_infra.nagios.admin
dest:
path: .values.endpoints.nagios.auth.admin
- src:
schema: pegleg/AccountCatalogue/v1
name: osh_infra_service_accounts
path: .osh_infra.prometheus.admin
dest:
path: .values.endpoints.monitoring.auth.admin
- src:
schema: pegleg/AccountCatalogue/v1
name: osh_infra_service_accounts
path: .osh_infra.elasticsearch.admin
dest:
path: .values.endpoints.elasticsearch.auth.admin
# Secrets
- dest:
path: .values.endpoints.nagios.auth.admin.password
src:
schema: deckhand/Passphrase/v1
name: osh_infra_nagios_admin_password
path: .
- dest:
path: .values.endpoints.elasticsearch.auth.admin.password
src:
schema: deckhand/Passphrase/v1
name: osh_infra_elasticsearch_admin_password
path: .
- dest:
path: .values.endpoints.monitoring.auth.admin.password
src:
schema: deckhand/Passphrase/v1
name: osh_infra_prometheus_admin_password
path: .
# LDAP Details
- src:
schema: pegleg/AccountCatalogue/v1
name: osh_infra_service_accounts
path: .osh_infra.ldap.admin
dest:
path: .values.endpoints.ldap.auth.admin
- dest:
path: .values.endpoints.ldap.auth.admin.password
src:
schema: deckhand/Passphrase/v1
name: osh_keystone_ldap_password
path: .
data:
chart_name: nagios
release: nagios
namespace: osh-infra
wait:
timeout: 900
labels:
release_group: airship-nagios
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: airship-nagios
create: []
post:
create: []
values:
conf:
apache:
host: |
<VirtualHost *:80>
<Location />
ProxyPass http://localhost:{{ tuple "nagios" "internal" "nagios" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}/
ProxyPassReverse http://localhost:{{ tuple "nagios" "internal" "nagios" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}/
</Location>
<Proxy *>
AuthName "Nagios"
AuthType Basic
AuthBasicProvider file ldap
AuthUserFile /usr/local/apache2/conf/.htpasswd
AuthLDAPBindDN {{ .Values.endpoints.ldap.auth.admin.bind }}
AuthLDAPBindPassword {{ .Values.endpoints.ldap.auth.admin.password }}
AuthLDAPURL {{ tuple "ldap" "public" "ldap" . | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup" }}
Require valid-user
</Proxy>
</VirtualHost>
labels:
nagios:
node_selector_key: openstack-control-plane
node_selector_value: enabled
job:
node_selector_key: openstack-control-plane
node_selector_value: enabled
pod:
replicas:
nagios: 3
dependencies:
- osh-infra-helm-toolkit
...

Some files were not shown because too many files have changed in this diff Show More