Initial push for NC branch

- New type cruiser introduced
- Sample site stl1 is for reference
- Types other than cruiser are not currently supported by the global configs

Change-Id: Iab46b5352f7976f76673eab53664b6796347dcce
Bose, Arijit (ab7180) 2020-04-27 15:26:29 -05:00
parent 1678cf635f
commit 44dac7678b
491 changed files with 59882 additions and 5095 deletions

View File

@ -7,6 +7,8 @@ metadata:
layeringDefinition:
abstract: false
layer: global
labels:
name: airship-target
data:
signaling: false
assets:

View File

@ -10,29 +10,52 @@ metadata:
substitutions:
- src:
schema: pegleg/AppArmorProfile/v1
name: airship-default
name: calico-node-v1
path: .savePath
dest:
path: .assets[0].path
- src:
schema: pegleg/AppArmorProfile/v1
name: airship-default
name: calico-node-v1
path: .content
dest:
path: .assets[0].data
- src:
schema: pegleg/AppArmorProfile/v1
name: airship-apparmor-loader
name: kubeproxy-v1
path: .savePath
dest:
path: .assets[1].path
- src:
schema: pegleg/AppArmorProfile/v1
name: airship-apparmor-loader
name: kubeproxy-v1
path: .content
dest:
path: .assets[1].data
- src:
schema: pegleg/AppArmorProfile/v1
name: libvirt-v1
path: .savePath
dest:
path: .assets[2].path
- src:
schema: pegleg/AppArmorProfile/v1
name: libvirt-v1
path: .content
dest:
path: .assets[2].data
- src:
schema: pegleg/AppArmorProfile/v1
name: ceph-osd-v1
path: .savePath
dest:
path: .assets[3].path
- src:
schema: pegleg/AppArmorProfile/v1
name: ceph-osd-v1
path: .content
dest:
path: .assets[3].data
data:
signaling: false
assets:
@ -44,4 +67,12 @@ data:
permissions: '600'
data_pipeline:
- utf8_decode
- type: file
permissions: '600'
data_pipeline:
- utf8_decode
- type: file
permissions: '600'
data_pipeline:
- utf8_decode
...
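
The substitution entries above follow the Deckhand pattern used throughout these documents: each entry copies the value at src.path in the named source document into dest.path of this document, which is how the AppArmor profiles' save paths and contents land in the assets list. A minimal Python sketch of that copy step, assuming simple dot-paths with optional [N] list indices (the helper names are hypothetical; Deckhand's real resolver is richer):

import re

def get_at(doc, path):
    # Read a dot-path like '.assets[0].data' out of a nested dict/list.
    node = doc
    for key, idx in re.findall(r'([^.\[\]]+)(?:\[(\d+)\])?', path):
        node = node[key]
        if idx:
            node = node[int(idx)]
    return node

def set_at(doc, path, value):
    # Write a value at a dot-path, descending through dicts and lists.
    parts = re.findall(r'([^.\[\]]+)(?:\[(\d+)\])?', path)
    node = doc
    for key, idx in parts[:-1]:
        node = node[key]
        if idx:
            node = node[int(idx)]
    key, idx = parts[-1]
    if idx:
        node[key][int(idx)] = value
    else:
        node[key] = value

# Hypothetical AppArmorProfile source document and BootAction destination.
profile = {"savePath": "/etc/apparmor.d/calico-node", "content": "<profile body>"}
bootaction = {"assets": [{"path": None, "data": None}]}
set_at(bootaction, ".assets[0].path", get_at(profile, ".savePath"))
set_at(bootaction, ".assets[0].data", get_at(profile, ".content"))
assert bootaction["assets"][0]["path"] == "/etc/apparmor.d/calico-node"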

View File

@ -2,22 +2,29 @@
schema: 'drydock/BootAction/v1'
metadata:
schema: 'metadata/Document/v1'
name: nested-virt
labels:
name: nested-virt-global
application: 'drydock'
name: nested_virt
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: global
storagePolicy: 'cleartext'
labels:
application: 'drydock'
name: nested_virt
data:
signaling: false
node_filter:
filter_set_type: 'union'
filter_set:
- filter_type: 'union'
node_labels:
# execute boot action on compute nodes
openstack-nova-compute: enabled
assets:
- path: /etc/modprobe.d/nested-virt.conf
- path: /etc/modprobe.d/qemu-system-x86.conf
type: file
permissions: '644'
data_pipeline:
- utf8_decode
data: |
options kvm-intel nested=y
options kvm_intel nested=1
...

View File

@ -0,0 +1,31 @@
---
schema: 'drydock/BootAction/v1'
metadata:
schema: 'metadata/Document/v1'
name: disable_cloud_init
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: global
labels:
application: 'drydock'
data:
signaling: false
assets:
- path: /etc/systemd/system/disable_cloud_init.service
type: unit
permissions: '600'
data: |-
[Unit]
Description=Disable Cloud Init after Promenade finishes
After=promjoin.service
[Service]
Type=oneshot
ExecStart=/usr/bin/install -g 0 -o 0 -m 444 -D -T /bin/true /etc/cloud/cloud-init.disabled
[Install]
WantedBy=airship.target
data_pipeline:
- utf8_decode
...

View File

@ -0,0 +1,52 @@
---
schema: 'drydock/BootAction/v1'
metadata:
schema: 'metadata/Document/v1'
name: linux-crashdump-install
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: global
labels:
application: 'drydock'
name: linux-crashdump-install-global
substitutions:
- src:
schema: pegleg/Script/v1
name: linux-crashdump-install
path: .
dest:
path: .assets[2].data
data:
signaling: false
assets:
- type: pkg_list
data:
linux-crashdump: null
- path: /etc/systemd/system/linux-crashdump-install.service
type: unit
permissions: '444'
data: |
[Unit]
Description=Service to update /run/reboot-required to trigger reboot
DefaultDependencies=no
Before=promjoin.service node-reboot.service
After=network-online.target local-fs.target cloud-init.target
ConditionPathExists=!/var/lib/linux-crashdump-reboot.done
[Service]
Type=oneshot
ExecStart=/opt/linux-crashdump-install.sh
RemainAfterExit=true
[Install]
WantedBy=airship.target
data_pipeline:
- utf8_decode
- path: /opt/linux-crashdump-install.sh
type: file
permissions: '700'
data_pipeline:
- utf8_decode
...

View File

@ -0,0 +1,49 @@
---
schema: 'drydock/BootAction/v1'
metadata:
schema: 'metadata/Document/v1'
name: node-reboot
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: global
labels:
application: 'drydock'
name: node-reboot-global
substitutions:
- src:
schema: pegleg/Script/v1
name: node-reboot
path: .
dest:
path: .assets[1].data
data:
signaling: false
assets:
- path: /etc/systemd/system/node-reboot.service
type: unit
permissions: '444'
data: |
[Unit]
Description=Service to manage rebooting a node if required
DefaultDependencies=no
After=linux-crashdump-install.service
Before=promjoin.service
After=network-online.target local-fs.target cloud-init.target
[Service]
Type=oneshot
ExecStart=/opt/node-reboot.sh -s 30
RemainAfterExit=true
[Install]
WantedBy=airship.target
data_pipeline:
- utf8_decode
- path: /opt/node-reboot.sh
type: file
permissions: '700'
data_pipeline:
- utf8_decode
...

View File

@ -0,0 +1,33 @@
---
schema: 'drydock/BootAction/v1'
metadata:
schema: 'metadata/Document/v1'
name: disable-unattended-upgrades
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: global
labels:
application: 'drydock'
data:
signaling: false
assets:
- path: /etc/systemd/system/disable-unattended-upgrades.service
type: unit
permissions: '444'
data: |-
[Unit]
Description=Disable unattended upgrades
After=network-online.target local-fs.target
[Service]
Type=oneshot
# Due to bug https://github.com/systemd/systemd/issues/5133
# on ubuntu 16.04, we have to mash these all on the same line
ExecStart=/bin/bash -c "/bin/chmod 644 /etc/cron.daily/apt-compat ; /bin/systemctl disable apt-daily-upgrade.timer apt-daily.timer ; /bin/systemctl stop apt-daily-upgrade.timer apt-daily.timer"
[Install]
WantedBy=multi-user.target
data_pipeline:
- utf8_decode
...

View File

@ -8,5 +8,3 @@ data:
- global
- type
- site
- cicd # overrides for pipeline automation
...

View File

@ -0,0 +1,39 @@
---
schema: deckhand/DataSchema/v1
metadata:
schema: metadata/Control/v1
name: pegleg/SiteDefinition/v1
data:
$schema: http://json-schema.org/schema#
type: object
properties:
repositories:
type: object
properties:
global:
type: object
properties:
revision:
type: string
url:
type: string
additionalProperties: false
secrets:
type: object
properties:
revision:
type: string
url:
type: string
additionalProperties: false
required:
- global
- secrets
additionalProperties: false
site_type:
type: string
required:
- repositories
- site_type
additionalProperties: false
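
Because this DataSchema embeds a standard JSON Schema under data, a site definition's data block can be sanity-checked offline with the Python jsonschema package. A sketch with an abbreviated copy of the schema above and a hypothetical cruiser site definition:

from jsonschema import validate, ValidationError

# Abbreviated copy of the schema's data block above.
schema = {
    "type": "object",
    "properties": {
        "repositories": {
            "type": "object",
            "properties": {
                "global": {"type": "object"},
                "secrets": {"type": "object"},
            },
            "required": ["global", "secrets"],
            "additionalProperties": False,
        },
        "site_type": {"type": "string"},
    },
    "required": ["repositories", "site_type"],
    "additionalProperties": False,
}

# Hypothetical site definition data block.
site = {
    "repositories": {
        "global": {"url": "ssh://gerrit.example.com/global", "revision": "master"},
        "secrets": {"url": "ssh://gerrit.example.com/secrets", "revision": "master"},
    },
    "site_type": "cruiser",
}

try:
    validate(instance=site, schema=schema)
    print("site definition OK")
except ValidationError as err:
    print(f"invalid: {err.message}")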

View File

@ -0,0 +1,155 @@
---
schema: nc/CorridorConfig/v1
metadata:
schema: metadata/Document/v1
name: corridor-1
layeringDefinition:
abstract: false
layer: global
labels:
corridor: '1'
region: 'global'
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/CommonSoftwareConfig/v1
name: common-software-config
path: .osh.region_name
dest:
path: .infrastructure.dmaap.topic
pattern: REGION
data:
corridor: 'c1'
artifactory:
artfactory_ip: 1.1.1.1
hostnames:
artifacts: artifacts-example.com
docker-open: docker.io
docker: docker.io
repo_urls:
artfactory_web_url: artifacts-example.com/artifactory
infrastructure:
dns:
upstream_servers:
- 8.8.8.8
- 1.1.1.1
# Repeat the same values as above, but formatted as a comma-separated
# string
upstream_servers_joined: 8.8.8.8,1.1.1.1
ntp:
# Verify that a minimum of three (3) ATT NTP servers are reachable in your
# environment; otherwise override them at the site level
# with the correct values for your environment.
#
# NOTE: We use IPs for NTP instead of FQDN because:
# 1. We found problems with large numbers of pools configured for NTP (nodes
# would refuse to time sync). When MaaS sees an FQDN, it configures them
# as "pool", whereas when MaaS sees IPv4 addresses it configures them as
# "server", which does not have this problem with large NTP lists.
# 2. A DNS failure scenario would not result in the common-mode failure of
# nodes to be able to maintain accurate time.
servers:
# time WWT
- 10.255.0.1
# Cloudflare
- 1.1.1.1
smtp:
server: smtp.test.com
ldap:
auth_path: ""
common_name: AP-NC_Test_Users
domain: exampledomain
fqdn: 'its-a'
subdomain: testitservices
proxy:
no_proxy: localhost,127.0.0.1,[::1],0.0.0.0,10.96.0.1,.cluster.local,172.17.0.1,172.18.0.1
scm:
port: 8888
ssh: ssh://user@.example.com:8888
ssh_codecloud: ssh://example.com
url: gerrit.example.com
credentials: jenkins-gerrit-mtn5-key
dmaap:
fqdn: example.com
user: user@example.com
namespace: com.example.com
topic: 29002-NC-astra-log-22790-corr1-REGION-v1
ozone:
ozone-collector:
user: admin-nc
threec:
threec-api:
user: admin-nc
corridor: 1
jenkins:
url: "https://jenkins-example.com"
siteScm:
url: "ssh://:userId@gerrit.example.com/treasuremap"
scm:
security:
url: "ssh://:userId@grrit.example.com/treasuremap"
ldap:
url: "ldaps://example.com:636"
keystone:
ucp:
groups: "AP-NC-NC-Lab-Deployment"
ldap:
group_filter: "(CN=AP-NC-NC-Lab-Deployment)"
airship:
log_level: 'INFO'
log_level_numeric: 20
maas:
ingress_disable_gui: false
ranger:
ranger:
debug_level: ERROR
ranger_keystone_user: admin-nc
customer_domain: nc
user: admin-nc
user_home: /home/admin-nc
ranger_agent_client_cert_path: ''
ranger_agent_client_cert: ''
ranger-agent:
debug_level: ERROR
user: admin-nc
user_home: /home/aic-ord
ranger_agent_keystone_user: ranger-agent-admin
rds_listener_endpoint: https://ranger.example.com/v1/rds/status
calico:
calico_startup_loglevel: 'Debug'
felix_logseverity: 'Info'
cni_log_level: 'Info'
kubernetes_components:
# https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md
# The above link describes different log levels and their respective values.
apiserver_log_level: 2
controller_manager_log_level: 2
scheduler_log_level: 2
proxy_log_level: 2
apiserver_webhook_log_level: 2
nagios:
notification:
snmp:
primary_target: 130.8.117.171:16162
secondary_target: 130.8.117.171:16162
http:
primary_target: http://example.com/events/NC-INFRA-NAGIOS-ALARMS
secondary_target: http://example.com/events/NC-INFRA-NAGIOS-ALARMS
utility:
always_log_user: true
cicd:
generic_pipe:
user: admin
required_role: admin
jenkins:
global_env_vars:
conf_package_path: com.nccicd.config.conf
internal_gerrit_key: jenkins-gerrit-mtn5-key
jenkins_cli: /var/jenkins_home/war/WEB-INF/jenkins-cli.jar
os_project_name: admin
ssh_data: '{"jenkins-gerrit-mtn5-key": {"user": "admin", "resource": "gerrit.example.com"}}'
mylogins:
user: admin
...
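
Two derived values in this document are easy to misread: the substitution at the top replaces the literal REGION token inside .infrastructure.dmaap.topic with .osh.region_name from common-software-config (that is what pattern: REGION requests), and upstream_servers_joined must stay in lockstep with upstream_servers. A small Python sketch of both (the region name is a hypothetical example value):

# 1. 'pattern: REGION' substitution: replace the token with the region name.
region_name = "mtn5"  # hypothetical value of .osh.region_name
topic = "29002-NC-astra-log-22790-corr1-REGION-v1".replace("REGION", region_name)
assert topic == "29002-NC-astra-log-22790-corr1-mtn5-v1"

# 2. upstream_servers_joined is the comma-joined form of upstream_servers.
upstream_servers = ["8.8.8.8", "1.1.1.1"]
upstream_servers_joined = ",".join(upstream_servers)
assert upstream_servers_joined == "8.8.8.8,1.1.1.1"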

View File

@ -0,0 +1,105 @@
---
schema: promenade/KubernetesNetwork/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-network
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
# DNS
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .dns.cluster_domain
dest:
path: .dns.cluster_domain
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .dns.service_ip
dest:
path: .dns.service_ip
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .dns.upstream_servers
dest:
path: .dns.upstream_servers
# NTP
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .ntp.servers
dest:
path: .ntp.servers
# Kubernetes IPs
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.api_service_ip
dest:
path: .kubernetes.service_ip
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.pod_cidr
dest:
path: .kubernetes.pod_cidr
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.service_cidr
dest:
path: .kubernetes.service_cidr
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.apiserver_port
dest:
path: .kubernetes.apiserver_port
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.haproxy_port
dest:
path: .kubernetes.haproxy_port
# etcd IPs
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .etcd.container_port
dest:
path: .etcd.container_port
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .etcd.haproxy_port
dest:
path: .etcd.haproxy_port
# proxy
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .proxy.http
dest:
path: .proxy.url
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .proxy.no_proxy
dest:
path: .proxy.additional_no_proxy
data:
dns:
bootstrap_validation_checks:
- calico-etcd.kube-system.svc.cluster.local
- kubernetes-etcd.kube-system.svc.cluster.local
- kubernetes.default.svc.cluster.local
...
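
The bootstrap_validation_checks above name the cluster-internal DNS records Promenade verifies during bootstrap. An equivalent ad-hoc check from a node whose resolver already points at cluster DNS (a sketch for troubleshooting, not part of the deployment):

import socket

checks = [
    "calico-etcd.kube-system.svc.cluster.local",
    "kubernetes-etcd.kube-system.svc.cluster.local",
    "kubernetes.default.svc.cluster.local",
]

for name in checks:
    try:
        addr = socket.gethostbyname(name)  # resolve via the node's configured DNS
        print(f"{name} -> {addr}")
    except socket.gaierror as err:
        print(f"{name}: unresolved ({err})")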

View File

@ -0,0 +1,103 @@
---
schema: pegleg/CommonAddresses/v1
metadata:
schema: metadata/Document/v1
name: common-addresses
labels:
name: common-addresses-global
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
# DNS
- src:
schema: nc/CorridorConfig/v1
name: corridor-config
path: .infrastructure.dns.upstream_servers
dest:
path: .dns.upstream_servers
- src:
schema: nc/CorridorConfig/v1
name: corridor-config
path: .infrastructure.dns.upstream_servers_joined
dest:
path: .dns.upstream_servers_joined
# LDAP
# NOTE: ldap.base_url below is only used by the 5ec-seaworthy site.
# TODO: remove this substitution once we pin to globals versions
# or refactor that site.
- src:
schema: nc/CorridorConfig/v1
name: corridor-config
path: .infrastructure.ldap.fqdn
dest:
path: .ldap.base_url
- src:
schema: nc/CorridorConfig/v1
name: corridor-config
path: .infrastructure.ldap.auth_path
dest:
path: .ldap.auth_path
- src:
schema: nc/CorridorConfig/v1
name: corridor-config
path: .infrastructure.ldap.common_name
dest:
path: .ldap.common_name
- src:
schema: nc/CorridorConfig/v1
name: corridor-config
path: .infrastructure.ldap.subdomain
dest:
path: .ldap.subdomain
- src:
schema: nc/CorridorConfig/v1
name: corridor-config
path: .infrastructure.ldap.domain
dest:
path: .ldap.domain
# NTP
- src:
schema: nc/CorridorConfig/v1
name: corridor-config
path: .infrastructure.ntp.servers
dest:
path: .ntp.servers
# SMTP
- src:
schema: nc/CorridorConfig/v1
name: corridor-config
path: .infrastructure.smtp.server
dest:
path: .smtp.server
data:
dns:
# Kubernetes cluster domain. Do not change. This is internal to the cluster
# and should *not* be registered with upstream ATT EISS.
cluster_domain: cluster.local
kubernetes:
# misc k8s port settings
apiserver_port: 6443
haproxy_port: 6553
# Defines the k8s-apiserver node port range (default 30000-32767)
service_node_port_range: 30000-32767
# etcd port settings
etcd:
container_port: 2379
haproxy_port: 2378
# Environment proxy information.
# NOTE: Reference NC sites do not deploy behind a proxy, so this proxy section
# is empty.
# However, if you are in a lab that requires proxy, ensure that these proxy
# settings are overridden at the site level and reachable in your environment.
proxy:
http: ""
no_proxy: []
ovs-dpdk:
ovs_bridge_dpdk: 'br-phy-bond0'
...

View File

@ -0,0 +1,22 @@
---
schema: pegleg/NetworkSettings/v1
metadata:
schema: metadata/Document/v1
name: network-settings
layeringDefinition:
abstract: false
layer: global
labels:
network-settings: default
storagePolicy: cleartext
data:
mtu:
kubernetes: 9000
neutron: 9150
oam: 9000
oob: 1500
overlay: 9150
primary_bond: 9214
pxe: 1500
storage: 9000
...

View File

@ -0,0 +1,22 @@
---
schema: promenade/PKICatalog/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-agg-api
labels:
name: kubernetes-agg-api
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data:
certificate_authorities:
kubernetes-agg-api:
description: CA for Kubernetes Aggregated API
certificates:
- document_name: apiserver-proxy
description: Aggregation API proxy certificate for Kubernetes apiserver
common_name: aggregator
kubernetes_service_names:
- kubernetes.default.svc.cluster.local
...

View File

@ -0,0 +1,22 @@
---
schema: promenade/PKICatalog/v1
metadata:
schema: metadata/Document/v1
name: cnx-catalog
labels:
name: cnx-catalog
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data:
certificate_authorities:
cnx-api:
description: CA for tsee cnx API
certificates:
- document_name: cnx-api
description: Server certificate for tsee cnx extension API
common_name: cnx-api
kubernetes_service_names:
- cnx-api.kube-system.svc.cluster.local
...

View File

@ -0,0 +1,17 @@
---
schema: promenade/PKICatalog/v1
metadata:
schema: metadata/Document/v1
name: patroni-replication
labels:
name: patroni-replication
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data:
certificate_authorities:
patroni-replication:
description: CA for generating Patroni replication client certificates
certificates: []
...

View File

@ -1,10 +1,14 @@
---
# The purpose of this file is to apply the proper labels to the Genesis node
# so that the proper services are installed and the proper configuration is
# applied. This may need to be changed for a new site if it diverges from a
# standard deployment that contains Airship, OSH-Infra, and OSH.
schema: promenade/Genesis/v1
metadata:
schema: metadata/Document/v1
name: genesis-global
name: genesis
layeringDefinition:
abstract: true
abstract: false
layer: global
labels:
name: genesis-global
@ -61,6 +65,18 @@ metadata:
path: .genesis.ip
dest:
path: .ip
- src:
schema: nc/ControlPlaneAddresses/v1
name: control-plane-addresses
path: .genesis.ip.oam
dest:
path: .external_ip
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .dns.node_domain
dest:
path: .domain
# Command prefix
- src:
@ -86,36 +102,66 @@ metadata:
dest:
path: .apiserver.encryption
# Aggregation API configuration
- src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes-agg-api
path: .
dest:
path: .files[2].content
- src:
schema: deckhand/Certificate/v1
name: apiserver-proxy
path: .
dest:
path: .files[3].content
- src:
schema: deckhand/CertificateKey/v1
name: apiserver-proxy
path: .
dest:
path: .files[4].content
data:
apiserver:
command_prefix:
- /hyperkube
- kube-apiserver
arguments:
- --authorization-mode=Node,RBAC
- --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,NodeRestriction,EventRateLimit
- --enable-admission-plugins=PodSecurityPolicy,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,NodeRestriction,EventRateLimit
- --service-cluster-ip-range=SERVICE_CIDR
- --service-node-port-range=SERVICE_NODE_PORT_RANGE
- --endpoint-reconciler-type=lease
- --feature-gates=PodShareProcessNamespace=true
- --feature-gates=PodShareProcessNamespace=true,TaintBasedEvictions=false
- --v=3
- --admission-control-config-file=/etc/kubernetes/apiserver/acconfig.yaml
- --experimental-encryption-provider-config=/etc/kubernetes/apiserver/encryption_provider.yaml
- --requestheader-allowed-names='aggregator'
- --encryption-provider-config=/etc/kubernetes/apiserver/encryption_provider.yaml
- '--requestheader-client-ca-file=/etc/kubernetes/apiserver/agg-api-ca.pem'
- '--requestheader-extra-headers-prefix=X-Remote-Extra-'
- '--requestheader-group-headers=X-Remote-Group'
- '--requestheader-username-headers=X-Remote-User'
- '--requestheader-allowed-names=aggregator'
- '--proxy-client-key-file=/etc/kubernetes/apiserver/apiserver-proxy-key.pem'
- '--proxy-client-cert-file=/etc/kubernetes/apiserver/apiserver-proxy-cert.pem'
armada:
target_manifest: cluster-bootstrap
haproxy:
run_as_user: 65534
metrics:
output_dir: /var/log/node-exporter-textfiles
tiller:
storage: secret
labels:
dynamic:
- beta.kubernetes.io/fluentd-ds-ready=true
- tenant-ceph-control-plane=enabled
- calico-etcd=enabled
- tenant-ceph-mon=enabled
- tenant-ceph-rgw=enabled
- tenant-ceph-mgr=enabled
- ceph-mds=enabled
- ceph-mon=enabled
- ceph-osd=enabled
- ceph-rgw=enabled
- ceph-mgr=enabled
- tenant-ceph-control-plane=enabled
- tenant-ceph-mon=enabled
- tenant-ceph-rgw=enabled
- tenant-ceph-mgr=enabled
- kube-dns=enabled
- kube-ingress=enabled
- kubernetes-apiserver=enabled
@ -126,12 +172,22 @@ data:
- ucp-control-plane=enabled
- maas-rack=enabled
- maas-region=enabled
- openstack-control-plane=enabled
- openstack-l3-agent=enabled
- openstack-dhcp-agent=enabled
- openstack-metadata-agent=enabled
- openstack-neutron-server=enabled
- openvswitch=enabled
- openstack-l3-agent=enabled
- node-exporter=enabled
- utility=enabled
- fluentd=enabled
- hosttype=nc-cp-adv
- sriov=enabled
- elasticsearch-data=enabled
- elasticsearch-client=enabled
- elasticsearch-master=enabled
- prometheus-server=enabled
files:
- path: /var/lib/anchor/calico-etcd-bootstrap
content: "# placeholder for triggering calico etcd bootstrapping\n# this file will be deleted"
mode: 0644
- path: /etc/genesis/apiserver/acconfig.yaml
mode: 0444
content: |
@ -149,3 +205,14 @@ data:
- type: Server
qps: 1000
burst: 10000
- path: /etc/genesis/apiserver/agg-api-ca.pem
mode: 0400
- path: /etc/genesis/apiserver/apiserver-proxy-cert.pem
mode: 0400
- path: /etc/genesis/apiserver/apiserver-proxy-key.pem
mode: 0400
- path: /var/lib/anchor/calico-etcd-bootstrap
content: "# placeholder for triggering calico etcd bootstrapping"
mode: 0644
haproxy:
run_as_user: "0"

View File

@ -0,0 +1,80 @@
---
schema: 'drydock/HardwareProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: dell_r640_purley_adv_5ec
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: global
data:
vendor: 'Dell'
# Data in this block is probably inaccurate, but also
# not yet used
generation: '8'
hw_version: '3'
bios_version: '2.2.3'
boot_mode: bios
bootstrap_protocol: pxe
pxe_interface: 0
# End inaccurate data block
device_aliases:
pxe_nic01:
# eno3
address: '0000:01:00.0'
dev_type: 'Gig NIC'
bus_type: 'pci'
gp_nic01:
# enp59s0f1
address: '0000:3b:00.1'
dev_type: 'Intel XXV710 NIC'
bus_type: 'pci'
dp_nic01:
# enp59s0f0
address: '0000:3b:00.0'
dev_type: 'Intel XXV710 NIC'
bus_type: 'pci'
gp_nic02:
# enp216s0f0
address: '0000:d8:00.0'
dev_type: 'Intel XXV710 NIC'
bus_type: 'pci'
dp_nic02:
# enp216s0f1
address: '0000:d8:00.1'
dev_type: 'Intel XXV710 NIC'
bus_type: 'pci'
bootdisk:
# /dev/sda
address: '0:2.0.0'
dev_type: 'Internal RAID-1 SSDs'
bus_type: 'scsi'
ephemeral:
# /dev/sdd
address: '0:2.3.0'
dev_type: 'Internal RAID-10 HDDs'
bus_type: 'scsi'
cpu_sets:
# CPUs pinned to nova
# Host OS CPUs are inferred, and will be the remaining cores
isolcpus: '4-43,48-87'
# Kernel config
# Reduce OS jitter on the offloaded CPUs.
rcu_nocbs: '4-43,48-87'
# Nova config
# CPUs dedicated to tenant workload.
vcpu_pin_set: '8-43,52-87'
# OVS config
# CPUs used by OVS-DPDK processes, same as CPUs used by host OS.
# VCPUs 0,44,1,45,2,46,3,47 = first 4 CPU cores
dpdk-lcore-mask: '0xF0000000000F'
# OVS config
# CPUs used by dpdk Poll Mode Drivers (PMD)
# OVS config parameter for DPDK.
# VCPUs 4,48,5,49,6,50,7,51 = CPU cores 4-7
pmd-cpu-mask: '0x0F0000000000F0'
hugepages:
dpdk:
size: '1G'
count: 320
...
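
The dpdk-lcore-mask and pmd-cpu-mask comments above encode vCPU lists as hex bitmasks with one bit per vCPU (bit N set means vCPU N is used). A quick Python check that the documented masks match the listed vCPUs (hex() drops the profile's leading zero, but the values are equal):

def cpu_mask(vcpus):
    # Build an OVS-DPDK CPU mask: set bit N for each vCPU N in the list.
    mask = 0
    for cpu in vcpus:
        mask |= 1 << cpu
    return hex(mask)

# VCPUs 0,44,1,45,2,46,3,47 = first 4 physical cores (with HT siblings)
assert cpu_mask([0, 44, 1, 45, 2, 46, 3, 47]) == "0xf0000000000f"
# VCPUs 4,48,5,49,6,50,7,51 = physical cores 4-7
assert cpu_mask([4, 48, 5, 49, 6, 50, 7, 51]) == "0xf0000000000f0"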

View File

@ -0,0 +1,73 @@
---
schema: 'drydock/HardwareProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: dell_r740_purley_5ec
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: global
data:
vendor: 'Dell'
# Data in this block is probably inaccurate, but also
# not yet used
generation: '8'
hw_version: '3'
bios_version: '2.2.3'
boot_mode: bios
bootstrap_protocol: pxe
pxe_interface: 0
# End inaccurate data block
device_aliases:
pxe_nic01:
# eno4
address: '0000:01:00.1'
dev_type: 'Gig NIC'
bus_type: 'pci'
sriov_nic01:
# enp94s0f0
address: '0000:5e:00.0'
dev_type: 'Intel XXV710 NIC'
bus_type: 'pci'
gp_nic01:
# enp94s0f1
address: '0000:5e:00.1'
dev_type: 'Intel XXV710 NIC'
bus_type: 'pci'
sriov_nic02:
# enp135s0f1
address: '0000:87:00.1'
dev_type: 'Intel XXV710 NIC'
bus_type: 'pci'
gp_nic02:
# enp135s0f0
address: '0000:87:00.0'
dev_type: 'Intel XXV710 NIC'
bus_type: 'pci'
bootdisk:
# /dev/sda
address: '0:2.0.0'
dev_type: 'Internal RAID-1 SSDs'
bus_type: 'scsi'
cephjournal1:
# /dev/sdb
address: '0:2.1.0'
dev_type: 'Internal JBOD SSD1'
bus_type: 'scsi'
cephjournal2:
# /dev/sdc
address: '0:2.2.0'
dev_type: 'Internal JBOD SSD2'
bus_type: 'scsi'
ephemeral:
# /dev/sdd
address: '0:2.3.0'
dev_type: 'Internal RAID-10 HDDs'
bus_type: 'scsi'
cpu_sets:
kvm: '4-43,48-87'
hugepages:
dpdk:
size: '1G'
count: 300
...

View File

@ -0,0 +1,57 @@
---
schema: 'drydock/HardwareProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: dell_730xd_compute
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: global
data:
vendor: 'Dell'
# Data in this block is probably inaccurate, but also
# not yet used
generation: '8'
hw_version: '3'
bios_version: '2.2.3'
boot_mode: bios
bootstrap_protocol: pxe
pxe_interface: 0
# End inaccurate data block
device_aliases:
pxe_nic01:
# eno3
address: '0000:06:00.0'
dev_type: 'Gig NIC'
bus_type: 'pci'
gp_nic01:
# enp131s0f1
address: '0000:83:00.1'
dev_type: 'Intel 10G X520 NIC'
bus_type: 'pci'
gp_nic02:
# enp3s0f0
address: '0000:03:00.0'
dev_type: 'Intel 10G X520 NIC'
bus_type: 'pci'
bootdisk:
# /dev/sda
address: '0:2.0.0'
dev_type: 'Internal RAID-1 HDDs'
bus_type: 'scsi'
cephjournal1:
# /dev/sdb
address: '0:2.1.0'
dev_type: 'Internal JBOD SSD1'
bus_type: 'scsi'
cephjournal2:
# /dev/sdc
address: '0:2.2.0'
dev_type: 'Internal JBOD SSD2'
bus_type: 'scsi'
ephemeral:
# /dev/sdd
address: '0:2.3.0'
dev_type: 'Internal RAID-6 HDDs'
bus_type: 'scsi'
...

View File

@ -0,0 +1,52 @@
---
schema: 'drydock/HardwareProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: dell_730xd_cp
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: global
data:
vendor: 'Dell'
# Data in this block is probably inaccurate, but also
# not yet used
generation: '8'
hw_version: '3'
bios_version: '2.2.3'
boot_mode: bios
bootstrap_protocol: pxe
pxe_interface: 0
# End inaccurate data block
device_aliases:
pxe_nic01:
# eno3
address: '0000:06:00.0'
dev_type: 'I350 Gig NIC'
bus_type: 'pci'
gp_nic01:
# enp131s0f1
address: '0000:83:00.1'
dev_type: 'Intel 10G X520 NIC'
bus_type: 'pci'
gp_nic02:
# enp3s0f0
address: '0000:03:00.0'
dev_type: 'Intel 10G X520 NIC'
bus_type: 'pci'
bootdisk:
# /dev/sda
address: '0:2.0.0'
dev_type: 'Internal RAID-1 HDDs'
bus_type: 'scsi'
cephjournal1:
# /dev/sdb
address: '0:2.1.0'
dev_type: 'Internal JBOD SSD1'
bus_type: 'scsi'
cephjournal2:
# /dev/sdc
address: '0:2.2.0'
dev_type: 'Internal JBOD SSD2'
bus_type: 'scsi'
...

View File

@ -0,0 +1,47 @@
---
schema: 'drydock/HardwareProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: hp_dl380_compute
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: global
data:
vendor: 'HP'
# Data in this block is probably inaccurate, but also
# not yet used
generation: '8'
hw_version: '3'
bios_version: '2.2.3'
boot_mode: bios
bootstrap_protocol: pxe
pxe_interface: 0
# End inaccurate data block
device_aliases:
pxe_nic01:
# eno1
address: '0000:02:00.0'
dev_type: 'Gig NIC'
bus_type: 'pci'
gp_nic01:
# ens6f0
address: '0000:81:00.0'
dev_type: 'Intel 10 Gig'
bus_type: 'pci'
gp_nic02:
# ens3f1
address: '0000:08:00.1'
dev_type: 'Intel 10 Gig'
bus_type: 'pci'
bootdisk:
# /dev/sda
address: '0:1.0.0'
dev_type: 'Internal RAID-1 HDDs'
bus_type: 'scsi'
ephemeral:
# /dev/sdb
address: '0:1.0.1'
dev_type: 'Internal RAID-6 HDDs'
bus_type: 'scsi'
...

View File

@ -0,0 +1,84 @@
---
schema: 'drydock/HardwareProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: dell_r740_purley_adv_nc
storagePolicy: 'cleartext'
labels:
hardwaretype: dell_r740_purley_adv_nc
layeringDefinition:
abstract: false
layer: global
data:
vendor: 'Dell'
# Data in this block is probably inaccurate, but also
# not yet used
generation: '10'
hw_version: '3'
bios_version: '2.3.3'
boot_mode: bios
bootstrap_protocol: pxe
pxe_interface: 4
# End inaccurate data block
device_aliases:
pxe_nic01:
# eno4
address: '0000:01:00.1'
dev_type: 'Gig NIC'
bus_type: 'pci'
dp_nic01:
# enp94s0f0
address: '0000:5e:00.0'
dev_type: 'Intel XXV710 NIC'
bus_type: 'pci'
gp_nic01:
# enp94s0f1
address: '0000:5e:00.1'
dev_type: 'Intel XXV710 NIC'
bus_type: 'pci'
dp_nic02:
# enp135s0f1
address: '0000:87:00.1'
dev_type: 'Intel XXV710 NIC'
bus_type: 'pci'
gp_nic02:
# enp135s0f0
address: '0000:87:00.0'
dev_type: 'Intel XXV710 NIC'
bus_type: 'pci'
# Disk layout per https://wiki.web.labs.att.com/display/NCID/904-03-04%3A+Compute+Host+Storage+Ephemeral+Design
bootdisk:
# /dev/sda
address: '0:2.0.0'
dev_type: 'Internal RAID-1 SSDs'
bus_type: 'scsi'
ephemeral:
# /dev/sdj
address: '0:2.9.0'
dev_type: 'Internal RAID-1 SSDs'
bus_type: 'scsi'
cpu_sets:
# CPUs pinned to nova
# Host OS CPUs are inferred, and will be the remaining cores
isolcpus: '4-43,48-87'
# Kernel config
# Reduce OS jitter on the offloaded CPUs.
rcu_nocbs: '4-43,48-87'
# Nova config
# CPUs dedicated to tenant workload.
vcpu_pin_set: '8-43,52-87'
# OVS config
# CPUs used by OVS-DPDK processes, same as CPUs used by host OS.
# VCPUs 0,44,1,45,2,46,3,47 = first 4 CPU cores
dpdk-lcore-mask: '0xF0000000000F'
# OVS config
# CPUs used by dpdk Poll Mode Drivers (PMD)
# OVS config parameter for DPDK.
# VCPUs 4,48,5,49,6,50,7,51 = CPU cores 4-7
pmd-cpu-mask: '0x0F0000000000F0'
hugepages:
dpdk:
size: '1G'
# This value is only applicable to the dp profile
count: 320
...

View File

@ -0,0 +1,63 @@
---
schema: 'drydock/HardwareProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: dell_r740_purley_nc
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: global
data:
vendor: 'Dell'
# Data in this block is probably inaccurate, but also
# not yet used
generation: '10'
hw_version: '3'
bios_version: '2.3.3'
boot_mode: bios
bootstrap_protocol: pxe
pxe_interface: 4
# End inaccurate data block
device_aliases:
pxe_nic01:
# eno4
address: '0000:01:00.1'
dev_type: 'Gig NIC'
bus_type: 'pci'
dp_nic01:
# enp94s0f0
address: '0000:5e:00.0'
dev_type: 'Intel XXV710 NIC'
bus_type: 'pci'
gp_nic01:
# enp94s0f1
address: '0000:5e:00.1'
dev_type: 'Intel XXV710 NIC'
bus_type: 'pci'
dp_nic02:
# enp135s0f1
address: '0000:87:00.1'
dev_type: 'Intel XXV710 NIC'
bus_type: 'pci'
gp_nic02:
# enp135s0f0
address: '0000:87:00.0'
dev_type: 'Intel XXV710 NIC'
bus_type: 'pci'
bootdisk:
# /dev/sda
address: '0:2.0.0'
dev_type: 'Internal RAID-1 SSDs'
bus_type: 'scsi'
ephemeral:
# /dev/sdd
address: '0:2.3.0'
dev_type: 'Internal RAID-10 HDDs'
bus_type: 'scsi'
cpu_sets:
kvm: '4-43,48-87'
hugepages:
dpdk:
size: '1G'
count: 320
...

View File

@ -0,0 +1,162 @@
---
schema: drydock/HostProfile/v1
metadata:
schema: metadata/Document/v1
name: 5ec-cp-global
storagePolicy: cleartext
labels:
hosttype: 5ec-cp
layeringDefinition:
abstract: true
layer: global
substitutions:
- dest:
path: .oob.credential
src:
schema: deckhand/Passphrase/v1
name: ipmi_admin_password
path: .
data:
oob:
type: 'ipmi'
network: 'oob'
account: 'tier4'
primary_network: 'oam'
hardware_profile: dell_r640_purley_5ec
interfaces:
pxe:
device_link: pxe
slaves:
- 'pxe_nic'
networks:
- 'pxe'
bond1:
device_link: bond1
slaves:
- 'gp_nic01'
- 'gp_nic02'
networks:
- 'oam'
- 'storage'
- 'overlay'
- 'ksn'
storage:
physical_devices:
bootdisk:
labels:
bootdrive: 'true'
partitions:
- name: 'root'
size: '30g'
bootable: true
filesystem:
mountpoint: '/'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'boot'
size: '1g'
filesystem:
mountpoint: '/boot'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var_log'
size: '100g'
filesystem:
mountpoint: '/var/log'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var'
size: '>300g'
filesystem:
mountpoint: '/var'
fstype: 'ext4'
mount_options: 'defaults'
cephjournal1:
partitions:
- name: 'ceph-ssd1-j1'
size: '10g'
- name: 'ceph-ssd1-j2'
size: '10g'
- name: 'ceph-ssd1-j3'
size: '10g'
cephjournal2:
partitions:
- name: 'ceph-ssd2-j4'
size: '10g'
- name: 'ceph-ssd2-j5'
size: '10g'
- name: 'ceph-ssd2-j6'
size: '10g'
platform:
image: 'xenial'
kernel: 'hwe-16.04'
kernel_params:
console: 'ttyS1,115200n8'
cgroup_disable: 'hugetlb'
metadata:
owner_data:
hosttype: 5ec-cp
control-plane: enabled
ucp-control-plane: enabled
openstack-control-plane: enabled
openstack-heat: enabled
openstack-keystone: enabled
openstack-rabbitmq: enabled
openstack-dns-helper: enabled
openstack-mariadb: enabled
openstack-nova-control: enabled
openstack-etcd: enabled
openstack-mistral: enabled
openstack-memcached: enabled
openstack-glance: enabled
openstack-horizon: enabled
openstack-cinder-control: enabled
openstack-cinder-volume: control
openstack-neutron: enabled
openstack-ranger-agent: enabled
openstack-l3-agent: enabled
openstack-dhcp-agent: enabled
openstack-metadata-agent: enabled
openstack-neutron-server: enabled
openvswitch: enabled
ucp-barbican: enabled
tenant-ceph-control-plane: enabled
tenant-ceph-mon: enabled
tenant-ceph-rgw: enabled
tenant-ceph-mgr: enabled
ceph-bootstrap: enabled
ceph-mon: enabled
ceph-mgr: enabled
ceph-osd: enabled
ceph-mds: enabled
ceph-rgw: enabled
ucp-maas: enabled
kube-dns: enabled
kubernetes-apiserver: enabled
kubernetes-controller-manager: enabled
kubernetes-etcd: enabled
kubernetes-scheduler: enabled
tiller-helm: enabled
kube-etcd: enabled
calico-policy: enabled
calico-node: enabled
ucp-armada: enabled
ucp-drydock: enabled
ucp-deckhand: enabled
ucp-shipyard: enabled
IAM: enabled
ucp-promenade: enabled
prometheus-server: enabled
prometheus-client: enabled
fluentd: enabled
influxdb: enabled
kibana: enabled
elasticsearch-client: enabled
elasticsearch-master: enabled
elasticsearch-data: enabled
postgresql: enabled
kube-ingress: enabled
beta.kubernetes.io/fluentd-ds-ready: 'true'
node-exporter: enabled
fluentbit: enabled
...

View File

@ -0,0 +1,127 @@
---
schema: drydock/HostProfile/v1
metadata:
schema: metadata/Document/v1
name: 5ec-ns-cpt-global
storagePolicy: cleartext
labels:
hosttype: 5ec-ns-cpt
layeringDefinition:
abstract: true
layer: global
substitutions:
- dest:
path: .oob.credential
src:
schema: deckhand/Passphrase/v1
name: ipmi_admin_password
path: .
data:
oob:
type: 'ipmi'
network: 'oob'
account: 'tier4'
primary_network: 'oam'
hardware_profile: dell_r640_purley_5ec
interfaces:
pxe:
device_link: pxe
slaves:
- 'pxe_nic'
networks:
- 'pxe'
bond1:
device_link: bond1
slaves:
- 'gp_nic01'
- 'gp_nic02'
networks:
- 'oam'
- 'storage'
- 'overlay'
- 'ksn'
p1p1:
slaves:
- 'sriov_nic01'
sriov:
vf_count: 32
trustedmode: false
p3p2:
slaves:
- 'sriov_nic02'
sriov:
vf_count: 32
trustedmode: false
storage:
physical_devices:
bootdisk:
labels:
bootdrive: 'true'
partitions:
- name: 'root'
size: '30g'
bootable: true
filesystem:
mountpoint: '/'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'boot'
size: '1g'
filesystem:
mountpoint: '/boot'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var_log'
size: '100g'
filesystem:
mountpoint: '/var/log'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var'
size: '>300g'
filesystem:
mountpoint: '/var'
fstype: 'ext4'
mount_options: 'defaults'
cephjournal1:
partitions:
- name: 'ceph-ssd1-j1'
size: '10g'
cephjournal2:
partitions:
- name: 'ceph-ssd2-j2'
size: '10g'
ephemeral:
partitions:
- name: 'nova_instance'
size: '99%'
filesystem:
mountpoint: '/var/lib/nova'
fstype: 'ext4'
mount_options: 'defaults'
platform:
image: 'xenial'
kernel: 'hwe-16.04'
kernel_params:
console: 'ttyS1,115200n8'
intel_iommu: 'on'
iommu: 'pt'
amd_iommu: 'on'
cgroup_disable: 'hugetlb'
transparent_hugepage: 'never'
hugepagesz: 'hardwareprofile:hugepages.dpdk.size'
hugepages: 'hardwareprofile:hugepages.dpdk.count'
default_hugepagesz: 'hardwareprofile:hugepages.dpdk.size'
isolcpus: 'hardwareprofile:cpuset.kvm'
metadata:
owner_data:
hosttype: 5ec-ns-cpt
tenant-ceph-osd: enabled
openstack-nova-compute: enabled
openvswitch: enabled
sriov: enabled
openstack-libvirt: kernel
beta.kubernetes.io/fluentd-ds-ready: 'true'
node-exporter: enabled
fluentbit: enabled
...

View File

@ -0,0 +1,127 @@
---
schema: drydock/HostProfile/v1
metadata:
schema: metadata/Document/v1
name: 5ec-ns-r640
storagePolicy: cleartext
labels:
hosttype: 5ec-ns-r640
layeringDefinition:
abstract: false
layer: global
substitutions:
- dest:
path: .oob.credential
src:
schema: deckhand/Passphrase/v1
name: ipmi_admin_password
path: .
data:
oob:
type: 'ipmi'
network: 'oob'
account: 'tier4'
primary_network: 'oam'
hardware_profile: dell_r640_purley_5ec
interfaces:
pxe:
device_link: pxe
slaves:
- 'pxe_nic'
networks:
- 'pxe'
bond1:
device_link: bond1
slaves:
- 'gp_nic01'
- 'gp_nic02'
networks:
- 'oam'
- 'storage'
- 'overlay'
- 'ksn'
p1p1:
slaves:
- 'sriov_nic01'
sriov:
vf_count: 32
trustedmode: false
p3p2:
slaves:
- 'sriov_nic02'
sriov:
vf_count: 32
trustedmode: false
storage:
physical_devices:
bootdisk:
labels:
bootdrive: 'true'
partitions:
- name: 'root'
size: '30g'
bootable: true
filesystem:
mountpoint: '/'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'boot'
size: '1g'
filesystem:
mountpoint: '/boot'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var_log'
size: '100g'
filesystem:
mountpoint: '/var/log'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var'
size: '>300g'
filesystem:
mountpoint: '/var'
fstype: 'ext4'
mount_options: 'defaults'
cephjournal1:
partitions:
- name: 'ceph-ssd1-j1'
size: '10g'
cephjournal2:
partitions:
- name: 'ceph-ssd2-j2'
size: '10g'
ephemeral:
partitions:
- name: 'nova_instance'
size: '99%'
filesystem:
mountpoint: '/var/lib/nova'
fstype: 'ext4'
mount_options: 'defaults'
platform:
image: 'xenial'
kernel: 'hwe-16.04'
kernel_params:
console: 'ttyS1,115200n8'
intel_iommu: 'on'
iommu: 'pt'
amd_iommu: 'on'
cgroup_disable: 'hugetlb'
transparent_hugepage: 'never'
hugepagesz: 'hardwareprofile:hugepages.dpdk.size'
hugepages: 'hardwareprofile:hugepages.dpdk.count'
default_hugepagesz: 'hardwareprofile:hugepages.dpdk.size'
isolcpus: 'hardwareprofile:cpuset.kvm'
metadata:
owner_data:
hosttype: 5ec-ns-r640
tenant-ceph-osd: enabled
openstack-nova-compute: enabled
openvswitch: enabled
sriov: enabled
openstack-libvirt: kernel
beta.kubernetes.io/fluentd-ds-ready: 'true'
node-exporter: enabled
fluentbit: enabled
...

View File

@ -0,0 +1,127 @@
---
schema: drydock/HostProfile/v1
metadata:
schema: metadata/Document/v1
name: 5ec-ns-tools-global
storagePolicy: cleartext
labels:
hosttype: 5ec-ns-tools
layeringDefinition:
abstract: true
layer: global
substitutions:
- dest:
path: .oob.credential
src:
schema: deckhand/Passphrase/v1
name: ipmi_admin_password
path: .
data:
oob:
type: 'ipmi'
network: 'oob'
account: 'tier4'
primary_network: 'oam'
hardware_profile: dell_r640_purley_5ec
interfaces:
pxe:
device_link: pxe
slaves:
- 'pxe_nic'
networks:
- 'pxe'
bond1:
device_link: bond1
slaves:
- 'gp_nic01'
- 'gp_nic02'
networks:
- 'oam'
- 'storage'
- 'ksn'
- 'overlay'
p1p1:
slaves:
- 'sriov_nic01'
sriov:
vf_count: 32
trustedmode: true
p3p2:
slaves:
- 'sriov_nic02'
sriov:
vf_count: 32
trustedmode: true
storage:
physical_devices:
bootdisk:
labels:
bootdrive: 'true'
partitions:
- name: 'root'
size: '30g'
bootable: true
filesystem:
mountpoint: '/'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'boot'
size: '1g'
filesystem:
mountpoint: '/boot'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var_log'
size: '100g'
filesystem:
mountpoint: '/var/log'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var'
size: '>300g'
filesystem:
mountpoint: '/var'
fstype: 'ext4'
mount_options: 'defaults'
cephjournal1:
partitions:
- name: 'ceph-ssd1-j1'
size: '10g'
cephjournal2:
partitions:
- name: 'ceph-ssd2-j2'
size: '10g'
ephemeral:
partitions:
- name: 'nova_instance'
size: '99%'
filesystem:
mountpoint: '/var/lib/nova'
fstype: 'ext4'
mount_options: 'defaults'
platform:
image: 'xenial'
kernel: 'hwe-16.04'
kernel_params:
console: 'ttyS1,115200n8'
intel_iommu: 'on'
iommu: 'pt'
amd_iommu: 'on'
cgroup_disable: 'hugetlb'
transparent_hugepage: 'never'
hugepagesz: 'hardwareprofile:hugepages.dpdk.size'
hugepages: 'hardwareprofile:hugepages.dpdk.count'
default_hugepagesz: 'hardwareprofile:hugepages.dpdk.size'
isolcpus: 'hardwareprofile:cpuset.kvm'
metadata:
owner_data:
hosttype: '5ec-ns-tools'
tenant-ceph-osd: enabled
openstack-nova-compute: enabled
openvswitch: enabled
sriov: enabled
openstack-libvirt: kernel
beta.kubernetes.io/fluentd-ds-ready: 'true'
node-exporter: enabled
fluentbit: enabled
...

View File

@ -0,0 +1,127 @@
---
schema: drydock/HostProfile/v1
metadata:
schema: metadata/Document/v1
name: 5ec-ns-upt-global
storagePolicy: cleartext
labels:
hosttype: 5ec-ns-upt
layeringDefinition:
abstract: true
layer: global
substitutions:
- dest:
path: .oob.credential
src:
schema: deckhand/Passphrase/v1
name: ipmi_admin_password
path: .
data:
oob:
type: 'ipmi'
network: 'oob'
account: 'tier4'
primary_network: 'oam'
hardware_profile: dell_r640_purley_5ec
interfaces:
pxe:
device_link: pxe
slaves:
- 'pxe_nic'
networks:
- 'pxe'
bond1:
device_link: bond1
slaves:
- 'gp_nic01'
- 'gp_nic02'
networks:
- 'oam'
- 'storage'
- 'overlay'
- 'ksn'
p1p1:
slaves:
- 'sriov_nic01'
sriov:
vf_count: 32
trustedmode: false
p3p2:
slaves:
- 'sriov_nic02'
sriov:
vf_count: 32
trustedmode: false
storage:
physical_devices:
bootdisk:
labels:
bootdrive: 'true'
partitions:
- name: 'root'
size: '30g'
bootable: true
filesystem:
mountpoint: '/'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'boot'
size: '1g'
filesystem:
mountpoint: '/boot'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var_log'
size: '100g'
filesystem:
mountpoint: '/var/log'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var'
size: '>300g'
filesystem:
mountpoint: '/var'
fstype: 'ext4'
mount_options: 'defaults'
cephjournal1:
partitions:
- name: 'ceph-ssd1-j1'
size: '10g'
cephjournal2:
partitions:
- name: 'ceph-ssd2-j2'
size: '10g'
ephemeral:
partitions:
- name: 'nova_instance'
size: '99%'
filesystem:
mountpoint: '/var/lib/nova'
fstype: 'ext4'
mount_options: 'defaults'
platform:
image: 'xenial'
kernel: 'hwe-16.04'
kernel_params:
console: 'ttyS1,115200n8'
intel_iommu: 'on'
iommu: 'pt'
amd_iommu: 'on'
cgroup_disable: 'hugetlb'
transparent_hugepage: 'never'
hugepagesz: 'hardwareprofile:hugepages.dpdk.size'
hugepages: 'hardwareprofile:hugepages.dpdk.count'
default_hugepagesz: 'hardwareprofile:hugepages.dpdk.size'
isolcpus: 'hardwareprofile:cpuset.kvm'
metadata:
owner_data:
hosttype: 5ec-ns-upt
tenant-ceph-osd: enabled
openstack-nova-compute: enabled
openvswitch: enabled
sriov: enabled
openstack-libvirt: kernel
beta.kubernetes.io/fluentd-ds-ready: 'true'
node-exporter: enabled
fluentbit: enabled
...

View File

@ -41,17 +41,20 @@ data:
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var'
size: '>100g'
size: '>300g'
filesystem:
mountpoint: '/var'
fstype: 'ext4'
mount_options: 'defaults'
platform:
image: 'xenial'
kernel: 'hwe-16.04'
kernel: 'ga-16.04'
kernel_params:
kernel_package: 'linux-image-4.15.0-46-generic'
# bug ks3019: these kernel parameters cause PXE boot to hang
# for HP DL380G9 nodes (occurs at the MaaS Deploy stage)
# TODO: find a working version or remove
# console: 'ttyS1,115200n8'
cgroup_disable: 'hugetlb'
metadata:
owner_data:
control-plane: enabled
@ -71,8 +74,14 @@ data:
openstack-cinder-control: enabled
openstack-cinder-volume: control
openstack-neutron: enabled
openstack-l3-agent: enabled
openstack-dhcp-agent: enabled
openstack-metadata-agent: enabled
openstack-neutron-server: enabled
openstack-ranger-agent: enabled
openvswitch: enabled
ucp-barbican: enabled
ceph-bootstrap: enabled
ceph-mon: enabled
ceph-mgr: enabled
ceph-osd: enabled
@ -82,8 +91,7 @@ data:
tenant-ceph-mon: enabled
tenant-ceph-rgw: enabled
tenant-ceph-mgr: enabled
maas-rack: enabled
maas-region: enabled
ucp-maas: enabled
kube-dns: enabled
kubernetes-apiserver: enabled
kubernetes-controller-manager: enabled
@ -103,7 +111,6 @@ data:
prometheus-server: enabled
prometheus-client: enabled
fluentd: enabled
fluentbit: enabled
influxdb: enabled
kibana: enabled
elasticsearch-client: enabled
@ -113,5 +120,4 @@ data:
kube-ingress: enabled
beta.kubernetes.io/fluentd-ds-ready: 'true'
node-exporter: enabled
utility: enabled
...

View File

@ -2,9 +2,9 @@
schema: drydock/HostProfile/v1
metadata:
schema: metadata/Document/v1
name: dp-global
name: gv-global
labels:
hosttype: dp-global
hosttype: gv-global
layeringDefinition:
abstract: true
layer: global
@ -41,25 +41,27 @@ data:
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var'
size: '>100g'
size: '>300g'
filesystem:
mountpoint: '/var'
fstype: 'ext4'
mount_options: 'defaults'
platform:
image: 'xenial'
kernel: 'hwe-16.04'
kernel: 'ga-16.04'
kernel_params:
kernel_package: 'linux-image-4.15.0-46-generic'
console: 'ttyS1,115200n8'
cgroup_disable: 'hugetlb'
isolcpus: '2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43'
metadata:
owner_data:
openstack-nova-compute: enabled
tenant-ceph-osd: enabled
openvswitch: enabled
sriov: enabled
contrail-vrouter: kernel
openstack-libvirt: kernel
beta.kubernetes.io/fluentd-ds-ready: 'true'
node-exporter: enabled
fluentbit: enabled
fluentd: enabled
tenant-ceph-osd: enabled
...

View File

@ -0,0 +1,214 @@
---
schema: drydock/HostProfile/v1
metadata:
schema: metadata/Document/v1
name: nc-cp-adv-global
storagePolicy: cleartext
labels:
hosttype: nc-cp-adv
layeringDefinition:
abstract: true
layer: global
substitutions:
- dest:
path: .oob.credential
src:
schema: deckhand/Passphrase/v1
name: ipmi_admin_password
path: .
- dest:
path: .interfaces.p2p1.sriov.device
src:
schema: drydock/HardwareProfile/v1
name: dell_r740_purley_adv_nc
path: .device_aliases.dp_nic01.address
- dest:
path: .interfaces.p7p2.sriov.device
src:
schema: drydock/HardwareProfile/v1
name: dell_r740_purley_adv_nc
path: .device_aliases.dp_nic02.address
data:
oob:
type: 'ipmi'
network: 'oob'
account: 'tier4'
primary_network: 'oam'
hardware_profile: dell_r740_purley_adv_nc
interfaces:
pxe:
device_link: pxe
slaves:
- 'pxe_nic01'
networks:
- 'pxe'
bond1:
device_link: bond1
slaves:
- 'gp_nic01'
- 'gp_nic02'
networks:
- 'oam'
- 'storage'
- 'calico'
p2p1:
slaves:
- 'dp_nic01'
sriov:
num_vfs: 32
promisc: false
ovs_dpdk:
vf_index: 0
pci_whitelist:
trusted: false
p7p2:
slaves:
- 'dp_nic02'
sriov:
num_vfs: 32
promisc: false
ovs_dpdk:
vf_index: 0
pci_whitelist:
trusted: false
storage:
physical_devices:
bootdisk:
labels:
bootdrive: 'true'
partitions:
- name: 'root'
size: '30g'
bootable: true
filesystem:
mountpoint: '/'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'boot'
size: '1g'
filesystem:
mountpoint: '/boot'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var_log'
size: '100g'
filesystem:
mountpoint: '/var/log'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var_crash'
size: '70g'
filesystem:
mountpoint: '/var/crash'
fstype: 'ext4'
mount_options: 'defaults,nofail'
- name: 'var_lib_openstack_helm'
size: '10g'
filesystem:
mountpoint: '/var/lib/openstack-helm'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var'
size: '>200g'
filesystem:
mountpoint: '/var'
fstype: 'ext4'
mount_options: 'defaults'
ephemeral:
partitions:
- name: 'ssd_storage'
size: '99%'
filesystem:
mountpoint: '/srv'
fstype: 'ext4'
mount_options: 'defaults'
platform:
image: 'bionic'
kernel: 'ga-18.04'
kernel_params:
# NOTE: For the hugepages config, real values must be used here
# because this list is also consumed by pre-genesis.sh to
# configure hugepages for the genesis node, and unlike drydock
# it does not have the capability to perform the necessary
# substitution at run time. See the nc-p1-adv profile for the
# preferred substitution pattern.
hugepagesz: '1G'
hugepages: '20'
transparent_hugepage: 'never'
kernel_package: 'linux-image-4.15.0-64-generic'
console: 'ttyS1,115200n8'
cgroup_disable: 'hugetlb'
amd_iommu: 'on'
intel_iommu: 'on'
iommu: 'pt'
default_hugepagesz: '1G'
dpdk-socket-mem: '4096,4096'
metadata:
owner_data:
hosttype: nc-cp-adv
control-plane: enabled
ucp-control-plane: enabled
openstack-control-plane: enabled
openstack-heat: enabled
openstack-keystone: enabled
openstack-rabbitmq: enabled
openstack-dns-helper: enabled
openstack-mariadb: enabled
openstack-nova-control: enabled
openstack-etcd: enabled
openstack-mistral: enabled
openstack-memcached: enabled
openstack-glance: enabled
openstack-horizon: enabled
openstack-cinder-control: enabled
openstack-cinder-volume: control
openstack-neutron: enabled
openstack-l3-agent: enabled
openstack-dhcp-agent: enabled
openstack-metadata-agent: enabled
openstack-neutron-server: enabled
openvswitch: enabled
ucp-barbican: enabled
ceph-bootstrap: enabled
ceph-mon: enabled
ceph-mgr: enabled
ceph-osd: enabled
ceph-mds: enabled
ceph-rgw: enabled
tenant-ceph-control-plane: enabled
tenant-ceph-mon: enabled
tenant-ceph-rgw: enabled
tenant-ceph-mgr: enabled
maas-rack: enabled
maas-region: enabled
kube-dns: enabled
kubernetes-apiserver: enabled
kubernetes-controller-manager: enabled
kubernetes-etcd: enabled
kubernetes-scheduler: enabled
tiller-helm: enabled
kube-etcd: enabled
calico-policy: enabled
calico-node: enabled
calico-etcd: enabled
ucp-armada: enabled
ucp-drydock: enabled
ucp-deckhand: enabled
ucp-shipyard: enabled
IAM: enabled
ucp-promenade: enabled
prometheus-server: enabled
prometheus-client: enabled
fluentd: enabled
influxdb: enabled
kibana: enabled
elasticsearch-client: enabled
elasticsearch-master: enabled
elasticsearch-data: enabled
postgresql: enabled
kube-ingress: enabled
beta.kubernetes.io/fluentd-ds-ready: 'true'
node-exporter: enabled
utility: enabled
sriov: enabled
...
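
Several kernel_params in these profiles use hardwareprofile: references (e.g. hugepagesz: 'hardwareprofile:hugepages.dpdk.size') that Drydock resolves against the node's hardware profile at render time; the NOTE in the profile above explains why it inlines literal values instead. A rough Python sketch of the resolution rule, under the assumption that the reference is simply a dot-path into the hardware profile's data:

from functools import reduce

hardware_profile = {  # trimmed from dell_r740_purley_adv_nc above
    "hugepages": {"dpdk": {"size": "1G", "count": 320}},
    "cpu_sets": {"isolcpus": "4-43,48-87"},
}

def resolve(value, hw):
    # Resolve 'hardwareprofile:a.b.c' references; pass other values through.
    if isinstance(value, str) and value.startswith("hardwareprofile:"):
        path = value.split(":", 1)[1]
        return reduce(lambda node, key: node[key], path.split("."), hw)
    return value

kernel_params = {
    "hugepagesz": "hardwareprofile:hugepages.dpdk.size",
    "hugepages": "hardwareprofile:hugepages.dpdk.count",
    "default_hugepagesz": "hardwareprofile:hugepages.dpdk.size",
}
resolved = {k: resolve(v, hardware_profile) for k, v in kernel_params.items()}
assert resolved == {"hugepagesz": "1G", "hugepages": 320, "default_hugepagesz": "1G"}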

View File

@ -0,0 +1,160 @@
---
schema: drydock/HostProfile/v1
metadata:
schema: metadata/Document/v1
name: nc-cp-global
storagePolicy: cleartext
labels:
hosttype: nc-cp
layeringDefinition:
abstract: true
layer: global
substitutions:
- dest:
path: .oob.credential
src:
schema: deckhand/Passphrase/v1
name: ipmi_admin_password
path: .
data:
oob:
type: 'ipmi'
network: 'oob'
account: 'tier4'
primary_network: 'oam'
hardware_profile: dell_r740_purley_nc
interfaces:
pxe:
device_link: pxe
slaves:
- 'pxe_nic01'
networks:
- 'pxe'
bond1:
device_link: bond1
slaves:
- 'gp_nic01'
- 'gp_nic02'
networks:
- 'oam'
- 'storage'
- 'overlay'
- 'calico'
storage:
physical_devices:
bootdisk:
labels:
bootdrive: 'true'
partitions:
- name: 'root'
size: '30g'
bootable: true
filesystem:
mountpoint: '/'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'boot'
size: '1g'
filesystem:
mountpoint: '/boot'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var_log'
size: '100g'
filesystem:
mountpoint: '/var/log'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var_crash'
size: '70g'
filesystem:
mountpoint: '/var/crash'
fstype: 'ext4'
mount_options: 'defaults,nofail'
- name: 'var_lib_ceph'
size: '10g'
filesystem:
mountpoint: '/var/lib/ceph'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var'
size: '>250g'
filesystem:
mountpoint: '/var'
fstype: 'ext4'
mount_options: 'defaults'
platform:
image: 'bionic'
kernel: 'ga-18.04'
kernel_params:
kernel_package: 'linux-image-4.15.0-64-generic'
console: 'ttyS1,115200n8'
cgroup_disable: 'hugetlb'
metadata:
owner_data:
hosttype: nc-cp
control-plane: enabled
ucp-control-plane: enabled
openstack-control-plane: enabled
openstack-heat: enabled
openstack-keystone: enabled
openstack-rabbitmq: enabled
openstack-dns-helper: enabled
openstack-mariadb: enabled
openstack-nova-control: enabled
openstack-etcd: enabled
openstack-mistral: enabled
openstack-memcached: enabled
openstack-glance: enabled
openstack-horizon: enabled
openstack-cinder-control: enabled
openstack-cinder-volume: control
openstack-neutron: enabled
openstack-l3-agent: enabled
openstack-dhcp-agent: enabled
openstack-metadata-agent: enabled
openstack-neutron-server: enabled
openvswitch: enabled
ucp-barbican: enabled
ceph-bootstrap: enabled
ceph-mon: enabled
ceph-mgr: enabled
ceph-osd: enabled
ceph-mds: enabled
ceph-rgw: enabled
tenant-ceph-control-plane: enabled
tenant-ceph-mon: enabled
tenant-ceph-rgw: enabled
tenant-ceph-mgr: enabled
maas-rack: enabled
maas-region: enabled
kube-dns: enabled
kubernetes-apiserver: enabled
kubernetes-controller-manager: enabled
kubernetes-etcd: enabled
kubernetes-scheduler: enabled
tiller-helm: enabled
kube-etcd: enabled
calico-policy: enabled
calico-node: enabled
calico-etcd: enabled
ucp-armada: enabled
ucp-drydock: enabled
ucp-deckhand: enabled
ucp-shipyard: enabled
IAM: enabled
ucp-promenade: enabled
prometheus-server: enabled
prometheus-client: enabled
fluentd: enabled
influxdb: enabled
kibana: enabled
elasticsearch-client: enabled
elasticsearch-master: enabled
elasticsearch-data: enabled
postgresql: enabled
kube-ingress: enabled
beta.kubernetes.io/fluentd-ds-ready: 'true'
node-exporter: enabled
utility: enabled
...

View File

@ -0,0 +1,158 @@
---
schema: drydock/HostProfile/v1
metadata:
schema: metadata/Document/v1
name: nc-p1-adv
storagePolicy: cleartext
labels:
hosttype: nc-p1-adv
layeringDefinition:
abstract: false
layer: global
substitutions:
- dest:
path: .oob.credential
src:
schema: deckhand/Passphrase/v1
name: ipmi_admin_password
path: .
- dest:
path: .platform.kernel_params.isolcpus
src:
schema: drydock/HardwareProfile/v1
name: dell_r740_purley_adv_nc
path: .cpu_sets.isolcpus
- dest:
path: .platform.kernel_params.rcu_nocbs
src:
schema: drydock/HardwareProfile/v1
name: dell_r740_purley_adv_nc
path: .cpu_sets.rcu_nocbs
- dest:
path: .interfaces.p2p1.sriov.device
src:
schema: drydock/HardwareProfile/v1
name: dell_r740_purley_adv_nc
path: .device_aliases.dp_nic01.address
- dest:
path: .interfaces.p7p2.sriov.device
src:
schema: drydock/HardwareProfile/v1
name: dell_r740_purley_adv_nc
path: .device_aliases.dp_nic02.address
data:
oob:
type: 'ipmi'
network: 'oob'
account: 'tier4'
primary_network: 'oam'
hardware_profile: dell_r740_purley_adv_nc
interfaces:
pxe:
device_link: pxe
slaves:
- 'pxe_nic01'
networks:
- 'pxe'
bond1:
device_link: bond1
slaves:
- 'gp_nic01'
- 'gp_nic02'
networks:
- 'oam'
- 'storage'
- 'calico'
p2p1:
slaves:
- 'dp_nic01'
sriov:
num_vfs: 32
promisc: false
ovs_dpdk:
vf_index: 0
pci_whitelist:
trusted: false
p7p2:
slaves:
- 'dp_nic02'
sriov:
num_vfs: 32
promisc: false
ovs_dpdk:
vf_index: 0
pci_whitelist:
trusted: false
storage:
physical_devices:
bootdisk:
labels:
bootdrive: 'true'
partitions:
- name: 'root'
size: '30g'
bootable: true
filesystem:
mountpoint: '/'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'boot'
size: '1g'
filesystem:
mountpoint: '/boot'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var_log'
size: '100g'
filesystem:
mountpoint: '/var/log'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var_crash'
size: '70g'
filesystem:
mountpoint: '/var/crash'
fstype: 'ext4'
mount_options: 'defaults,nofail'
- name: 'var'
size: '>200g'
filesystem:
mountpoint: '/var'
fstype: 'ext4'
mount_options: 'defaults'
ephemeral:
partitions:
- name: 'nova_instance'
size: '99%'
filesystem:
mountpoint: '/var/lib/nova'
fstype: 'ext4'
mount_options: 'defaults'
platform:
image: 'bionic'
kernel: 'ga-18.04'
kernel_params:
hugepagesz: 'hardwareprofile:hugepages.dpdk.size'
hugepages: 'hardwareprofile:hugepages.dpdk.count'
transparent_hugepage: 'never'
kernel_package: 'linux-image-4.15.0-64-generic'
console: 'ttyS1,115200n8'
amd_iommu: 'on'
intel_iommu: 'on'
iommu: 'pt'
cgroup_disable: 'hugetlb'
default_hugepagesz: 'hardwareprofile:hugepages.dpdk.size'
dpdk-socket-mem: '4096,4096'
rcu_nocb_poll: true
metadata:
owner_data:
hosttype: nc-p1-r740
tenant-ceph-osd: enabled
openstack-nova-compute: enabled
openvswitch: enabled
sriov: enabled
openstack-libvirt: kernel
beta.kubernetes.io/fluentd-ds-ready: 'true'
node-exporter: enabled
fluentd: enabled
...

View File

@ -0,0 +1,126 @@
---
schema: drydock/HostProfile/v1
metadata:
schema: metadata/Document/v1
name: nc-p1
storagePolicy: cleartext
labels:
hosttype: nc-p1
layeringDefinition:
abstract: false
layer: global
substitutions:
- dest:
path: .oob.credential
src:
schema: deckhand/Passphrase/v1
name: ipmi_admin_password
path: .
data:
oob:
type: 'ipmi'
network: 'oob'
account: 'tier4'
primary_network: 'oam'
hardware_profile: dell_r740_purley_nc
interfaces:
pxe:
device_link: pxe
slaves:
- 'pxe_nic01'
networks:
- 'pxe'
bond1:
device_link: bond1
slaves:
- 'gp_nic01'
- 'gp_nic02'
networks:
- 'oam'
- 'storage'
- 'overlay'
- 'calico'
p2p2:
slaves:
- 'dp_nic01'
sriov:
vf_count: 64
trustedmode: false
p7p1:
slaves:
- 'dp_nic02'
sriov:
vf_count: 64
trustedmode: false
storage:
physical_devices:
bootdisk:
labels:
bootdrive: 'true'
partitions:
- name: 'root'
size: '30g'
bootable: true
filesystem:
mountpoint: '/'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'boot'
size: '1g'
filesystem:
mountpoint: '/boot'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var_log'
size: '100g'
filesystem:
mountpoint: '/var/log'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var_crash'
size: '70g'
filesystem:
mountpoint: '/var/crash'
fstype: 'ext4'
mount_options: 'defaults,nofail'
- name: 'var'
size: '>250g'
filesystem:
mountpoint: '/var'
fstype: 'ext4'
mount_options: 'defaults'
ephemeral:
partitions:
- name: 'nova_instance'
size: '99%'
filesystem:
mountpoint: '/var/lib/nova'
fstype: 'ext4'
mount_options: 'defaults'
platform:
image: 'bionic'
kernel: 'ga-18.04'
kernel_params:
kernel_package: 'linux-image-4.15.0-64-generic'
console: 'ttyS1,115200n8'
intel_iommu: 'on'
iommu: 'pt'
amd_iommu: 'on'
cgroup_disable: 'hugetlb'
transparent_hugepage: 'never'
hugepagesz: 'hardwareprofile:hugepages.dpdk.size'
hugepages: 'hardwareprofile:hugepages.dpdk.count'
default_hugepagesz: 'hardwareprofile:hugepages.dpdk.size'
isolcpus: 'hardwareprofile:cpuset.kvm'
metadata:
owner_data:
hosttype: nc-p1-r740
tenant-ceph-osd: enabled
openstack-nova-compute: enabled
openvswitch: enabled
sriov: enabled
openstack-libvirt: kernel
beta.kubernetes.io/fluentd-ds-ready: 'true'
node-exporter: enabled
fluentd: enabled
...
View File
@ -0,0 +1,67 @@
---
schema: drydock/HostProfile/v1
metadata:
schema: metadata/Document/v1
name: nd-global
labels:
hosttype: nd-global
layeringDefinition:
abstract: true
layer: global
storagePolicy: cleartext
substitutions:
- dest:
path: .oob.credential
src:
schema: deckhand/Passphrase/v1
name: ipmi_admin_password
path: .
data:
oob:
type: 'ipmi'
network: 'oob'
account: 'root'
storage:
physical_devices:
sda:
labels:
bootdrive: 'true'
partitions:
- name: 'root'
size: '30g'
bootable: true
filesystem:
mountpoint: '/'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'boot'
size: '1g'
filesystem:
mountpoint: '/boot'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var'
size: '>300g'
filesystem:
mountpoint: '/var'
fstype: 'ext4'
mount_options: 'defaults'
platform:
image: 'xenial'
kernel: 'ga-16.04'
kernel_params:
console: 'ttyS1,115200n8'
cgroup_disable: 'hugetlb'
hugepagesz: '1G'
hugepages: '210'
isolcpus: '2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43'
metadata:
owner_data:
openstack-nova-compute: enabled
contrail-vrouter: dpdk
openstack-libvirt: dpdk
beta.kubernetes.io/fluentd-ds-ready: 'true'
sriov: enabled
node-exporter: enabled
fluentd: enabled
...
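Assuming the intent is the contiguous CPU set 2 through 43, the kernel's range syntax would express the isolcpus list above equivalently and more legibly:

kernel_params:
  isolcpus: '2-43'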
View File
@ -0,0 +1,67 @@
---
schema: drydock/HostProfile/v1
metadata:
schema: metadata/Document/v1
name: nv-global
labels:
hosttype: nv-global
layeringDefinition:
abstract: true
layer: global
storagePolicy: cleartext
substitutions:
- dest:
path: .oob.credential
src:
schema: deckhand/Passphrase/v1
name: ipmi_admin_password
path: .
data:
oob:
type: 'ipmi'
network: 'oob'
account: 'root'
storage:
physical_devices:
sda:
labels:
bootdrive: 'true'
partitions:
- name: 'root'
size: '30g'
bootable: true
filesystem:
mountpoint: '/'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'boot'
size: '1g'
filesystem:
mountpoint: '/boot'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'var'
size: '>300g'
filesystem:
mountpoint: '/var'
fstype: 'ext4'
mount_options: 'defaults'
platform:
image: 'xenial'
kernel: 'ga-16.04'
kernel_params:
console: 'ttyS1,115200n8'
cgroup_disable: 'hugetlb'
hugepagesz: '2M'
hugepages: '107520'
isolcpus: '4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43'
metadata:
owner_data:
openstack-nova-compute: enabled
contrail-vrouter: kernel
openstack-libvirt: kernel
beta.kubernetes.io/fluentd-ds-ready: 'true'
sriov: enabled
node-exporter: enabled
fluentd: enabled
...
View File
@ -3,11 +3,14 @@ schema: promenade/HostSystem/v1
metadata:
schema: metadata/Document/v1
name: host-system
labels:
name: host-system-global
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
  # NOTE: /hyperkube in k8s 1.17+ images is a useless shell script
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
@ -15,6 +18,14 @@ metadata:
dest:
path: .files[0].docker_image
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .files.kubernetes
dest:
- path: .files[1].tar_url
- path: .files[2].tar_url
# Initial CoreDNS image (used during node Genesis and node join)
- src:
schema: pegleg/SoftwareVersions/v1
@ -23,7 +34,7 @@ metadata:
dest:
path: .images.coredns
# Initial CoreDNS image (used during node Genesis and node join)
# Initial haproxy image (used during node Genesis and node join)
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
@ -49,7 +60,7 @@ metadata:
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.ucp.promenade.monitoring_image
path: .images.monitoring_image
dest:
path: .images.monitoring_image
@ -57,62 +68,177 @@ metadata:
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .packages.named.docker
path: .packages.named
dest:
path: .packages.common.required.docker
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .packages.named.socat
dest:
path: .packages.common.required.socat
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .packages.named.socat
dest:
path: .packages.genesis.required.socat
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .packages.named.socat
dest:
path: .packages.join.required.socat
path: .packages.genesis.required
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .packages.unnamed
dest:
path: .packages.common.additional
# Docker authorization
path: .packages.genesis.additional
- src:
schema: deckhand/Passphrase/v1
path: .
name: private_docker_key
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .packages.named
dest:
path: .files[4].content
pattern: DH_SUB_PRIVATE_DOCKER_KEY
path: .packages.join.required
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .packages.unnamed
dest:
path: .packages.join.additional
# Genesis validation image
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.kubernetes.validation
path: .validation
dest:
path: .validation
# # Artifactory Auth'd Docker Repo (docker-nc)
# - src:
# schema: nc/CorridorConfig/v1
# name: corridor-config
# path: .artifactory.hostnames.docker
# dest:
# path: .files[5].content
# pattern: DOCKER_ARTIFACTORY_REPO_URL
# # Artifactory authorization
# - src:
# schema: nc/CorridorConfig/v1
# name: corridor-config
# path: .artifactory.auth_key
# dest:
# path: .files[5].content
# pattern: DH_SUB_ARTIFACTORY_DOCKER_KEY
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .packages.repositories.ceph.url
dest:
- path: .packages
pattern: CEPH_REPO
recurse:
depth: -1
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .packages.repositories.ceph.distributions[0]
dest:
- path: .packages
pattern: CEPH_DISTRO
recurse:
depth: -1
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .packages.repositories.ceph.components[0]
dest:
- path: .packages
pattern: CEPH_COMPONENT
recurse:
depth: -1
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .packages.repositories.ceph.gpgkey
dest:
- path: .packages
pattern: CEPH_GPG_KEY
recurse:
depth: -1
# Docker source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .packages.repositories.docker.url
dest:
- path: .packages
pattern: DOCKER_REPO
recurse:
depth: -1
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .packages.repositories.docker.distributions[0]
dest:
- path: .packages
pattern: DOCKER_DISTRO
recurse:
depth: -1
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .packages.repositories.docker.components[0]
dest:
- path: .packages
pattern: DOCKER_COMPONENT
recurse:
depth: -1
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .packages.repositories.docker.gpgkey
dest:
- path: .packages
pattern: DOCKER_GPG_KEY
recurse:
depth: -1
# # Artifactory Auth'd Docker Repo (docker-nc)
# - src:
# schema: nc/CorridorConfig/v1
# name: corridor-config
# path: .artifactory.hostnames.docker
# dest:
# path: .files[8].content
# pattern: DOCKER_ARTIFACTORY_REPO_URL
#
# # Artifactory authorization
# - src:
# schema: nc/CorridorConfig/v1
# name: corridor-config
# path: .artifactory.auth_key
# dest:
# path: .files[8].content
# pattern: DH_SUB_ARTIFACTORY_DOCKER_KEY
# Pause image for sandbox
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.kubernetes.pause
dest:
path: .files[8].content
pattern: PAUSE_IMAGE
# CRI tool
# - src:
# schema: pegleg/SoftwareVersions/v1
# name: software-versions
# path: .files.crictl
# dest:
# path: .files[9].tar_url
data:
# see (and update if needed)
# type/cruiser/profiles/kubernetes-host.yaml (data.files)
files:
# .files[0]
- path: /opt/kubernetes/bin/hyperkube
file_path: /hyperkube
mode: 0555
# .files[1]
- path: /opt/kubernetes/bin/kubelet
symlink: /opt/kubernetes/bin/hyperkube
tar_path: kubernetes/node/bin/kubelet
mode: 0555
# .files[2]
- path: /usr/local/bin/kubectl
symlink: /opt/kubernetes/bin/hyperkube
tar_path: kubernetes/node/bin/kubectl
mode: 0555
# .files[3]
- path: /etc/logrotate.d/json-logrotate
mode: 0444
content: |-
@ -121,99 +247,136 @@ data:
compress
copytruncate
create 0644 root root
weekly
daily
dateext
dateformat -%Y%m%d-%s
maxsize 100M
missingok
notifempty
su root root
rotate 1
rotate 4
}
# .files[4]
- path: /etc/logrotate.d/rsyslog
mode: 0444
content: |-
/var/log/syslog
{
rotate 7
size 10M
create 640 syslog adm
su root syslog
daily
missingok
notifempty
delaycompress
compress
dateext
dateformat -%Y%m%d-%s
postrotate
/bin/kill -HUP `cat /var/run/rsyslogd.pid 2> /dev/null` 2> /dev/null || true
reload rsyslog >/dev/null 2>&1 || true
endscript
}
/var/log/mail.info
/var/log/mail.warn
/var/log/mail.err
/var/log/mail.log
/var/log/daemon.log
/var/log/kern.log
/var/log/auth.log
/var/log/user.log
/var/log/lpr.log
/var/log/cron
/var/log/debug
/var/log/secure
/var/log/messages
{
rotate 7
daily
size 10M
create 640 syslog adm
su root syslog
missingok
notifempty
compress
delaycompress
sharedscripts
dateext
dateformat -%Y%m%d-%s
postrotate
/bin/kill -HUP `cat /var/run/rsyslogd.pid 2> /dev/null` 2> /dev/null || true
reload rsyslog >/dev/null 2>&1 || true
endscript
}
  # NOTE(pc1210): Docker AUTH is not required for upstream images; it can be removed.
# .files[5]
- path: /var/lib/kubelet/.dockercfg
mode: 0400
# NOTE: Sample key, this repo does not exist
content: |-
{
"https://private.registry.com": {
"auth": "DH_SUB_PRIVATE_DOCKER_KEY"
}
}
# Make sure that promjoin script does not run on every boot,
# otherwise it may downgrade current versions of Docker & Kubelet.
# content: |-
# {
# "https://DOCKER_ARTIFACTORY_REPO_URL": {
# "auth": "DH_SUB_ARTIFACTORY_DOCKER_KEY"
# }
# }
# .files[6]
# NOTE(mb874d): This file is used to signal to the promjoin bootaction to
# not run a second time.
- path: /var/lib/prom.done
mode: 0444
content: ""
- path: /etc/profile.d/kubeconfig.sh
mode: 0744
# .files[7]
- path: /root/.bash_profile
mode: 0740
content: |-
export KUBECONFIG=/etc/kubernetes/admin/kubeconfig.yaml
packages:
common:
repositories:
- deb https://download.docker.com/linux/ubuntu/ xenial stable
keys:
- |-
-----BEGIN PGP PUBLIC KEY BLOCK-----
# .files[8]
- path: /etc/containerd/config.toml
mode: 0400
content: |-
version = 2
disabled_plugins = ["restart"]
mQINBFit2ioBEADhWpZ8/wvZ6hUTiXOwQHXMAlaFHcPH9hAtr4F1y2+OYdbtMuth
lqqwp028AqyY+PRfVMtSYMbjuQuu5byyKR01BbqYhuS3jtqQmljZ/bJvXqnmiVXh
38UuLa+z077PxyxQhu5BbqntTPQMfiyqEiU+BKbq2WmANUKQf+1AmZY/IruOXbnq
L4C1+gJ8vfmXQt99npCaxEjaNRVYfOS8QcixNzHUYnb6emjlANyEVlZzeqo7XKl7
UrwV5inawTSzWNvtjEjj4nJL8NsLwscpLPQUhTQ+7BbQXAwAmeHCUTQIvvWXqw0N
cmhh4HgeQscQHYgOJjjDVfoY5MucvglbIgCqfzAHW9jxmRL4qbMZj+b1XoePEtht
ku4bIQN1X5P07fNWzlgaRL5Z4POXDDZTlIQ/El58j9kp4bnWRCJW0lya+f8ocodo
vZZ+Doi+fy4D5ZGrL4XEcIQP/Lv5uFyf+kQtl/94VFYVJOleAv8W92KdgDkhTcTD
G7c0tIkVEKNUq48b3aQ64NOZQW7fVjfoKwEZdOqPE72Pa45jrZzvUFxSpdiNk2tZ
XYukHjlxxEgBdC/J3cMMNRE1F4NCA3ApfV1Y7/hTeOnmDuDYwr9/obA8t016Yljj
q5rdkywPf4JF8mXUW5eCN1vAFHxeg9ZWemhBtQmGxXnw9M+z6hWwc6ahmwARAQAB
tCtEb2NrZXIgUmVsZWFzZSAoQ0UgZGViKSA8ZG9ja2VyQGRvY2tlci5jb20+iQI3
BBMBCgAhBQJYrefAAhsvBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEI2BgDwO
v82IsskP/iQZo68flDQmNvn8X5XTd6RRaUH33kXYXquT6NkHJciS7E2gTJmqvMqd
tI4mNYHCSEYxI5qrcYV5YqX9P6+Ko+vozo4nseUQLPH/ATQ4qL0Zok+1jkag3Lgk
jonyUf9bwtWxFp05HC3GMHPhhcUSexCxQLQvnFWXD2sWLKivHp2fT8QbRGeZ+d3m
6fqcd5Fu7pxsqm0EUDK5NL+nPIgYhN+auTrhgzhK1CShfGccM/wfRlei9Utz6p9P
XRKIlWnXtT4qNGZNTN0tR+NLG/6Bqd8OYBaFAUcue/w1VW6JQ2VGYZHnZu9S8LMc
FYBa5Ig9PxwGQOgq6RDKDbV+PqTQT5EFMeR1mrjckk4DQJjbxeMZbiNMG5kGECA8
g383P3elhn03WGbEEa4MNc3Z4+7c236QI3xWJfNPdUbXRaAwhy/6rTSFbzwKB0Jm
ebwzQfwjQY6f55MiI/RqDCyuPj3r3jyVRkK86pQKBAJwFHyqj9KaKXMZjfVnowLh
9svIGfNbGHpucATqREvUHuQbNnqkCx8VVhtYkhDb9fEP2xBu5VvHbR+3nfVhMut5
G34Ct5RS7Jt6LIfFdtcn8CaSas/l1HbiGeRgc70X/9aYx/V/CEJv0lIe8gP6uDoW
FPIZ7d6vH+Vro6xuWEGiuMaiznap2KhZmpkgfupyFmplh0s6knymuQINBFit2ioB
EADneL9S9m4vhU3blaRjVUUyJ7b/qTjcSylvCH5XUE6R2k+ckEZjfAMZPLpO+/tF
M2JIJMD4SifKuS3xck9KtZGCufGmcwiLQRzeHF7vJUKrLD5RTkNi23ydvWZgPjtx
Q+DTT1Zcn7BrQFY6FgnRoUVIxwtdw1bMY/89rsFgS5wwuMESd3Q2RYgb7EOFOpnu
w6da7WakWf4IhnF5nsNYGDVaIHzpiqCl+uTbf1epCjrOlIzkZ3Z3Yk5CM/TiFzPk
z2lLz89cpD8U+NtCsfagWWfjd2U3jDapgH+7nQnCEWpROtzaKHG6lA3pXdix5zG8
eRc6/0IbUSWvfjKxLLPfNeCS2pCL3IeEI5nothEEYdQH6szpLog79xB9dVnJyKJb
VfxXnseoYqVrRz2VVbUI5Blwm6B40E3eGVfUQWiux54DspyVMMk41Mx7QJ3iynIa
1N4ZAqVMAEruyXTRTxc9XW0tYhDMA/1GYvz0EmFpm8LzTHA6sFVtPm/ZlNCX6P1X
zJwrv7DSQKD6GGlBQUX+OeEJ8tTkkf8QTJSPUdh8P8YxDFS5EOGAvhhpMBYD42kQ
pqXjEC+XcycTvGI7impgv9PDY1RCC1zkBjKPa120rNhv/hkVk/YhuGoajoHyy4h7
ZQopdcMtpN2dgmhEegny9JCSwxfQmQ0zK0g7m6SHiKMwjwARAQABiQQ+BBgBCAAJ
BQJYrdoqAhsCAikJEI2BgDwOv82IwV0gBBkBCAAGBQJYrdoqAAoJEH6gqcPyc/zY
1WAP/2wJ+R0gE6qsce3rjaIz58PJmc8goKrir5hnElWhPgbq7cYIsW5qiFyLhkdp
YcMmhD9mRiPpQn6Ya2w3e3B8zfIVKipbMBnke/ytZ9M7qHmDCcjoiSmwEXN3wKYI
mD9VHONsl/CG1rU9Isw1jtB5g1YxuBA7M/m36XN6x2u+NtNMDB9P56yc4gfsZVES
KA9v+yY2/l45L8d/WUkUi0YXomn6hyBGI7JrBLq0CX37GEYP6O9rrKipfz73XfO7
JIGzOKZlljb/D9RX/g7nRbCn+3EtH7xnk+TK/50euEKw8SMUg147sJTcpQmv6UzZ
cM4JgL0HbHVCojV4C/plELwMddALOFeYQzTif6sMRPf+3DSj8frbInjChC3yOLy0
6br92KFom17EIj2CAcoeq7UPhi2oouYBwPxh5ytdehJkoo+sN7RIWua6P2WSmon5
U888cSylXC0+ADFdgLX9K2zrDVYUG1vo8CX0vzxFBaHwN6Px26fhIT1/hYUHQR1z
VfNDcyQmXqkOnZvvoMfz/Q0s9BhFJ/zU6AgQbIZE/hm1spsfgvtsD1frZfygXJ9f
irP+MSAI80xHSf91qSRZOj4Pl3ZJNbq4yYxv0b1pkMqeGdjdCYhLU+LZ4wbQmpCk
SVe2prlLureigXtmZfkqevRz7FrIZiu9ky8wnCAPwC7/zmS18rgP/17bOtL4/iIz
QhxAAoAMWVrGyJivSkjhSGx1uCojsWfsTAm11P7jsruIL61ZzMUVE2aM3Pmj5G+W
9AcZ58Em+1WsVnAXdUR//bMmhyr8wL/G1YO1V3JEJTRdxsSxdYa4deGBBY/Adpsw
24jxhOJR+lsJpqIUeb999+R8euDhRHG9eFO7DRu6weatUJ6suupoDTRWtr/4yGqe
dKxV3qQhNLSnaAzqW/1nA3iUB4k7kCaKZxhdhDbClf9P37qaRW467BLCVO/coL3y
Vm50dwdrNtKpMBh3ZpbB1uJvgi9mXtyBOMJ3v8RZeDzFiG8HdCtg9RvIt/AIFoHR
H3S+U79NT6i0KPzLImDfs8T7RlpyuMc4Ufs8ggyg9v3Ae6cN3eQyxcK3w0cbBwsh
/nQNfsA6uu+9H7NhbehBMhYnpNZyrHzCmzyXkauwRAqoCbGCNykTRwsur9gS41TQ
M8ssD1jFheOJf3hODnkKU+HKjvMROl1DK7zdmLdNzA1cvtZH/nCC9KPj1z8QC47S
xx+dTZSx4ONAhwbS/LN3PoKtn8LPjY9NP9uDWI+TWYquS2U+KHDrBDlsgozDbs/O
jCxcpDzNmXpWQHEtHU7649OXHP7UeNST1mCUCH5qdank0V1iejF6/CfTFU4MfcrG
YT90qFF93M3v01BbxP+EIY2/9tiIPbrd
=0YYh
-----END PGP PUBLIC KEY BLOCK-----
[plugins.cri]
systemd_cgroup = true
sandbox_image = "PAUSE_IMAGE"
# .files[9]
- path: /usr/bin/crictl
tar_path: crictl
mode: 0555
# .files[10]
- path: /etc/crictl.yaml
mode: 0400
content: |-
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
packages:
# NOTE(aw442m): During gensis, mini-mirror runs as a standalone container
# on localhost, managed by pre-genesis.sh.
# The repositories listed here will be in
# /etc/apt/sources.list.d/promenade-genesis-sources.list
genesis:
repositories:
- deb http://mirror.mirantis.com/testing/ceph-nautilus/bionic CEPH_DISTRO CEPH_COMPONENT
- deb http://mirror.mirantis.com/testing/kubernetes-extra/bionic DOCKER_DISTRO DOCKER_COMPONENT
keys:
- |-
CEPH_GPG_KEY
- |-
DOCKER_GPG_KEY
# NOTE(aw442m): repository information is populated here from versions.yaml.
# The repositories listed here will be in
# /etc/apt/sources.list.d/promenade-join-sources.list
join:
repositories:
- deb CEPH_REPO CEPH_DISTRO CEPH_COMPONENT
- deb DOCKER_REPO DOCKER_DISTRO DOCKER_COMPONENT
keys:
- |-
CEPH_GPG_KEY
- |-
DOCKER_GPG_KEY
...
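For orientation, once Deckhand substitutes the CEPH_*/DOCKER_* patterns above from versions.yaml, each entry becomes an ordinary apt source line. With hypothetical values (repo URL http://mirror.example.com/ceph, distribution bionic-nautilus, component main), the join list would render as:

repositories:
  - deb http://mirror.example.com/ceph bionic-nautilus main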
View File
@ -0,0 +1,30 @@
---
schema: 'drydock/Region/v1'
metadata:
schema: 'metadata/Document/v1'
name: region-profile
labels:
name: region-profile
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
- dest:
path: .repositories.main_archive
src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .packages.repositories.main_archive
- dest:
path: .repositories.ceph
src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .packages.repositories.ceph
data:
tag_definitions: []
authorized_keys: []
repositories:
remove_unlisted: true
...

View File

@ -0,0 +1,61 @@
---
schema: 'pegleg/AppArmorProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: calico-node-v1
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: global
data:
savePath: /etc/apparmor.d/calico-node-v1
content: |
# AppArmor profile based on docker-default from version 17.03.x
# https://github.com/moby/moby/blob/17.03.x/profiles/apparmor/template.go
# Modified to allow access to /proc/sys/net, as required by calico felix, for example:
# https://github.com/projectcalico/felix/blob/a1b147fee456f378f1dee8503c979ad4c33745d1/dataplane/linux/endpoint_mgr.go#L942-L1027
# https://github.com/projectcalico/felix/blob/a1b147fee456f378f1dee8503c979ad4c33745d1/dataplane/linux/int_dataplane.go#L669
# Also includes fix to https://github.com/moby/moby/issues/39791 (required to make this work)
# Specific changes are NOTEd below
#include <tunables/global>
profile calico-node-v1 flags=(attach_disconnected,mediate_deleted) {
#include <abstractions/base>
network,
capability,
file,
umount,
deny @{PROC}/* w, # deny write for all files directly in /proc (not in a subdir)
# deny write to files not in /proc/<number>/** or /proc/sys/**
deny @{PROC}/{[^1-9],[^1-9][^0-9],[^1-9s][^0-9y][^0-9s],[^1-9][^0-9][^0-9][^0-9/]*}/** w,
# NOTE: Added '/' docker-default blocks everything in /proc/sys/*/** ^
deny @{PROC}/sys/[^kn]** w, # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel)
# NOTE: Added 'n' ^ or /proc/sys/n* (effectively /proc/sys/net)
deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w, # deny everything except shm* in /proc/sys/kernel/
deny @{PROC}/sysrq-trigger rwklx,
deny @{PROC}/mem rwklx,
deny @{PROC}/kmem rwklx,
deny @{PROC}/kcore rwklx,
deny mount,
deny /sys/[^f]*/** wklx,
deny /sys/f[^s]*/** wklx,
deny /sys/fs/[^c]*/** wklx,
deny /sys/fs/c[^g]*/** wklx,
deny /sys/fs/cg[^r]*/** wklx,
deny /sys/firmware/** rwklx,
deny /sys/kernel/security/** rwklx,
# suppress ptrace denials when using 'docker ps' or using 'ps' inside a container
ptrace (trace,read) peer=calico-node-v1,
}
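A profile like this is only enforced on a container whose pod opts in; with the beta AppArmor annotation used by Kubernetes of this vintage, a sketch for a hypothetical calico-node container would be:

metadata:
  annotations:
    container.apparmor.security.beta.kubernetes.io/calico-node: localhost/calico-node-v1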
View File
@ -0,0 +1,30 @@
---
schema: 'pegleg/AppArmorProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: ceph-osd-v1
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: global
data:
savePath: /etc/apparmor.d/ceph-osd-v1
content: |
# AppArmor profile based on docker-default from version 17.03.x
# https://github.com/moby/moby/blob/17.03.x/profiles/apparmor/template.go
# Modified to allow access to /proc/sys/net, as required, for example:
# https://github.com/openstack/charm-ceph-osd/blob/master/files/apparmor/usr.bin.ceph-osd
# Also includes fix to https://github.com/moby/moby/issues/39791 (required to make this work)
# Specific changes are NOTEd below
#include <tunables/global>
profile ceph-osd-v1 flags=(attach_disconnected) {
#include <abstractions/base>
#include <abstractions/nameservice>
capability,
network,
mount,
}
View File
@ -0,0 +1,53 @@
---
schema: 'pegleg/AppArmorProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: kubeproxy-v1
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: global
data:
savePath: /etc/apparmor.d/kubeproxy-v1
content: |
# AppArmor profile based on docker-default from version 17.03.x
#include <tunables/global>
profile kubeproxy-v1 flags=(attach_disconnected,mediate_deleted) {
#include <abstractions/base>
network,
capability,
file,
umount,
deny @{PROC}/* w, # deny write for all files directly in /proc (not in a subdir)
# deny write to files not in /proc/<number>/** or /proc/sys/**
deny @{PROC}/{[^1-9],[^1-9][^0-9],[^1-9s][^0-9y][^0-9s],[^1-9][^0-9][^0-9][^0-9/]*}/** w,
# NOTE: Added '/' docker-default blocks everything in /proc/sys/*/** ^
deny @{PROC}/sys/[^kn]** w, # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel)
# NOTE: Added 'n' ^ or /proc/sys/n* (effectively /proc/sys/net)
deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w, # deny everything except shm* in /proc/sys/kernel/
deny @{PROC}/sysrq-trigger rwklx,
deny @{PROC}/mem rwklx,
deny @{PROC}/kmem rwklx,
deny @{PROC}/kcore rwklx,
deny mount,
deny /sys/[^f]*/** wklx,
deny /sys/f[^s]*/** wklx,
deny /sys/fs/[^c]*/** wklx,
deny /sys/fs/c[^g]*/** wklx,
deny /sys/fs/cg[^r]*/** wklx,
deny /sys/firmware/** rwklx,
deny /sys/kernel/security/** rwklx,
# suppress ptrace denials when using 'docker ps' or using 'ps' inside a container
ptrace (trace,read) peer=kubeproxy-v1,
}
View File
@ -0,0 +1,144 @@
---
schema: 'pegleg/AppArmorProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: libvirt-v1
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: global
data:
savePath: /etc/apparmor.d/libvirt-v1
content: |
# AppArmor profile based on apparmor used in libvirt debian package
#include <tunables/global>
@{LIBVIRT}="libvirt"
profile libvirt-v1 flags=(attach_disconnected) {
#include <abstractions/base>
#include <abstractions/dbus>
capability kill,
capability audit_write,
capability audit_control,
capability net_admin,
capability net_raw,
capability setgid,
capability sys_admin,
capability sys_module,
capability sys_ptrace,
capability sys_pacct,
capability sys_nice,
capability sys_chroot,
capability setuid,
capability dac_override,
capability dac_read_search,
capability fowner,
capability chown,
capability setpcap,
capability mknod,
capability fsetid,
capability audit_write,
capability ipc_lock,
# Needed for vfio
capability sys_resource,
mount options=(rw,rslave) -> /,
mount options=(rw, nosuid) -> /{var/,}run/libvirt/qemu/*.dev/,
mount options=(rw, move) /dev/ -> /{var/,}run/libvirt/qemu/*.dev/,
mount options=(rw, move) /dev/hugepages/ -> /{var/,}run/libvirt/qemu/*.hugepages/,
mount options=(rw, move) /dev/mqueue/ -> /{var/,}run/libvirt/qemu/*.mqueue/,
mount options=(rw, move) /dev/pts/ -> /{var/,}run/libvirt/qemu/*.pts/,
mount options=(rw, move) /dev/shm/ -> /{var/,}run/libvirt/qemu/*.shm/,
mount options=(rw, move) /{var/,}run/libvirt/qemu/*.dev/ -> /dev/,
mount options=(rw, move) /{var/,}run/libvirt/qemu/*.hugepages/ -> /dev/hugepages/,
mount options=(rw, move) /{var/,}run/libvirt/qemu/*.mqueue/ -> /dev/mqueue/,
mount options=(rw, move) /{var/,}run/libvirt/qemu/*.pts/ -> /dev/pts/,
mount options=(rw, move) /{var/,}run/libvirt/qemu/*.shm/ -> /dev/shm/,
network inet stream,
network inet dgram,
network inet6 stream,
network inet6 dgram,
network netlink raw,
network packet dgram,
network packet raw,
# for --p2p migrations
unix (send, receive) type=stream addr=none peer=(label=unconfined addr=none),
ptrace (trace) peer=unconfined,
ptrace (trace) peer=/usr/sbin/libvirtd,
ptrace (trace) peer=/usr/sbin/dnsmasq,
ptrace (trace) peer=libvirt-*,
signal (send) peer=/usr/sbin/dnsmasq,
signal (read, send) peer=libvirt-*,
signal (send) set=("kill", "term") peer=unconfined,
# For communication/control to qemu-bridge-helper
unix (send, receive) type=stream addr=none peer=(label=/usr/sbin/libvirtd//qemu_bridge_helper),
signal (send) set=("term") peer=/usr/sbin/libvirtd//qemu_bridge_helper,
# Very lenient profile for libvirtd since we want to first focus on confining
# the guests. Guests will have a very restricted profile.
/ r,
/** rwmkl,
/bin/* PUx,
/sbin/* PUx,
/usr/bin/* PUx,
/usr/sbin/virtlogd pix,
/usr/sbin/* PUx,
/{usr/,}lib/udev/scsi_id PUx,
/usr/{lib,lib64}/xen-common/bin/xen-toolstack PUx,
/usr/{lib,lib64}/xen/bin/* Ux,
/usr/lib/xen-*/bin/libxl-save-helper PUx,
# Required by nwfilter_ebiptables_driver.c:ebiptablesWriteToTempFile() to
# read and run an ebtables script.
/var/lib/libvirt/virtd* ixr,
# force the use of virt-aa-helper
audit deny /{usr/,}sbin/apparmor_parser rwxl,
audit deny /etc/apparmor.d/libvirt/** wxl,
audit deny /sys/kernel/security/apparmor/features rwxl,
audit deny /sys/kernel/security/apparmor/matching rwxl,
audit deny /sys/kernel/security/apparmor/.* rwxl,
/sys/kernel/security/apparmor/profiles r,
/usr/{lib,lib64}/libvirt/* PUxr,
/usr/{lib,lib64}/libvirt/libvirt_parthelper ix,
/usr/{lib,lib64}/libvirt/libvirt_iohelper ix,
/etc/libvirt/hooks/** rmix,
/etc/xen/scripts/** rmix,
# allow changing to our UUID-based named profiles
change_profile -> @{LIBVIRT}-[0-9a-f]*-[0-9a-f]*-[0-9a-f]*-[0-9a-f]*-[0-9a-f]*,
/usr/{lib,lib64,lib/qemu,libexec}/qemu-bridge-helper Cx -> qemu_bridge_helper,
# child profile for bridge helper process
profile qemu_bridge_helper {
#include <abstractions/base>
capability setuid,
capability setgid,
capability setpcap,
capability net_admin,
network inet stream,
# For communication/control from libvirtd
unix (send, receive) type=stream addr=none peer=(label=/usr/sbin/libvirtd),
signal (receive) set=("term") peer=/usr/sbin/libvirtd,
/dev/net/tun rw,
/etc/qemu/** r,
owner @{PROC}/*/status r,
/usr/{lib,lib64,lib/qemu,libexec}/qemu-bridge-helper rmix,
}
}
View File
@ -1,80 +0,0 @@
---
schema: 'pegleg/AppArmorProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: airship-apparmor-loader
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: global
data:
savePath: /etc/apparmor.d/profile_airship_loader
content: |
#include <tunables/global>
profile airship-apparmor-loader flags=(attach_disconnected,mediate_deleted) {
#include <abstractions/base>
network inet tcp,
network inet udp,
network inet icmp,
deny network raw,
deny network packet,
file,
umount,
deny /bin/** wl,
deny /boot/** wl,
deny /dev/** wl,
deny /etc/** wl,
deny /home/** wl,
deny /lib/** wl,
deny /lib64/** wl,
deny /media/** wl,
deny /mnt/** wl,
deny /opt/** wl,
deny /proc/** wl,
deny /root/** wl,
deny /sbin/** wl,
deny /srv/** wl,
deny /tmp/** wl,
deny /sys/** wl,
deny /usr/** wl,
audit /etc/apparmor.d/airship_* rwl,
audit /** w,
deny /bin/dash mrwklx,
deny /bin/sh mrwklx,
deny /usr/bin/top mrwklx,
capability chown,
# Allow Apparmor profiles to be loaded
capability mac_admin,
capability dac_override,
capability setuid,
capability setgid,
deny @{PROC}/* w, # deny write for all files directly in /proc (not in a subdir)
# deny write to files not in /proc/<number>/** or /proc/sys/**
deny @{PROC}/{[^1-9],[^1-9][^0-9],[^1-9s][^0-9y][^0-9s],[^1-9][^0-9][^0-9][^0-9]*}/** w,
deny @{PROC}/sys/[^k]** w, # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel)
deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w, # deny everything except shm* in /proc/sys/kernel/
deny @{PROC}/sysrq-trigger rwklx,
deny @{PROC}/mem rwklx,
deny @{PROC}/kmem rwklx,
deny @{PROC}/kcore rwklx,
deny mount,
deny /sys/[^f]*/** wklx,
deny /sys/f[^s]*/** wklx,
deny /sys/fs/[^c]*/** wklx,
deny /sys/fs/c[^g]*/** wklx,
deny /sys/fs/cg[^r]*/** wklx,
deny /sys/firmware/** rwklx,
deny /sys/kernel/security/** rwklx,
}
View File
@ -1,78 +0,0 @@
---
schema: 'pegleg/AppArmorProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: airship-default
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: global
data:
savePath: /etc/apparmor.d/profile_airship_default
content: |
#include <tunables/global>
profile airship-default flags=(attach_disconnected,mediate_deleted) {
#include <abstractions/base>
network inet tcp,
network inet udp,
network inet icmp,
deny network raw,
deny network packet,
file,
umount,
deny /bin/** wl,
deny /boot/** wl,
deny /dev/** wl,
deny /etc/** wl,
deny /home/** wl,
deny /lib/** wl,
deny /lib64/** wl,
deny /media/** wl,
deny /mnt/** wl,
deny /opt/** wl,
deny /proc/** wl,
deny /root/** wl,
deny /sbin/** wl,
deny /srv/** wl,
deny /tmp/** wl,
deny /sys/** wl,
deny /usr/** wl,
audit /** w,
deny /bin/dash mrwklx,
deny /bin/sh mrwklx,
deny /usr/bin/top mrwklx,
capability chown,
capability dac_override,
capability setuid,
capability setgid,
capability net_bind_service,
deny @{PROC}/* w, # deny write for all files directly in /proc (not in a subdir)
# deny write to files not in /proc/<number>/** or /proc/sys/**
deny @{PROC}/{[^1-9],[^1-9][^0-9],[^1-9s][^0-9y][^0-9s],[^1-9][^0-9][^0-9][^0-9]*}/** w,
deny @{PROC}/sys/[^k]** w, # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel)
deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w, # deny everything except shm* in /proc/sys/kernel/
deny @{PROC}/sysrq-trigger rwklx,
deny @{PROC}/mem rwklx,
deny @{PROC}/kmem rwklx,
deny @{PROC}/kcore rwklx,
deny mount,
deny /sys/[^f]*/** wklx,
deny /sys/f[^s]*/** wklx,
deny /sys/fs/[^c]*/** wklx,
deny /sys/fs/c[^g]*/** wklx,
deny /sys/fs/cg[^r]*/** wklx,
deny /sys/firmware/** rwklx,
deny /sys/kernel/security/** rwklx,
}
View File
@ -16,6 +16,10 @@ data:
# Path to save seccomp profile as file.
# This should be same as seccompDirPath with file name.
savePath: /var/lib/kubelet/seccomp/seccomp_default
# Allowed profile name to set in podsecuritypolicy
allowedProfileNames: localhost/seccomp_default
# Default profile name to set in podsecuritypolicy
defaultProfileName: localhost/seccomp_default
# Content of default seccomp profile file.
content: |
{
@ -784,4 +788,4 @@ data:
"excludes": {}
}
]
}
}
View File
@ -7,6 +7,7 @@ metadata:
application: drydock
data:
$schema: 'http://json-schema.org/schema#'
id: 'http://att.com/att-comdev/drydock/baremetalNode.yaml'
type: 'object'
properties:
addressing:
View File
@ -7,6 +7,7 @@ metadata:
application: drydock
data:
$schema: 'http://json-schema.org/schema#'
id: 'http://att.com/att-comdev/drydock/bootaction.yaml'
type: 'object'
additionalProperties: false
properties:
@ -30,7 +31,13 @@ data:
- 'file'
- 'pkg_list'
data:
type: 'string'
oneOf:
- type: 'string'
- type: 'object'
additionalProperties:
oneOf:
- type: 'string'
- type: 'null'
location_pipeline:
type: 'array'
items:
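The oneOf above relaxes a boot action asset's data from a plain string to an optional package map, where a string value pins a version and null accepts any available version. A pkg_list asset could then be written as in this sketch (package names and versions are placeholders):

assets:
  - type: pkg_list
    data:
      tmux: '2.6-3'
      vim: null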
View File
@ -7,6 +7,7 @@ metadata:
application: drydock
data:
$schema: 'http://json-schema.org/schema#'
id: 'http://att.com/att-comdev/drydock/hardwareProfile.yaml'
type: 'object'
properties:
vendor:
View File
@ -7,6 +7,7 @@ metadata:
application: drydock
data:
$schema: 'http://json-schema.org/schema#'
id: 'http://att.com/att-comdev/drydock/hostProfile.yaml'
type: 'object'
properties:
oob:
View File
@ -7,6 +7,7 @@ metadata:
application: drydock
data:
$schema: 'http://json-schema.org/schema#'
id: 'http://att.com/att-comdev/drydock/network.yaml'
type: 'object'
properties:
cidr:
View File
@ -7,6 +7,7 @@ metadata:
application: drydock
data:
$schema: 'http://json-schema.org/schema#'
id: 'http://att.com/att-comdev/drydock/networkLink.yaml'
type: 'object'
properties:
bonding:
View File
@ -7,6 +7,7 @@ metadata:
application: drydock
data:
$schema: 'http://json-schema.org/schema#'
id: 'http://att.com/att-comdev/drydock/rack.yaml'
type: 'object'
properties:
tor_switches:
View File
@ -7,6 +7,7 @@ metadata:
application: drydock
data:
$schema: 'http://json-schema.org/schema#'
id: 'http://att.com/att-comdev/drydock/region.yaml'
type: 'object'
properties:
tag_definitions:
View File
@ -0,0 +1,45 @@
---
schema: deckhand/DataSchema/v1
metadata:
schema: metadata/Control/v1
name: nc/ControlPlaneAddresses/v1
data:
$schema: http://json-schema.org/schema#
definitions:
host:
type: object
properties:
hostname:
type: string
ip:
type: object
properties:
ksn:
type: string
oam:
type: string
required:
- ksn
- oam
additionalProperties: false
required:
- hostname
- ip
additionalProperties: false
type: object
properties:
genesis:
$ref: '#/definitions/host'
masters:
type: array
items:
$ref: '#/definitions/host'
required:
- genesis
- masters
additionalProperties: false
...
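A document against this schema is a flat description of the control-plane hosts; a minimal made-up data section:

data:
  genesis:
    hostname: cp-host01
    ip:
      ksn: 10.0.2.11
      oam: 10.0.1.11
  masters:
    - hostname: cp-host02
      ip:
        ksn: 10.0.2.12
        oam: 10.0.1.12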
View File
@ -0,0 +1,574 @@
---
schema: deckhand/DataSchema/v1
metadata:
schema: metadata/Control/v1
name: nc/CorridorConfig/v1
data:
$schema: http://json-schema.org/schema#
type: object
properties:
corridor:
type: string
artifactory:
properties:
auth_key:
type: string
hostnames:
type: object
properties:
artifacts:
type: string
docker-open:
type: string
docker:
type: string
additionalProperties: false
required:
- artifacts
- docker-open
- docker
artfactory_ip:
type: string
repo_urls:
type: object
properties:
artfactory_web_url:
type: string
secret:
type: string
additionalProperties: false
required:
- auth_key
- hostnames
- artfactory_ip
- repo_urls
gstools:
type: object
properties:
collecthost:
type: string
qclient:
type: string
uamroles:
type: string
cphalo:
type: object
properties:
agent_key:
type: string
proxy:
type: string
additionalProperties: false
required:
- agent_key
- proxy
allmid:
type: object
properties:
admin:
type: string
additionalProperties: false
required:
- admin
bpa:
type: object
properties:
email:
type: string
additionalProperties: false
required:
- email
additionalProperties: false
required:
- collecthost
- uamroles
- qclient
infrastructure:
type: object
properties:
dns:
type: object
properties:
upstream_servers:
type: array
items:
type: string
upstream_servers_joined:
type: string
additionalProperties: false
required:
- upstream_servers
- upstream_servers_joined
ntp:
type: object
properties:
servers:
type: array
items:
type: string
additionalProperties: false
required:
- servers
snmp:
type: object
properties:
primary_target:
type: string
secondary_target:
type: string
additionalProperties: false
required:
- primary_target
- secondary_target
smtp:
type: object
properties:
server:
type: string
additionalProperties: false
required:
- server
ldap:
type: object
properties:
base_url:
type: string
url:
type: string
auth_path:
type: string
username:
type: string
common_name:
type: string
domain:
type: string
fqdn:
type: string
subdomain:
type: string
additionalProperties: false
required:
- auth_path
- common_name
- domain
- fqdn
- subdomain
proxy:
type: object
properties:
no_proxy:
type: string
additionalProperties: false
required:
- no_proxy
scm:
type: object
properties:
port:
type: number
ssh:
type: string
ssh_codecloud:
type: string
url:
type: string
credentials:
type: string
ucp_credentials:
type: string
additionalProperties: false
required:
- port
- ssh
- url
smtp:
type: object
properties:
relay:
type: string
additionalProperties: false
required:
- relay
ro:
properties:
aai:
type: object
properties:
fqdn:
type: string
port:
type: number
protocol:
type: string
version:
type: string
additionalProperties: false
required:
- fqdn
- port
- protocol
- version
netcool:
type: object
properties:
fqdn:
type: string
port:
type: number
protocol:
type: string
version:
type: string
additionalProperties: false
required:
- fqdn
- port
- protocol
- version
epr:
type: object
properties:
fqdn:
type: string
port:
type: number
protocol:
type: string
version:
type: string
additionalProperties: false
required:
- fqdn
- port
- protocol
- version
additionalProperties: false
required:
- aai
- netcool
- epr
ozone:
properties:
ozone-collector:
type: object
properties:
user:
type: string
additionalProperties: true
required:
- user
additionalProperties: true
required:
- ozone-collector
airship:
type: object
properties:
log_level:
type: string
enum:
- 'DEBUG'
- 'INFO'
- 'WARNING'
- 'CRITICAL'
- 'ERROR'
log_level_numeric:
type: integer
enum:
- 10
- 20
- 30
- 40
- 50
maas:
type: object
properties:
ingress_disable_gui:
type: boolean
        additionalProperties: false
required:
- ingress_disable_gui
additionalProperties: false
required:
- log_level
- maas
policy:
type: object
additionalProperties: true
ranger:
properties:
ranger:
type: object
properties:
ranger_keystone_user:
type: string
customer_domain:
type: string
user:
type: string
user_home:
type: string
ranger_agent_client_cert_path:
type: string
ranger_agent_client_cert:
type: string
debug_level:
type: string
additionalProperties: false
required:
- ranger_keystone_user
- customer_domain
- user
- user_home
- ranger_agent_client_cert_path
- ranger_agent_client_cert
- debug_level
ranger-agent:
type: object
properties:
rds_listener_endpoint:
type: string
ranger_agent_keystone_user:
type: string
user:
type: string
user_home:
type: string
keystone_password:
type: string
debug_level:
type: string
additionalProperties: false
required:
- rds_listener_endpoint
- ranger_agent_keystone_user
- user
- user_home
- keystone_password
- debug_level
additionalProperties: false
required:
- ranger
- ranger-agent
calico:
type: object
properties:
calico_startup_loglevel:
type: string
enum:
- 'Debug'
- 'Info'
felix_logseverity:
type: string
enum:
- 'Debug'
- 'Info'
cni_log_level:
type: string
enum:
- 'Debug'
- 'Info'
additionalProperties: false
required:
- calico_startup_loglevel
kubernetes_components:
type: object
properties:
apiserver_log_level:
type: integer
enum:
- 0
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
controller_manager_log_level:
type: integer
enum:
- 0
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
scheduler_log_level:
type: integer
enum:
- 0
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
proxy_log_level:
type: integer
enum:
- 0
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
apiserver_webhook_log_level:
type: integer
enum:
- 0
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
additionalProperties: false
required:
- apiserver_log_level
- controller_manager_log_level
- scheduler_log_level
- proxy_log_level
- apiserver_webhook_log_level
nagios:
type: object
properties:
notification:
type: object
        properties:
snmp:
type: object
properties:
primary_target:
type: string
secondary_target:
type: string
required:
- primary_target
- secondary_target
http:
type: object
properties:
primary_target:
type: string
secondary_target:
type: string
required:
- primary_target
- secondary_target
required:
- snmp
- http
required:
- notification
cicd:
type: object
properties:
generic_pipe:
type: object
properties:
user:
type: string
required_role:
type: string
required:
- user
- required_role
required:
- generic_pipe
utility:
type: object
properties:
always_log_user:
type: boolean
additionalProperties: false
required:
- always_log_user
jenkins:
type: object
properties:
global_env_vars:
type: object
properties:
conf_package_path:
type: string
internal_gerrit_key:
type: string
jenkins_cli:
type: string
os_project_name:
type: string
ssh_data:
type: string
required:
- conf_package_path
- internal_gerrit_key
- jenkins_cli
- os_project_name
- ssh_data
required:
- global_env_vars
additionalProperties: true
...
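As a small illustration of the airship portion of this schema, a conforming fragment would look like the following (values picked from the enums above, purely as examples):

airship:
  log_level: INFO
  log_level_numeric: 20
  maas:
    ingress_disable_gui: true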
View File
@ -0,0 +1,166 @@
---
schema: deckhand/DataSchema/v1
metadata:
schema: metadata/Control/v1
name: nc/MiniMirrorSource/v1
data:
"$schema": http://json-schema.org/schema#
"$id": "#/properties/source"
type: object
title: Source
required:
- name
- url
- key_url
- aptly_config
- components
- subrepos
properties:
name:
"$id": "#/properties/data/properties/name"
type: string
title: Source Name
default: ''
examples:
- ubuntu
- linux_dell
url:
"$id": "#/properties/data/properties/url"
type: string
title: Source URL
default: ''
examples:
- https://artifacts-nc.auk3.cci.att.com/artifactory/mirantis-dev-ceph/xenial
key_url:
"$id": "#/properties/data/properties/key_url"
type: string
title: Source Repository Public Key URL
default: ''
examples:
- https://artifacts-nc.auk3.cci.att.com/artifactory/mirantis-dev-ceph/xenial/archive-ceph-mimic.key
aptly_config:
"$id": "#/properties/data/properties/aptly_config"
type: string
title: Aptly Config
default: ''
examples:
- |
{
"rootDir": "/opt/.aptly",
"downloadConcurrency": 4,
"downloadSpeedLimit": 0,
"architectures": ["amd64"],
"dependencyFollowSuggests": false,
"dependencyFollowRecommends": false,
"dependencyFollowAllVariants": false,
"dependencyFollowSource": false,
"dependencyVerboseResolve": false,
"gpgDisableSign": false,
"gpgDisableVerify": false,
"gpgProvider": "gpg",
"downloadSourcePackages": false,
"skipLegacyPool": true,
"ppaDistributorID": "ubuntu",
"ppaCodename": "",
"FileSystemPublishEndpoints": {
"test": {
"rootDir": "/opt/aptly-publish",
"linkMethod": "copy",
"verifyMethod": "md5"
}
},
"S3PublishEndpoints": {
"test": {
"region": "us-east-1",
"bucket": "repo",
"endpoint": "",
"awsAccessKeyID": "",
"awsSecretAccessKey": "",
"prefix": "",
"acl": "public-read",
"storageClass": "",
"encryptionMethod": "",
"plusWorkaround": false,
"disableMultiDel": false,
"forceSigV2": false,
"debug": false
}
},
"SwiftPublishEndpoints": {
"test": {
"container": "repo",
"osname": "",
"password": "",
"prefix": "",
"authurl": "",
"tenant": "",
"tenantid": "",
"domain": "",
"domainid": "",
"tenantdomain": "",
"tenantdomainid": ""
}
}
}
components:
"$id": "#/properties/data/properties/components"
type: array
title: Source Repository Components
items:
"$id": "#/properties/data/properties/components/items"
type: string
title: APT Repository Component
default: ''
examples:
- main
- universe
- multiverse
subrepos:
"$id": "#/properties/data/properties/subrepos"
type: array
title: APT Sub-repositories
items:
"$id": "#/properties/data/properties/subrepos/items"
type: object
title: APT Sub-repository
required:
- distribution
- packages
properties:
distribution:
"$id": "#/properties/data/properties/subrepos/items/properties/distribution"
type: string
title: Ubuntu Distribution
default: ''
examples:
- xenial
- xenial-updates
- bionic
- bionic-updates
packages:
"$id": "#/properties/data/properties/subrepos/items/properties/packages"
type: array
title: APT Packages
items:
"$id": "#/properties/data/properties/subrepos/items/properties/packages/items"
type: object
title: APT Package
required:
- name
properties:
name:
"$id": "#/properties/data/properties/subrepos/items/properties/packages/items/properties/name"
type: string
title: Package Name
default: ''
examples:
- tmux
- vim
version:
"$id": "#/properties/data/properties/subrepos/items/properties/packages/items/properties/version"
type: string
title: Package Version
default: ''
examples:
- 13.2.4-2~u16.04
...
View File
@ -0,0 +1,15 @@
---
schema: deckhand/DataSchema/v1
metadata:
schema: metadata/Control/v1
name: nc/Policy/v1
data:
$schema: http://json-schema.org/schema#
type: object
properties:
policy:
type: object
additionalProperties: true
...
View File
@ -0,0 +1,25 @@
---
# The purpose of this file is to define the Passphrases for the environment
#
schema: deckhand/DataSchema/v1
metadata:
schema: metadata/Control/v1
name: pegleg/PassphraseCatalog/v1
data:
$schema: http://json-schema.org/schema#
type: 'object'
additionalProperties: false
properties:
passphrases:
type: 'array'
items:
type: 'object'
additionalProperties: true
properties:
description:
type: 'string'
document_name:
type: 'string'
encrypted:
type: 'boolean'
...
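A catalog against this schema simply enumerates the passphrases Pegleg should manage; one illustrative entry:

data:
  passphrases:
    - description: 'IPMI administrator password'
      document_name: ipmi_admin_password
      encrypted: true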
View File
@ -256,6 +256,83 @@ data:
type: string
project_domain_name:
type: string
ro:
type: object
properties:
keystone:
type: object
properties:
ro:
type: object
properties:
role:
type: string
region_name:
type: string
username:
type: string
project_name:
type: string
user_domain_name:
type: string
project_domain_name:
type: string
oslo_db:
type: object
properties:
username:
type: string
database:
type: string
keystone_oslo_messaging:
type: object
properties:
ro:
type: object
properties:
username:
type: string
heat_oslo_messaging:
type: object
properties:
ro:
type: object
properties:
username:
type: string
cinder_oslo_messaging:
type: object
properties:
ro:
type: object
properties:
username:
type: string
nova_oslo_messaging:
type: object
properties:
ro:
type: object
properties:
username:
type: string
neutron_oslo_messaging:
type: object
properties:
ro:
type: object
properties:
username:
type: string
glance_oslo_messaging:
type: object
properties:
ro:
type: object
properties:
username:
type: string
osh:
type: object
properties:
@ -585,6 +662,70 @@ data:
type: string
database:
type: string
osh-addons:
ranger:
type: object
properties:
ranger:
type: object
properties:
role:
type: string
region_name:
type: string
username:
type: string
project_name:
type: string
user_domain_name:
type: string
project_domain_name:
type: string
oslo_db:
type: object
properties:
username:
type: string
database:
type: string
ranger-agent:
type: object
properties:
ranger-agent:
type: object
properties:
role:
type: string
region_name:
type: string
username:
type: string
project_name:
type: string
user_domain_name:
type: string
project_domain_name:
type: string
oslo_db:
type: object
properties:
username:
type: string
database:
type: string
oslo_messaging:
type: object
properties:
admin:
type: object
properties:
username:
type: string
ranger-agent:
type: object
properties:
username:
type: string
osh_infra:
type: object
properties:
View File
@ -30,13 +30,19 @@ data:
type: string
upstream_servers_joined:
type: string
ingress_domain:
type: string
genesis:
type: object
required:
- oob
properties:
hostname:
type: string
ip:
type: string
oob:
type: string
bootstrap:
type: object
properties:
@ -73,24 +79,20 @@ data:
properties:
hostname:
type: string
node_ports:
type: object
properties:
drydock_api:
type: number
maas_api:
type: number
maas_proxy:
type: number
shipyard_api:
type: number
airflow_web:
type: number
ntp:
type: object
properties:
servers_joined:
type: string
servers:
type: array
items:
type: string
ldap:
type: object
properties:
username:
type: string
storage:
type: object
properties:
View File
@ -7,9 +7,32 @@ data:
$schema: 'http://json-schema.org/schema#'
type: object
properties:
infrastructure:
type: object
properties:
sysadmin:
type: string
additionalProperties: false
osh:
type: object
properties:
region_name:
type: string
location:
type: object
properties:
location_corridor:
type: string
location_witt_address_id:
type: string
location_cloud_parent_id:
type: string
location_name:
type: string
location_state:
type: string
location_country:
type: string
location_physical_location_id:
type: string
...
View File
@ -0,0 +1,17 @@
---
schema: 'deckhand/DataSchema/v1'
metadata:
schema: metadata/Control/v1
name: pegleg/NetworkSettings/v1
data:
$schema: 'http://json-schema.org/schema#'
type: 'object'
properties:
mtu:
type: object
additionalProperties:
# NOTE(mb874d): This was chosen to match Drydock's constraints on mtu
# in its Network schema.
type: number
additionalProperties: false
...
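Since additionalProperties maps arbitrary network names to numeric MTUs, a conforming document is a flat map, e.g. (network names and values are illustrative):

data:
  mtu:
    oam: 1500
    storage: 9000
    calico: 1500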
View File
@ -0,0 +1,57 @@
---
schema: deckhand/DataSchema/v1
metadata:
schema: metadata/Control/v1
name: pegleg/PeglegManagedDocument/v1
data:
$schema: 'http://json-schema.org/schema#'
type: 'object'
additionalProperties: true
properties:
encrypted:
type: 'object'
properties:
at:
type: 'string'
by:
type: 'string'
generated:
type: 'object'
properties:
at:
type: 'string'
by:
type: 'string'
specifiedBy:
type: 'object'
properties:
path:
type: 'array'
reference:
type: 'string'
repo:
type: 'string'
managedDocument:
type: 'object'
properties:
data:
type: 'string'
metadata:
type: 'object'
properties:
layeringDefinition:
type: 'object'
properties:
abstract:
type: 'boolean'
layer:
type: 'string'
name:
type: 'string'
schema:
type: 'string'
storagePolicy:
type: 'string'
schema:
type: 'string'
...
View File
@ -14,6 +14,10 @@ data:
type: 'string'
savePath:
type: 'string'
allowedProfileNames:
type: 'string'
defaultProfileName:
type: 'string'
content:
type: 'string'
required: ['seccompDirPath', 'savePath', 'content']
required: ['seccompDirPath', 'savePath', 'content', 'allowedProfileNames', 'defaultProfileName']
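These two new fields feed the pre-GA seccomp annotations on a PodSecurityPolicy; a policy consuming them would carry annotations along these lines:

metadata:
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: localhost/seccomp_default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: localhost/seccomp_default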
View File
@ -1,29 +0,0 @@
---
schema: deckhand/DataSchema/v1
metadata:
schema: metadata/Control/v1
name: pegleg/SiteDefinition/v1
data:
$schema: http://json-schema.org/schema#
type: object
properties:
repositories:
type: object
additionalProperties:
type: object
properties:
revision:
type: string
url:
type: string
required:
- revision
- url
site_type:
type: string
required:
- site_type
additionalProperties: false
...
File diff suppressed because it is too large
View File
@ -62,11 +62,23 @@ data:
properties:
target_manifest:
type: string
metrics:
type: object
properties:
output_dir:
type: string
max_attempts:
type: integer
additionalProperties: false
additionalProperties: false
apiserver:
type: object
properties:
command_prefix:
type: array
items:
type: string
arguments:
type: array
items:
@ -87,6 +99,25 @@ data:
additionalProperties: true
additionalProperties: false
etcd:
type: object
properties:
      # The number of non-auxiliary etcd members that must be present
      # before the auxiliary members will self-terminate
auxiliary_threshold:
type: integer
additionalProperties: false
tiller:
type: object
properties:
listen:
type: integer
probe_listen:
type: integer
storage:
type: string
additionalProperties: false
files:
type: array
items:
@ -96,7 +127,7 @@ data:
type: object
properties:
run_as_user:
type: integer
type: string
additionalProperties: false
hostname:
@ -108,6 +139,9 @@ data:
ip:
$ref: '#/definitions/ip_address'
external_ip:
$ref: '#/definitions/ip_address'
labels:
properties:
static:
View File
@ -68,7 +68,6 @@ data:
- docker_image
- file_path
additionalProperties: false
image:
type: string
# XXX add regex
@ -84,9 +83,47 @@ data:
url:
type: string
# XXX add regex
repos_and_packages:
type: object
properties:
# apt repositories
repositories:
type: array
items:
$ref: '#/definitions/apt_source_line'
# apt repository keys
keys:
type: array
items:
$ref: '#/definitions/public_key'
# required packages
required:
type: object
properties:
docker:
$ref: '#/definitions/package'
runtime:
$ref: '#/definitions/package'
socat:
$ref: '#/definitions/package'
# a container runtime is required
# using either docker or runtime key
oneOf:
- required:
- docker
- required:
- runtime
# socat is required
required:
- socat
additionalProperties: false
# additional packages
additional:
type: array
items:
$ref: '#/definitions/package'
additionalProperties: false
type: object
properties:
files:
type: array
@ -101,6 +138,7 @@ data:
images:
type: object
properties:
# NOTE(mark-burnett): No longer used.
coredns:
$ref: '#/definitions/image'
haproxy:
@ -121,113 +159,25 @@ data:
monitoring_image:
$ref: '#/definitions/image'
required:
- coredns
- haproxy
- helm
- kubernetes
- monitoring_image
additionalProperties: false
packages:
type: object
common:
type: object
properties:
additional:
type: array
items:
$ref: '#/definitions/package'
keys:
type: array
items:
$ref: '#/definitions/public_key'
required:
type: object
properties:
docker:
$ref: '#/definitions/package'
socat:
$ref: '#/definitions/package'
required:
- docker
- socat
additionalProperties: false
repositories:
type: array
items:
$ref: '#/definitions/apt_source_line'
required:
- required
additionalProperties: false
genesis:
type: object
properties:
additional:
type: array
items:
$ref: '#/definitions/package'
keys:
type: array
items:
$ref: '#/definitions/public_key'
required:
type: object
properties:
docker:
$ref: '#/definitions/package'
socat:
$ref: '#/definitions/package'
required:
- docker
- socat
additionalProperties: false
repositories:
type: array
items:
$ref: '#/definitions/apt_source_line'
required:
- required
additionalProperties: false
join:
type: object
properties:
additional:
type: array
items:
$ref: '#/definitions/package'
keys:
type: array
items:
$ref: '#/definitions/public_key'
required:
type: object
properties:
docker:
$ref: '#/definitions/package'
socat:
$ref: '#/definitions/package'
required:
- docker
- socat
additionalProperties: false
repositories:
type: array
items:
$ref: '#/definitions/apt_source_line'
required:
- required
additionalProperties: false
properties:
common:
type: object
$ref: '#/definitions/repos_and_packages'
genesis:
type: object
$ref: '#/definitions/repos_and_packages'
join:
type: object
$ref: '#/definitions/repos_and_packages'
additionalProperties: false
validation:
type: object
properties:
@ -238,8 +188,18 @@ data:
type: string
additionalProperties: false
additionalProperties: false
required:
- images
- packages
allOf:
# ensure that required packages are defined for genesis node
# (either in .packages.genesis or .packages.common)
- anyOf:
- {properties: {packages: {properties: {common: {required: [required]}}}}}
- {properties: {packages: {properties: {genesis: {required: [required]}}}}}
# ensure that required packages are defined for join nodes
# (either in .packages.join or .packages.common)
- anyOf:
- {properties: {packages: {properties: {common: {required: [required]}}}}}
- {properties: {packages: {properties: {join: {required: [required]}}}}}
additionalProperties: false
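Concretely, the allOf/anyOf pair accepts a versions document that declares the required package set once under packages.common; a minimal sketch (package pins are placeholders):

packages:
  common:
    required:
      runtime: containerd=1.2.10-1
      socat: socat=1.7.3.2-2ubuntu2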
View File
@ -101,6 +101,17 @@ data:
items:
$ref: '#/definitions/hostname'
ntp:
type: object
properties:
servers:
type: array
items:
$ref: '#/definitions/hostname_or_ip_address'
additionalProperties: false
required:
- servers
proxy:
type: object
properties:
View File
@ -26,6 +26,9 @@ data:
ip:
$ref: '#/definitions/ip_address'
domain:
type: string
join_ip:
$ref: '#/definitions/ip_address'
@ -45,3 +48,4 @@ data:
- ip
- join_ip
additionalProperties: false
...
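For orientation, a data section satisfying the required fields shown in this hunk might look like the following (addresses invented; only the fields visible above are sketched):

data:
  ip: 10.0.1.10
  domain: cluster.local
  join_ip: 10.0.1.10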
View File
@ -7,6 +7,7 @@ metadata:
application: shipyard
data:
$schema: 'http://json-schema.org/schema#'
id: 'https://github.com/att-comdev/shipyard/deploymentConfiguration.yaml'
type: 'object'
properties:
physical_provisioner:
View File
@ -7,6 +7,7 @@ metadata:
application: shipyard
data:
$schema: 'http://json-schema.org/schema#'
id: 'https://github.com/att-comdev/shipyard/deploymentStrategy.yaml'
type: 'object'
required:
- groups
View File
@ -17,7 +17,7 @@ data: |-
-c POD_CIDR The pod CIDR for the Kubernetes cluster, e.g. 10.97.0.0/16
-i INTERFACE (optional) The interface for internal pod traffic, e.g.
bond0.22. Used to auto-detect the service gateway.
bond1.2006. Used to auto-detect the service gateway.
Exclusive with -g.
-g SERVICE_GW (optional) The service gateway/VRR IP for routing pod
traffic. Exclusive with -i.
@ -26,7 +26,7 @@ data: |-
complete Calico routes cannot be received via BGP.
e.g. 10.96.0.0/15. NOTE: This must include the POD_CIDR.
-s SERVICE_CIDR (optional) A routable CIDR to configure for ingress, maas,
e.g. 10.23.22.192/29
e.g. 135.21.99.192/29
EOU
}
@ -93,27 +93,32 @@ data: |-
TABLE="1500"
if [ "x${intra_vrrp_ip}" == "x" ]; then
echo "Either INTERFACE or SERVICE_GW is required: e.g. either -i bond0.22 or -g 10.23.22.1"
echo "Either INTERFACE or SERVICE_GW is required: e.g. either -i bond1.2006 or -g 172.29.0.1"
usage
exit 1
fi
# Setup a routing table for traffic from service IPs
ip route flush table "${TABLE}"
ip route add default via "${intra_vrrp_ip}" table "${TABLE}"
# Introduce a file lock as concurrent runs of this script
# suffer from a race condition w/ the table 1500 routes
(
flock --verbose -w 120 9 || exit 1
# Setup arp_announce adjustment on interface facing gateway
arp_intf=$(ip route get ${intra_vrrp_ip} | grep dev | awk '{print $3}')
echo 2 > /proc/sys/net/ipv4/conf/${arp_intf}/arp_announce
# Setup a routing table for traffic from service IPs
ip route flush table "${TABLE}"
ip route add default via "${intra_vrrp_ip}" table "${TABLE}"
# Setup arp_announce adjustment on interface facing gateway
arp_intf=$(ip route get ${intra_vrrp_ip} | grep dev | awk '{print $3}')
echo 2 > /proc/sys/net/ipv4/conf/${arp_intf}/arp_announce
if [ "x$OVERLAP_CIDR" != "x" ]; then
# NOTE: This is a work-around for nodes not receiving complete
# routes via BGP.
ip route add "${OVERLAP_CIDR}" via "${intra_vrrp_ip}"
fi
if [ "x$OVERLAP_CIDR" != "x" ]; then
# NOTE(mb874d): This is a work-around for nodes not receiving complete
# routes via BGP. It may also be required for brownfield large sites.
ip route add "${OVERLAP_CIDR}" via "${intra_vrrp_ip}"
fi
if [ "x$SERVICE_CIDR" != "x" ]; then
if [ "x$SERVICE_CIDR" != "x" ]; then
# Traffic from the service IPs to pods should use the pod network.
ip rule add \
from "${SERVICE_CIDR}" \
@ -125,4 +130,5 @@ data: |-
from "${SERVICE_CIDR}" \
lookup "${TABLE}" \
pref 10100
fi
fi
) 9> /tmp/configure-ip-rules.lock
View File
@ -0,0 +1,305 @@
---
schema: pegleg/Script/v1
metadata:
schema: metadata/Document/v1
name: i40e-dkms-install
storagePolicy: cleartext
layeringDefinition:
abstract: false
layer: global
substitutions:
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .kernel_drivers.i40e_driver.location
dest:
path: .
pattern: DH_SUB_DRVURL
data: |-
#!/bin/bash
set -ex
# defaults
DRVURL="DH_SUB_DRVURL"
PREP=0
TEMPDIR=1
HDRFIX=1
REBOOT=1
ERR=0
ARCH=x86_64
DRV=i40e
apt_install(){
for pkg in $@; do
dpkg -s $pkg 2> /dev/null | grep 'Status:.*install' || DEBIAN_FRONTEND=noninteractive apt -y -o Dpkg::Options::=--force-confdef -o Dpkg::Options::=--force-confold install $pkg
done
}
apt_reinstall(){
for pkg in $@; do
DEBIAN_FRONTEND=noninteractive apt -y -o Dpkg::Options::=--force-confdef -o Dpkg::Options::=--force-confold install --reinstall $pkg
done
}
function usage() {
cat <<EOF >&2
      Usage: $(basename $0) [-h] [-u driver-url] [-p http://proxy.to.use:port] [ -T ] [ -x ] [ -s ] [ -r ]
-s Prep system / install packages (default: no)
-h help
-u URL to fetch, known to work:
https://artifacts-nc.mtn29.cci.att.com/artifactory/list/soureforgeie40/i40e/i40e-2.4.6.tar.gz (default)
-p proxy string to use; sets both http_proxy and https_proxy (default: nothing set)
-T don't use a temporary directory (default: do use a temp directory)
-x don't try to install missing kernel headers (default: do install missing headers)
-r mark the operating system for deferred reboot (default: reboot immediately)
EOF
exit 1
}
# ###########################################################################
while getopts ":Thp:su:xr" opt; do
case ${opt} in
T )
TEMPDIR=0
;;
s )
PREP=1
;;
h )
usage;
;;
u )
DRVURL=$OPTARG
;;
p )
export http_proxy=$OPTARG
export https_proxy=$OPTARG
;;
x )
HDRFIX=0
;;
r )
REBOOT=0
;;
\?)
echo "Invalid: $OPTARG" 1>&2
ERR=1
;;
: )
echo "Invalid: $OPTARG requires an argument" 1>&2
ERR=1
;;
esac
done
[ $ERR -ne 0 ] && exit 1
echo "URL: $DRVURL"
echo "PROXY: ${https_proxy:-(not set)}"
# pkgs to make dkms work
if [ $PREP -ne 0 ] ; then
echo "Prepping system"
apt_install wget build-essential dkms curl rsync
fi
# DKMS operations that iterate over all the available driver versions in
# /var/lib/dkms cause DKMS to break/fail if even one driver version's dkms.conf
# is missing. This also causes kernel header installs to fail, because apt runs
# DKMS hooks during installation. And despite the incomplete header install, apt
# does not register this as a failure and exits 0 thinking all is well.
# Therefore to work around DKMS fragility, we iterate through each driver version
# directory and remove any folders for driver versions that do not contain a
# dkms.conf file.
# Get all /var/lib/dkms/<module>/<module-version> directories. We don't limit the
# search to ${DRV} modules because missing dkms.conf in any of them can break DKMS
# and kernel headers install.
dkms_dirs="$(find /var/lib/dkms -maxdepth 2 -mindepth 2 -type d)"
for dir in $dkms_dirs; do
# DKMS will complain and fail to do anything useful if one of its drivers is
# missing its dkms.conf file.
# Also, headers will fail to install properly if drivers in /var/lib/dkms are
# missing a *.ko file
if [[ ! -f $dir/source/dkms.conf ]] || [[ -z $(find $dir -name "*.ko") ]]; then
rm -r $dir
fi
done
# install missing kernel headers for all kernel versions
if [ $HDRFIX -ne 0 ] ; then
# Perform for each kernel on the system. That way we are covered if a new
# kernel had been installed, but wasn't active yet before rebooting.
for krel in $(ls /lib/modules/) ; do
apt_install linux-headers-$krel
# Check to see if headers were *really* installed, and try
# re-installing them if not (to fix environments that previously
# did not have the above DKMS fix applied).
## e.g. linux-headers-x.y.z-ab
base_header_name="$(echo "linux-headers-$krel" | grep -o 'linux-headers-[0-9.-]*[^-a-z]')"
if [ ! -d /usr/src/$base_header_name ]; then
apt_reinstall $base_header_name
fi
## e.g. linux-headers-x.y.z-generic
if [ ! -d /usr/src/linux-headers-$krel ]; then
apt_reinstall linux-headers-$krel
fi
done
fi
if [ $TEMPDIR -ne 0 ] ; then
tmpdir=$(mktemp -d /tmp/i40-install.XXXXXX)
function cleanup {
rm -rf "$tmpdir"
}
trap cleanup EXIT
cd $tmpdir
fi
# it has been observed in testing that the fetch from sf.net fails frequently
trycount=1
while : ; do
if curl -L --silent $DRVURL | tar -xz ; then
break
fi
if [ $trycount -ge 3 ] ; then
echo 1>&2
echo "Fetching $DRVURL failed after $trycount attempts" 1>&2
exit 1
fi
sleep 10
trycount=$(($trycount+1))
done
# base dir (name)
bdir=$(ls|grep ${DRV})
if [ "$(echo $bdir | wc -w)" -ne 1 ] ; then
echo "Unable to determine correct module directory, I see $bdir" 2>&1
exit 1
fi
# i40e.spec contains the driver version; get it from there
DRVVER="$(find . -name ${DRV}.spec | xargs grep Version | awk '{print $2}' | head -1)"
# target dir
tdir="/usr/src/${bdir}"
echo "VERSION: $DRVVER"
echo "TARGET: $tdir"
add_dkms_module() {
# We do not loop through kernels here because this is not a kernel-specific check.
# We are just seeing if this $DRVVER of $DRV is added to the DKMS tree or not.
is_added="$(dkms status -m $DRV -v $DRVVER -k null | wc -l)"
# add dkms modules only if they are not already added.
if [[ ${is_added} == 0 ]]; then
# We have seen some cases where the is_added dkms check above
# gives a false-positive, so as an added layer we check here
# for an error message that the module is already added, and
# ignore the error if that happens.
dkms_add_output="$(dkms add -m ${DRV} -v "${DRVVER}" 2>&1)" || \
echo "$dkms_add_output" | grep 'Error! DKMS tree already contains:' || \
(echo "$dkms_add_output" 1>&2 && exit 1)
else
echo "The target dkms module is already loaded to the dkms tree."
fi
}
install_dkms_module() {
# dkms install for each kernel on the system. That way we are covered if a new
# kernel had been installed, but wasn't active yet before rebooting.
for krel in $(ls /lib/modules/) ; do
# install dkms modules only if they are not already installed for this kernel.
is_installed="$(dkms status -m $DRV -v $DRVVER -k $krel | grep installed | wc -l)"
# install for other kernels ('dkms install' won't do this)
#for krel in $(ls /lib/modules/) ; do dkms install -k $krel ; done
if [[ ${is_installed} == 0 ]]; then
dkms_install_output="$(dkms install ${DRV}/${DRVVER} -k $krel 2>&1)" || \
echo "$dkms_install_output" | grep 'Error! This module/version combo is already installed' || \
(echo "$dkms_install_output" 1>&2 && exit 1)
else
echo "The target dkms module is already installed."
fi
done
}
# if there are existing kernel modules for this driver, replace them with
# the module from the dkms tree
replace_driver_module() {
# Perform for each kernel on the system. That way we are covered if a new
# kernel had been installed, but wasn't active yet before rebooting.
for krel in $(ls /lib/modules/) ; do
for file in $(find /lib/modules/$krel -type f -name "${DRV}.ko"); do
cp /var/lib/dkms/${DRV}/${DRVVER}/$krel/${ARCH}/module/${DRV}.ko $file
done
done
}
# DO NOT remove or rename the directory under /usr/src, as this completely breaks DKMS.
# In general if the target directory already exists, there's no need to re-copy
# because the target directory contains the driver name (i40e) and driver version
# (e.g. 2.7.12). So unless there's another 2.7.12 driver version that's different
# from the first, this should not be an issue. For a simple idempotent solution,
# we just rsync the files to the target, and only move in files that don't exist
# at the dst.
rsync -a --ignore-existing "${bdir}/src"/ "${tdir}"/
cat <<EOF > "${tdir}/dkms.conf"
PACKAGE_NAME="${DRV}"
PACKAGE_VERSION="${DRVVER}"
BUILT_MODULE_NAME[0]="${DRV}"
DEST_MODULE_LOCATION[0]="/updates/"
REMAKE_INITRD="yes"
AUTOINSTALL="yes"
EOF
add_dkms_module
install_dkms_module
replace_driver_module
# make sure modprobe sees the 'right' module version
pver=$(modinfo ${DRV} | grep ^version | awk '{print $2}')
# this extra step is to support any patched driver version from Intel
# as they like to add "_att" to the version that is patched, and will
# break the check below otherwise. (e.g. "2.9.23" vs "2.9.23_att")
read vMaj vMin vUpd <<<${DRVVER//[^0-9]/ }
cver=$vMaj.$vMin.$vUpd
if [[ "${cver}" != ${pver} ]] ; then
# not really sure if this can ever happen
echo "ERROR: Module system does not see the version we just built" 2>&1
exit 1
fi
# If we've already installed this driver version, we don't need to reboot or mark for reboot.
# We still run the idempotent steps above, because there is the possibility that someone will
# have installed a new kernel version (possibly without the needed headers), in which case this
# script should run again for that new kernel version even though the i40e version has not changed.
# In the case of a newly installed/staged kernel that is pending reboot, that kernel will have
# already marked the node for reboot, so we can skip doing that here.
[ -e /var/lib/${DRV}.done ] && [ "$(cat /var/lib/${DRV}.done)" = "$DRVVER" ] && exit 0
# Mark the driver version installation as done to avoid flagging the node as needing a reboot
# for this same driver version again (unless a different kernel version was
# installed with no change in the driver version, in which case the kernel update will have
# marked the node as needing a reboot).
touch /var/lib/${DRV}.done
# save the driver version in the i40e.done file for the verify driver
# script to read and validate the expected against the actual driver
# version
echo "${pver}" | tee /var/lib/${DRV}.done
sync
systemctl disable i40e-dkms-install.service
if [ $REBOOT -ne 0 ]; then
# we can't rely on rmmod/insmod; the driver may not be robust or the
# interface is in use in complicated ways
wall "${DRV} driver updated - rebooting"
/sbin/reboot
# don't exit successfully, doing that would allow prom to start a few
# seconds before reboot takes effect
sleep infinity
exit 1
else
echo '*** System restart required ***' > /run/reboot-required
echo "$(basename ${DRVURL})" >> /run/reboot-required.pkgs
fi
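A hypothetical invocation of the installer above, for illustration only; the proxy URL is a placeholder, and the script name assumes how the Pegleg document is materialized on disk:
#!/bin/bash
# Prep packages (-s), fetch through a proxy (-p), and defer the reboot (-r):
./i40e-dkms-install.sh -s -p http://proxy.example.com:8080 -r
# Afterwards, confirm what DKMS built and what modprobe will load:
dkms status -m i40e
modinfo i40e | grep ^version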

View File

@ -0,0 +1,17 @@
---
schema: pegleg/Script/v1
metadata:
schema: metadata/Document/v1
name: linux-crashdump-install
storagePolicy: cleartext
layeringDefinition:
abstract: false
layer: global
data: |-
#!/bin/bash
set -ex
echo '*** System restart required ***' > /run/reboot-required
echo "linux-crashdump" >> /run/reboot-required.pkgs
# Prevent the service from requesting another reboot
touch /var/lib/linux-crashdump-reboot.done

View File

@ -0,0 +1,70 @@
---
schema: pegleg/Script/v1
metadata:
schema: metadata/Document/v1
name: node-reboot
storagePolicy: cleartext
layeringDefinition:
abstract: false
layer: global
data: |-
#!/bin/bash
set -ex
REBOOT_HISTORY=/var/lib/node-reboot-history.txt
rrr="/run/reboot-required"
rrpkgs="/run/reboot-required.pkgs"
safety_valve() {
declare -i window
window="$1"
window_secs=$(( window * 60 ))
if [ -f "$REBOOT_HISTORY" ]
then
declare -i last_boot
last_boot=$(cat "$REBOOT_HISTORY")
since_last_reboot=$(( $(date +%s) - last_boot ))
if [[ $since_last_reboot -lt $window_secs ]]
then
echo "Still in reboot safety window, refusing to reboot!"
return 1
else
return 0
fi
fi
return 0
}
SAFETY_DURATION=30
while getopts ":s:" opt;
do
case ${opt} in
s )
SAFETY_DURATION=$OPTARG
;;
\? )
echo "Usage: node-reboot [-s mins]"
echo ""
echo " -s - Safety window in minutes to avoid reboot loops."
echo ""
;;
esac
done
if [[ -f $rrr && $(grep "System restart required" "$rrr") ]]; then
if safety_valve "$SAFETY_DURATION"; then
pkgs=$(tr '\n' ' ' < "$rrpkgs")
wall "'$pkgs installed - rebooting"
rm -vf $rrr $rrpkgs
date +%s > "$REBOOT_HISTORY"
/sbin/reboot
# don't exit successfully, doing that would allow prom to start a few
# seconds before reboot takes effect
sleep infinity
exit 1
fi
fi
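To exercise the safety valve by hand, the reboot markers this script consumes can be staged manually; a sketch, assuming the script is installed as node-reboot:
#!/bin/bash
# Stage the markers that other boot actions normally write:
echo '*** System restart required ***' > /run/reboot-required
echo 'linux-crashdump' >> /run/reboot-required.pkgs
# The first run reboots (no history yet); a second run inside the
# 30-minute window refuses with "Still in reboot safety window".
node-reboot -s 30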

View File

@ -0,0 +1,70 @@
---
schema: pegleg/Script/v1
metadata:
schema: metadata/Document/v1
name: probe-killer
storagePolicy: cleartext
layeringDefinition:
abstract: false
layer: global
data: |-
#!/usr/bin/env bash
IFS=$'\n'
export KUBECONFIG=/etc/kubernetes/admin/kubeconfig.yaml
pods="$(kubectl get pods --all-namespaces -o wide --no-headers)"
for podRow in $pods; do
ns="$(echo "$podRow" | awk '{print $1}')"
pod="$(echo "$podRow" | awk '{print $2}')"
desc="$(kubectl describe -n $ns pod $pod)"
# Example output
# Liveness: exec [/tmp/bin/liveness-probe.sh] delay=15s timeout=10s period=35s #success=1 #failure=10
# Readiness: exec [/tmp/bin/readiness-probe.sh] delay=15s timeout=1s period=15s #success=1 #failure=3
probeCLIs="$(echo "$desc" | grep 'Liveness:\|Readiness:' | grep exec | grep -o '\[.*\]' | tr -d '][')"
containers="$(/usr/local/bin/kubectl get pods $pod -n $ns -o jsonpath='{.spec.containers[*].name}' | tr ' ' '\n')"
for container in $containers; do
for probeCLI in $probeCLIs; do
timeout 15 kubectl exec -n $ns $pod --container $container -- /bin/bash -c "
# Whether or not PID will be killed. Useful for debugging.
killPID=true
# Find processes older than 5 minutes
nsProcs=\"\$(find /proc -maxdepth 1 -name '[0-9]*' -type d -mmin +5)\"
# Kill matches
IFS=\$'\n'
for procDir in \$nsProcs; do
# Replace null byte with space.
# The null byte is used in this file instead of space to separate CLI args.
pidCLI=\"\$(cat \$procDir/cmdline | tr '\0' ' ')\"
if [[ \"\$pidCLI\" = *\"$probeCLI\"* ]]; then
pidToKill=\"\$(basename \$procDir)\"
# Do not let the script kill itself
if [[ \$pidToKill = \$\$ ]] || [[ \"\$pidCLI\" = *'kubectl exec'* ]]; then
continue
fi
# never kill pid 1
if [[ \$pidToKill = 1 ]]; then
echo \"WARN: PID 1 regex match for '$probeCLI'. Check regex list.\"
continue
fi
if [[ \$killPID = true ]]; then
kill \$pidToKill
# Also kill all the children of this PID
kill -TERM -- -\$pidToKill
echo \"INFO: Killed PID \$pidToKill \$pidCLI and its children\"
else
echo \"DEBUG: PID to kill in non-debug mode: \$pidToKill \$pidCLI\"
fi
fi
done
" 2>&1 | grep '^INFO\|^WARN\|^DEBUG' &
done
done
done
echo "probeKiller execution completed."

View File

@ -22,9 +22,14 @@ data: |-
DATE=$(date)
for rbd in `lsblk | awk '/^rbd/ {if($7==""){print $0}}' | awk '{ printf "/dev/%s\n",$1 }'`; do
if grep -q $rbd /var/run/rbd_list; then
echo "[${DATE}] Unmapping stale RBD $rbd"
/usr/bin/rbd unmap -o force $rbd
# NOTE(supamatt): rbd unmap -o force will only succeed if there are NO pending I/O
if ps -ax | grep "mkfs" | grep -q "$rbd"; then
echo "[${DATE}] $rbd is currently being formatted, ignoring"
else
echo "[${DATE}] Unmapping stale RBD $rbd"
/usr/bin/rbd unmap -o force $rbd
# NOTE(supamatt): rbd unmap -o force will only succeed if there are NO pending I/O
# NOTE(stevetaylor): ^^^ doesn't seem to apply to formatting RBDs, mkfs check added
fi
else
echo "[${DATE}] Skipping RBD $rbd as it hasn't been stale for at least 60 seconds"
fi

View File

@ -0,0 +1,623 @@
---
schema: deckhand/DataSchema/v1
metadata:
schema: metadata/Document/v1
name: bird-cfg-template
labels:
name: bird-cfg-template
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data: |
# Generated by confd
# AT&T {
{{- $extensions := json (getv "/global/extensions" "{}") }}
{{- $ipv4AdditionalCIDRs := split (or $extensions.ipv4AdditionalCIDRs "") "," }}
{{- $ipv4CommunityCIDRRef := json (or $extensions.ipv4CommunityCIDRRef "{}") }}
function osh_filters()
{
{{- range $ipv4AdditionalCIDRs }}
{{/* go has annoying behavior where splitting an empty string returns an array of length 1 containing an empty string, so filter out any empty strings */}}
{{- if ne . "" }}
if ( net ~ {{ . }} ) then { accept; }
{{- end }}
{{- end }}
}
function apply_communities ()
{
# Set community value based on dictionary of cidrs
{{- range $ipv4CommunityCIDRRef }}
{{- $community := .community }}
{{- $cidr := .cidr }}
{{- if .prefix }}
if ( net ~ {{ $cidr }} ) then { bgp_community.add(({{ .prefix }}, {{ $community }})); }
{{- else }}
{{/* Add the AS number */}}
{{- $node_as_key := printf "/host/%s/as_num" (getenv "NODENAME")}}
if ( net ~ {{ $cidr }} ) then { bgp_community.add(({{if exists $node_as_key}}{{getv $node_as_key}}{{else}}{{getv "/global/as_num"}}{{end}}, {{ $community }})); }
{{- end }}
{{- end }}
}
# } # AT&T
include "bird_aggr.cfg";
include "bird_ipam.cfg";
{{- $node_ip_key := printf "/host/%s/ip_addr_v4" (getenv "NODENAME")}}{{$node_ip := getv $node_ip_key}}
{{- $router_id := getenv "CALICO_ROUTER_ID" ""}}
{{- $node_name := getenv "NODENAME"}}
router id {{if eq "hash" ($router_id) -}}
{{hashToIPv4 $node_name}};
{{- else -}}
{{if ne "" ($router_id)}}{{$router_id}}{{else}}{{$node_ip}}{{end}};
{{- end}}
{{- define "LOGGING"}}
{{- $node_logging_key := printf "/host/%s/loglevel" (getenv "NODENAME")}}
{{- if exists $node_logging_key}}
{{- $logging := getv $node_logging_key}}
{{- if eq $logging "debug"}}
debug all;
{{- else if ne $logging "none"}}
debug { states };
{{- end}}
{{- else if exists "/global/loglevel"}}
{{- $logging := getv "/global/loglevel"}}
{{- if eq $logging "debug"}}
debug all;
{{- else if ne $logging "none"}}
debug { states };
{{- end}}
{{- else}}
debug { states };
{{- end}}
{{- end}}
# Configure synchronization between routing tables and kernel.
protocol kernel {
learn; # Learn all alien routes from the kernel
persist; # Don't remove routes on bird shutdown
scan time 2; # Scan kernel routing table every 2 seconds
import all;
export filter calico_kernel_programming; # Default is export none
graceful restart; # Turn on graceful restart to reduce potential flaps in
# routes when reloading BIRD configuration. With a full
# automatic mesh, there is no way to prevent BGP from
# flapping since multiple nodes update their BGP
# configuration at the same time, GR is not guaranteed to
# work correctly in this scenario.
}
# Watch interface up/down events.
protocol device {
{{- template "LOGGING"}}
scan time 2; # Scan interfaces every 2 seconds
}
protocol direct {
{{- template "LOGGING"}}
interface -"cali*", "*"; # Exclude cali* but include everything else.
}
{{if eq "" ($node_ip)}}# IPv4 disabled on this node.
{{else}}{{$node_as_key := printf "/host/%s/as_num" (getenv "NODENAME")}}
# Template for all BGP clients
template bgp bgp_template {
{{- $as_key := or (and (exists $node_as_key) $node_as_key) "/global/as_num"}}
{{- $node_as_num := getv $as_key}}
{{- template "LOGGING"}}
description "Connection to BGP peer";
local as {{$node_as_num}};
multihop;
gateway recursive; # This should be the default, but just in case.
import all; # Import all routes, since we don't know what the upstream
# topology is and therefore have to trust the ToR/RR.
export filter calico_export_to_bgp_peers; # Only want to export routes for workloads.
source address {{$node_ip}}; # The local address we use for the TCP connection
add paths on;
graceful restart; # See comment in kernel section about graceful restart.
connect delay time 2;
connect retry time 5;
error wait time 5,30;
}
# ------------- Node-to-node mesh -------------
{{- $node_cid_key := printf "/host/%s/rr_cluster_id" (getenv "NODENAME")}}
{{- $node_cluster_id := getv $node_cid_key}}
{{- if ne "" ($node_cluster_id)}}
# This node is configured as a route reflector with cluster ID {{$node_cluster_id}};
# ignore node-to-node mesh setting.
{{- else}}
{{if (json (getv "/global/node_mesh")).enabled}}
{{range $host := lsdir "/host"}}
{{$onode_as_key := printf "/host/%s/as_num" .}}
{{$onode_ip_key := printf "/host/%s/ip_addr_v4" .}}{{if exists $onode_ip_key}}{{$onode_ip := getv $onode_ip_key}}
{{$nums := split $onode_ip "."}}{{$id := join $nums "_"}}
# For peer {{$onode_ip_key}}
{{if eq $onode_ip ($node_ip) }}# Skipping ourselves ({{$node_ip}})
{{else if ne "" $onode_ip}}protocol bgp Mesh_{{$id}} from bgp_template {
neighbor {{$onode_ip}} as {{if exists $onode_as_key}}{{getv $onode_as_key}}{{else}}{{getv "/global/as_num"}}{{end}};
{{- /*
Make the peering unidirectional. This avoids a race where
- peer A opens a connection and begins a graceful restart
- before the restart completes, peer B opens its connection
- peer A sees the new connection and aborts the graceful restart, causing a route flap.
*/ -}}
{{if gt $onode_ip $node_ip}}
passive on; # Mesh is unidirectional, peer will connect to us.
{{- end}}
}{{end}}{{end}}{{end}}
{{else}}
# Node-to-node mesh disabled
{{end}}
{{- end}}
# ------------- Global peers -------------
{{if ls "/global/peer_v4"}}
{{range gets "/global/peer_v4/*"}}{{$data := json .Value}}
{{$nums := split $data.ip "."}}{{$id := join $nums "_"}}
# For peer {{.Key}}
{{- if eq $data.ip ($node_ip) }}
# Skipping ourselves ({{$node_ip}})
{{- else}}
protocol bgp Global_{{$id}} from bgp_template {
neighbor {{$data.ip}} as {{$data.as_num}};
{{- if and (eq $data.as_num $node_as_num) (ne "" ($node_cluster_id)) (ne $data.rr_cluster_id ($node_cluster_id))}}
rr client;
rr cluster id {{$node_cluster_id}};
{{- end}}
}
{{- end}}
{{end}}
{{else}}# No global peers configured.{{end}}
# ------------- Node-specific peers -------------
{{$node_peers_key := printf "/host/%s/peer_v4" (getenv "NODENAME")}}
{{if ls $node_peers_key}}
{{range gets (printf "%s/*" $node_peers_key)}}{{$data := json .Value}}
{{$nums := split $data.ip "."}}{{$id := join $nums "_"}}
# For peer {{.Key}}
{{- if eq $data.ip ($node_ip) }}
# Skipping ourselves ({{$node_ip}})
{{- else}}
protocol bgp Node_{{$id}} from bgp_template {
neighbor {{$data.ip}} as {{$data.as_num}};
{{- if and (eq $data.as_num $node_as_num) (ne "" ($node_cluster_id)) (ne $data.rr_cluster_id ($node_cluster_id))}}
rr client;
rr cluster id {{$node_cluster_id}};
{{- end}}
}
{{- end}}
{{end}}
{{else}}# No node-specific peers configured.{{end}}
{{end}}{{/* End of IPv4 enable check */}}
---
schema: deckhand/DataSchema/v1
metadata:
schema: metadata/Document/v1
name: bird-aggr-cfg-template
labels:
name: bird-aggr-cfg-template
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data: |
# Generated by confd
{{- $block_key := printf "/calico/ipam/v2/host/%s/ipv4/block" (getenv "NODENAME")}}
{{- $static_key := "/calico/staticroutes"}}
{{if or (ls $block_key) (ls $static_key)}}
protocol static {
{{- if ls $block_key}}
# IP blocks for this host.
{{- range ls $block_key}}
{{- $parts := split . "-"}}
{{- $cidr := join $parts "/"}}
route {{$cidr}} blackhole;
{{- end}}
{{- end}}
{{- if ls $static_key}}
# Static routes.
{{- range ls $static_key}}
{{- $parts := split . "-"}}
{{- $cidr := join $parts "/"}}
route {{$cidr}} blackhole;
{{- end}}
{{- end}}
}
{{else}}# No IP blocks or static routes for this host.{{end}}
# Aggregation of routes on this host; export the block, nothing beneath it.
function calico_aggr ()
{
{{- range ls $block_key}}
{{- $parts := split . "-"}}
{{- $cidr := join $parts "/"}}
{{- $affinity := json (getv (printf "%s/%s" $block_key .))}}
{{- if $affinity.state}}
# Block {{$cidr}} is {{$affinity.state}}
{{- if eq $affinity.state "confirmed"}}
if ( net = {{$cidr}} ) then { accept; }
if ( net ~ {{$cidr}} ) then { reject; }
{{- end}}
{{- else }}
# Block {{$cidr}} is implicitly confirmed.
if ( net = {{$cidr}} ) then { accept; }
if ( net ~ {{$cidr}} ) then { reject; }
{{- end }}
{{- end}}
}
---
schema: deckhand/DataSchema/v1
metadata:
schema: metadata/Document/v1
name: bird-ipam-cfg-template
labels:
name: bird-ipam-cfg-template
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data: |
# Generated by confd
filter calico_export_to_bgp_peers {
# AT&T {
apply_communities();
# } # AT&T
calico_aggr();
# AT&T {
osh_filters();
# } # AT&T
{{- $static_key := "/staticroutes"}}
{{- if ls $static_key}}
# Export static routes.
{{- range ls $static_key}}
{{- $parts := split . "-"}}
{{- $cidr := join $parts "/"}}
if ( net ~ {{$cidr}} ) then { accept; }
{{- end}}
{{- end}}
{{range ls "/v1/ipam/v4/pool"}}{{$data := json (getv (printf "/v1/ipam/v4/pool/%s" .))}}
if ( net ~ {{$data.cidr}} ) then {
accept;
}
{{- end}}
reject;
}
{{$network_key := printf "/bgp/v1/host/%s/network_v4" (getenv "NODENAME")}}{{if exists $network_key}}{{$network := getv $network_key}}
filter calico_kernel_programming {
{{- $reject_key := "/rejectcidrs"}}
{{- if ls $reject_key}}
# Don't program static routes into kernel.
{{- range ls $reject_key}}
{{- $parts := split . "-"}}
{{- $cidr := join $parts "/"}}
if ( net ~ {{$cidr}} ) then { reject; }
{{- end}}
{{- end}}
{{range ls "/v1/ipam/v4/pool"}}{{$data := json (getv (printf "/v1/ipam/v4/pool/%s" .))}}
if ( net ~ {{$data.cidr}} ) then {
{{- if $data.vxlan_mode}}
# Don't program VXLAN routes into the kernel - these are handled by Felix.
reject;
}
{{- else if $data.ipip_mode}}{{if eq $data.ipip_mode "cross-subnet"}}
if defined(bgp_next_hop) && ( bgp_next_hop ~ {{$network}} ) then
krt_tunnel = ""; {{/* Destination in ipPool, mode is cross sub-net, route from-host on subnet, do not use IPIP */}}
else
krt_tunnel = "{{$data.ipip}}"; {{/* Destination in ipPool, mode is cross sub-net, route from-host off subnet, set the tunnel (if IPIP not enabled, value will be "") */}}
accept;
} {{else}}
krt_tunnel = "{{$data.ipip}}"; {{/* Destination in ipPool, mode not cross sub-net, set the tunnel (if IPIP not enabled, value will be "") */}}
accept;
} {{end}} {{else}}
krt_tunnel = "{{$data.ipip}}"; {{/* Destination in ipPool, mode field is not present, set the tunnel (if IPIP not enabled, value will be "") */}}
accept;
} {{end}}
{{end}}
accept; {{/* Destination is not in any ipPool, accept */}}
}
{{else}}
filter calico_kernel_programming { accept; }
{{end}}{{/* End of 'exists $network_key' */}}
---
schema: deckhand/DataSchema/v1
metadata:
schema: metadata/Document/v1
name: bird6-cfg-template
labels:
name: bird6-cfg-template
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data: |
# Generated by confd
# AT&T {
{{- $extensions := json (getv "/global/extensions" "{}") }}
{{- $ipv6AdditionalCIDRs := split (or $extensions.ipv6AdditionalCIDRs "") "," }}
{{- $ipv6CommunityCIDRRef := json (or $extensions.ipv6CommunityCIDRRef "{}") }}
function osh_filters()
{
{{- range $ipv6AdditionalCIDRs }}
{{/* go has annoying behavior where splitting an empty string returns an array of length 1 containing an empty string, so filter out any empty strings */}}
{{- if ne . "" }}
if ( net ~ {{ . }} ) then { accept; }
{{- end }}
{{- end }}
}
function apply_communities ()
{
# Set community value based on dictionary of cidrs
{{- range $ipv6CommunityCIDRRef }}
{{- $community := .community }}
{{- $cidr := .cidr }}
{{- if .prefix }}
if ( net ~ {{ $cidr }} ) then { bgp_community.add(({{ .prefix }}, {{ $community }})); }
{{- else }}
{{/* Add the AS number */}}
{{- $node_as_key := printf "/host/%s/as_num" (getenv "NODENAME")}}
if ( net ~ {{ $cidr }} ) then { bgp_community.add(({{if exists $node_as_key}}{{getv $node_as_key}}{{else}}{{getv "/global/as_num"}}{{end}}, {{ $community }})); }
{{- end }}
{{- end }}
}
# } # AT&T
include "bird6_aggr.cfg";
include "bird6_ipam.cfg";
{{- $node_ip_key := printf "/host/%s/ip_addr_v4" (getenv "NODENAME")}}{{$node_ip := getv $node_ip_key}}
{{- $node_ip6_key := printf "/host/%s/ip_addr_v6" (getenv "NODENAME")}}{{$node_ip6 := getv $node_ip6_key}}
{{- $router_id := getenv "CALICO_ROUTER_ID" ""}}
{{- $node_name := getenv "NODENAME"}}
router id {{if eq "hash" ($router_id) -}}
{{hashToIPv4 $node_name}}; # Use IP address generated by nodename's hash
{{- else -}}
{{if ne "" ($router_id)}}{{$router_id}}{{else}}{{$node_ip}}{{end}}; # Use IPv4 address since router id is 4 octets, even in MP-BGP
{{- end}}
{{- define "LOGGING"}}
{{- $node_logging_key := printf "/host/%s/loglevel" (getenv "NODENAME")}}
{{- if exists $node_logging_key}}
{{- $logging := getv $node_logging_key}}
{{- if eq $logging "debug"}}
debug all;
{{- else if ne $logging "none"}}
debug { states };
{{- end}}
{{- else if exists "/global/loglevel"}}
{{- $logging := getv "/global/loglevel"}}
{{- if eq $logging "debug"}}
debug all;
{{- else if ne $logging "none"}}
debug { states };
{{- end}}
{{- else}}
debug { states };
{{- end}}
{{- end}}
# Configure synchronization between routing tables and kernel.
protocol kernel {
learn; # Learn all alien routes from the kernel
persist; # Don't remove routes on bird shutdown
scan time 2; # Scan kernel routing table every 2 seconds
import all;
export all; # Default is export none
graceful restart; # Turn on graceful restart to reduce potential flaps in
# routes when reloading BIRD configuration. With a full
# automatic mesh, there is no way to prevent BGP from
# flapping since multiple nodes update their BGP
# configuration at the same time, GR is not guaranteed to
# work correctly in this scenario.
}
# Watch interface up/down events.
protocol device {
{{- template "LOGGING"}}
scan time 2; # Scan interfaces every 2 seconds
}
protocol direct {
{{- template "LOGGING"}}
interface -"cali*", "*"; # Exclude cali* but include everything else.
}
{{if eq "" ($node_ip6)}}# IPv6 disabled on this node.
{{else}}{{$node_as_key := printf "/host/%s/as_num" (getenv "NODENAME")}}
# Template for all BGP clients
template bgp bgp_template {
{{- $as_key := or (and (exists $node_as_key) $node_as_key) "/global/as_num"}}
{{- $node_as_num := getv $as_key}}
{{- template "LOGGING"}}
description "Connection to BGP peer";
local as {{$node_as_num}};
multihop;
gateway recursive; # This should be the default, but just in case.
import all; # Import all routes, since we don't know what the upstream
# topology is and therefore have to trust the ToR/RR.
export filter calico_export_to_bgp_peers; # Only want to export routes for workloads.
source address {{$node_ip6}}; # The local address we use for the TCP connection
add paths on;
graceful restart; # See comment in kernel section about graceful restart.
connect delay time 2;
connect retry time 5;
error wait time 5,30;
}
# ------------- Node-to-node mesh -------------
{{- $node_cid_key := printf "/host/%s/rr_cluster_id" (getenv "NODENAME")}}
{{- $node_cluster_id := getv $node_cid_key}}
{{- if ne "" ($node_cluster_id)}}
# This node is configured as a route reflector with cluster ID {{$node_cluster_id}};
# ignore node-to-node mesh setting.
{{- else}}
{{if (json (getv "/global/node_mesh")).enabled}}
{{range $host := lsdir "/host"}}
{{$onode_as_key := printf "/host/%s/as_num" .}}
{{$onode_ip_key := printf "/host/%s/ip_addr_v6" .}}{{if exists $onode_ip_key}}{{$onode_ip := getv $onode_ip_key}}
{{$nums := split $onode_ip ":"}}{{$id := join $nums "_"}}
# For peer {{$onode_ip_key}}
{{if eq $onode_ip ($node_ip6) }}# Skipping ourselves ({{$node_ip6}})
{{else if eq "" $onode_ip}}# No IPv6 address configured for this node
{{else}}protocol bgp Mesh_{{$id}} from bgp_template {
neighbor {{$onode_ip}} as {{if exists $onode_as_key}}{{getv $onode_as_key}}{{else}}{{getv "/global/as_num"}}{{end}};
{{- /*
Make the peering unidirectional. This avoids a race where
- peer A opens a connection and begins a graceful restart
- before the restart completes, peer B opens its connection
- peer A sees the new connection and aborts the graceful restart, causing a route flap.
*/ -}}
{{if gt $onode_ip $node_ip6 }}
passive on; # Mesh is unidirectional, peer will connect to us.
{{- end}}
}{{end}}{{end}}{{end}}
{{else}}
# Node-to-node mesh disabled
{{end}}
{{- end}}
# ------------- Global peers -------------
{{if ls "/global/peer_v6"}}
{{range gets "/global/peer_v6/*"}}{{$data := json .Value}}
{{$nums := split $data.ip ":"}}{{$id := join $nums "_"}}
# For peer {{.Key}}
{{- if eq $data.ip ($node_ip6) }}
# Skipping ourselves ({{$node_ip6}})
{{- else}}
protocol bgp Global_{{$id}} from bgp_template {
neighbor {{$data.ip}} as {{$data.as_num}};
{{- if and (eq $data.as_num $node_as_num) (ne "" ($node_cluster_id)) (ne $data.rr_cluster_id ($node_cluster_id))}}
rr client;
rr cluster id {{$node_cluster_id}};
{{- end}}
}
{{- end}}
{{end}}
{{else}}# No global peers configured.{{end}}
# ------------- Node-specific peers -------------
{{$node_peers_key := printf "/host/%s/peer_v6" (getenv "NODENAME")}}
{{if ls $node_peers_key}}
{{range gets (printf "%s/*" $node_peers_key)}}{{$data := json .Value}}
{{$nums := split $data.ip ":"}}{{$id := join $nums "_"}}
# For peer {{.Key}}
{{- if eq $data.ip ($node_ip6) }}
# Skipping ourselves ({{$node_ip6}})
{{- else}}
protocol bgp Node_{{$id}} from bgp_template {
neighbor {{$data.ip}} as {{$data.as_num}};
{{- if and (eq $data.as_num $node_as_num) (ne "" ($node_cluster_id)) (ne $data.rr_cluster_id ($node_cluster_id))}}
rr client;
rr cluster id {{$node_cluster_id}};
{{- end}}
}
{{- end}}
{{end}}
{{else}}# No node-specific peers configured.{{end}}
{{end}}
---
schema: deckhand/DataSchema/v1
metadata:
schema: metadata/Document/v1
name: bird6-aggr-cfg-template
labels:
name: bird6-aggr-cfg-template
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data: |
# Generated by confd
{{- $block_key := printf "/calico/ipam/v2/host/%s/ipv6/block" (getenv "NODENAME")}}
{{if ls $block_key}}
protocol static {
# IP blocks for this host.
{{- range ls $block_key}}
{{- $parts := split . "-"}}
{{- $cidr := join $parts "/"}}
route {{$cidr}} blackhole;
{{- end}}
}
{{else}}# No IP blocks or static routes for this host.{{end}}
# Aggregation of routes on this host; export the block, nothing beneath it.
function calico_aggr ()
{
{{- range ls $block_key}}
{{- $parts := split . "-"}}
{{- $cidr := join $parts "/"}}
{{- $affinity := json (getv (printf "%s/%s" $block_key .))}}
{{- if $affinity.state}}
# Block {{$cidr}} is {{$affinity.state}}
{{- if eq $affinity.state "confirmed"}}
if ( net = {{$cidr}} ) then { accept; }
if ( net ~ {{$cidr}} ) then { reject; }
{{- end}}
{{- else }}
# Block {{$cidr}} is implicitly confirmed.
if ( net = {{$cidr}} ) then { accept; }
if ( net ~ {{$cidr}} ) then { reject; }
{{- end }}
{{- end}}
}
---
schema: deckhand/DataSchema/v1
metadata:
schema: metadata/Document/v1
name: bird6-ipam-cfg-template
labels:
name: bird6-ipam-cfg-template
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data: |
# Generated by confd
filter calico_export_to_bgp_peers {
# AT&T {
apply_communities();
# } # AT&T
calico_aggr();
# AT&T {
osh_filters();
# } # AT&T
{{range ls "/pool"}}{{$data := json (getv (printf "/pool/%s" .))}}
if ( net ~ {{$data.cidr}} ) then {
accept;
}
{{end}}
reject;
}
---
schema: deckhand/DataSchema/v1
metadata:
schema: metadata/Document/v1
name: tunl-ip-template
labels:
name: tunl-ip-template
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data: |
We must dump all pool data to this file to trigger a re-run of the tunnel
address allocation code whenever an IP pool changes.
{{range ls "/pool"}}{{$data := json (getv (printf "/pool/%s" .))}}
{{- if or $data.ipip $data.vxlan_mode}}
{{- if not $data.disabled}}{{$data}}{{end}}
{{- end}}
{{end}}
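Once confd renders these templates and BIRD reloads, the result is observable from a node; a minimal sketch, assuming birdcl is available inside the calico-node container (as it typically is):
#!/bin/bash
# Routes that BIRD programmed into the kernel are tagged proto bird:
ip route show proto bird
# BIRD's own view of peers and announced routes:
birdcl show protocols
birdcl show route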

View File

@ -2,9 +2,9 @@
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-calico
name: kubernetes-calico-base
layeringDefinition:
abstract: false
abstract: true
layer: global
labels:
name: kubernetes-calico-global
@ -53,6 +53,27 @@ metadata:
dest:
path: .values.conf.node.IP_AUTODETECTION_METHOD
# Calico log level
- src:
schema: nc/CorridorConfig/v1
name: corridor-config
path: .calico.calico_startup_loglevel
dest:
path: .values.conf.node.CALICO_STARTUP_LOGLEVEL
- src:
schema: nc/CorridorConfig/v1
name: corridor-config
path: .calico.calico_startup_loglevel
dest:
path: .values.conf.node.FELIX_LOGSEVERITYSCREEN
- src:
schema: nc/CorridorConfig/v1
name: corridor-config
path: .calico.calico_startup_loglevel
dest:
path: .values.conf.cni_network_config.plugins[0].log_level
# Certificates
- src:
schema: deckhand/CertificateAuthority/v1
@ -73,6 +94,7 @@ metadata:
dest:
path: .values.endpoints.etcd.auth.client.tls.key
data:
chart_name: calico
release: kubernetes-calico
@ -80,17 +102,22 @@ data:
protected:
continue_processing: true
wait:
timeout: 600
timeout: 1800
labels:
release_group: airship-kubernetes-calico
release_group: clcp-kubernetes-calico
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: airship-kubernetes-calico
release_group: clcp-kubernetes-calico
values:
pod:
mandatory_access_control:
calico-node:
calico-node: unconfined
conf:
cni_network_config:
name: k8s-pod-network
@ -102,6 +129,7 @@ data:
etcd_cert_file: /etc/calico/pki/crt
etcd_key_file: /etc/calico/pki/key
log_level: info
mtu: 1500
ipam:
type: calico-ipam
policy:
@ -123,6 +151,8 @@ data:
ETCD_CERT_FILE: /etc/calico/pki/crt
ETCD_KEY_FILE: /etc/calico/pki/key
WAIT_FOR_STORAGE: "true"
FELIX_FAILSAFEINBOUNDHOSTPORTS: "none"
FELIX_FAILSAFEOUTBOUNDHOSTPORTS: "none"
endpoints:
etcd:
@ -132,7 +162,6 @@ data:
default: https
networking:
mtu: 1500
settings:
mesh: "on"
ippool:
@ -145,6 +174,7 @@ data:
manifests:
daemonset_calico_etcd: false
job_image_repo_sync: false
pod_calicoctl: false
service_calico_etcd: false
dependencies:
- calico-htk

View File

@ -8,8 +8,9 @@ metadata:
layer: global
storagePolicy: cleartext
data:
description: Container networking via Calico
description: Container networking via calico
sequenced: true
chart_group:
- kubernetes-calico-etcd
- kubernetes-calico
#- calicoctl-utility

View File

@ -76,16 +76,20 @@ data:
protected:
continue_processing: true
wait:
timeout: 600
timeout: 1800
labels:
release_group: airship-kubernetes-calico-etcd
release_group: clcp-kubernetes-calico-etcd
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: airship-kubernetes-calico-etcd
release_group: clcp-kubernetes-calico-etcd
- type: pod
labels:
release_group: clcp-kubernetes-calico-etcd
component: etcd-test
values:
labels:
anchor:

View File

@ -0,0 +1,731 @@
---
schema: nc/Policy/v1
metadata:
schema: metadata/Document/v1
name: global-policy
layeringDefinition:
abstract: false
layer: global
labels:
name: global-policy
storagePolicy: cleartext
substitutions:
- src:
schema: drydock/Network/v1
name: oam
path: .cidr
dest:
- path: .policy.globalsitelevel.rules[0].spec.ingress[0].source.nets[0]
pattern: OAM_CIDR
- path: .policy.globalsitelevel.rules[0].spec.ingress[1].source.nets[0]
pattern: OAM_CIDR
- path: .policy.globalsitelevel.rules[1].spec.egress[0].source.nets[0]
pattern: OAM_CIDR
- path: .policy.globalsitelevel.rules[15].spec.egress[0].source.nets[0]
pattern: OAM_CIDR
- path: .policy.globalsitelevel.rules[16].spec.ingress[0].source.nets[0]
pattern: OAM_CIDR
- path: .policy.globalsitelevel.rules[17].spec.egress[0].destination.nets[0]
pattern: OAM_CIDR
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .calico.bgp.ipv4.ingress_vip
dest:
- path: .policy.globalsitelevel.rules[1].spec.egress[0].destination.nets[0]
pattern: CALICO_BGP_IP
- path: .policy.globalsitelevel.rules[19].spec.ingress[0].destination.nets[0]
pattern: INGRESS_VIP
- path: .policy.globalsitelevel.rules[21].spec.ingress[0].destination.nets[0]
pattern: INGRESS_VIP
- path: .policy.globalsitelevel.rules[22].spec.ingress[0].destination.notNets[0]
pattern: INGRESS_VIP
- src:
schema: promenade/KubernetesNetwork/v1
name: kubernetes-network
path: .kubernetes.service_cidr
dest:
- path: .policy.globalsitelevel.rules[2].spec.ingress[0].source.nets[0]
pattern: KUBERNETES_SERVICE_CIDR
- path: .policy.globalsitelevel.rules[2].spec.ingress[1].source.nets[0]
pattern: KUBERNETES_SERVICE_CIDR
- path: .policy.globalsitelevel.rules[2].spec.egress[0].destination.nets[0]
pattern: KUBERNETES_SERVICE_CIDR
- path: .policy.globalsitelevel.rules[2].spec.egress[1].destination.nets[0]
pattern: KUBERNETES_SERVICE_CIDR
- src:
schema: promenade/KubernetesNetwork/v1
name: kubernetes-network
path: .kubernetes.pod_cidr
dest:
- path: .policy.globalsitelevel.rules[3].spec.ingress[0].source.nets[0]
pattern: KUBERNETES_POD_CIDR
- path: .policy.globalsitelevel.rules[3].spec.ingress[1].source.nets[0]
pattern: KUBERNETES_POD_CIDR
- path: .policy.globalsitelevel.rules[3].spec.egress[0].destination.nets[0]
pattern: KUBERNETES_POD_CIDR
- path: .policy.globalsitelevel.rules[3].spec.egress[1].destination.nets[0]
pattern: KUBERNETES_POD_CIDR
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .storage.ceph.public_cidr
dest:
- path: .policy.globalsitelevel.rules[4].spec.ingress[0].source.nets[0]
pattern: CEPH_PUBLIC_CIDR
- path: .policy.globalsitelevel.rules[4].spec.ingress[1].source.nets[0]
pattern: CEPH_PUBLIC_CIDR
- path: .policy.globalsitelevel.rules[4].spec.egress[0].destination.nets[0]
pattern: CEPH_PUBLIC_CIDR
- path: .policy.globalsitelevel.rules[4].spec.egress[1].destination.nets[0]
pattern: CEPH_PUBLIC_CIDR
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .storage.ceph.cluster_cidr
dest:
- path: .policy.globalsitelevel.rules[5].spec.ingress[0].source.nets[0]
pattern: CEPH_CLUSTER_CIDR
- path: .policy.globalsitelevel.rules[5].spec.ingress[1].source.nets[0]
pattern: CEPH_CLUSTER_CIDR
- path: .policy.globalsitelevel.rules[5].spec.egress[0].destination.nets[0]
pattern: CEPH_CLUSTER_CIDR
- path: .policy.globalsitelevel.rules[5].spec.egress[1].destination.nets[0]
pattern: CEPH_CLUSTER_CIDR
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .genesis.ip
dest:
- path: .policy.globalsitelevel.rules[6].spec.ingress[0].source.nets[0]
pattern: GENESIS_IP
- path: .policy.globalsitelevel.rules[6].spec.ingress[1].source.nets[0]
pattern: GENESIS_IP
- path: .policy.globalsitelevel.rules[6].spec.egress[0].destination.nets[0]
pattern: GENESIS_IP
- path: .policy.globalsitelevel.rules[6].spec.egress[1].destination.nets[0]
pattern: GENESIS_IP
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .bootstrap.ip
dest:
- path: .policy.globalsitelevel.rules[7].spec.ingress[0].source.nets[0]
pattern: BOOTSTRAP_IP
- path: .policy.globalsitelevel.rules[7].spec.ingress[1].source.nets[0]
pattern: BOOTSTRAP_IP
- path: .policy.globalsitelevel.rules[7].spec.egress[0].destination.nets[0]
pattern: BOOTSTRAP_IP
- path: .policy.globalsitelevel.rules[7].spec.egress[1].destination.nets[0]
pattern: BOOTSTRAP_IP
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .calico.bgp.ipv4.maas_vip
dest:
- path: .policy.globalsitelevel.rules[8].spec.ingress[0].source.nets[0]
pattern: MAAS_VIP
- path: .policy.globalsitelevel.rules[8].spec.ingress[1].source.nets[0]
pattern: MAAS_VIP
- path: .policy.globalsitelevel.rules[8].spec.egress[0].destination.nets[0]
pattern: MAAS_VIP
- path: .policy.globalsitelevel.rules[8].spec.egress[1].destination.nets[0]
pattern: MAAS_VIP
- path: .policy.globalsitelevel.rules[20].spec.ingress[0].destination.nets[0]
pattern: MAAS_VIP
- path: .policy.globalsitelevel.rules[20].spec.ingress[1].destination.nets[0]
pattern: MAAS_VIP
- path: .policy.globalsitelevel.rules[22].spec.ingress[0].destination.notNets[1]
pattern: MAAS_VIP
- src:
schema: drydock/Network/v1
name: oob
path: .cidr
dest:
- path: .policy.globalsitelevel.rules[14].spec.egress[0].destination.nets[0]
pattern: IPMI_CIDR
- path: .policy.globalsitelevel.rules[14].spec.egress[1].destination.nets[0]
pattern: IPMI_CIDR
data:
policy:
globalsitelevel:
priority: 4
rules:
# Rule 0
- apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
name: nc1-kvm-hosts-ingress
spec:
order: 20
selector: host in { 'nc-control', 'nc-compute' } && intf-alias == 'oam'
ingress:
- action: Allow
protocol: TCP
source:
nets: ["OAM_CIDR"]
destination:
ports:
- 80
- 443
- 2378
- 4149
- 6443
- 6553
- 6666
- 6667
- 9099
- 10250
- 10255
- 10256
- action: Allow
protocol: ICMP
icmp:
type: 0
code: 0
source:
nets: ["OAM_CIDR"]
doNotTrack: false
preDNAT: false
applyOnForward: true
# Rule 1
- apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
name: nc1-host-to-ingress
spec:
order: 23
selector: host in { 'nc-control', 'nc-compute' } && intf-alias == 'oam'
egress:
- action: Allow
protocol: TCP
source:
nets: ["OAM_CIDR"]
destination:
nets: ["CALICO_BGP_IP"]
ports:
- 80
- 443
doNotTrack: false
preDNAT: false
applyOnForward: true
# rule 2: all UCP containers should allow all TCP and UDP connections to and from the kubernetes service network
- apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
name: ucp-kubernetes-service
spec:
selector: host == 'nc-control' && intf-alias == 'oam'
order: 0
ingress:
- action: Allow
protocol: TCP
source:
nets:
- "KUBERNETES_SERVICE_CIDR"
- action: Allow
protocol: UDP
source:
nets:
- "KUBERNETES_SERVICE_CIDR"
egress:
- action: Allow
protocol: TCP
destination:
nets:
- "KUBERNETES_SERVICE_CIDR"
- action: Allow
protocol: UDP
destination:
nets:
- "KUBERNETES_SERVICE_CIDR"
doNotTrack: false
preDNAT: false
applyOnForward: true
# rule 3: all UCP containers should allow all TCP and UDP connections to and from the kubernetes pod network
- apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
name: ucp-kubernetes-pod
spec:
selector: host in { 'nc-control', 'nc-compute' } && intf-alias == 'oam'
order: 0
ingress:
- action: Allow
protocol: TCP
source:
nets:
- "KUBERNETES_POD_CIDR"
- action: Allow
protocol: UDP
source:
nets:
- "KUBERNETES_POD_CIDR"
egress:
- action: Allow
protocol: TCP
destination:
nets:
- "KUBERNETES_POD_CIDR"
- action: Allow
protocol: UDP
destination:
nets:
- "KUBERNETES_POD_CIDR"
doNotTrack: false
preDNAT: false
applyOnForward: true
# rule 4: all UCP containers should allow all TCP and UDP connections to and from the public ceph network
- apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
name: ucp-ceph-public
spec:
selector: host in { 'nc-control', 'nc-compute' } && intf-alias == 'oam'
order: 0
ingress:
- action: Allow
protocol: TCP
source:
nets:
- "CEPH_PUBLIC_CIDR"
- action: Allow
protocol: UDP
source:
nets:
- "CEPH_PUBLIC_CIDR"
egress:
- action: Allow
protocol: TCP
destination:
nets:
- "CEPH_PUBLIC_CIDR"
- action: Allow
protocol: UDP
destination:
nets:
- "CEPH_PUBLIC_CIDR"
doNotTrack: false
preDNAT: false
applyOnForward: true
# rule 5: all UCP containers should allow all TCP and UDP connections to and from the cluster ceph network
# (can this sometimes be different from the public ceph network?)
- apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
name: ucp-ceph-cluster
spec:
selector: host in { 'nc-control', 'nc-compute' } && intf-alias == 'oam'
order: 0
ingress:
- action: Allow
protocol: TCP
source:
nets:
- "CEPH_CLUSTER_CIDR"
- action: Allow
protocol: UDP
source:
nets:
- "CEPH_CLUSTER_CIDR"
egress:
- action: Allow
protocol: TCP
destination:
nets:
- "CEPH_CLUSTER_CIDR"
- action: Allow
protocol: UDP
destination:
nets:
- "CEPH_CLUSTER_CIDR"
doNotTrack: false
preDNAT: false
applyOnForward: true
# rule 6: all UCP containers should allow all TCP and UDP connections to and from the genesis node
# (is this needed, or does the bootstrap IP rule below cover everything?)
- apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
name: ucp-genesis
spec:
selector: host in { 'nc-control', 'nc-compute' } && intf-alias == 'oam'
order: 0
ingress:
- action: Allow
protocol: TCP
source:
nets:
- "GENESIS_IP/32"
- action: Allow
protocol: UDP
source:
nets:
- "GENESIS_IP/32"
egress:
- action: Allow
protocol: TCP
destination:
nets:
- "GENESIS_IP/32"
- action: Allow
protocol: UDP
destination:
nets:
- "GENESIS_IP/32"
doNotTrack: false
preDNAT: false
applyOnForward: true
# rule 7: all UCP containers should allow all TCP and UDP connections to and from the bootstrap IP
- apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
name: ucp-bootstrap
spec:
selector: host in { 'nc-control', 'nc-compute' } && intf-alias == 'oam'
order: 0
ingress:
- action: Allow
protocol: TCP
source:
nets:
- "BOOTSTRAP_IP/32"
- action: Allow
protocol: UDP
source:
nets:
- "BOOTSTRAP_IP/32"
egress:
- action: Allow
protocol: TCP
destination:
nets:
- "BOOTSTRAP_IP/32"
- action: Allow
protocol: UDP
destination:
nets:
- "BOOTSTRAP_IP/32"
doNotTrack: false
preDNAT: false
applyOnForward: true
# rule 8: all UCP containers should allow all TCP and UDP connections to and from the maas VIP
- apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
name: ucp-maas
spec:
selector: host in { 'nc-control', 'nc-compute' } && intf-alias == 'oam'
order: 0
ingress:
- action: Allow
protocol: TCP
source:
nets:
- "MAAS_VIP"
- action: Allow
protocol: UDP
source:
nets:
- "MAAS_VIP"
egress:
- action: Allow
protocol: TCP
destination:
nets:
- "MAAS_VIP"
- action: Allow
protocol: UDP
destination:
nets:
- "MAAS_VIP"
doNotTrack: false
preDNAT: false
applyOnForward: true
# rule 9: FAILSAFE: all UCP containers should allow incoming TCP connections on port 22 from anywhere (SSH)
- apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
name: failsafe-ssh
spec:
selector: host in { 'nc-control', 'nc-compute' } && intf-alias == 'oam'
order: 0
ingress:
- action: Allow
protocol: TCP
destination:
ports: [22]
# rule 10: FAILSAFE: all UCP containers should allow incoming and outgoing ICMP connections to/from anywhere (pings & traces)
- apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
name: failsafe-icmp
spec:
selector: host in { 'nc-control', 'nc-compute' } && intf-alias == 'oam'
order: 0
ingress:
- action: Allow
protocol: ICMP
egress:
- action: Allow
protocol: ICMP
doNotTrack: false
preDNAT: false
applyOnForward: true
# rule 11: FAILSAFE: all UCP containers should allow outgoing UDP connections on port 53 to anywhere (DNS)
- apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
name: failsafe-dns
spec:
selector: host in { 'nc-control', 'nc-compute' } && intf-alias == 'oam'
order: 0
egress:
- action: Allow
protocol: UDP
destination:
ports: [53]
doNotTrack: false
preDNAT: false
applyOnForward: true
# rule 12: FAILSAFE: all UCP containers should allow outgoing UDP connections on port 67 to anywhere (DHCP)
- apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
name: failsafe-dhcp
spec:
selector: host in { 'nc-control', 'nc-compute' } && intf-alias == 'oam'
order: 0
egress:
- action: Allow
protocol: UDP
destination:
ports: [67]
doNotTrack: false
preDNAT: false
applyOnForward: true
# rule 13: FAILSAFE: all UCP containers should allow outgoing UDP connections on port 123 to anywhere (NTP)
- apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
name: failsafe-ntp
spec:
selector: host in { 'nc-control', 'nc-compute' } && intf-alias == 'oam'
order: 0
egress:
- action: Allow
protocol: UDP
destination:
ports: [123]
doNotTrack: false
preDNAT: false
applyOnForward: true
# rule 14: FAILSAFE: ipmi
- apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
name: failsafe-ipmi-egress
spec:
order: 13
selector: host in { 'nc-control', 'nc-compute' } && intf-alias == 'oam'
egress:
- action: Allow
protocol: TCP
destination:
nets: ["IPMI_CIDR"]
ports:
- 22
- 80
- 443
- action: Allow
protocol: UDP
destination:
nets: ["IPMI_CIDR"]
ports:
- 623
doNotTrack: false
preDNAT: false
applyOnForward: true
# Rule 15
- apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
name: nc1-kvm-hosts-egress
spec:
order: 20
selector: host in { 'nc-control', 'nc-compute' } && intf-alias == 'oam'
egress:
- action: Allow
protocol: TCP
source:
nets: ["OAM_CIDR"]
destination:
ports:
- 80
- 443
- 2378
- 4149
- 6443
- 6553
- 6666
- 6667
- 9099
- 10250
- 10255
- 10256
doNotTrack: false
preDNAT: false
applyOnForward: true
# Rule 16
- apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
name: vnc-traffic-ingress
spec:
order: 60
selector: host in { 'nc-control', 'nc-compute' } && intf-alias == 'oam'
ingress:
- action: Allow
protocol: TCP
source:
nets: ["OAM_CIDR"]
destination:
ports:
- "5900:5999"
doNotTrack: false
preDNAT: true
applyOnForward: true
# Rule 17
- apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
name: vnc-traffic-egress
spec:
order: 5
selector: host in { 'nc-control', 'nc-compute' } && intf-alias == 'oam'
egress:
- action: Allow
protocol: TCP
destination:
nets: ["OAM_CIDR"]
ports:
- "5900:5999"
doNotTrack: false
preDNAT: false
applyOnForward: true
# Rule 18
- apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
name: ksn-allow-icmp
spec:
order: 0
selector: host in { 'nc-control', 'nc-compute' } && intf-alias == 'ksn'
applyOnForward: true
ingress:
- action: Allow
protocol: ICMP
egress:
- action: Allow
protocol: ICMP
# Rule 19
- apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
name: ksn-allow-to-ingress-vip
spec:
order: 0
selector: host == 'nc-control' && intf-alias == 'ksn'
applyOnForward: true
ingress:
# allow kubernetes ingress-controller ports
- action: Allow
protocol: TCP
destination:
ports:
- 80
- 443
nets:
- INGRESS_VIP
# Rule 20
- apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
name: ksn-allow-to-maas-vip
spec:
order: 0
selector: host == 'nc-control' && intf-alias == 'ksn'
applyOnForward: true
ingress:
# allow maas and minimirror ports
- action: Allow
protocol: TCP
destination:
ports:
- 53
- 80
- 8000
nets:
- MAAS_VIP
- action: Allow
protocol: UDP
destination:
ports:
- 514
- 53
nets:
- MAAS_VIP
# Rule 21
# TODO: move this to a corridor policy
- apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
name: ksn-allow-kubernetes-api
spec:
order: 0
selector: host == 'nc-control' && intf-alias == 'ksn'
applyOnForward: true
ingress:
# allow kubernetes api access
- action: Allow
protocol: TCP
destination:
ports:
- 6443
- 6553
nets:
- INGRESS_VIP
# Rule 22
- apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
name: ksn-allow-all-non-vip
spec:
order: 10000
selector: host in { 'nc-control', 'nc-compute' } && intf-alias == 'ksn'
applyOnForward: true
ingress:
- action: Allow
destination:
notNets:
- INGRESS_VIP
- MAAS_VIP
egress:
- action: Allow
...
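Once applied, the policies above can be inspected with calicoctl; a sketch, assuming calicoctl is configured against the cluster datastore:
#!/bin/bash
# List applied global policies with their order and selectors:
calicoctl get globalnetworkpolicy -o wide
# Dump a single rule, e.g. the SSH failsafe, in full:
calicoctl get globalnetworkpolicy failsafe-ssh -o yaml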

View File

@ -3,8 +3,6 @@ schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-apiserver
labels:
name: kubernetes-apiserver-global
layeringDefinition:
abstract: false
layer: global
@ -26,6 +24,14 @@ metadata:
dest:
path: .values.images.tags
# Kube-Apiserver Log Level
- src:
schema: nc/CorridorConfig/v1
name: corridor-config
path: .kubernetes_components.apiserver_log_level
dest:
path: .values.apiserver.logging.log_level
# IP addresses
- src:
schema: pegleg/CommonAddresses/v1
@ -110,24 +116,66 @@ metadata:
dest:
path: .values.conf.encryption_provider.content.resources
# Aggregation API config
- src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes-agg-api
path: .
dest:
path: .values.conf.agg_api_ca.content
- src:
schema: deckhand/Certificate/v1
name: apiserver-proxy
path: .
dest:
path: .values.conf.apiserver_proxy_cert.content
- src:
schema: deckhand/CertificateKey/v1
name: apiserver-proxy
path: .
dest:
path: .values.conf.apiserver_proxy_key.content
data:
chart_name: apiserver
release: kubernetes-apiserver
namespace: kube-system
protected:
continue_processing: true
continue_processing: false
wait:
timeout: 600
timeout: 900
# Don't want to wait on the keyrotation job during bootstrap
resources:
# Wait on the anchor daemonset rolling update in order to
# allow time for the static pods to start updating, which should then be
# handled by the below pod wait.
# If the apiserver static pods are being updated, this should quarantine
# down time to only affect the armada wait logic, and not any
# tiller-apiserver interaction, which can cause releases to be marked
# FAILED, which prevents deployment progress if it affects protected
# charts.
- type: daemonset
- type: pod
labels:
release_group: airship-kubernetes-apiserver
release_group: clcp-kubernetes-apiserver
native:
enabled: false
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: airship-kubernetes-apiserver
release_group: clcp-kubernetes-apiserver
values:
pod:
lifecycle:
upgrades:
daemonsets:
pod_replacement_strategy: RollingUpdate
kubernetes-apiserver-anchor:
enabled: true
min_ready_seconds: 0
max_unavailable: 50%
apiserver:
etcd:
endpoints: https://127.0.0.1:2378
@ -140,17 +188,15 @@ data:
- --authorization-mode=Node,RBAC
- --service-cluster-ip-range=SERVICE_CIDR
- --service-node-port-range=SERVICE_NODE_PORT_RANGE
- --endpoint-reconciler-type=lease
- --feature-gates=PodShareProcessNamespace=true
- --v=3
- --feature-gates=PodShareProcessNamespace=true,TaintBasedEvictions=false
conf:
encryption_provider:
file: encryption_provider.yaml
command_options:
- '--experimental-encryption-provider-config=/etc/kubernetes/apiserver/encryption_provider.yaml'
- '--encryption-provider-config=/etc/kubernetes/apiserver/encryption_provider.yaml'
content:
kind: EncryptionConfig
apiVersion: v1
kind: EncryptionConfiguration
apiVersion: apiserver.config.k8s.io/v1
eventconfig:
file: eventconfig.yaml
content:
@ -171,17 +217,37 @@ data:
plugins:
- name: EventRateLimit
path: eventconfig.yaml
agg_api_ca:
file: agg-api-ca.pem
command_options:
- '--requestheader-client-ca-file=/etc/kubernetes/apiserver/agg-api-ca.pem'
- '--requestheader-extra-headers-prefix=X-Remote-Extra-'
- '--requestheader-group-headers=X-Remote-Group'
- '--requestheader-username-headers=X-Remote-User'
- '--requestheader-allowed-names=aggregator'
content: null
apiserver_proxy_cert:
file: 'apiserver-proxy-cert.pem'
command_options:
- '--proxy-client-cert-file=/etc/kubernetes/apiserver/apiserver-proxy-cert.pem'
content: null
apiserver_proxy_key:
file: 'apiserver-proxy-key.pem'
command_options:
- '--proxy-client-key-file=/etc/kubernetes/apiserver/apiserver-proxy-key.pem'
content: null
dependencies:
- kubernetes-apiserver-htk
- apiserver-htk
...
---
## Helm toolkit
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-apiserver-htk
name: apiserver-htk
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/SoftwareVersions/v1
@ -189,10 +255,16 @@ metadata:
path: .charts.kubernetes.apiserver-htk
dest:
path: .source
storagePolicy: cleartext
data:
chart_name: kubernetes-apiserver-htk
release: kubernetes-apiserver-htk
namespace: kubernetes-apiserver-htk
chart_name: apiserver-htk
release: apiserver-htk
namespace: apiserver-htk
timeout: 600
wait:
timeout: 600
upgrade:
no_hooks: true
values: {}
dependencies: []
...

View File

@ -3,8 +3,6 @@ schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-controller-manager
labels:
name: kubernetes-controller-manager-global
layeringDefinition:
abstract: false
layer: global
@ -26,6 +24,14 @@ metadata:
dest:
path: .values.images.tags
# Kube-controller-manager Log Level
- src:
schema: nc/CorridorConfig/v1
name: corridor-config
path: .kubernetes_components.controller_manager_log_level
dest:
path: .values.controller_manager.logging.log_level
# IP addresses
- src:
schema: pegleg/CommonAddresses/v1
@ -44,14 +50,14 @@ metadata:
name: common-addresses
path: .kubernetes.pod_cidr
dest:
path: .values.command_prefix[1]
path: .values.command_prefix[2]
pattern: SUB_POD_CIDR
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.service_cidr
dest:
path: .values.command_prefix[2]
path: .values.command_prefix[3]
pattern: SUB_SERVICE_CIDR
# CA
@@ -89,39 +95,51 @@ data:
release: kubernetes-controller-manager
namespace: kube-system
protected:
continue_processing: true
continue_processing: false
wait:
timeout: 600
timeout: 900
labels:
release_group: airship-kubernetes-controller-manager
release_group: clcp-kubernetes-controller-manager
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: airship-kubernetes-controller-manager
release_group: clcp-kubernetes-controller-manager
values:
command_prefix:
- /controller-manager
- /hyperkube
- kube-controller-manager
- --cluster-cidr=SUB_POD_CIDR
- --service-cluster-ip-range=SUB_SERVICE_CIDR
- --node-monitor-period=5s
- --node-monitor-grace-period=20s
- --pod-eviction-timeout=60s
- --terminated-pod-gc-threshold=1000
network:
kubernetes_netloc: 127.0.0.1:6553
pod:
lifecycle:
upgrades:
daemonsets:
pod_replacement_strategy: RollingUpdate
kubernetes-controller-manager-anchor:
enabled: true
min_ready_seconds: 0
max_unavailable: 50%
dependencies:
- kubernetes-controller-manager-htk
- controller-manager-htk
...
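The index shift in the SUB_POD_CIDR/SUB_SERVICE_CIDR substitutions above (`command_prefix[1]`/`[2]` to `[2]`/`[3]`) follows directly from the new command shape: `/controller-manager` was a single element, while `/hyperkube` plus `kube-controller-manager` occupy two, pushing the CIDR flags down one slot each. After substitution the list would render along these lines (the CIDR values are hypothetical):

    command_prefix:
      - /hyperkube                                # [0]
      - kube-controller-manager                   # [1]
      - --cluster-cidr=10.99.0.0/16               # [2] SUB_POD_CIDR substituted here
      - --service-cluster-ip-range=10.96.0.0/16   # [3] SUB_SERVICE_CIDR substituted here
      - --node-monitor-period=5s
      - --node-monitor-grace-period=20s
      - --pod-eviction-timeout=60s
      - --terminated-pod-gc-threshold=1000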
---
# Helm toolkit
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-controller-manager-htk
name: controller-manager-htk
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/SoftwareVersions/v1
@@ -129,10 +147,16 @@ metadata:
path: .charts.kubernetes.controller-manager-htk
dest:
path: .source
storagePolicy: cleartext
data:
chart_name: kubernetes-controller-manager-htk
release: kubernetes-controller-manager-htk
namespace: kubernetes-controller-manager-htk
chart_name: controller-manager-htk
release: controller-manager-htk
namespace: controller-manager-htk
timeout: 600
wait:
timeout: 600
upgrade:
no_hooks: true
values: {}
dependencies: []
...


@@ -3,8 +3,6 @@ schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-scheduler
labels:
name: kubernetes-scheduler-global
layeringDefinition:
abstract: false
layer: global
@@ -26,6 +24,14 @@ metadata:
dest:
path: .values.images.tags
# Kube-scheduler Log Level
- src:
schema: nc/CorridorConfig/v1
name: corridor-config
path: .kubernetes_components.scheduler_log_level
dest:
path: .values.scheduler.logging.log_level
# CA
- src:
schema: deckhand/CertificateAuthority/v1
@@ -53,32 +59,41 @@ data:
release: kubernetes-scheduler
namespace: kube-system
protected:
continue_processing: true
continue_processing: false
wait:
timeout: 600
timeout: 900
labels:
release_group: airship-kubernetes-scheduler
release_group: clcp-kubernetes-scheduler
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: airship-kubernetes-scheduler
release_group: clcp-kubernetes-scheduler
values:
network:
kubernetes_netloc: 127.0.0.1:6553
pod:
lifecycle:
upgrades:
daemonsets:
pod_replacement_strategy: RollingUpdate
scheduler:
enabled: true
min_ready_seconds: 0
max_unavailable: 50%
dependencies:
- kubernetes-scheduler-htk
- scheduler-htk
...
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-scheduler-htk
name: scheduler-htk
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/SoftwareVersions/v1
@@ -86,10 +101,16 @@ metadata:
path: .charts.kubernetes.scheduler-htk
dest:
path: .source
storagePolicy: cleartext
data:
chart_name: kubernetes-scheduler-htk
release: kubernetes-scheduler-htk
namespace: kubernetes-scheduler-htk
chart_name: scheduler-htk
release: scheduler-htk
namespace: scheduler-htk
timeout: 600
wait:
timeout: 600
upgrade:
no_hooks: true
values: {}
dependencies: []
...
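The controller-manager and scheduler charts (and the kube-proxy chart further down) now pull their component log levels from a single `nc/CorridorConfig/v1` document named `corridor-config`. That document is not part of this file, but the substitution paths used here imply a shape roughly like the following; the field names come from those paths, the values are hypothetical:

    ---
    schema: nc/CorridorConfig/v1
    metadata:
      schema: metadata/Document/v1
      name: corridor-config
      layeringDefinition:
        abstract: false
        layer: global
      storagePolicy: cleartext
    data:
      kubernetes_components:
        controller_manager_log_level: 2
        scheduler_log_level: 2
        proxy_log_level: 2
    ...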


@@ -71,27 +71,33 @@ metadata:
dest:
path: .values.conf.coredns.corefile
pattern: '(UPSTREAM2)'
data:
chart_name: coredns
release: coredns
namespace: kube-system
protected:
continue_processing: false
wait:
timeout: 600
timeout: 900
labels:
release_group: airship-coredns
release_group: clcp-coredns
test:
enabled: true
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: airship-coredns
release_group: clcp-coredns
values:
conf:
coredns:
corefile: |
.:53 {
errors
loadbalance round_robin
health
autopath @kubernetes
kubernetes CLUSTER_DOMAIN SERVICE_CIDR POD_CIDR {
@@ -100,18 +106,41 @@ data:
upstream UPSTREAM1
upstream UPSTREAM2
}
prometheus :9153
prometheus :9253
forward . UPSTREAM1 UPSTREAM2
cache 30
log . {
class denial error
}
}
test:
names_to_resolve:
- kubernetes.default.svc.cluster.local
monitoring:
prometheus:
enabled: true
pod:
# TODO: replicas can be removed once we switch coredns to
# DaemonSet-only. It will be deployed with both DaemonSet
# and Deployment-managed pods as we transition to DaemonSet.
replicas:
coredns: 2
labels:
coredns:
# NOTE(mb874d): This is the label specified in the IPDD
node_selector_key: kube-dns
node_selector_value: enabled
manifests:
      # NOTE(mc981n): Delay adding the coredns daemonset until after all
      # the nodes have bootstrapped/joined
daemonset: false
# TODO: `deployment` can be set to false once we switch coredns to
# DaemonSet-only. It will be deployed with both DaemonSet
# and Deployment-managed pods as we transition to DaemonSet.
deployment: true
dependencies:
- coredns-htk
...
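As the TODOs above spell out, coredns temporarily runs with both Deployment- and DaemonSet-managed pods, with the DaemonSet manifest held back until all nodes have joined. Once the transition completes, the cutover should reduce to a small values override at a lower layer, something like this sketch (not part of this change):

    values:
      manifests:
        daemonset: true    # nodes have joined; let the DaemonSet schedule everywhere
        deployment: false  # retire the Deployment-managed pods (and the replicas override with them)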
---
schema: armada/Chart/v1
metadata:
@@ -120,7 +149,6 @@ metadata:
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/SoftwareVersions/v1
@@ -128,22 +156,16 @@ metadata:
path: .charts.kubernetes.coredns-htk
dest:
path: .source
storagePolicy: cleartext
data:
chart_name: coredns-htk
release: coredns-htk
namespace: coredns-htk
values:
pod:
# TODO: replicas can be removed once we switch coredns to
# DaemonSet-only. It will be deployed with both DaemonSet
# and Deployment-managed pods as we transition to DaemonSet.
replicas:
coredns: 2
manifests:
daemonset: true
# TODO: `deployment` can be set to false once we switch coredns to
# DaemonSet-only. It will be deployed with both DaemonSet
# and Deployment-managed pods as we transition to DaemonSet.
deployment: true
timeout: 600
wait:
timeout: 600
upgrade:
no_hooks: true
values: {}
dependencies: []
...


@@ -79,19 +79,41 @@ data:
release: kubernetes-etcd
namespace: kube-system
protected:
continue_processing: true
continue_processing: false
wait:
timeout: 600
timeout: 1800
native:
enabled: false
resources:
- type: "daemonset"
- type: "pod"
labels:
release_group: airship-kubernetes-etcd
release_group: clcp-kubernetes-etcd
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: airship-kubernetes-etcd
release_group: clcp-kubernetes-etcd
values:
pod:
lifecycle:
upgrades:
daemonsets:
pod_replacement_strategy: RollingUpdate
anchor:
enabled: true
min_ready_seconds: 0
max_unavailable: 1
env:
etcd:
# default ETCD_HEARTBEAT_INTERVAL: 100
ETCD_HEARTBEAT_INTERVAL: 100
# default ETCD_ELECTION_TIMEOUT: 1000
ETCD_ELECTION_TIMEOUT: 1000
# default ETCD_SNAPSHOT_COUNT: 100000
ETCD_SNAPSHOT_COUNT: 10000
labels:
anchor:
node_selector_key: kubernetes-etcd
@@ -110,17 +132,22 @@ data:
name: service_peer
port: 2380
target_port: 2380
jobs:
etcd_backup:
cron: "0 0 * * *"
backup:
no_backup_keep: 3
dependencies:
- kubernetes-etcd-htk
- etcd-htk
...
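The env block pins the heartbeat and election timeouts at their etcd defaults and only lowers ETCD_SNAPSHOT_COUNT (100000 to 10000), so snapshots are taken more often and the Raft log is compacted sooner. A site with slower disks or higher inter-node latency could override the same keys at a lower layer; a hypothetical sketch, keeping etcd's guidance that the election timeout be roughly 5-10x the heartbeat interval:

    values:
      env:
        etcd:
          ETCD_HEARTBEAT_INTERVAL: 250   # illustrative; raise for high-latency links
          ETCD_ELECTION_TIMEOUT: 2500    # scale together with the heartbeat interval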
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-etcd-htk
name: etcd-htk
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/SoftwareVersions/v1
@@ -128,10 +155,16 @@ metadata:
path: .charts.kubernetes.etcd-htk
dest:
path: .source
storagePolicy: cleartext
data:
chart_name: kubernetes-etcd-htk
release: kubernetes-etcd-htk
namespace: kubernetes-etcd-htk
chart_name: etcd-htk
release: etcd-htk
namespace: etcd-htk
timeout: 600
wait:
timeout: 600
upgrade:
no_hooks: true
values: {}
dependencies: []
...


@@ -2,12 +2,13 @@
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: ucp-promenade
name: kubernetes-falco
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data:
description: Promenade
  description: Kubernetes Falco
sequenced: true
chart_group:
- ucp-promenade
- kubernetes-falco


@@ -0,0 +1,67 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-falco
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.kubernetes.falco
dest:
path: .source
# Images
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.kubernetes.falco
dest:
path: .values.images.tags
data:
chart_name: falco
release: kubernetes-falco
namespace: kube-system
protected:
continue_processing: false
wait:
timeout: 600
labels:
release_group: clcp-kubernetes-falco
dependencies:
- falco-htk
...
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: falco-htk
layeringDefinition:
abstract: false
layer: global
substitutions:
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.kubernetes.falco-htk
dest:
path: .source
storagePolicy: cleartext
data:
chart_name: falco-htk
release: falco-htk
namespace: falco-htk
timeout: 600
wait:
timeout: 600
upgrade:
no_hooks: true
values: {}
dependencies: []
...


@@ -3,8 +3,6 @@ schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: haproxy
labels:
name: haproxy-global
layeringDefinition:
abstract: false
layer: global
@@ -41,22 +39,38 @@ data:
release: haproxy
namespace: kube-system
protected:
continue_processing: true
continue_processing: false
wait:
timeout: 600
timeout: 1800
labels:
release_group: airship-haproxy
release_group: clcp-haproxy
test:
enabled: true
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: airship-haproxy
release_group: clcp-haproxy
values:
pod:
lifecycle:
upgrades:
daemonsets:
pod_replacement_strategy: RollingUpdate
haproxy_anchor:
enabled: true
min_ready_seconds: 0
max_unavailable: '50%'
security_context:
haproxy:
pod:
runAsUser: 0
conf:
anchor:
kubernetes_url: https://KUBERNETES_IP:443
enable_cleanup: false
services:
default:
kubernetes:
@@ -72,6 +86,7 @@ data:
- option tcp-check
- option redispatch
kube-system:
kubernetes-apiserver: null
kubernetes-etcd:
server_opts: "check port 2379"
conf_parts:
@@ -86,6 +101,7 @@ data:
- option redispatch
dependencies:
- haproxy-htk
...
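One detail that is easy to misread above: `kubernetes-apiserver: null` relies on standard Helm override semantics, where setting a key to null deletes the chart's default entry, so HAProxy stops fronting the apiserver in kube-system and keeps only the etcd backend defined here. Expressed on its own (assuming the chart ships a kubernetes-apiserver default):

    values:
      conf:
        services:
          kube-system:
            kubernetes-apiserver: null   # null removes the chart-default backend entirely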
---
schema: armada/Chart/v1
metadata:
@@ -94,7 +110,6 @@ metadata:
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/SoftwareVersions/v1
@@ -102,10 +117,16 @@ metadata:
path: .charts.kubernetes.haproxy-htk
dest:
path: .source
storagePolicy: cleartext
data:
chart_name: haproxy-htk
release: haproxy-htk
namespace: haproxy-htk
timeout: 600
wait:
timeout: 600
upgrade:
no_hooks: true
values: {}
dependencies: []
...


@@ -2,12 +2,12 @@
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: global-ingress-kube-system
name: ingress-kube-system
labels:
ingress: kube-system
name: ingress-kube-system-global
name: cluster-ingress-global
layeringDefinition:
abstract: true
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
@@ -29,10 +29,12 @@ data:
chart_name: ingress-kube-system
release: ingress-kube-system
namespace: kube-system
protected:
continue_processing: false
wait:
timeout: 300
timeout: 600
labels:
release_group: airship-ingress-kube-system
release_group: clcp-ingress-kube-system
install:
no_hooks: false
upgrade:
@@ -41,7 +43,7 @@ data:
delete:
- type: job
labels:
release_group: airship-ingress-kube-system
release_group: clcp-ingress-kube-system
values:
labels:
server:
@@ -52,27 +54,32 @@ data:
node_selector_value: enabled
deployment:
mode: cluster
type: Deployment
type: DaemonSet
network:
host_namespace: true
ingress:
annotations:
nginx.ingress.kubernetes.io/proxy-read-timeout: "603"
nginx.ingress.kubernetes.io/configuration-snippet: |
more_set_headers "X-Content-Type-Options: 'nosniff'";
more_set_headers "X-Frame-Options: 'deny'";
more_set_headers "Content-Security-Policy: script-src 'self'";
more_set_headers "X-Permitted-Cross-Domain-Policies: none";
more_clear_headers "Server";
pod:
replicas:
ingress: 1
error_page: 1
error_page: 2
dependencies:
- ingress-kube-system-htk
- ingress-htk
...
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ingress-kube-system-htk
name: ingress-htk
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/SoftwareVersions/v1
@@ -80,9 +87,16 @@ metadata:
path: .charts.kubernetes.ingress-htk
dest:
path: .source
storagePolicy: cleartext
data:
chart_name: ingress-kube-system-htk
release: ingress-kube-system-htk
namespace: ingress-kube-system-htk
chart_name: ingress-htk
release: ingress-htk
namespace: ingress-htk
timeout: 600
wait:
timeout: 600
upgrade:
no_hooks: true
values: {}
dependencies: []
...


@@ -3,8 +3,6 @@ schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-proxy
labels:
name: kubernetes-proxy-global
layeringDefinition:
abstract: false
layer: global
@@ -26,13 +24,21 @@ metadata:
dest:
path: .values.images.tags
# Kube-proxy Log Level
- src:
schema: nc/CorridorConfig/v1
name: corridor-config
path: .kubernetes_components.proxy_log_level
dest:
path: .values.proxy.logging.log_level
# IP Addresses
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.pod_cidr
dest:
path: .values.command_prefix[1]
path: .values.command_prefix[2]
pattern: POD_CIDR
# Secrets
@@ -46,20 +52,36 @@ data:
chart_name: proxy
release: kubernetes-proxy
namespace: kube-system
protected:
continue_processing: false
wait:
timeout: 600
timeout: 1800
labels:
release_group: airship-kubernetes-proxy
release_group: clcp-kubernetes-proxy
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
release_group: airship-kubernetes-proxy
release_group: clcp-kubernetes-proxy
values:
pod:
lifecycle:
upgrades:
daemonsets:
pod_replacement_strategy: RollingUpdate
proxy:
enabled: true
min_ready_seconds: 0
max_unavailable: '50%'
mandatory_access_control:
type: apparmor
kubernetes-proxy:
proxy: localhost/kubeproxy-v1
command_prefix:
- /proxy
- /hyperkube
- kube-proxy
- --cluster-cidr=POD_CIDR
- --proxy-mode=iptables
kube_service:
@@ -67,18 +89,22 @@ data:
port: 6553
livenessProbe:
whitelist:
- tiller-deploy
- 'tiller-deploy'
- 'kube-controller-manager'
- 'kube-controller-manager-discovery'
- 'kube-scheduler'
- 'kube-scheduler-discovery'
dependencies:
- kubernetes-proxy-htk
- proxy-htk
...
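The new `mandatory_access_control` block asks helm-toolkit to confine the proxy container with the host-loaded `kubeproxy-v1` AppArmor profile. Assuming the toolkit renders this the way upstream helm-toolkit does, it becomes the standard per-container AppArmor annotation on the proxy pod (a sketch under that assumption):

    metadata:
      annotations:
        container.apparmor.security.beta.kubernetes.io/proxy: localhost/kubeproxy-v1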
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-proxy-htk
name: proxy-htk
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/SoftwareVersions/v1
@@ -86,9 +112,16 @@ metadata:
path: .charts.kubernetes.proxy-htk
dest:
path: .source
storagePolicy: cleartext
data:
chart_name: kubernetes-proxy-htk
release: kubernetes-proxy-htk
namespace: kubernetes-proxy-htk
chart_name: proxy-htk
release: proxy-htk
namespace: proxy-htk
timeout: 600
wait:
timeout: 600
upgrade:
no_hooks: true
values: {}
dependencies: []
...


@@ -48,7 +48,6 @@ metadata:
dest:
path: .values.endpoints.ceph_mon
data:
chart_name: osh-infra-ceph-config
release: osh-infra-ceph-config
@@ -56,7 +55,9 @@ data:
wait:
timeout: 900
labels:
release_group: airship-osh-infra-ceph-config
release_group: clcp-osh-infra-ceph-config
resources:
- type: job
install:
no_hooks: false
upgrade:
@@ -65,7 +66,13 @@ data:
delete:
- type: job
labels:
release_group: airship-osh-infra-ceph-config
release_group: clcp-osh-infra-ceph-config
- type: pod
labels:
release_group: clcp-osh-infra-ceph-config
component: test
test:
enabled: true
values:
labels:
job:
@@ -79,7 +86,6 @@ data:
client_secrets: true
rbd_provisioner: false
cephfs_provisioner: false
rgw_keystone_user_and_endpoints: false
bootstrap:
enabled: false
storageclass:
@@ -90,5 +96,5 @@ data:
cephfs:
provision_storage_class: false
dependencies:
- ceph-htk
- ceph-htk
...


@@ -3,6 +3,8 @@ schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: osh-infra-ceph-config
labels:
name: osh-infra-ceph-config-global
layeringDefinition:
abstract: false
layer: global


@@ -3,6 +3,8 @@ schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: osh-infra-dashboards
labels:
name: osh-infra-dashboards-global
layeringDefinition:
abstract: false
layer: global


@@ -61,7 +61,7 @@ metadata:
name: osh_infra_elasticsearch_admin_password
path: .
# LDAP Details
# LDAP Mech ID Details
- src:
schema: pegleg/AccountCatalogue/v1
name: osh_infra_service_accounts
@@ -72,7 +72,7 @@ metadata:
path: .values.endpoints.ldap.auth.admin.password
src:
schema: deckhand/Passphrase/v1
name: osh_keystone_ldap_password
name: osh_keystone_ldap_mechid_password
path: .
data:
chart_name: kibana
@@ -81,7 +81,7 @@ data:
wait:
timeout: 900
labels:
release_group: airship-kibana
release_group: clcp-kibana
install:
no_hooks: false
upgrade:
@@ -90,32 +90,153 @@ data:
delete:
- type: job
labels:
release_group: airship-kibana
release_group: clcp-kibana
create: []
post:
create: []
values:
conf:
apache:
host: |
<VirtualHost *:80>
ProxyRequests off
ProxyPreserveHost On
<Location />
ProxyPass http://localhost:{{ tuple "kibana" "internal" "kibana" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}/
ProxyPassReverse http://localhost:{{ tuple "kibana" "internal" "kibana" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}/
</Location>
<Proxy *>
AuthName "Kibana"
AuthType Basic
AuthBasicProvider file ldap
AuthUserFile /usr/local/apache2/conf/.htpasswd
AuthLDAPBindDN {{ .Values.endpoints.ldap.auth.admin.bind }}
AuthLDAPBindPassword {{ .Values.endpoints.ldap.auth.admin.password }}
AuthLDAPURL {{ tuple "ldap" "public" "ldap" . | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup" }}
Require valid-user
</Proxy>
</VirtualHost>
create_kibana_indexes:
enabled: true
indexes:
- airship
- audit_tsee
- auth
- calico
- ceph
- flows
- jenkins
- journal
- kernel_syslog
- libvirt
- logstash
- lma
- openstack
- utility_access
- openvswitch
default_index: logstash
kibana:
elasticsearch:
pingTimeout: 30000
requestTimeout: 60000
startupTimeout: 15000
httpd: |
ServerRoot "/usr/local/apache2"
Listen 80
LoadModule mpm_event_module modules/mod_mpm_event.so
LoadModule authn_file_module modules/mod_authn_file.so
LoadModule authn_core_module modules/mod_authn_core.so
LoadModule authz_host_module modules/mod_authz_host.so
LoadModule authz_groupfile_module modules/mod_authz_groupfile.so
LoadModule authz_user_module modules/mod_authz_user.so
LoadModule authz_core_module modules/mod_authz_core.so
LoadModule access_compat_module modules/mod_access_compat.so
LoadModule auth_basic_module modules/mod_auth_basic.so
LoadModule ldap_module modules/mod_ldap.so
LoadModule authnz_ldap_module modules/mod_authnz_ldap.so
LoadModule reqtimeout_module modules/mod_reqtimeout.so
LoadModule filter_module modules/mod_filter.so
LoadModule proxy_html_module modules/mod_proxy_html.so
LoadModule log_config_module modules/mod_log_config.so
LoadModule env_module modules/mod_env.so
LoadModule headers_module modules/mod_headers.so
LoadModule setenvif_module modules/mod_setenvif.so
LoadModule version_module modules/mod_version.so
LoadModule proxy_module modules/mod_proxy.so
LoadModule proxy_connect_module modules/mod_proxy_connect.so
LoadModule proxy_http_module modules/mod_proxy_http.so
LoadModule proxy_balancer_module modules/mod_proxy_balancer.so
LoadModule slotmem_shm_module modules/mod_slotmem_shm.so
LoadModule slotmem_plain_module modules/mod_slotmem_plain.so
LoadModule unixd_module modules/mod_unixd.so
LoadModule status_module modules/mod_status.so
LoadModule autoindex_module modules/mod_autoindex.so
<IfModule unixd_module>
User daemon
Group daemon
</IfModule>
<Directory />
AllowOverride none
Require all denied
</Directory>
<Files ".ht*">
Require all denied
</Files>
ErrorLog /dev/stderr
LogLevel warn
<IfModule log_config_module>
LogFormat "%a %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" proxy
LogFormat "%h %l %u %t \"%r\" %>s %b" common
<IfModule logio_module>
LogFormat "%a %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio
</IfModule>
SetEnvIf X-Forwarded-For "^.*\..*\..*\..*" forwarded
CustomLog /dev/stdout common
CustomLog /dev/stdout combined
CustomLog /dev/stdout proxy env=forwarded
</IfModule>
<Directory "/usr/local/apache2/cgi-bin">
AllowOverride None
Options None
Require all granted
</Directory>
<IfModule headers_module>
RequestHeader unset Proxy early
</IfModule>
<IfModule proxy_html_module>
Include conf/extra/proxy-html.conf
</IfModule>
LDAPVerifyServerCert Off
LDAPTrustedGlobalCert CA_BASE64 /dev/null
<VirtualHost *:80>
<Location />
ProxyPass http://localhost:{{ tuple "kibana" "internal" "kibana" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}/
ProxyPassReverse http://localhost:{{ tuple "kibana" "internal" "kibana" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}/
</Location>
<Proxy *>
AuthName "Kibana"
AuthType Basic
AuthBasicProvider file ldap
AuthUserFile /usr/local/apache2/conf/.htpasswd
AuthLDAPBindDN {{ .Values.endpoints.ldap.auth.admin.bind }}
AuthLDAPBindPassword {{ .Values.endpoints.ldap.auth.admin.password }}
AuthLDAPURL {{ tuple "ldap" "public" "ldap" . | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup" | quote }}
Require valid-user
</Proxy>
</VirtualHost>
pod:
affinity:
anti:
type:
dashboard: requiredDuringSchedulingIgnoredDuringExecution
weight:
default: 100
replicas:
kibana: 3
resources:
enabled: true
apache_proxy:
limits:
memory: "1024Mi"
cpu: "2000m"
requests:
memory: "128Mi"
cpu: "100m"
kibana:
requests:
memory: "1024Mi"
cpu: "1000m"
limits:
memory: "4096Mi"
cpu: "2000m"
lifecycle:
upgrades:
deployments:
rolling_update:
max_unavailable: 50%
labels:
kibana:
node_selector_key: openstack-control-plane
@@ -123,6 +244,14 @@ data:
job:
node_selector_key: openstack-control-plane
node_selector_value: enabled
dependencies:
- osh-infra-helm-toolkit
network:
kibana:
ingress:
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
nginx.ingress.kubernetes.io/affinity: cookie
nginx.ingress.kubernetes.io/session-cookie-name: kube-ingress-session-kibana
nginx.ingress.kubernetes.io/session-cookie-hash: sha1
nginx.ingress.kubernetes.io/session-cookie-expires: "600"
nginx.ingress.kubernetes.io/session-cookie-max-age: "600"
...
