Add integration tests and fix BMO integration

The commit adds integration test that includes baremetal operator
- test is driven by airshipctl phases
- Deploys BMO from airshipctl repository as a phase
- Verifies that after VINO-CR is deployed BMHs are created
- Verifies that BMO can install an image into those BMHs using pxe
- Various fixes that allow to integrate with BMO
- Disables password authentication for BMHs until we have a fix
- BMO fails to authenticate against simple auth provided by nginx
- Removes unit-tests for BMO creation. The whole approach of
requesting VMs from vino-builder should be changed. When we have
final view of the process, we will well define vino-builder API
and add unit-tests to vino controller and builder

Change-Id: I51976ca20811b227ecb069c4ffd81d8afe086e57
changes/32/790532/3
Kostiantyn Kalynovskyi 1 year ago
parent 9e920c9367
commit 31f5e96402
  1. 44
      config/manager/daemonset-template.yaml
  2. 52
      config/manager/flavor-templates.yaml
  3. 2
      config/manager/flavors.yaml
  4. 2
      config/phases/baremetal-operator/cleanup/kustomization.yaml
  5. 23
      config/phases/baremetal-operator/cleanup/smp.yaml
  6. 33
      config/phases/baremetal-operator/env-vars.yaml
  7. 34
      config/phases/baremetal-operator/image-versions.yaml
  8. 15
      config/phases/baremetal-operator/ironic-var-substitution.yaml
  9. 12
      config/phases/baremetal-operator/kustomization.yaml
  10. 22
      config/phases/baremetal-operator/watched_namespace.yaml
  11. 12
      config/phases/cluster-map.yaml
  12. 72
      config/phases/executors.yaml
  13. 6
      config/phases/kustomization.yaml
  14. 2
      config/phases/metadata.yaml
  15. 35
      config/phases/phase-helpers/check-bmh.sh
  16. 26
      config/phases/phase-helpers/check-daemonset.sh
  17. 25
      config/phases/phase-helpers/delete-vino-cr.sh
  18. 27
      config/phases/phase-helpers/kustomization.yaml
  19. 15
      config/phases/phase-helpers/node-labler.sh
  20. 24
      config/phases/phase-helpers/wait-for-bmh.sh
  21. 103
      config/phases/phases.yaml
  22. 16
      config/phases/plans.yaml
  23. 4
      config/samples/kustomization.yaml
  24. 56
      config/samples/vino_cr.yaml
  25. 79
      config/samples/vino_cr_4_workers_1_cp.yaml
  26. 2
      pkg/api/v1/vino_types.go
  27. 179
      pkg/controllers/bmh_test.go
  28. 45
      pkg/controllers/vino_controller.go
  29. 11
      pkg/controllers/vino_controller_test.go
  30. 293
      pkg/managers/bmh.go
  31. 29
      playbooks/integration-test-airshipctl.yaml
  32. 3
      playbooks/integration-test-bash.yaml
  33. 31
      tools/deployment/configure-airship.sh
  34. 37
      tools/deployment/configure-bridges.sh
  35. 8
      tools/deployment/install-airship.sh
  36. 6
      tools/deployment/run-test-plan.sh
  37. 10
      tools/deployment/test-cr.sh
  38. 12
      zuul.d/jobs.yaml
  39. 9
      zuul.d/projects.yaml

@ -49,20 +49,48 @@ spec:
mountPath: /etc/libvirt/hooks
- name: etc-storage
mountPath: /etc/libvirt/storage
- name: var-lib-vino
mountPath: /var/lib/vino
- name: sushy
image: quay.io/metal3-io/sushy-tools
imagePullPolicy: IfNotPresent
command: ["/usr/local/bin/sushy-emulator", "--port", "5000"]
command: ["/usr/local/bin/sushy-emulator", "-i", "::", "--debug", "--port", "8000"]
volumeMounts:
- name: var-run-libvirt
mountPath: /var/run/libvirt
- name: var-lib-libvirt
mountPath: /var/lib/libvirt
- name: vino-reverse-proxy
image: quay.io/airshipit/vino-reverse-proxy
ports:
- containerPort: 8000
hostPort: 8000
readinessProbe:
httpGet:
path: /redfish/v1/Systems
host: 127.0.0.1
port: 8000
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
httpGet:
path: /redfish/v1/Systems
host: 127.0.0.1
port: 8000
initialDelaySeconds: 10
periodSeconds: 20
# - name: vino-reverse-proxy
# image: quay.io/airshipit/vino-reverse-proxy
# ports:
# - containerPort: 8000
# hostPort: 8000
# readinessProbe:
# tcpSocket:
# port: 8000
# host: 127.0.0.1
# initialDelaySeconds: 10
# periodSeconds: 5
# livenessProbe:
# tcpSocket:
# port: 8000
# host: 127.0.0.1
# initialDelaySeconds: 30
# periodSeconds: 30
- name: labeler
image: quay.io/airshipit/nodelabeler
imagePullPolicy: IfNotPresent
@ -176,3 +204,7 @@ spec:
hostPath:
path: /etc/vino-hooks
type: DirectoryOrCreate
- name: var-lib-vino
hostPath:
path: /var/lib/vino
type: DirectoryOrCreate

@ -79,20 +79,22 @@ flavorTemplates:
</interface>
{% endfor %}
<serial type="pty">
<source path="/dev/pts/3"/>
<log file="/var/lib/vino/instances/{{ nodename }}.console.log" append="off"/>
<target type="isa-serial" port="0">
<model name="isa-serial"/>
</target>
<alias name="serial0"/>
<serial type='file'>
<source path='/var/lib/libvirt/{{ nodename }}-console.log'/>
</serial>
<console type="pty" tty="/dev/pts/3">
<source path="/dev/pts/3"/>
<log file="/var/lib/vino/instances/{{ nodename }}.console.log" append="off"/>
<target type="serial" port="0"/>
<alias name="serial0"/>
<serial type='pty'/>
<console type='file'>
<source path='/var/lib/libvirt/{{ nodename }}-console.log'/>
<target type='serial'/>
</console>
{% if domain.enable_vnc | default(false) %}
<graphics type='vnc' autoport='yes' listen='0.0.0.0'>
<listen type='address' address='0.0.0.0'/>
</graphics>
{% endif %}
<memballoon model="virtio">
<stats period="10"/>
<alias name="balloon0"/>
@ -193,20 +195,22 @@ flavorTemplates:
</interface>
{% endfor %}
<serial type="pty">
<source path="/dev/pts/3"/>
<log file="/var/lib/vino/instances/{{ nodename }}.console.log" append="off"/>
<target type="isa-serial" port="0">
<model name="isa-serial"/>
</target>
<alias name="serial0"/>
<serial type='file'>
<source path='/var/lib/libvirt/{{ nodename }}-console.log'/>
</serial>
<console type="pty" tty="/dev/pts/3">
<source path="/dev/pts/3"/>
<log file="/var/lib/vino/instances/{{ nodename }}.console.log" append="off"/>
<target type="serial" port="0"/>
<alias name="serial0"/>
<serial type='pty'/>
<console type='file'>
<source path='/var/lib/libvirt/{{ nodename }}-console.log'/>
<target type='serial'/>
</console>
{% if domain.enable_vnc | default(false) %}
<graphics type='vnc' autoport='yes' listen='0.0.0.0'>
<listen type='address' address='0.0.0.0'/>
</graphics>
{% endif %}
<memballoon model="virtio">
<stats period="10"/>
<alias name="balloon0"/>

@ -2,10 +2,8 @@ flavors:
master:
vcpus: 1
memory: 4
hugepages: true
rootSize: 30
worker:
vcpus: 1
memory: 2
hugepages: true
rootSize: 10

@ -0,0 +1,23 @@
apiVersion: builtin
kind: PatchStrategicMergeTransformer
metadata:
name: bmo-cleanup
patches: |-
---
apiVersion: airshipit.org/v1alpha1
kind: VersionsCatalogue
metadata:
name: versions-airshipctl
$patch: delete
---
apiVersion: airshipit.org/v1alpha1
kind: NetworkCatalogue
metadata:
name: networking
$patch: delete
---
apiVersion: airshipit.org/v1alpha1
kind: VariableCatalogue
metadata:
name: env-vars-catalogue
$patch: delete

@ -0,0 +1,33 @@
apiVersion: airshipit.org/v1alpha1
kind: Templater
metadata:
name: env-vars-template
labels:
airshipit.org/deploy-k8s: "false"
annotations:
config.kubernetes.io/function: |-
container:
image: quay.io/airshipit/templater:v2
envs:
- HTTP_PROXY
- HTTPS_PROXY
- http_proxy
- https_proxy
- NO_PROXY
- no_proxy
template: |
---
apiVersion: airshipit.org/v1alpha1
kind: VariableCatalogue
metadata:
labels:
airshipit.org/deploy-k8s: "false"
name: env-vars-catalogue
env:
HTTP_PROXY: '{{ env "HTTP_PROXY" }}'
HTTPS_PROXY: '{{ env "HTTPS_PROXY" }}'
http_proxy: '{{ env "http_proxy" }}'
https_proxy: '{{ env "https_proxy" }}'
NO_PROXY: '{{ env "NO_PROXY" }}'
no_proxy: '{{ env "no_proxy" }}'
WATCH_NAMESPACE: ""

@ -0,0 +1,34 @@
# The default versions catalogue for functions hosted in the airshipctl project.
# These values can be overridden at the site, type, etc levels as appropriate.
apiVersion: airshipit.org/v1alpha1
kind: VersionsCatalogue
metadata:
name: versions-airshipctl
labels:
airshipit.org/deploy-k8s: "false"
spec:
images:
baremetal_operator:
ironic: # ironic Deployment
init_bootstrap:
image: quay.io/centos/centos:8.3.2011
init_images:
image: quay.io/airshipit/ipa:latest
qcow_bundle:
image: quay.io/airshipit/qcow-bundle:latest-ubuntu_focal
dnsmasq:
image: quay.io/metal3-io/ironic:capm3-v0.4.0
httpd:
image: quay.io/metal3-io/ironic:capm3-v0.4.0
ironic:
image: quay.io/metal3-io/ironic:capm3-v0.4.0
ironic_inspector:
image: quay.io/metal3-io/ironic-inspector:capm3-v0.4.0
metal3_baremetal_operator: # metal3-baremetal-operator Deployment
baremetal_operator:
image: quay.io/metal3-io/baremetal-operator:capm3-v0.4.0
ironic_proxy:
image: alpine/socat
ironic_inspector_proxy:
image: alpine/socat

@ -0,0 +1,15 @@
apiVersion: airshipit.org/v1alpha1
kind: NetworkCatalogue
metadata:
name: networking
spec:
ironic:
provisioningIp: "172.3.3.1"
dhcpRange: "172.3.3.200,172.3.3.250"
ironicAutomatedClean: ""
httpPort: ""
ironicFastTrack: ""
deployKernelUrl: ""
deployRamdiskUrl: ""
ironicEndpoint: ""
ironicInspectorEndpoint: ""

@ -0,0 +1,12 @@
resources:
- ironic-var-substitution.yaml
- image-versions.yaml
- ../../../../airshipctl/manifests/function/baremetal-operator/
transformers:
- ../../../../airshipctl/manifests/function/baremetal-operator/replacements
- watched_namespace.yaml
- cleanup
generators:
- env-vars.yaml

@ -0,0 +1,22 @@
# This rule injects the WATCH_NAMESPACE value from the env-vars-catalogue
# into the metal3-baremetal-operator Deployment.
apiVersion: airshipit.org/v1alpha1
kind: ReplacementTransformer
metadata:
name: watched_namespace_change
annotations:
config.kubernetes.io/function: |-
container:
image: quay.io/airshipit/replacement-transformer:v2
replacements:
# WATCH_NAMESPACE env var for the baremetal-operator container
- source:
objref:
kind: VariableCatalogue
name: env-vars-catalogue
fieldref: env.WATCH_NAMESPACE
target:
objref:
kind: Deployment
name: metal3-baremetal-operator
fieldrefs: ["{.spec.template.spec.containers[?(.name == 'baremetal-operator')].env[?(.name == 'WATCH_NAMESPACE')].value}"]

@ -0,0 +1,12 @@
---
apiVersion: airshipit.org/v1alpha1
kind: ClusterMap
metadata:
name: main-map
map:
minikube:
kubeconfigSources:
- type: "filesystem"
filesystem:
path: ~/.kube/config
contextName: minikube

@ -0,0 +1,72 @@
---
apiVersion: airshipit.org/v1alpha1
kind: KubernetesApply
metadata:
name: default-applier
config:
waitOptions:
timeout: 1000
pruneOptions:
prune: false
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
name: node-labels-rack-server
spec:
image: quay.io/airshipit/toolbox:latest
hostNetwork: true
envVars:
- NODE_LABELS=airshipit.org/server=s1 airshipit.org/rack=r1
configRef:
kind: ConfigMap
name: node-labler
apiVersion: v1
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
name: check-daemonset
spec:
image: quay.io/airshipit/toolbox:latest
hostNetwork: true
configRef:
kind: ConfigMap
name: check-daemonset
apiVersion: v1
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
name: check-bmh
spec:
image: quay.io/airshipit/toolbox:latest
hostNetwork: true
configRef:
kind: ConfigMap
name: check-bmh
apiVersion: v1
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
name: wait-bmh
spec:
image: quay.io/airshipit/toolbox:latest
hostNetwork: true
configRef:
kind: ConfigMap
name: wait-bmh
apiVersion: v1
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
name: delete-vino-cr
spec:
image: quay.io/airshipit/toolbox:latest
hostNetwork: true
configRef:
kind: ConfigMap
name: delete-vino-cr
apiVersion: v1

@ -0,0 +1,6 @@
resources:
- cluster-map.yaml
- executors.yaml
- phases.yaml
- plans.yaml
- phase-helpers

@ -0,0 +1,2 @@
phase:
path: config/phases

@ -0,0 +1,35 @@
#!/bin/sh
set -xe
# Asserts that ViNO created the expected number of BareMetalHosts and that
# node labels were copied onto them. Exits non-zero (via set -e) on mismatch.
# test will assert that these labels exist on the BMHs
: ${NODE_COPY_LABELS:="airshipit.org/server=s1,airshipit.org/rack=r1"}
# test will assert that these labels exist on control plane BMHs
: ${CONTROL_PLANE_LABELS:="airshipit.org/k8s-role=master"}
# test will assert that these labels exist on worker BMHs
: ${WORKER_LABELS:="airshipit.org/k8s-role=worker"}
echo "Checking control plane baremetal hosts created by ViNO" >&2
# Count BMHs matching both the copied node labels and the control plane role label
controlPlaneCount=$(kubectl get baremetalhosts \
--context "${KCTL_CONTEXT}" \
--namespace vino-system \
--selector "${NODE_COPY_LABELS},${CONTROL_PLANE_LABELS}" \
--output name | wc -l)
echo "Control plane BMH count ${controlPlaneCount}" >&2
# With this test exactly 1 control plane node must have been created by VINO controller
[ "$controlPlaneCount" -eq "1" ]
echo "Control plane BMH count verified" >&2
# NOTE(review): the worker check below is disabled. Before re-enabling, fix the
# variable case mismatch (WorkerCount is assigned but workerCount is tested) and
# reconcile the comment ("exactly 4 workers") with the assertion (-eq "1").
#Echo "Checking worker baremetal hosts created by ViNO" >&2
#WorkerCount=$(kubectl get baremetalhosts \
# --context "${KCTL_CONTEXT}" \
# --namespace vino-system \
# --selector "${NODE_COPY_LABELS},${WORKER_LABELS}" \
# --output name | wc -l)
#
#Echo "Worker BMH count ${workerCount}" >&2
## With this test exactly 4 workers must have been created by VINO controller
#[ "$workerCount" -eq "1" ]
#Echo "Worker BMH count verified" >&2

@ -0,0 +1,26 @@
#!/bin/sh
set -xe
# Waits until the vino-builder daemonset exists, retrying a bounded number
# of times. Exits 1 when the retry budget is exhausted.

# Name of the daemonset
: ${DAEMONSET_NAME:="default-vino-test-cr"}
# Namespace of the daemonset
: ${DAEMONSET_NAMESPACE:="vino-system"}
# Maximum retries
: ${MAX_RETRY:="30"}
# How long to wait between retries in seconds
: ${RETRY_INTERVAL_SECONDS:="2"}

# BUGFIX: the message previously interpolated ${vino-system}, which is the
# POSIX "use default" expansion of an unset variable named "vino" and printed
# the literal string "system"; it should reference ${DAEMONSET_NAMESPACE}.
echo "Verifying that daemonset ${DAEMONSET_NAME} created in namespace ${DAEMONSET_NAMESPACE} exists" >&2
count=0
until kubectl --context "${KCTL_CONTEXT}" -n "${DAEMONSET_NAMESPACE}" get ds "${DAEMONSET_NAME}" >&2; do
  count=$((count + 1))
  if [ "${count}" -eq "${MAX_RETRY}" ]; then
    echo 'Timed out waiting for daemonset to exist' >&2
    exit 1
  fi
  echo "Retrying to get daemonset attempt ${count}/${MAX_RETRY}" >&2
  sleep "${RETRY_INTERVAL_SECONDS}"
done
echo "Successfully verified that daemonset ${DAEMONSET_NAMESPACE}/${DAEMONSET_NAME} exists" >&2

@ -0,0 +1,25 @@
#!/bin/sh
set -xe
# Deletes all ViNO CRs and waits for the controller finalizer to remove the
# network-values annotation from the node, within TIMEOUT seconds.

TIMEOUT=${TIMEOUT:-600}
end=$(($(date +%s) + $TIMEOUT))

timeout 180 kubectl delete vino --all --context "$KCTL_CONTEXT" >&2

node_name=$(kubectl --context "$KCTL_CONTEXT" get node -o name)
while true; do
  annotation=$(kubectl --context "$KCTL_CONTEXT" get "$node_name" -o=jsonpath="{.metadata.annotations.airshipit\.org/vino\.network-values}")
  # BUGFIX: POSIX test uses '=' for string comparison; '==' is a bashism
  # that fails under dash (the script runs with /bin/sh).
  if [ "${annotation}" = "" ]
  then
    echo "Successfully removed annotation from a node" >&2
    break
  else
    now=$(date +%s)
    if [ "$now" -gt "$end" ]; then
      echo "Failed to remove annotation from node ${node_name} after deleting vino CR, exiting" >&2
      exit 1
    fi
    sleep 15
  fi
done

@ -0,0 +1,27 @@
configMapGenerator:
- name: node-labler
options:
disableNameSuffixHash: true
files:
- script=node-labler.sh
- name: check-bmh
options:
disableNameSuffixHash: true
files:
- script=check-bmh.sh
- name: check-daemonset
options:
disableNameSuffixHash: true
files:
- script=check-daemonset.sh
- name: wait-bmh
options:
disableNameSuffixHash: true
files:
- script=wait-for-bmh.sh
- name: delete-vino-cr
options:
disableNameSuffixHash: true
files:
- script=delete-vino-cr.sh

@ -0,0 +1,15 @@
#!/bin/sh
set -xe
# Applies NODE_LABELS to NODE_NAME (or to all nodes when NODE_NAME is unset).

# If NODE_NAME is not set, all nodes will be labeled
: ${NODE_NAME:="--all"}
# Node label example: NODE_LABELS="airshipit.org/rack=r1 airshipit.org/server=s1"
: ${NODE_LABELS:=""}
echo "Labeling node(s) ${NODE_NAME} with labels ${NODE_LABELS}" >&2
# BUGFIX: quote KCTL_CONTEXT so an empty or space-containing context name is
# passed as a single argument. NODE_NAME and NODE_LABELS stay intentionally
# unquoted: they may expand to multiple space-separated arguments.
kubectl label node \
  --context "$KCTL_CONTEXT" \
  --overwrite \
  ${NODE_NAME} ${NODE_LABELS} >&2

@ -0,0 +1,24 @@
#!/bin/sh
set -xe
# Waits up to TIMEOUT seconds for BMHs in vino-system to reach the
# "ready" provisioning state, polling every 15 seconds.

TIMEOUT=${TIMEOUT:-3600}
end=$(($(date +%s) + $TIMEOUT))
while true; do
  # TODO (kkalynovskyi) figure out how we can handle multiple BMHs
  # BUGFIX: POSIX test uses '=' for string comparison; '==' is a bashism
  # that fails under dash (the script runs with /bin/sh).
  if [ "$(kubectl get bmh --context "$KCTL_CONTEXT" -n vino-system -o jsonpath='{.items[*].status.provisioning.state}')" = "ready" ]
  then
    echo "BMH successfully reached provisioning state ready" 1>&2
    break
  else
    now=$(date +%s)
    if [ "$now" -gt "$end" ]; then
      echo "BMH(s) didn't reach provisioning state ready in given timeout ${TIMEOUT}" 1>&2
      exit 1
    fi
    sleep 15
  fi
done

@ -0,0 +1,103 @@
---
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
name: deploy-bmo
clusterName: minikube
config:
executorRef:
apiVersion: airshipit.org/v1alpha1
kind: KubernetesApply
name: default-applier
documentEntryPoint: config/phases/baremetal-operator
---
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
name: deploy-crds
clusterName: minikube
config:
executorRef:
apiVersion: airshipit.org/v1alpha1
kind: KubernetesApply
name: default-applier
documentEntryPoint: config/crd
---
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
name: deploy-controller
clusterName: minikube
config:
executorRef:
apiVersion: airshipit.org/v1alpha1
kind: KubernetesApply
name: default-applier
documentEntryPoint: config/default
---
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
name: node-labels-rack-server
clusterName: minikube
config:
executorRef:
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
name: node-labels-rack-server
---
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
name: deploy-vino-cr
clusterName: minikube
config:
executorRef:
apiVersion: airshipit.org/v1alpha1
kind: KubernetesApply
name: default-applier
documentEntryPoint: config/samples
---
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
name: daemonset-readiness
clusterName: minikube
config:
executorRef:
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
name: check-daemonset
---
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
name: bmh-count
clusterName: minikube
config:
executorRef:
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
name: check-bmh
---
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
name: wait-bmh
clusterName: minikube
config:
executorRef:
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
name: wait-bmh
---
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
name: delete-vino-cr
clusterName: minikube
config:
executorRef:
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
name: delete-vino-cr

@ -0,0 +1,16 @@
---
apiVersion: airshipit.org/v1alpha1
kind: PhasePlan
metadata:
name: test-plan
description: "Runs phases of the ViNO integration test with the baremetal operator"
phases:
- name: deploy-bmo
- name: deploy-crds
- name: deploy-controller
- name: node-labels-rack-server
- name: deploy-vino-cr
- name: daemonset-readiness
- name: bmh-count
- name: wait-bmh
- name: delete-vino-cr

@ -0,0 +1,4 @@
resources:
- ippool.yaml
- vino_cr.yaml
- network-template-secret.yaml

@ -4,7 +4,7 @@ metadata:
name: vino-test-cr
labels: {}
spec:
vmBridge: lo
vmBridge: vm-infra
nodeLabelKeysToCopy:
- "airshipit.org/server"
- "airshipit.org/rack"
@ -24,16 +24,17 @@ spec:
netmask: 255.255.255.0
gateway: $vinobridge # vino will need to populate this from the nodelabel value `airshipit.org/vino.nodebridgegw`
dns_servers: ["135.188.34.124"]
- name: external
subnet: 169.0.0.0/24
macPrefix: "52:54:00:06:00:00"
- name: pxe
subnet: 172.3.3.0/24
type: ipv4
routes:
- network: 0.0.0.0
netmask: 0.0.0.0
gateway: 169.0.0.1
allocationStart: 169.0.0.10
allocationStop: 169.0.0.254
macPrefix: "0A:00:00:00:00:00"
gateway: 172.3.3.1
allocationStart: 172.3.3.10
allocationStop: 172.3.3.199
macPrefix: "52:54:00:09:00:00"
nodes:
- name: master
count: 1
@ -44,49 +45,16 @@ spec:
namespace: "default"
labels:
vmFlavor: master
# libvirtTemplate:
# name: libvirt-template-master
# namespace: vino-system
bootInterfaceName: management
bootInterfaceName: pxe
networkInterfaces:
- name: management
- name: vm-infra
type: bridge
network: management
mtu: 1500
diskDrives:
- name: root
type: qcow2
path: /home/foobar/qemu.img
options:
sizeGb: 30
sparse: true
- name: worker
count: 4
bmhLabels:
airshipit.org/k8s-role: worker
networkDataTemplate:
name: "test-template"
namespace: "default"
labels:
vmFlavor: worker
# libvirtTemplate:
# name: libvirt-template-worker
# namespace: vino-system
bootInterfaceName: management
networkInterfaces:
- name: management
- name: pxe
type: bridge
network: management
network: pxe
mtu: 1500
options:
bridgeName: vminfra-bridge
diskDrives:
- name: root
type: qcow2
path: /home/foobar/qemu.img
options:
sizeGb: 10
sparse: true
bmcCredentials:
username: admin
password: passw0rd

@ -0,0 +1,79 @@
apiVersion: airship.airshipit.org/v1
kind: Vino
metadata:
name: vino-test-cr
labels: {}
spec:
vmBridge: vm-infra
nodeLabelKeysToCopy:
- "airshipit.org/server"
- "airshipit.org/rack"
nodeSelector:
matchLabels:
beta.kubernetes.io/os: linux
configuration:
cpuExclude: 0-1
networks:
- name: management
subnet: 192.168.2.0/20
type: ipv4
allocationStart: 192.168.2.10
allocationStop: 192.168.2.14 # docs should specify that the range should = number of vms (to permit future expansion over multiple vino crs etc)
routes:
- network: 10.0.0.0
netmask: 255.255.255.0
gateway: $vinobridge # vino will need to populate this from the nodelabel value `airshipit.org/vino.nodebridgegw`
dns_servers: ["135.188.34.124"]
macPrefix: "52:54:00:06:00:00"
- name: pxe
subnet: 172.3.3.0/24
type: ipv4
routes:
- network: 0.0.0.0
netmask: 0.0.0.0
gateway: 172.3.3.1
allocationStart: 172.3.3.10
allocationStop: 172.3.3.199
macPrefix: "52:54:00:09:00:00"
nodes:
- name: master
count: 1
bmhLabels:
airshipit.org/k8s-role: master
networkDataTemplate:
name: "test-template"
namespace: "default"
labels:
vmFlavor: master
bootInterfaceName: pxe
networkInterfaces:
- name: vm-infra
type: bridge
network: management
mtu: 1500
- name: pxe
type: bridge
network: pxe
mtu: 1500
- name: worker
count: 4
bmhLabels:
airshipit.org/k8s-role: worker
networkDataTemplate:
name: "test-template"
namespace: "default"
labels:
vmFlavor: worker
bootInterfaceName: pxe
networkInterfaces:
- name: vm-infra
type: bridge
network: management
mtu: 1500
- name: pxe
type: bridge
network: pxe
mtu: 1500
bmcCredentials:
username: admin
password: passw0rd

@ -37,6 +37,8 @@ const (
VinoDefaultGatewayBridgeLabel = "airshipit.org/vino.nodebridgegw"
// VinoNodeNetworkValuesAnnotation vino controller saves ip and mac address information for the node in it
VinoNodeNetworkValuesAnnotation = "airshipit.org/vino.network-values"
// VinoNetworkDataTemplateDefaultKey expected template key networkdata template secret for vino node
VinoNetworkDataTemplateDefaultKey = "template"
)
// Constants for BasicAuth

@ -1,179 +0,0 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"os"
"github.com/go-logr/logr"
metal3 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
vinov1 "vino/pkg/api/v1"
)
// TODO expand tests when network and credential secret support is implemented
var _ = Describe("Test BMH reconciliation", func() {
Context("when there are 2 k8s pods and worker count is 3", func() {
It("creates 6 BMH hosts", func() {
os.Setenv("RUNTIME_NAMESPACE", "vino-system")
defer os.Unsetenv("RUNTIME_NAMESPACE")
rackLabel := "airshipit.org/rack"
serverLabel := "airshipit.org/server"
vino := testVINO()
providedFlavorLabel := "provided-label"
providedFlavorValue := "provided-value"
vino.Spec.NodeLabelKeysToCopy = []string{rackLabel, serverLabel}
vino.Spec.Nodes = []vinov1.NodeSet{
{
Name: "worker",
BMHLabels: map[string]string{
providedFlavorLabel: providedFlavorValue,
},
Count: 3,
NetworkDataTemplate: vinov1.NamespacedName{
Name: "default-template",
Namespace: "default",
},
},
}
podList := &corev1.PodList{
Items: []corev1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node01-pod",
Namespace: "vino-system",
Labels: map[string]string{
vinov1.VinoLabelDSNameSelector: vino.Name,
vinov1.VinoLabelDSNamespaceSelector: vino.Namespace,
},
},
Spec: corev1.PodSpec{
NodeName: "node01",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "node02-pod",
Namespace: "vino-system",
Labels: map[string]string{
vinov1.VinoLabelDSNameSelector: vino.Name,
vinov1.VinoLabelDSNamespaceSelector: vino.Namespace,
},
},
Spec: corev1.PodSpec{
NodeName: "node02",
},
},
},
}
networkTmplSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "default-template",
Namespace: "default",
},
Type: corev1.SecretTypeOpaque,
Data: map[string][]byte{
TemplateDefaultKey: []byte("REPLACEME"),
},
}
rack1 := "r1"
server1 := "s1"
node1Labels := map[string]string{
rackLabel: rack1,
serverLabel: server1,
vinov1.VinoDefaultGatewayBridgeLabel: "127.0.0.1",
}
node1 := &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node01",
Labels: node1Labels,
Annotations: make(map[string]string),
},
Status: corev1.NodeStatus{
Addresses: []corev1.NodeAddress{
{
Type: corev1.NodeInternalIP,
Address: "10.0.0.2",
},
},
},
}
node2 := &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node02",
Annotations: map[string]string{},
Labels: map[string]string{
vinov1.VinoDefaultGatewayBridgeLabel: "127.0.0.1",
},
},
Status: corev1.NodeStatus{
Addresses: []corev1.NodeAddress{
{
Type: corev1.NodeInternalIP,
Address: "10.0.0.1",
},
},
},
}
fake.NewClientBuilder()
reconciler := &VinoReconciler{
Client: fake.NewFakeClient(podList, node1, node2, vino, networkTmplSecret),
}
l := zap.New(zap.UseDevMode(true))
ctx := logr.NewContext(context.Background(), l)
Expect(reconciler.reconcileBMHs(ctx, vino)).Should(Succeed())
bmhName := "default-vino-node01-worker-1"
bmh := &metal3.BareMetalHost{
ObjectMeta: metav1.ObjectMeta{
Name: bmhName,
Namespace: "vino-system",
},
}
networkSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "default-vino-node01-worker-0-network-data",
Namespace: "vino-system",
},
}
Expect(reconciler.Get(ctx, client.ObjectKeyFromObject(bmh), bmh)).Should(Succeed())
Expect(bmh.Spec.BMC.Address).To(Equal("redfish+http://10.0.0.2:8000/redfish/v1/Systems/worker-1"))
Expect(bmh.Labels).To(HaveKeyWithValue(rackLabel, rack1))
Expect(bmh.Labels).To(HaveKeyWithValue(serverLabel, server1))
Expect(bmh.Labels).To(HaveKeyWithValue(providedFlavorLabel, providedFlavorValue))
Expect(reconciler.Get(ctx, client.ObjectKeyFromObject(networkSecret), networkSecret)).Should(Succeed())
Expect(networkSecret.StringData["networkData"]).To(Equal("REPLACEME"))
})
})
})

@ -40,6 +40,7 @@ import (
vinov1 "vino/pkg/api/v1"
"vino/pkg/ipam"
"vino/pkg/managers"
)
const (
@ -198,7 +199,16 @@ func (r *VinoReconciler) ensureDaemonSet(ctx context.Context, vino *vinov1.Vino)
return err
}
if err = r.reconcileBMHs(ctx, vino); err != nil {
bmhManager := &managers.BMHManager{
Namespace: getRuntimeNamespace(),
ViNO: vino,
Client: r.Client,
Ipam: r.Ipam,
Logger: logger,
}
logger.Info("Requesting Virtual Machines from vino-builders")
if err := bmhManager.ScheduleVMs(ctx); err != nil {
return err
}
@ -206,7 +216,12 @@ func (r *VinoReconciler) ensureDaemonSet(ctx context.Context, vino *vinov1.Vino)
defer cancel()
logger.Info("Waiting for daemonset to become ready")
return r.waitDaemonSet(waitTimeoutCtx, dsReady, ds)
if err := r.waitDaemonSet(waitTimeoutCtx, dsReady, ds); err != nil {
return err
}
logger.Info("Creating BaremetalHosts")
return bmhManager.CreateBMHs(ctx)
}
func (r *VinoReconciler) decorateDaemonSet(ctx context.Context, ds *appsv1.DaemonSet, vino *vinov1.Vino) {
@ -339,31 +354,31 @@ func (r *VinoReconciler) SetupWithManager(mgr ctrl.Manager) error {
}
func (r *VinoReconciler) finalize(ctx context.Context, vino *vinov1.Vino) error {
bmhManager := &managers.BMHManager{
Namespace: getRuntimeNamespace(),
ViNO: vino,
Client: r.Client,
Ipam: r.Ipam,
Logger: logr.FromContext(ctx),
}
if err := bmhManager.UnScheduleVMs(ctx); err != nil {
return err
}
// TODO aggregate errors instead
if err := r.Delete(ctx,
&appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: r.getDaemonSetName(vino), Namespace: getRuntimeNamespace(),
},
}); err != nil {
}); err != nil && !apierror.IsNotFound(err) {
return err
}
controllerutil.RemoveFinalizer(vino, vinov1.VinoFinalizer)
return r.Update(ctx, vino)
}
// applyRuntimeObject creates obj in the cluster when it does not exist yet,
// or patches the existing object toward the desired state otherwise.
func applyRuntimeObject(ctx context.Context, key client.ObjectKey, obj client.Object, c client.Client) error {
	// BUGFIX: read the live state into a deep copy. Previously getObj
	// aliased obj, so c.Get clobbered the desired state and
	// client.MergeFrom(getObj) diffed obj against itself, producing an
	// empty (no-op) patch on every update.
	getObj := obj.DeepCopyObject().(client.Object)
	err := c.Get(ctx, key, getObj)
	switch {
	case apierror.IsNotFound(err):
		err = c.Create(ctx, obj)
	case err == nil:
		err = c.Patch(ctx, obj, client.MergeFrom(getObj))
	}
	return err
}
// getRuntimeNamespace returns the namespace the controller runs in, as
// provided by the RUNTIME_NAMESPACE environment variable (empty when unset).
func getRuntimeNamespace() string {
	const runtimeNamespaceEnv = "RUNTIME_NAMESPACE"
	return os.Getenv(runtimeNamespaceEnv)
}

@ -8,7 +8,6 @@ import (
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
vinov1 "vino/pkg/api/v1"
)
@ -20,16 +19,6 @@ func testDS() *appsv1.DaemonSet {
Containers: []corev1.Container{}}}}}
}
func testVINO() *vinov1.Vino {
return &vinov1.Vino{
ObjectMeta: v1.ObjectMeta{
Name: "vino",
Namespace: "default",
},
Spec: vinov1.VinoSpec{
Networks: []vinov1.Network{}}}
}
var _ = Describe("Test Setting Env variables", func() {
Context("when daemonset is created", func() {
l := logr.Discard()

@ -12,7 +12,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
package managers
import (
"bytes"
@ -25,10 +25,9 @@ import (
"github.com/go-logr/logr"
metal3 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
corev1 "k8s.io/api/core/v1"
apimeta "k8s.io/apimachinery/pkg/api/meta"
apierror "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
kerror "k8s.io/apimachinery/pkg/util/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
@ -55,33 +54,108 @@ type generatedValues struct {
BootMACAdress string
}
func (r *VinoReconciler) ensureBMHs(ctx context.Context, vino *vinov1.Vino) error {
// BMHManager requests virtual machines from vino-builder pods and creates
// the corresponding BareMetalHost objects and supporting secrets.
type BMHManager struct {
	// Namespace is where BMHs and secrets are created (the controller runtime namespace)
	Namespace string
	client.Client
	// ViNO is the custom resource being reconciled
	ViNO *vinov1.Vino
	// Ipam allocates IP addresses and MAC addresses for the VMs
	Ipam *ipam.Ipam
	Logger logr.Logger
	// Objects accumulated while scheduling; applied to the cluster by CreateBMHs
	bmhList []*metal3.BareMetalHost
	networkSecrets []*corev1.Secret
	credentialSecrets []*corev1.Secret
}
// ScheduleVMs requests virtual machines from the vino-builder pods by
// annotating their k8s nodes (see requestVMs); builders fulfill the request
// asynchronously, so callers should wait for readiness before CreateBMHs.
func (r *BMHManager) ScheduleVMs(ctx context.Context) error {
	return r.requestVMs(ctx)
}
// CreateBMHs applies the network secrets, BMC credential secrets and
// BareMetalHost objects accumulated by ScheduleVMs to the cluster.
func (r *BMHManager) CreateBMHs(ctx context.Context) error {
	for _, secret := range r.networkSecrets {
		objKey := client.ObjectKeyFromObject(secret)
		r.Logger.Info("Applying network secret", "secret", objKey)
		if err := applyRuntimeObject(ctx, objKey, secret, r.Client); err != nil {
			return err
		}
	}

	for _, secret := range r.credentialSecrets {
		objKey := client.ObjectKeyFromObject(secret)
		// BUGFIX: this log line previously said "network secret" (copy-paste);
		// these are BMC credential secrets.
		r.Logger.Info("Applying credential secret", "secret", objKey)
		if err := applyRuntimeObject(ctx, objKey, secret, r.Client); err != nil {
			return err
		}
	}

	for _, bmh := range r.bmhList {
		objKey := client.ObjectKeyFromObject(bmh)
		r.Logger.Info("Applying BaremetalHost", "BMH", objKey)
		if err := applyRuntimeObject(ctx, objKey, bmh, r.Client); err != nil {
			return err
		}
	}
	return nil
}
// UnScheduleVMs removes the ViNO network-values annotation from every k8s
// node that runs a vino-builder pod for this ViNO object, signalling the
// builders that their VMs are no longer requested.
func (r *BMHManager) UnScheduleVMs(ctx context.Context) error {
	podList, err := r.getPods(ctx)
	if err != nil {
		return err
	}

	for _, pod := range podList.Items {
		k8sNode, err := r.getNode(ctx, pod)
		if err != nil {
			return err
		}

		// IDIOM: reuse the already-fetched map instead of calling
		// GetAnnotations a second time for the nil check.
		annotations := k8sNode.GetAnnotations()
		if annotations == nil {
			// Nothing to clean up on this node.
			continue
		}
		delete(annotations, vinov1.VinoNodeNetworkValuesAnnotation)
		k8sNode.SetAnnotations(annotations)
		// TODO consider accumulating errors instead
		if err = r.Update(ctx, k8sNode); err != nil {
			return err
		}
	}
	return nil
}
func (r *BMHManager) getPods(ctx context.Context) (*corev1.PodList, error) {
labelOpt := client.MatchingLabels{
vinov1.VinoLabelDSNameSelector: vino.Name,
vinov1.VinoLabelDSNamespaceSelector: vino.Namespace,
vinov1.VinoLabelDSNameSelector: r.ViNO.Name,
vinov1.VinoLabelDSNamespaceSelector: r.ViNO.Namespace,
}
nsOpt := client.InNamespace(getRuntimeNamespace())
nsOpt := client.InNamespace(r.Namespace)
podList := &corev1.PodList{}
err := r.List(ctx, podList, labelOpt, nsOpt)
return podList, r.List(ctx, podList, labelOpt, nsOpt)
}
// requestVMs iterates over each vino-builder pod, and annotates a k8s node for the pod
// with a request for VMs. Each vino-builder pod waits for the annotation.
// when annotation with VM request is added to a k8s node, vino manager WaitVMs should be used before creating BMHs
func (r *BMHManager) requestVMs(ctx context.Context) error {
podList, err := r.getPods(ctx)
if err != nil {
return err
}
logger := logr.FromContext(ctx)
logger.Info("Vino daemonset pod count", "count", len(podList.Items))
r.Logger.Info("Vino daemonset pod count", "count", len(podList.Items))
for _, pod := range podList.Items {
logger.Info("Creating baremetal hosts for pod",
r.Logger.Info("Creating baremetal hosts for pod",
"pod name",
types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name},
)
err := r.createIpamNetworks(ctx, vino)
err := r.createIpamNetworks(ctx, r.ViNO)
if err != nil {
return err
}
err = r.createBMHperPod(ctx, vino, pod)
err = r.setBMHs(ctx, pod)
if err != nil {
return err
}
@ -89,45 +163,7 @@ func (r *VinoReconciler) ensureBMHs(ctx context.Context, vino *vinov1.Vino) erro
return nil
}
func (r *VinoReconciler) reconcileBMHs(ctx context.Context, vino *vinov1.Vino) error {
if err := r.ensureBMHs(ctx, vino); err != nil {
err = fmt.Errorf("could not reconcile BaremetalHosts: %w", err)
apimeta.SetStatusCondition(&vino.Status.Conditions, metav1.Condition{
Status: metav1.ConditionFalse,
Reason: vinov1.ReconciliationFailedReason,
Message: err.Error(),
Type: vinov1.ConditionTypeReady,
ObservedGeneration: vino.GetGeneration(),
})
apimeta.SetStatusCondition(&vino.Status.Conditions, metav1.Condition{
Status: metav1.ConditionFalse,
Reason: vinov1.ReconciliationFailedReason,
Message: err.Error(),
Type: vinov1.ConditionTypeBMHReady,
ObservedGeneration: vino.GetGeneration(),
})
if patchStatusErr := r.patchStatus(ctx, vino); patchStatusErr != nil {
err = kerror.NewAggregate([]error{err, patchStatusErr})
err = fmt.Errorf("unable to patch status after BaremetalHosts reconciliation failed: %w", err)
}
return err
}
apimeta.SetStatusCondition(&vino.Status.Conditions, metav1.Condition{
Status: metav1.ConditionTrue,
Reason: vinov1.ReconciliationSucceededReason,
Message: "BaremetalHosts reconciled",
Type: vinov1.ConditionTypeBMHReady,
ObservedGeneration: vino.GetGeneration(),
})
if err := r.patchStatus(ctx, vino); err != nil {
err = fmt.Errorf("unable to patch status after BaremetalHosts reconciliation succeeded: %w", err)
return err
}
return nil
}
func (r *VinoReconciler) createIpamNetworks(ctx context.Context, vino *vinov1.Vino) error {
logger := logr.FromContext(ctx)
func (r *BMHManager) createIpamNetworks(ctx context.Context, vino *vinov1.Vino) error {
for _, network := range vino.Spec.Networks {
subnetRange, err := ipam.NewRange(network.AllocationStart, network.AllocationStop)
if err != nil {
@ -135,7 +171,7 @@ func (r *VinoReconciler) createIpamNetworks(ctx context.Context, vino *vinov1.Vi
}
macPrefix := network.MACPrefix
if macPrefix == "" {
logger.Info("No MACPrefix provided; using default MACPrefix for network",
r.Logger.Info("No MACPrefix provided; using default MACPrefix for network",
"default prefix", DefaultMACPrefix, "network name", network.Name)
macPrefix = DefaultMACPrefix
}