Add integration tests and fix BMO integration

This commit adds an integration test that includes the baremetal operator
- The test is driven by airshipctl phases
- Deploys BMO from the airshipctl repository as a phase
- Verifies that BMHs are created after the VINO CR is deployed
- Verifies that BMO can install an image onto those BMHs using PXE
- Various fixes that allow integration with BMO
- Disables password authentication for BMHs until we have a fix;
  BMO fails to authenticate against the simple auth provided by nginx
- Removes unit tests for BMH creation. The whole approach of
  requesting VMs from vino-builder should change. Once we have a
  final view of the process, we will properly define the vino-builder
  API and add unit tests to the vino controller and builder

Change-Id: I51976ca20811b227ecb069c4ffd81d8afe086e57
Kostiantyn Kalynovskyi 2021-05-11 00:01:52 +00:00
parent 9e920c9367
commit 31f5e96402
39 changed files with 983 additions and 439 deletions
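A minimal sketch of exercising the new phase-driven test end to end, assuming Kubernetes is already up and airshipctl is configured by the deployment scripts added below:

./tools/deployment/install-airship.sh
./tools/deployment/configure-airship.sh
airshipctl plan run test-plan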

View File

@@ -49,20 +49,48 @@ spec:
mountPath: /etc/libvirt/hooks
- name: etc-storage
mountPath: /etc/libvirt/storage
- name: var-lib-vino
mountPath: /var/lib/vino
- name: sushy
image: quay.io/metal3-io/sushy-tools
imagePullPolicy: IfNotPresent
command: ["/usr/local/bin/sushy-emulator", "--port", "5000"]
command: ["/usr/local/bin/sushy-emulator", "-i", "::", "--debug", "--port", "8000"]
volumeMounts:
- name: var-run-libvirt
mountPath: /var/run/libvirt
- name: var-lib-libvirt
mountPath: /var/lib/libvirt
- name: vino-reverse-proxy
image: quay.io/airshipit/vino-reverse-proxy
ports:
- containerPort: 8000
hostPort: 8000
readinessProbe:
httpGet:
path: /redfish/v1/Systems
host: 127.0.0.1
port: 8000
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
httpGet:
path: /redfish/v1/Systems
host: 127.0.0.1
port: 8000
initialDelaySeconds: 10
periodSeconds: 20
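# A hedged manual check against the same endpoint the probes above hit
# (unauthenticated, matching the temporarily disabled BMH password auth):
#   curl -sf http://127.0.0.1:8000/redfish/v1/Systems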
# - name: vino-reverse-proxy
# image: quay.io/airshipit/vino-reverse-proxy
# ports:
# - containerPort: 8000
# hostPort: 8000
# readinessProbe:
# tcpSocket:
# port: 8000
# host: 127.0.0.1
# initialDelaySeconds: 10
# periodSeconds: 5
# livenessProbe:
# tcpSocket:
# port: 8000
# host: 127.0.0.1
# initialDelaySeconds: 30
# periodSeconds: 30
- name: labeler
image: quay.io/airshipit/nodelabeler
imagePullPolicy: IfNotPresent
@@ -176,3 +204,7 @@ spec:
hostPath:
path: /etc/vino-hooks
type: DirectoryOrCreate
- name: var-lib-vino
hostPath:
path: /var/lib/vino
type: DirectoryOrCreate

View File

@@ -79,20 +79,22 @@ flavorTemplates:
</interface>
{% endfor %}
<serial type="pty">
<source path="/dev/pts/3"/>
<log file="/var/lib/vino/instances/{{ nodename }}.console.log" append="off"/>
<target type="isa-serial" port="0">
<model name="isa-serial"/>
</target>
<alias name="serial0"/>
<serial type='file'>
<source path='/var/lib/libvirt/{{ nodename }}-console.log'/>
</serial>
<console type="pty" tty="/dev/pts/3">
<source path="/dev/pts/3"/>
<log file="/var/lib/vino/instances/{{ nodename }}.console.log" append="off"/>
<target type="serial" port="0"/>
<alias name="serial0"/>
<serial type='pty'/>
<console type='file'>
<source path='/var/lib/libvirt/{{ nodename }}-console.log'/>
<target type='serial'/>
</console>
{% if domain.enable_vnc | default(false) %}
<graphics type='vnc' autoport='yes' listen='0.0.0.0'>
<listen type='address' address='0.0.0.0'/>
</graphics>
{% endif %}
<memballoon model="virtio">
<stats period="10"/>
<alias name="balloon0"/>
@@ -193,20 +195,22 @@ flavorTemplates:
</interface>
{% endfor %}
<serial type="pty">
<source path="/dev/pts/3"/>
<log file="/var/lib/vino/instances/{{ nodename }}.console.log" append="off"/>
<target type="isa-serial" port="0">
<model name="isa-serial"/>
</target>
<alias name="serial0"/>
<serial type='file'>
<source path='/var/lib/libvirt/{{ nodename }}-console.log'/>
</serial>
<console type="pty" tty="/dev/pts/3">
<source path="/dev/pts/3"/>
<log file="/var/lib/vino/instances/{{ nodename }}.console.log" append="off"/>
<target type="serial" port="0"/>
<alias name="serial0"/>
<serial type='pty'/>
<console type='file'>
<source path='/var/lib/libvirt/{{ nodename }}-console.log'/>
<target type='serial'/>
</console>
{% if domain.enable_vnc | default(false) %}
<graphics type='vnc' autoport='yes' listen='0.0.0.0'>
<listen type='address' address='0.0.0.0'/>
</graphics>
{% endif %}
<memballoon model="virtio">
<stats period="10"/>
<alias name="balloon0"/>

View File

@@ -2,10 +2,8 @@ flavors:
master:
vcpus: 1
memory: 4
hugepages: true
rootSize: 30
worker:
vcpus: 1
memory: 2
hugepages: true
rootSize: 10

View File

@@ -0,0 +1,2 @@
resources:
- smp.yaml

View File

@@ -0,0 +1,23 @@
apiVersion: builtin
kind: PatchStrategicMergeTransformer
metadata:
name: bmo-cleanup
patches: |-
---
apiVersion: airshipit.org/v1alpha1
kind: VersionsCatalogue
metadata:
name: versions-airshipctl
$patch: delete
---
apiVersion: airshipit.org/v1alpha1
kind: NetworkCatalogue
metadata:
name: networking
$patch: delete
---
apiVersion: airshipit.org/v1alpha1
kind: VariableCatalogue
metadata:
name: env-vars-catalogue
$patch: delete

View File

@@ -0,0 +1,33 @@
apiVersion: airshipit.org/v1alpha1
kind: Templater
metadata:
name: env-vars-template
labels:
airshipit.org/deploy-k8s: "false"
annotations:
config.kubernetes.io/function: |-
container:
image: quay.io/airshipit/templater:v2
envs:
- HTTP_PROXY
- HTTPS_PROXY
- http_proxy
- https_proxy
- NO_PROXY
- no_proxy
template: |
---
apiVersion: airshipit.org/v1alpha1
kind: VariableCatalogue
metadata:
labels:
airshipit.org/deploy-k8s: "false"
name: env-vars-catalogue
env:
HTTP_PROXY: '{{ env "HTTP_PROXY" }}'
HTTPS_PROXY: '{{ env "HTTPS_PROXY" }}'
http_proxy: '{{ env "http_proxy" }}'
https_proxy: '{{ env "https_proxy" }}'
NO_PROXY: '{{ env "NO_PROXY" }}'
no_proxy: '{{ env "no_proxy" }}'
WATCH_NAMESPACE: ""
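# Hedged usage sketch: proxy variables exported in the calling shell are
# rendered into env-vars-catalogue when the templater runs, e.g.
#   export HTTP_PROXY=http://proxy.example.com:3128   # hypothetical proxy
#   airshipctl phase run deploy-bmo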

View File

@@ -0,0 +1,34 @@
# The default versions catalogue for functions hosted in the airshipctl project.
# These values can be overridden at the site, type, etc levels as appropriate.
apiVersion: airshipit.org/v1alpha1
kind: VersionsCatalogue
metadata:
name: versions-airshipctl
labels:
airshipit.org/deploy-k8s: "false"
spec:
images:
baremetal_operator:
ironic: # ironic Deployment
init_bootstrap:
image: quay.io/centos/centos:8.3.2011
init_images:
image: quay.io/airshipit/ipa:latest
qcow_bundle:
image: quay.io/airshipit/qcow-bundle:latest-ubuntu_focal
dnsmasq:
image: quay.io/metal3-io/ironic:capm3-v0.4.0
httpd:
image: quay.io/metal3-io/ironic:capm3-v0.4.0
ironic:
image: quay.io/metal3-io/ironic:capm3-v0.4.0
ironic_inspector:
image: quay.io/metal3-io/ironic-inspector:capm3-v0.4.0
metal3_baremetal_operator: # metal3-baremetal-operator Deployment
baremetal_operator:
image: quay.io/metal3-io/baremetal-operator:capm3-v0.4.0
ironic_proxy:
image: alpine/socat
ironic_inspector_proxy:
image: alpine/socat

View File

@@ -0,0 +1,15 @@
apiVersion: airshipit.org/v1alpha1
kind: NetworkCatalogue
metadata:
name: networking
spec:
ironic:
provisioningIp: "172.3.3.1"
dhcpRange: "172.3.3.200,172.3.3.250"
ironicAutomatedClean: ""
httpPort: ""
ironicFastTrack: ""
deployKernelUrl: ""
deployRamdiskUrl: ""
ironicEndpoint: ""
ironicInspectorEndpoint: ""

View File

@@ -0,0 +1,12 @@
resources:
- ironic-var-substitution.yaml
- image-versions.yaml
- ../../../../airshipctl/manifests/function/baremetal-operator/
transformers:
- ../../../../airshipctl/manifests/function/baremetal-operator/replacements
- watched_namespace.yaml
- cleanup
generators:
- env-vars.yaml

View File

@@ -0,0 +1,22 @@
# These rules inject host-specific information from the `host-catalogue`
# into the hostgenerator-m3 function's Template plugin config.
apiVersion: airshipit.org/v1alpha1
kind: ReplacementTransformer
metadata:
name: watched_namespace_change
annotations:
config.kubernetes.io/function: |-
container:
image: quay.io/airshipit/replacement-transformer:v2
replacements:
# Container versions for the ironic Deployment
- source:
objref:
kind: VariableCatalogue
name: env-vars-catalogue
fieldref: env.WATCH_NAMESPACE
target:
objref:
kind: Deployment
name: metal3-baremetal-operator
fieldrefs: ["{.spec.template.spec.containers[?(.name == 'baremetal-operator')].env[?(.name == 'WATCH_NAMESPACE')].value}"]
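# Hedged sketch to verify the injected value after the phase runs
# (the namespace placeholder is an assumption):
#   kubectl -n <bmo-namespace> get deployment metal3-baremetal-operator \
#     -o jsonpath="{.spec.template.spec.containers[?(@.name=='baremetal-operator')].env}"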

View File

@@ -0,0 +1,12 @@
---
apiVersion: airshipit.org/v1alpha1
kind: ClusterMap
metadata:
name: main-map
map:
minikube:
kubeconfigSources:
- type: "filesystem"
filesystem:
path: ~/.kube/config
contextName: minikube

View File

@@ -0,0 +1,72 @@
---
apiVersion: airshipit.org/v1alpha1
kind: KubernetesApply
metadata:
name: default-applier
config:
waitOptions:
timeout: 1000
pruneOptions:
prune: false
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
name: node-labels-rack-server
spec:
image: quay.io/airshipit/toolbox:latest
hostNetwork: true
envVars:
- NODE_LABELS=airshipit.org/server=s1 airshipit.org/rack=r1
configRef:
kind: ConfigMap
name: node-labler
apiVersion: v1
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
name: check-daemonset
spec:
image: quay.io/airshipit/toolbox:latest
hostNetwork: true
configRef:
kind: ConfigMap
name: check-daemonset
apiVersion: v1
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
name: check-bmh
spec:
image: quay.io/airshipit/toolbox:latest
hostNetwork: true
configRef:
kind: ConfigMap
name: check-bmh
apiVersion: v1
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
name: wait-bmh
spec:
image: quay.io/airshipit/toolbox:latest
hostNetwork: true
configRef:
kind: ConfigMap
name: wait-bmh
apiVersion: v1
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
name: delete-vino-cr
spec:
image: quay.io/airshipit/toolbox:latest
hostNetwork: true
configRef:
kind: ConfigMap
name: delete-vino-cr
apiVersion: v1
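# Each GenericContainer above executes the script held in its referenced
# ConfigMap inside the toolbox image; hedged usage sketch:
#   airshipctl phase run bmh-count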

View File

@@ -0,0 +1,6 @@
resources:
- cluster-map.yaml
- executors.yaml
- phases.yaml
- plans.yaml
- phase-helpers

View File

@@ -0,0 +1,2 @@
phase:
path: config/phases

View File

@@ -0,0 +1,35 @@
#!/bin/sh
set -xe
# test will assert that these labels exist on the BMHs
: ${NODE_COPY_LABELS:="airshipit.org/server=s1,airshipit.org/rack=r1"}
# test will assert that these labels exist on control plane BMHs
: ${CONTROL_PLANE_LABELS:="airshipit.org/k8s-role=master"}
# test will assert that these labels exist on worker BMHs
: ${WORKER_LABELS:="airshipit.org/k8s-role=worker"}
echo "Checking control plane baremetal hosts created by ViNO" >&2
controlPlaneCount=$(kubectl get baremetalhosts \
--context "${KCTL_CONTEXT}" \
--namespace vino-system \
--selector "${NODE_COPY_LABELS},${CONTROL_PLANE_LABELS}" \
--output name | wc -l)
echo "Control plane BMH count ${controlPlaneCount}" >&2
# With this test exactly 1 control plane node must have been created by VINO controller
[ "$controlPlaneCount" -eq "1" ]
echo "Control plane BMH count verified" >&2
#Echo "Checking worker baremetal hosts created by ViNO" >&2
#WorkerCount=$(kubectl get baremetalhosts \
# --context "${KCTL_CONTEXT}" \
# --namespace vino-system \
# --selector "${NODE_COPY_LABELS},${WORKER_LABELS}" \
# --output name | wc -l)
#
#Echo "Worker BMH count ${workerCount}" >&2
## With this test exactly 4 workers must have been created by VINO controller
#[ "$workerCount" -eq "1" ]
#Echo "Worker BMH count verified" >&2

View File

@@ -0,0 +1,26 @@
#!/bin/sh
set -xe
# Name of the daemonset
: ${DAEMONSET_NAME:="default-vino-test-cr"}
# Namespace of the daemonset
: ${DAEMONSET_NAMESPACE:="vino-system"}
# Maximum retries
: ${MAX_RETRY:="30"}
# How long to wait between retries in seconds
: ${RETRY_INTERVAL_SECONDS:="2"}
echo "Verifying that daemonset ${DAEMONSET_NAME} created in namespace ${vino-system} exists" >&2
count=0
until kubectl --context "${KCTL_CONTEXT}" -n "${DAEMONSET_NAMESPACE}" get ds "${DAEMONSET_NAME}" >&2; do
count=$((count + 1))
if [ "${count}" -eq "${MAX_RETRY}" ]; then
echo 'Timed out waiting for daemonset to exist' >&2
exit 1
fi
echo "Retrying to get daemonset attempt ${count}/${MAX_RETRY}" >&2
sleep "${RETRY_INTERVAL_SECONDS}"
done
echo "Succesfuly verified that daemonset ${DAEMONSET_NAMESPACE}/${DAEMONSET_NAME} exists" >&2

View File

@@ -0,0 +1,25 @@
#!/bin/sh
set -xe
TIMEOUT=${TIMEOUT:-600}
end=$(($(date +%s) + $TIMEOUT))
timeout 180 kubectl delete vino --all --context $KCTL_CONTEXT >&2
node_name=$(kubectl --context $KCTL_CONTEXT get node -o name)
while true; do
annotation=$(kubectl --context $KCTL_CONTEXT get $node_name -o=jsonpath="{.metadata.annotations.airshipit\.org/vino\.network-values}")
if [ "${annotation}" == "" ]
then
echo "Succesfuly remove annotation from a node" >&2
break
else
now=$(date +%s)
if [ $now -gt $end ]; then
echo "Failed to removed annotation from node ${node_name} after deleting vino CR, exiting" >&2
exit 1
fi
sleep 15
fi
done

View File

@@ -0,0 +1,27 @@
configMapGenerator:
- name: node-labler
options:
disableNameSuffixHash: true
files:
- script=node-labler.sh
- name: check-bmh
options:
disableNameSuffixHash: true
files:
- script=check-bmh.sh
- name: check-daemonset
options:
disableNameSuffixHash: true
files:
- script=check-daemonset.sh
- name: wait-bmh
options:
disableNameSuffixHash: true
files:
- script=wait-for-bmh.sh
- name: delete-vino-cr
options:
disableNameSuffixHash: true
files:
- script=delete-vino-cr.sh

View File

@@ -0,0 +1,15 @@
#!/bin/sh
set -xe
# If NODE_NAME is not set, all nodes will be labeled
: ${NODE_NAME:="--all"}
# Node label example: NODE_LABELS="airshipit.org/rack=r1 airshipit.org/server=s1"
: ${NODE_LABELS:=""}
echo "Labeling node(s) ${NODE_NAME} with labels ${NODE_LABELS}" >&2
kubectl label node \
--context $KCTL_CONTEXT \
--overwrite \
${NODE_NAME} ${NODE_LABELS} >&2
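# Hedged local usage sketch (the script normally runs via the
# node-labels-rack-server phase):
#   KCTL_CONTEXT=minikube NODE_LABELS="airshipit.org/rack=r1" ./node-labler.sh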

View File

@@ -0,0 +1,24 @@
#!/bin/sh
set -xe
TIMEOUT=${TIMEOUT:-3600}
end=$(($(date +%s) + $TIMEOUT))
while true; do
# TODO (kkalynovskyi) figure out how we can handle multiple BMHs
if [ "$(kubectl get bmh --context $KCTL_CONTEXT -n vino-system -o jsonpath='{.items[*].status.provisioning.state}')" == "ready" ]
then
echo "BMH successfully reached provisioning state ready" 1>&2
break
else
now=$(date +%s)
if [ $now -gt $end ]; then
echo "BMH(s) didn't reach provisioning state ready in given timeout ${TIMEOUT}" 1>&2
exit 1
fi
sleep 15
fi
done
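# A hedged sketch for the multi-BMH TODO above: succeed only once every
# host reports "ready" (assumes the same context and namespace):
#   for s in $(kubectl get bmh --context "$KCTL_CONTEXT" -n vino-system \
#       -o jsonpath='{.items[*].status.provisioning.state}'); do
#     [ "$s" = "ready" ] || exit 1
#   done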

config/phases/phases.yaml (new file, 103 lines)
View File

@@ -0,0 +1,103 @@
---
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
name: deploy-bmo
clusterName: minikube
config:
executorRef:
apiVersion: airshipit.org/v1alpha1
kind: KubernetesApply
name: default-applier
documentEntryPoint: config/phases/baremetal-operator
---
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
name: deploy-crds
clusterName: minikube
config:
executorRef:
apiVersion: airshipit.org/v1alpha1
kind: KubernetesApply
name: default-applier
documentEntryPoint: config/crd
---
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
name: deploy-controller
clusterName: minikube
config:
executorRef:
apiVersion: airshipit.org/v1alpha1
kind: KubernetesApply
name: default-applier
documentEntryPoint: config/default
---
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
name: node-labels-rack-server
clusterName: minikube
config:
executorRef:
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
name: node-labels-rack-server
---
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
name: deploy-vino-cr
clusterName: minikube
config:
executorRef:
apiVersion: airshipit.org/v1alpha1
kind: KubernetesApply
name: default-applier
documentEntryPoint: config/samples
---
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
name: daemonset-readiness
clusterName: minikube
config:
executorRef:
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
name: check-daemonset
---
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
name: bmh-count
clusterName: minikube
config:
executorRef:
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
name: check-bmh
---
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
name: wait-bmh
clusterName: minikube
config:
executorRef:
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
name: wait-bmh
---
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
name: delete-vino-cr
clusterName: minikube
config:
executorRef:
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
name: delete-vino-cr

config/phases/plans.yaml (new file, 16 lines)
View File

@@ -0,0 +1,16 @@
---
apiVersion: airshipit.org/v1alpha1
kind: PhasePlan
metadata:
name: test-plan
description: "Runs phases to build iso image"
phases:
- name: deploy-bmo
- name: deploy-crds
- name: deploy-controller
- name: node-labels-rack-server
- name: deploy-vino-cr
- name: daemonset-readiness
- name: bmh-count
- name: wait-bmh
- name: delete-vino-cr
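# Phases from the plan can also be run one at a time while iterating,
# e.g. (hedged usage sketch):
#   airshipctl phase run wait-bmh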

View File

@@ -0,0 +1,4 @@
resources:
- ippool.yaml
- vino_cr.yaml
- network-template-secret.yaml

View File

@@ -4,7 +4,7 @@ metadata:
name: vino-test-cr
labels: {}
spec:
vmBridge: lo
vmBridge: vm-infra
nodeLabelKeysToCopy:
- "airshipit.org/server"
- "airshipit.org/rack"
@@ -24,16 +24,17 @@ spec:
netmask: 255.255.255.0
gateway: $vinobridge # vino will need to populate this from the nodelabel value `airshipit.org/vino.nodebridgegw`
dns_servers: ["135.188.34.124"]
- name: external
subnet: 169.0.0.0/24
macPrefix: "52:54:00:06:00:00"
- name: pxe
subnet: 172.3.3.0/24
type: ipv4
routes:
- network: 0.0.0.0
netmask: 0.0.0.0
gateway: 169.0.0.1
allocationStart: 169.0.0.10
allocationStop: 169.0.0.254
macPrefix: "0A:00:00:00:00:00"
gateway: 172.3.3.1
allocationStart: 172.3.3.10
allocationStop: 172.3.3.199
macPrefix: "52:54:00:09:00:00"
nodes:
- name: master
count: 1
@@ -44,49 +45,16 @@ spec:
namespace: "default"
labels:
vmFlavor: master
# libvirtTemplate:
# name: libvirt-template-master
# namespace: vino-system
bootInterfaceName: management
bootInterfaceName: pxe
networkInterfaces:
- name: management
- name: vm-infra
type: bridge
network: management
mtu: 1500
diskDrives:
- name: root
type: qcow2
path: /home/foobar/qemu.img
options:
sizeGb: 30
sparse: true
- name: worker
count: 4
bmhLabels:
airshipit.org/k8s-role: worker
networkDataTemplate:
name: "test-template"
namespace: "default"
labels:
vmFlavor: worker
# libvirtTemplate:
# name: libvirt-template-worker
# namespace: vino-system
bootInterfaceName: management
networkInterfaces:
- name: management
- name: pxe
type: bridge
network: management
network: pxe
mtu: 1500
options:
bridgeName: vminfra-bridge
diskDrives:
- name: root
type: qcow2
path: /home/foobar/qemu.img
options:
sizeGb: 10
sparse: true
bmcCredentials:
username: admin
password: passw0rd

View File

@@ -0,0 +1,79 @@
apiVersion: airship.airshipit.org/v1
kind: Vino
metadata:
name: vino-test-cr
labels: {}
spec:
vmBridge: vm-infra
nodeLabelKeysToCopy:
- "airshipit.org/server"
- "airshipit.org/rack"
nodeSelector:
matchLabels:
beta.kubernetes.io/os: linux
configuration:
cpuExclude: 0-1
networks:
- name: management
subnet: 192.168.2.0/20
type: ipv4
allocationStart: 192.168.2.10
allocationStop: 192.168.2.14 # docs should specify that the range should = number of vms (to permit future expansion over multiple vino crs etc)
routes:
- network: 10.0.0.0
netmask: 255.255.255.0
gateway: $vinobridge # vino will need to populate this from the nodelabel value `airshipit.org/vino.nodebridgegw`
dns_servers: ["135.188.34.124"]
macPrefix: "52:54:00:06:00:00"
- name: pxe
subnet: 172.3.3.0/24
type: ipv4
routes:
- network: 0.0.0.0
netmask: 0.0.0.0
gateway: 172.3.3.1
allocationStart: 172.3.3.10
allocationStop: 172.3.3.199
macPrefix: "52:54:00:09:00:00"
nodes:
- name: master
count: 1
bmhLabels:
airshipit.org/k8s-role: master
networkDataTemplate:
name: "test-template"
namespace: "default"
labels:
vmFlavor: master
bootInterfaceName: pxe
networkInterfaces:
- name: vm-infra
type: bridge
network: management
mtu: 1500
- name: pxe
type: bridge
network: pxe
mtu: 1500
- name: worker
count: 4
bmhLabels:
airshipit.org/k8s-role: worker
networkDataTemplate:
name: "test-template"
namespace: "default"
labels:
vmFlavor: worker
bootInterfaceName: pxe
networkInterfaces:
- name: vm-infra
type: bridge
network: management
mtu: 1500
- name: pxe
type: bridge
network: pxe
mtu: 1500
bmcCredentials:
username: admin
password: passw0rd
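# Applied directly in the bash test flow (see tools/deployment/test-cr.sh):
#   kubectl apply -f config/samples/vino_cr_4_workers_1_cp.yaml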

View File

@@ -37,6 +37,8 @@ const (
VinoDefaultGatewayBridgeLabel = "airshipit.org/vino.nodebridgegw"
// VinoNodeNetworkValuesAnnotation vino controller saves ip and mac address information for the node in it
VinoNodeNetworkValuesAnnotation = "airshipit.org/vino.network-values"
// VinoNetworkDataTemplateDefaultKey is the expected template key in the networkdata template secret for a vino node
VinoNetworkDataTemplateDefaultKey = "template"
)
// Constants for BasicAuth

View File

@@ -1,179 +0,0 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"os"
"github.com/go-logr/logr"
metal3 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
vinov1 "vino/pkg/api/v1"
)
// TODO expand tests when network and credential secret support is implemented
var _ = Describe("Test BMH reconciliation", func() {
Context("when there are 2 k8s pods and worker count is 3", func() {
It("creates 6 BMH hosts", func() {
os.Setenv("RUNTIME_NAMESPACE", "vino-system")
defer os.Unsetenv("RUNTIME_NAMESPACE")
rackLabel := "airshipit.org/rack"
serverLabel := "airshipit.org/server"
vino := testVINO()
providedFlavorLabel := "provided-label"
providedFlavorValue := "provided-value"
vino.Spec.NodeLabelKeysToCopy = []string{rackLabel, serverLabel}
vino.Spec.Nodes = []vinov1.NodeSet{
{
Name: "worker",
BMHLabels: map[string]string{
providedFlavorLabel: providedFlavorValue,
},
Count: 3,
NetworkDataTemplate: vinov1.NamespacedName{
Name: "default-template",
Namespace: "default",
},
},
}
podList := &corev1.PodList{
Items: []corev1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node01-pod",
Namespace: "vino-system",
Labels: map[string]string{
vinov1.VinoLabelDSNameSelector: vino.Name,
vinov1.VinoLabelDSNamespaceSelector: vino.Namespace,
},
},
Spec: corev1.PodSpec{
NodeName: "node01",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "node02-pod",
Namespace: "vino-system",
Labels: map[string]string{
vinov1.VinoLabelDSNameSelector: vino.Name,
vinov1.VinoLabelDSNamespaceSelector: vino.Namespace,
},
},
Spec: corev1.PodSpec{
NodeName: "node02",
},
},
},
}
networkTmplSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "default-template",
Namespace: "default",
},
Type: corev1.SecretTypeOpaque,
Data: map[string][]byte{
TemplateDefaultKey: []byte("REPLACEME"),
},
}
rack1 := "r1"
server1 := "s1"
node1Labels := map[string]string{
rackLabel: rack1,
serverLabel: server1,
vinov1.VinoDefaultGatewayBridgeLabel: "127.0.0.1",
}
node1 := &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node01",
Labels: node1Labels,
Annotations: make(map[string]string),
},
Status: corev1.NodeStatus{
Addresses: []corev1.NodeAddress{
{
Type: corev1.NodeInternalIP,
Address: "10.0.0.2",
},
},
},
}
node2 := &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node02",
Annotations: map[string]string{},
Labels: map[string]string{
vinov1.VinoDefaultGatewayBridgeLabel: "127.0.0.1",
},
},
Status: corev1.NodeStatus{
Addresses: []corev1.NodeAddress{
{
Type: corev1.NodeInternalIP,
Address: "10.0.0.1",
},
},
},
}
fake.NewClientBuilder()
reconciler := &VinoReconciler{
Client: fake.NewFakeClient(podList, node1, node2, vino, networkTmplSecret),
}
l := zap.New(zap.UseDevMode(true))
ctx := logr.NewContext(context.Background(), l)
Expect(reconciler.reconcileBMHs(ctx, vino)).Should(Succeed())
bmhName := "default-vino-node01-worker-1"
bmh := &metal3.BareMetalHost{
ObjectMeta: metav1.ObjectMeta{
Name: bmhName,
Namespace: "vino-system",
},
}
networkSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "default-vino-node01-worker-0-network-data",
Namespace: "vino-system",
},
}
Expect(reconciler.Get(ctx, client.ObjectKeyFromObject(bmh), bmh)).Should(Succeed())
Expect(bmh.Spec.BMC.Address).To(Equal("redfish+http://10.0.0.2:8000/redfish/v1/Systems/worker-1"))
Expect(bmh.Labels).To(HaveKeyWithValue(rackLabel, rack1))
Expect(bmh.Labels).To(HaveKeyWithValue(serverLabel, server1))
Expect(bmh.Labels).To(HaveKeyWithValue(providedFlavorLabel, providedFlavorValue))
Expect(reconciler.Get(ctx, client.ObjectKeyFromObject(networkSecret), networkSecret)).Should(Succeed())
Expect(networkSecret.StringData["networkData"]).To(Equal("REPLACEME"))
})
})
})

View File

@@ -40,6 +40,7 @@ import (
vinov1 "vino/pkg/api/v1"
"vino/pkg/ipam"
"vino/pkg/managers"
)
const (
@@ -198,7 +199,16 @@ func (r *VinoReconciler) ensureDaemonSet(ctx context.Context, vino *vinov1.Vino)
return err
}
if err = r.reconcileBMHs(ctx, vino); err != nil {
bmhManager := &managers.BMHManager{
Namespace: getRuntimeNamespace(),
ViNO: vino,
Client: r.Client,
Ipam: r.Ipam,
Logger: logger,
}
logger.Info("Requesting Virtual Machines from vino-builders")
if err := bmhManager.ScheduleVMs(ctx); err != nil {
return err
}
@@ -206,7 +216,12 @@ func (r *VinoReconciler) ensureDaemonSet(ctx context.Context, vino *vinov1.Vino)
defer cancel()
logger.Info("Waiting for daemonset to become ready")
return r.waitDaemonSet(waitTimeoutCtx, dsReady, ds)
if err := r.waitDaemonSet(waitTimeoutCtx, dsReady, ds); err != nil {
return err
}
logger.Info("Creating BaremetalHosts")
return bmhManager.CreateBMHs(ctx)
}
func (r *VinoReconciler) decorateDaemonSet(ctx context.Context, ds *appsv1.DaemonSet, vino *vinov1.Vino) {
@@ -339,31 +354,31 @@ func (r *VinoReconciler) SetupWithManager(mgr ctrl.Manager) error {
}
func (r *VinoReconciler) finalize(ctx context.Context, vino *vinov1.Vino) error {
bmhManager := &managers.BMHManager{
Namespace: getRuntimeNamespace(),
ViNO: vino,
Client: r.Client,
Ipam: r.Ipam,
Logger: logr.FromContext(ctx),
}
if err := bmhManager.UnScheduleVMs(ctx); err != nil {
return err
}
// TODO aggregate errors instead
if err := r.Delete(ctx,
&appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: r.getDaemonSetName(vino), Namespace: getRuntimeNamespace(),
},
}); err != nil {
}); err != nil && !apierror.IsNotFound(err) {
return err
}
controllerutil.RemoveFinalizer(vino, vinov1.VinoFinalizer)
return r.Update(ctx, vino)
}
func applyRuntimeObject(ctx context.Context, key client.ObjectKey, obj client.Object, c client.Client) error {
getObj := obj
err := c.Get(ctx, key, getObj)
switch {
case apierror.IsNotFound(err):
err = c.Create(ctx, obj)
case err == nil:
err = c.Patch(ctx, obj, client.MergeFrom(getObj))
}
return err
}
func getRuntimeNamespace() string {
return os.Getenv("RUNTIME_NAMESPACE")
}

View File

@@ -8,7 +8,6 @@ import (
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
vinov1 "vino/pkg/api/v1"
)
@@ -20,16 +19,6 @@ func testDS() *appsv1.DaemonSet {
Containers: []corev1.Container{}}}}}
}
func testVINO() *vinov1.Vino {
return &vinov1.Vino{
ObjectMeta: v1.ObjectMeta{
Name: "vino",
Namespace: "default",
},
Spec: vinov1.VinoSpec{
Networks: []vinov1.Network{}}}
}
var _ = Describe("Test Setting Env variables", func() {
Context("when daemonset is created", func() {
l := logr.Discard()

View File

@@ -12,7 +12,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
package managers
import (
"bytes"
@@ -25,10 +25,9 @@ import (
"github.com/go-logr/logr"
metal3 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
corev1 "k8s.io/api/core/v1"
apimeta "k8s.io/apimachinery/pkg/api/meta"
apierror "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
kerror "k8s.io/apimachinery/pkg/util/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
@@ -55,33 +54,108 @@ type generatedValues struct {
BootMACAdress string
}
func (r *VinoReconciler) ensureBMHs(ctx context.Context, vino *vinov1.Vino) error {
labelOpt := client.MatchingLabels{
vinov1.VinoLabelDSNameSelector: vino.Name,
vinov1.VinoLabelDSNamespaceSelector: vino.Namespace,
type BMHManager struct {
Namespace string
client.Client
ViNO *vinov1.Vino
Ipam *ipam.Ipam
Logger logr.Logger
bmhList []*metal3.BareMetalHost
networkSecrets []*corev1.Secret
credentialSecrets []*corev1.Secret
}
func (r *BMHManager) ScheduleVMs(ctx context.Context) error {
return r.requestVMs(ctx)
}
func (r *BMHManager) CreateBMHs(ctx context.Context) error {
for _, secret := range r.networkSecrets {
objKey := client.ObjectKeyFromObject(secret)
r.Logger.Info("Applying network secret", "secret", objKey)
if err := applyRuntimeObject(ctx, objKey, secret, r.Client); err != nil {
return err
}
}
nsOpt := client.InNamespace(getRuntimeNamespace())
for _, secret := range r.credentialSecrets {
objKey := client.ObjectKeyFromObject(secret)
r.Logger.Info("Applying network secret", "secret", objKey)
if err := applyRuntimeObject(ctx, objKey, secret, r.Client); err != nil {
return err
}
}
for _, bmh := range r.bmhList {
objKey := client.ObjectKeyFromObject(bmh)
r.Logger.Info("Applying BaremetalHost", "BMH", objKey)
if err := applyRuntimeObject(ctx, objKey, bmh, r.Client); err != nil {
return err
}
}
return nil
}
func (r *BMHManager) UnScheduleVMs(ctx context.Context) error {
podList, err := r.getPods(ctx)
if err != nil {
return err
}
for _, pod := range podList.Items {
k8sNode, err := r.getNode(ctx, pod)
if err != nil {
return err
}
annotations := k8sNode.GetAnnotations()
if k8sNode.GetAnnotations() == nil {
continue
}
delete(annotations, vinov1.VinoNodeNetworkValuesAnnotation)
k8sNode.SetAnnotations(annotations)
// TODO consider accumulating errors instead
if err = r.Update(ctx, k8sNode); err != nil {
return err
}
}
return nil
}
func (r *BMHManager) getPods(ctx context.Context) (*corev1.PodList, error) {
labelOpt := client.MatchingLabels{
vinov1.VinoLabelDSNameSelector: r.ViNO.Name,
vinov1.VinoLabelDSNamespaceSelector: r.ViNO.Namespace,
}
nsOpt := client.InNamespace(r.Namespace)
podList := &corev1.PodList{}
err := r.List(ctx, podList, labelOpt, nsOpt)
return podList, r.List(ctx, podList, labelOpt, nsOpt)
}
// requestVMs iterates over each vino-builder pod and annotates the pod's k8s node
// with a request for VMs. Each vino-builder pod watches for that annotation.
// After the annotation with the VM request is added to a k8s node, vino manager WaitVMs should be used before creating BMHs
func (r *BMHManager) requestVMs(ctx context.Context) error {
podList, err := r.getPods(ctx)
if err != nil {
return err
}
logger := logr.FromContext(ctx)
logger.Info("Vino daemonset pod count", "count", len(podList.Items))
r.Logger.Info("Vino daemonset pod count", "count", len(podList.Items))
for _, pod := range podList.Items {
logger.Info("Creating baremetal hosts for pod",
r.Logger.Info("Creating baremetal hosts for pod",
"pod name",
types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name},
)
err := r.createIpamNetworks(ctx, vino)
err := r.createIpamNetworks(ctx, r.ViNO)
if err != nil {
return err
}
err = r.createBMHperPod(ctx, vino, pod)
err = r.setBMHs(ctx, pod)
if err != nil {
return err
}
@@ -89,45 +163,7 @@ func (r *VinoReconciler) ensureBMHs(ctx context.Context, vino *vinov1.Vino) erro
return nil
}
func (r *VinoReconciler) reconcileBMHs(ctx context.Context, vino *vinov1.Vino) error {
if err := r.ensureBMHs(ctx, vino); err != nil {
err = fmt.Errorf("could not reconcile BaremetalHosts: %w", err)
apimeta.SetStatusCondition(&vino.Status.Conditions, metav1.Condition{
Status: metav1.ConditionFalse,
Reason: vinov1.ReconciliationFailedReason,
Message: err.Error(),
Type: vinov1.ConditionTypeReady,
ObservedGeneration: vino.GetGeneration(),
})
apimeta.SetStatusCondition(&vino.Status.Conditions, metav1.Condition{
Status: metav1.ConditionFalse,
Reason: vinov1.ReconciliationFailedReason,
Message: err.Error(),
Type: vinov1.ConditionTypeBMHReady,
ObservedGeneration: vino.GetGeneration(),
})
if patchStatusErr := r.patchStatus(ctx, vino); patchStatusErr != nil {
err = kerror.NewAggregate([]error{err, patchStatusErr})
err = fmt.Errorf("unable to patch status after BaremetalHosts reconciliation failed: %w", err)
}
return err
}
apimeta.SetStatusCondition(&vino.Status.Conditions, metav1.Condition{
Status: metav1.ConditionTrue,
Reason: vinov1.ReconciliationSucceededReason,
Message: "BaremetalHosts reconciled",
Type: vinov1.ConditionTypeBMHReady,
ObservedGeneration: vino.GetGeneration(),
})
if err := r.patchStatus(ctx, vino); err != nil {
err = fmt.Errorf("unable to patch status after BaremetalHosts reconciliation succeeded: %w", err)
return err
}
return nil
}
func (r *VinoReconciler) createIpamNetworks(ctx context.Context, vino *vinov1.Vino) error {
logger := logr.FromContext(ctx)
func (r *BMHManager) createIpamNetworks(ctx context.Context, vino *vinov1.Vino) error {
for _, network := range vino.Spec.Networks {
subnetRange, err := ipam.NewRange(network.AllocationStart, network.AllocationStop)
if err != nil {
@@ -135,7 +171,7 @@ func (r *VinoReconciler) createIpamNetworks(ctx context.Context, vino *vinov1.Vi
}
macPrefix := network.MACPrefix
if macPrefix == "" {
logger.Info("No MACPrefix provided; using default MACPrefix for network",
r.Logger.Info("No MACPrefix provided; using default MACPrefix for network",
"default prefix", DefaultMACPrefix, "network name", network.Name)
macPrefix = DefaultMACPrefix
}
@@ -147,9 +183,7 @@ func (r *VinoReconciler) createIpamNetworks(ctx context.Context, vino *vinov1.Vi
return nil
}
func (r *VinoReconciler) createBMHperPod(ctx context.Context, vino *vinov1.Vino, pod corev1.Pod) error {
logger := logr.FromContext(ctx)
func (r *BMHManager) setBMHs(ctx context.Context, pod corev1.Pod) error {
nodeNetworkValues := map[string]generatedValues{}
k8sNode, err := r.getNode(ctx, pod)
@@ -157,23 +191,18 @@ func (r *VinoReconciler) createBMHperPod(ctx context.Context, vino *vinov1.Vino,
return err
}
nodeNetworks, err := r.nodeNetworks(ctx, vino.Spec.Networks, k8sNode)
nodeNetworks, err := r.nodeNetworks(ctx, r.ViNO.Spec.Networks, k8sNode)
if err != nil {
return err
}
for _, node := range vino.Spec.Nodes {
logger.Info("Creating BMHs for vino node", "node name", node.Name, "count", node.Count)
prefix := r.getBMHNodePrefix(vino, pod)
for _, node := range r.ViNO.Spec.Nodes {
r.Logger.Info("Saving BMHs for vino node", "node name", node.Name, "count", node.Count)
prefix := r.getBMHNodePrefix(pod)
for i := 0; i < node.Count; i++ {
roleSuffix := fmt.Sprintf("%s-%d", node.Name, i)
bmhName := fmt.Sprintf("%s-%s", prefix, roleSuffix)
creds, nodeErr := r.reconcileBMHCredentials(ctx, vino)
if nodeErr != nil {
return nodeErr
}
domainNetValues, nodeErr := r.domainSpecificNetValues(ctx, bmhName, node, nodeNetworks)
if nodeErr != nil {
return nodeErr
@@ -181,12 +210,12 @@ func (r *VinoReconciler) createBMHperPod(ctx context.Context, vino *vinov1.Vino,
// save domain specific generated values to a map
nodeNetworkValues[roleSuffix] = domainNetValues.Generated
netData, netDataNs, nodeErr := r.reconcileBMHNetworkData(ctx, node, vino, domainNetValues)
netData, netDataNs, nodeErr := r.setBMHNetworkSecret(ctx, node, domainNetValues)
if nodeErr != nil {
return nodeErr
}
bmcAddr, labels, nodeErr := r.getBMCAddressAndLabels(ctx, k8sNode, vino.Spec.NodeLabelKeysToCopy, roleSuffix)
bmcAddr, labels, nodeErr := r.getBMCAddressAndLabels(k8sNode, roleSuffix)
if nodeErr != nil {
return nodeErr
}
@@ -195,10 +224,11 @@ func (r *VinoReconciler) createBMHperPod(ctx context.Context, vino *vinov1.Vino,
labels[label] = value
}
credentialSecretName := r.setBMHCredentials(bmhName)
bmh := &metal3.BareMetalHost{
ObjectMeta: metav1.ObjectMeta{
Name: bmhName,
Namespace: getRuntimeNamespace(),
Namespace: r.Namespace,
Labels: labels,
},
Spec: metal3.BareMetalHostSpec{
@@ -208,40 +238,31 @@ func (r *VinoReconciler) createBMHperPod(ctx context.Context, vino *vinov1.Vino,
},
BMC: metal3.BMCDetails{
Address: bmcAddr,
CredentialsName: creds,
CredentialsName: credentialSecretName,
DisableCertificateVerification: true,
},
BootMACAddress: domainNetValues.Generated.BootMACAdress,
},
}
objKey := client.ObjectKeyFromObject(bmh)
logger.Info("Creating BMH", "name", objKey)
nodeErr = applyRuntimeObject(ctx, objKey, bmh, r.Client)
if nodeErr != nil {
return nodeErr
}
r.bmhList = append(r.bmhList, bmh)
}
}
logger.Info("annotating node", "node", k8sNode.Name)
if err = r.annotateNode(ctx, k8sNode, nodeNetworkValues, vino); err != nil {
return err
}
return nil
r.Logger.Info("annotating node", "node", k8sNode.Name)
return r.annotateNode(ctx, k8sNode, nodeNetworkValues)
}
// nodeNetworks returns a copy of node network with a unique per node values
func (r *VinoReconciler) nodeNetworks(ctx context.Context,
func (r *BMHManager) nodeNetworks(ctx context.Context,
globalNetworks []vinov1.Network,
k8sNode *corev1.Node) ([]vinov1.Network, error) {
bridgeIP, err := r.getBridgeIP(ctx, k8sNode)
if err != nil {
return []vinov1.Network{}, err
}
for netIndex, network := range globalNetworks {
for routeIndex, route := range network.Routes {
if route.Gateway == "$vinobridge" {
bridgeIP, err := r.getBridgeIP(ctx, k8sNode)
if err != nil {
return []vinov1.Network{}, err
}
globalNetworks[netIndex].Routes[routeIndex].Gateway = bridgeIP
}
}
@@ -249,7 +270,7 @@ func (r *VinoReconciler) nodeNetworks(ctx context.Context,
return globalNetworks, nil
}
func (r *VinoReconciler) domainSpecificNetValues(
func (r *BMHManager) domainSpecificNetValues(
ctx context.Context,
bmhName string,
node vinov1.NodeSet,
@@ -287,7 +308,7 @@ func (r *VinoReconciler) domainSpecificNetValues(
if iface.Name == node.BootInterfaceName {
bootMAC = macAddress
}
logr.FromContext(ctx).Info("Got MAC and IP for the network and node",
r.Logger.Info("Got MAC and IP for the network and node",
"MAC", macAddress, "IP", ipAddress, "bmh name", bmhName, "bootMAC", bootMAC)
}
return networkTemplateValues{
@@ -302,16 +323,15 @@ func (r *VinoReconciler) domainSpecificNetValues(
}, nil
}
func (r *VinoReconciler) annotateNode(ctx context.Context,
func (r *BMHManager) annotateNode(ctx context.Context,
k8sNode *corev1.Node,
domainInterfaceValues map[string]generatedValues,
vino *vinov1.Vino) error {
logr.FromContext(ctx).Info("Getting GW bridge IP from node", "node", k8sNode.Name)
domainInterfaceValues map[string]generatedValues) error {
r.Logger.Info("Getting GW bridge IP from node", "node", k8sNode.Name)
builderValues := vinov1.Builder{
Domains: make(map[string]vinov1.BuilderDomain),
Networks: vino.Spec.Networks,
Nodes: vino.Spec.Nodes,
CPUConfiguration: vino.Spec.CPUConfiguration,
Networks: r.ViNO.Spec.Networks,
Nodes: r.ViNO.Spec.Nodes,
CPUConfiguration: r.ViNO.Spec.CPUConfiguration,
}
for domainName, domain := range domainInterfaceValues {
builderDomain := vinov1.BuilderDomain{
@@ -341,7 +361,7 @@ func (r *VinoReconciler) annotateNode(ctx context.Context,
return r.Update(ctx, k8sNode)
}
func (r *VinoReconciler) getBridgeIP(ctx context.Context, k8sNode *corev1.Node) (string, error) {
func (r *BMHManager) getBridgeIP(ctx context.Context, k8sNode *corev1.Node) (string, error) {
ctxTimeout, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
@@ -368,7 +388,7 @@ func (r *VinoReconciler) getBridgeIP(ctx context.Context, k8sNode *corev1.Node)
}
}
func (r *VinoReconciler) getNode(ctx context.Context, pod corev1.Pod) (*corev1.Node, error) {
func (r *BMHManager) getNode(ctx context.Context, pod corev1.Pod) (*corev1.Node, error) {
node := &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Spec.NodeName,
@@ -378,21 +398,17 @@ func (r *VinoReconciler) getNode(ctx context.Context, pod corev1.Pod) (*corev1.N
return node, err
}
func (r *VinoReconciler) getBMHNodePrefix(vino *vinov1.Vino, pod corev1.Pod) string {
func (r *BMHManager) getBMHNodePrefix(pod corev1.Pod) string {
// TODO we need to do something about name length limitations
return fmt.Sprintf("%s-%s-%s", vino.Namespace, vino.Name, pod.Spec.NodeName)
return fmt.Sprintf("%s-%s-%s", r.ViNO.Namespace, r.ViNO.Name, pod.Spec.NodeName)
}
func (r *VinoReconciler) getBMCAddressAndLabels(
ctx context.Context,
func (r *BMHManager) getBMCAddressAndLabels(
node *corev1.Node,
labelKeys []string,
vmName string) (string, map[string]string, error) {
logger := logr.FromContext(ctx).WithValues("k8s node", node.Name)
logger := r.Logger.WithValues("k8s node", node.Name)
labels := map[string]string{}
for _, key := range labelKeys {
for _, key := range r.ViNO.Spec.NodeLabelKeysToCopy {
value, ok := node.Labels[key]
if !ok {
logger.Info("Kubernetes node missing label from vino CR CopyNodeLabelKeys field", "label", key)
@@ -408,35 +424,27 @@ func (r *VinoReconciler) getBMCAddressAndLabels(
return "", labels, fmt.Errorf("Node %s doesn't have internal ip address defined", node.Name)
}
// reconcileBMHCredentials returns secret name with credentials and error
func (r *VinoReconciler) reconcileBMHCredentials(ctx context.Context, vino *vinov1.Vino) (string, error) {
ns := getRuntimeNamespace()
// corresponds to DS name, since we have only one DS per vino CR
credentialSecretName := fmt.Sprintf("%s-%s", r.getDaemonSetName(vino), "credentials")
netSecret := &corev1.Secret{
// setBMHCredentials queues a credentials secret for the BMH and returns the secret name
func (r *BMHManager) setBMHCredentials(bmhName string) string {
credName := fmt.Sprintf("%s-%s", bmhName, "credentials")
bmhCredentialSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: credentialSecretName,
Namespace: ns,
Name: credName,
Namespace: r.Namespace,
},
StringData: map[string]string{
"username": vino.Spec.BMCCredentials.Username,
"password": vino.Spec.BMCCredentials.Password,
"username": r.ViNO.Spec.BMCCredentials.Username,
"password": r.ViNO.Spec.BMCCredentials.Password,
},
Type: corev1.SecretTypeOpaque,
}
objKey := client.ObjectKeyFromObject(netSecret)
if err := applyRuntimeObject(ctx, objKey, netSecret, r.Client); err != nil {
return "", err
}
return credentialSecretName, nil
r.credentialSecrets = append(r.credentialSecrets, bmhCredentialSecret)
return credName
}
func (r *VinoReconciler) reconcileBMHNetworkData(
func (r *BMHManager) setBMHNetworkSecret(
ctx context.Context,
node vinov1.NodeSet,
vino *vinov1.Vino,
values networkTemplateValues) (string, string, error) {
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
@@ -445,7 +453,7 @@ func (r *VinoReconciler) reconcileBMHNetworkData(
},
}
logger := logr.FromContext(ctx).WithValues("vino node", node.Name, "vino", client.ObjectKeyFromObject(vino))
logger := r.Logger.WithValues("vino node", node.Name, "vino", client.ObjectKeyFromObject(r.ViNO))
objKey := client.ObjectKeyFromObject(secret)
logger.Info("Looking for secret with network template for vino node", "secret", objKey)
@@ -453,9 +461,11 @@ func (r *VinoReconciler) reconcileBMHNetworkData(
return "", "", err
}
rawTmpl, ok := secret.Data[TemplateDefaultKey]
rawTmpl, ok := secret.Data[vinov1.VinoNetworkDataTemplateDefaultKey]
if !ok {
return "", "", fmt.Errorf("network template secret %v has no key '%s'", objKey, TemplateDefaultKey)
return "", "", fmt.Errorf("network template secret %v has no key '%s'",
objKey,
vinov1.VinoNetworkDataTemplateDefaultKey)
}
tpl, err := template.New("net-template").Funcs(sprig.TxtFuncMap()).Parse(string(rawTmpl))
@@ -463,8 +473,6 @@ func (r *VinoReconciler) reconcileBMHNetworkData(
return "", "", err
}
logger.Info("Genereated MAC Addresses values are", "GENERATED VALUES", values.Generated.MACAddresses)
buf := bytes.NewBuffer([]byte{})
err = tpl.Execute(buf, values)
if err != nil {
@@ -472,24 +480,27 @@ func (r *VinoReconciler) reconcileBMHNetworkData(
}
name := fmt.Sprintf("%s-network-data", values.BMHName)
ns := getRuntimeNamespace()
netSecret := &corev1.Secret{
r.networkSecrets = append(r.networkSecrets, &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: ns,
Namespace: r.Namespace,
},
StringData: map[string]string{
"networkData": buf.String(),
},
Type: corev1.SecretTypeOpaque,
}
objKey = client.ObjectKeyFromObject(netSecret)
logger.Info("Creating network secret for vino node", "secret", objKey)
if err := applyRuntimeObject(ctx, objKey, netSecret, r.Client); err != nil {
return "", "", err
}
return name, ns, nil
})
return name, r.Namespace, nil
}
func applyRuntimeObject(ctx context.Context, key client.ObjectKey, obj client.Object, c client.Client) error {
getObj := obj
err := c.Get(ctx, key, getObj)
switch {
case apierror.IsNotFound(err):
err = c.Create(ctx, obj)
case err == nil:
err = c.Patch(ctx, obj, client.MergeFrom(getObj))
}
return err
}

View File

@@ -0,0 +1,29 @@
- hosts: primary
tasks:
- name: Run ensure-docker
include_role:
name: ensure-docker
- name: Install Dependent Packages
apt:
pkg:
- debconf
- make
- wget
- snapd
become: yes
- name: Set up requirements for kubernetes
include_role:
name: clear-firewall
- name: Install kubernetes, deploy vino, run test plan
shell: |
set -xe;
./tools/deployment/configure-bridges.sh
./tools/deployment/install-k8s.sh
./tools/deployment/install-airship.sh
./tools/deployment/configure-airship.sh
make docker-build-controller
./tools/deployment/run-test-plan.sh
args:
chdir: "{{ zuul.project.src_dir }}"
environment:
VINO_REPO_URL: "."

View File

@@ -14,9 +14,10 @@
- name: Set up requirements for kubernetes
include_role:
name: clear-firewall
- name: Install kubernetes and Deploy Vino
- name: Install kubernetes, deploy vino, run bash integration tests
shell: |
set -xe;
./tools/deployment/configure-bridges.sh
./tools/deployment/install-k8s.sh
./tools/deployment/deploy-vino.sh
./tools/deployment/test-cr.sh

View File

@@ -0,0 +1,31 @@
#!/bin/bash
set -xe
: ${MANIFEST_DIR:="${HOME}/vino-manifests"}
: ${VINO_REPO_URL:="${HOME}/airship/vino"}
mkdir -p "${MANIFEST_DIR}"
# Workaround for testing against local changes with vino
if [ -d "${VINO_REPO_URL}" ]; then
VINO_REPO_URL=$(realpath "${VINO_REPO_URL}")
cp -r "${VINO_REPO_URL}" "${MANIFEST_DIR}/"
fi
if [ ! -f "${HOME}/.airship/config" ]; then
airshipctl config init
fi
airshipctl config set-manifest default \
--target-path "${MANIFEST_DIR}" \
--repo primary \
--metadata-path config/phases/metadata.yaml \
--url ${VINO_REPO_URL}
airshipctl config set-manifest default \
--repo airshipctl \
--url https://opendev.org/airship/airshipctl.git \
--branch master
airshipctl document pull -n

View File

@@ -0,0 +1,37 @@
#!/bin/bash
set -xe
function create_bridge () {
if ! sudo brctl show | grep -q "${1}"; then
sudo brctl addbr "${1}"
sudo ip link set "${1}" up
sudo ip addr add ${2} dev "${1}"
fi;
}
VM_INFRA_BRIDGE=${VM_INFRA_BRIDGE:-"vm-infra"}
VM_INFRA_BRIDGE_IP=${VM_INFRA_BRIDGE_IP:-"192.168.2.1/24"}
VM_PXE_BRIDGE=${VM_PXE_BRIDGE:-"pxe"}
VM_PXE_BRIDGE_IP=${VM_PXE_BRIDGE_IP:-"172.3.3.1/24"}
PXE_NET="172.3.3.0/24"
export DEBCONF_NONINTERACTIVE_SEEN=true
export DEBIAN_FRONTEND=noninteractive
sudo -E apt-get update
sudo -E apt-get install -y bridge-utils
echo 1 | sudo tee /proc/sys/net/ipv4/ip_forward
create_bridge ${VM_INFRA_BRIDGE} ${VM_INFRA_BRIDGE_IP}
create_bridge ${VM_PXE_BRIDGE} ${VM_PXE_BRIDGE_IP}
sudo iptables -A FORWARD -d ${PXE_NET} -o ${VM_PXE_BRIDGE} -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
sudo iptables -t nat -A POSTROUTING -s ${PXE_NET} -d 224.0.0.0/24 -j RETURN
sudo iptables -t nat -A POSTROUTING -s ${PXE_NET} -d 255.255.255.255/32 -j RETURN
sudo iptables -t nat -A POSTROUTING -s ${PXE_NET} ! -d ${PXE_NET} -p tcp -j MASQUERADE --to-ports 1024-65535
sudo iptables -t nat -A POSTROUTING -s ${PXE_NET} ! -d ${PXE_NET} -p udp -j MASQUERADE --to-ports 1024-65535
sudo iptables -t nat -A POSTROUTING -s ${PXE_NET} ! -d ${PXE_NET} -j MASQUERADE
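# Hedged post-checks that the bridges and NAT rules landed:
#   brctl show
#   ip addr show pxe
#   sudo iptables -t nat -L POSTROUTING -n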

View File

@@ -0,0 +1,8 @@
#!/bin/bash
set -xe
export AIRSHIPCTL_VERSION=${AIRSHIPCTL_VERSION:-"2.0.0"}
airship_release_url="https://github.com/airshipit/airshipctl/releases/download/v${AIRSHIPCTL_VERSION}/airshipctl_${AIRSHIPCTL_VERSION}_linux_amd64.tar.gz"
wget -q -c "${airship_release_url}" -O - | sudo tar -xz -C /usr/local/bin/
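# Quick hedged sanity check that the pinned release is on PATH:
#   airshipctl version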

View File

@@ -0,0 +1,6 @@
#!/bin/bash
set -xe
airshipctl plan run test-plan

View File

@@ -20,8 +20,7 @@ worker_copy_label="airshipit.org/k8s-role=worker"
# Label all nodes with the same rack/label. We are ok with this for this simple test.
kubectl label node --overwrite=true --all $server_label $rack_label
kubectl apply -f config/samples/vino_cr.yaml
kubectl apply -f config/samples/vino_cr_4_workers_1_cp.yaml
kubectl apply -f config/samples/ippool.yaml
kubectl apply -f config/samples/network-template-secret.yaml
@@ -54,19 +53,14 @@ if ! kubectl -n vino-system rollout status ds default-vino-test-cr --timeout=10s
fi
masterCount=$(kubectl get baremetalhosts -n vino-system -l "$server_label,$rack_label,$master_copy_label" -o name | wc -l)
# with this setup, exactly 1 master must have been created by VINO controller
[[ "$masterCount" -eq "1" ]]
workerCount=$(kubectl get baremetalhosts -n vino-system -l "$server_label,$rack_label,$worker_copy_label" -o name | wc -l)
# with this setup, exactly 4 workers must have been created by VINO controller
[[ "$workerCount" -eq "4" ]]
kubectl get baremetalhosts -n vino-system --show-labels=true
kubectl get -o yaml -n vino-system \
$(kubectl get secret -o name -n vino-system | grep network-data)
kubectl get secret -o yaml -n vino-system default-vino-test-cr-credentials
$(kubectl get secret -o name -n vino-system | grep network-data)

View File

@@ -22,9 +22,17 @@
pass-to-parent: true
- job:
name: airship-deploy-vino
name: airship-deploy-vino-bash
nodeset: ubuntu-focal-nested
run: playbooks/integration-test.yaml
run: playbooks/integration-test-bash.yaml
post-run: playbooks/vino-collect-logs.yaml
description: Deploys kubernetes and vino
timeout: 9600
- job:
name: airship-deploy-vino-airshipctl
nodeset: ubuntu-focal-nested
run: playbooks/integration-test-airshipctl.yaml
post-run: playbooks/vino-collect-logs.yaml
description: Deploys kubernetes and vino
timeout: 9600

View File

@@ -20,18 +20,21 @@
image_repo: quay.io
check:
jobs:
- airship-deploy-vino
- airship-deploy-vino-bash
- airship-deploy-vino-airshipctl
- airship-vino-test-suite
- airship-vino-build-images
- airship-vino-check-github-issues
gate:
jobs:
- airship-deploy-vino
- airship-deploy-vino-bash
- airship-deploy-vino-airshipctl
- airship-vino-test-suite
- airship-vino-build-images
post:
jobs:
- airship-vino-upload-git-mirror
- airship-deploy-vino
- airship-deploy-vino-airshipctl
- airship-deploy-vino-bash
- airship-vino-test-suite
- airship-vino-publish-images