Files
vino/tools/deployment/test-cr.sh
Matt McEuen 7dddf0f7d1 Build BMH network config
This constructs a VM's BMH network config secret based on a template.
It also integrates IPAM functionality into the controller.

TODOs for subsequent patchsets:
- manage VM MAC addresses.
- implement replacement of e.g. $vino.nodebridgegw
- confirm the nameservers definition below works
  (it's a different field than we use in hostgenerator-m3)

The current patchset generates a networkData like so from the sample CRs:

links:
  - id: management
    name: management
    type: bridge
    mtu: 1500
    #  ethernet_mac_address: ??
    bridgeName: vminfra-bridge
  - id: external
    name: external
    type: sriov-bond
    mtu: 9100
    #  ethernet_mac_address: ??
    bond_miimon: 100
    bond_mode: 802.3ad
    bond_xmit_hash_policy: layer3+4
    pf: [enp29s0f0,enp219s1f1]
    vlan: 100
networks:
  - id: management
    type: ipv4
    link: management
    ip_address: 192.168.2.10
    #netmask: "TODO - see if needed when ip has CIDR range"
    dns_nameservers: [135.188.34.124]
    routes:
      - network: 10.0.0.0
        netmask: 255.255.255.0
        gateway: $vino.nodebridgegw
  - id: external
    type: ipv4
    link: external
    ip_address: 169.0.0.10
    #netmask: "TODO - see if needed when ip has CIDR range"
    dns_nameservers: []
    routes:
      - network: 0.0.0.0
        netmask: 0.0.0.0
        gateway: 169.0.0.1

Change-Id: I99b1a104764687c8b84f2495591e0712bed73ae5
2021-03-09 10:40:31 -06:00

56 lines
1.7 KiB
Bash
Executable File

#!/bin/bash
set -xe
# TODO (kkalynovskyi) remove this function when zuul is able to gather debug info by itself
# Dump cluster state (pods, daemonsets) and the vino controller-manager logs
# to aid post-mortem debugging, then abort the whole script.
# Always exits with status 1.
function vinoDebugInfo () {
    kubectl get po -A
    kubectl get ds -A
    local pod_name
    pod_name="$(kubectl get pod -n vino-system -l control-plane=controller-manager -o name)"
    # Quote the pod name so an empty or unexpected multi-word result fails
    # loudly instead of silently expanding to fewer arguments.
    kubectl logs -c manager "${pod_name}" -n vino-system
    exit 1
}
kubectl apply -f config/samples/vino_cr.yaml
kubectl apply -f config/samples/ippool.yaml
kubectl apply -f config/samples/network-template-secret.yaml
# Remove logs collection from here, when we will have zuul collect logs job
# Wait (30 x 2s = ~60s) for the vino CR to appear before checking readiness.
count=0
until [[ $(kubectl get vino vino-test-cr 2>/dev/null) ]]; do
    count=$((count + 1))
    if [[ ${count} -eq "30" ]]; then
        echo ' Timed out waiting for vino test cr to exist'
        vinoDebugInfo
        # vinoDebugInfo exits 1 itself; this is a safeguard. A top-level
        # `return` would be a bash error in an executed (non-sourced) script.
        exit 1
    fi
    sleep 2
done
if ! kubectl wait --for=condition=Ready vino vino-test-cr --timeout=180s; then
    vinoDebugInfo
fi
# no need to collect logs on fail, since they are already collected before
# Reset the counter so this loop gets its full timeout, independent of how
# many iterations the previous wait loop consumed.
count=0
until [[ $(kubectl -n vino-system get ds default-vino-test-cr 2>/dev/null) ]]; do
    count=$((count + 1))
    if [[ ${count} -eq "30" ]]; then
        echo ' Timed out waiting for vino builder daemonset to exist'
        vinoDebugInfo
        exit 1
    fi
    sleep 2
done
if ! kubectl -n vino-system rollout status ds default-vino-test-cr --timeout=10s; then
    vinoDebugInfo
fi
bmhCount=$(kubectl get baremetalhosts -n vino-system -o name | wc -l)
# With this setup, exactly 3 BMHs must have been created by the VINO
# controller; the bare test aborts the script via `set -e` on mismatch.
[[ "$bmhCount" -eq "3" ]]
# The command substitution is intentionally unquoted: it may expand to
# several secret names, each of which must be a separate argument.
# shellcheck disable=SC2046
kubectl get -o yaml -n vino-system \
    $(kubectl get secret -o name -n vino-system | grep network-data)
kubectl get secret -o yaml -n vino-system default-vino-test-cr-credentials