Integrate vino-builder

This patchset integrates vino-builder into the manager daemonset.
See corresponding vino-builder changes in [0].

[0] https://review.opendev.org/c/airship/images/+/778213
Change-Id: Ic14b791332ca6688f5e4a856913ca3693f33e8d0

Change-Id: I7ccd6dea9666efcd7e0ddccc7404c9f481fcd8df
This commit is contained in:
Crank, Daniel (dc6350) 2021-02-23 16:51:27 -06:00
parent 9e3c0e1a70
commit 3b685480a4
13 changed files with 458 additions and 48 deletions

View File

@ -150,23 +150,25 @@ spec:
count:
type: integer
diskDrives:
description: DiskDrivesTemplate defines disks on the VM
properties:
name:
type: string
options:
description: DiskOptions disk options
properties:
sizeGb:
type: integer
sparse:
type: boolean
type: object
path:
type: string
type:
type: string
type: object
items:
description: DiskDrivesTemplate defines disks on the VM
properties:
name:
type: string
options:
description: DiskOptions disk options
properties:
sizeGb:
type: integer
sparse:
type: boolean
type: object
path:
type: string
type:
type: string
type: object
type: array
libvirtTemplate:
description: NamespacedName to be used to spawn VMs
properties:
@ -176,7 +178,7 @@ spec:
type: string
type: object
name:
description: Parameter for Node master or worker-standard
description: Parameter for Node master or worker
type: string
networkDataTemplate:
description: NetworkDataTemplate must have a template key

View File

@ -19,7 +19,7 @@ spec:
- name: libvirt
command:
- /tmp/libvirt.sh
image: quay.io/airshipit/libvirt
image: quay.io/airshipit/libvirt:latest-ubuntu_bionic
securityContext:
privileged: true
runAsUser: 0
@ -59,6 +59,48 @@ spec:
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: vino-builder
command:
- bash
- -cex
- |
/entrypoint.sh || true
tail -f /dev/null
securityContext:
privileged: true
runAsUser: 0
readOnlyRootFilesystem: false
ports:
- containerPort: 8001
hostPort: 8001
image: quay.io/airshipit/vino-builder:latest-ubuntu_bionic
imagePullPolicy: IfNotPresent
volumeMounts:
- name: flavors
mountPath: /var/lib/vino-builder/flavors
- name: flavor-templates
mountPath: /var/lib/vino-builder/flavor-templates
- name: network-templates
mountPath: /var/lib/vino-builder/network-templates
- name: storage-templates
mountPath: /var/lib/vino-builder/storage-templates
- name: pod-tmp
mountPath: /tmp
- mountPath: /lib/modules
name: libmodules
readOnly: true
- name: var-lib-libvirt
mountPath: /var/lib/libvirt
- name: var-run-libvirt
mountPath: /var/run/libvirt
- name: run
mountPath: /run
- name: dev
mountPath: /dev
- name: cgroup
mountPath: /sys/fs/cgroup
- name: logs
mountPath: /var/log/libvirt
volumes:
- name: libmodules
hostPath:
@ -84,3 +126,21 @@ spec:
- name: var-run-libvirt
hostPath:
path: /var/run/libvirt
- name: flavors
configMap:
name: vino-flavors
defaultMode: 0555
- name: flavor-templates
configMap:
name: vino-flavor-templates
defaultMode: 0555
- name: network-templates
configMap:
name: vino-network-templates
defaultMode: 0555
- name: storage-templates
configMap:
name: vino-storage-templates
defaultMode: 0555
- name: pod-tmp
emptyDir: {}

View File

@ -0,0 +1,229 @@
flavorTemplates:
master:
domainTemplate: |
{% set nodename = 'master-' + item|string %}
<domain type="kvm">
<name>{{ nodename }}</name>
<uuid>{{ nodename | hash('md5') }}</uuid>
<metadata>
<vino:flavor>master</vino:flavor>
<vino:creationTime>{{ ansible_date_time.date }}</vino:creationTime>
</metadata>
<memory unit="GiB">{{ flavors.master.memory }}</memory>
{% if flavors.master.hugepages is defined and flavors.master.hugepages == true %}
<memoryBacking>
<hugepages>
<page size='1' unit='GiB' />
</hugepages>
</memoryBacking>
{% endif %}
<vcpu placement="static">{{ flavors.master.vcpus }}</vcpu>
{% if node_core_map[nodename] is defined %}
# function to produce list of cpus, in same numa (controlled by bool), state will need to be tracked via file on hypervisor host. gotpl pseudo:
<cputune>
<shares>8192</shares>
{% for core in node_core_map[nodename] %}
<vcpupin vcpu="{{ loop.index0 }}" cpuset="{{ core }}"/>
{% endfor %}
<emulatorpin cpuset="{{ node_core_map[nodename]|join(',') }}"/>
</cputune>
{% endif %}
<resource>
<partition>/machine</partition>
</resource>
<os>
<type arch="x86_64" machine="pc">hvm</type>
<boot dev="hd"/>
</os>
<features>
<acpi/>
<apic/>
</features>
<cpu mode="host-passthrough" />
<clock offset="utc">
<timer name="pit" tickpolicy="delay"/>
<timer name="rtc" tickpolicy="catchup"/>
<timer name="hpet" present="no"/>
</clock>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>destroy</on_crash>
<devices>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
# for each disk requested
<disk type='volume' device='disk'>
<driver name="qemu" type="qcow2" cache="none" discard="unmap"/>
<source pool='vino-default' volume='{{ nodename }}'/>
<target dev='vde' bus='virtio'/>
</disk>
<controller type="usb" index="0" model="piix3-uhci">
<alias name="usb"/>
<address type="pci" domain="0x0000" bus="0x00" slot="0x01" function="0x2"/>
</controller>
<controller type="pci" index="0" model="pci-root">
<alias name="pci.0"/>
</controller>
<controller type="ide" index="0">
<alias name="ide"/>
<address type="pci" domain="0x0000" bus="0x00" slot="0x01" function="0x1"/>
</controller>
# for each interface defined in vino, e.g.
<interface type='bridge'>
<mac address='52:54:00:83:e9:f9'/>
<source bridge='management'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
<serial type="pty">
<source path="/dev/pts/3"/>
<log file="/var/lib/vino/instances/{{ nodename }}.console.log" append="off"/>
<target type="isa-serial" port="0">
<model name="isa-serial"/>
</target>
<alias name="serial0"/>
</serial>
<console type="pty" tty="/dev/pts/3">
<source path="/dev/pts/3"/>
<log file="/var/lib/vino/instances/{{ nodename }}.console.log" append="off"/>
<target type="serial" port="0"/>
<alias name="serial0"/>
</console>
<memballoon model="virtio">
<stats period="10"/>
<alias name="balloon0"/>
<address type="pci" domain="0x0000" bus="0x00" slot="0x06" function="0x0"/>
</memballoon>
</devices>
<seclabel type="dynamic" model="dac" relabel="yes">
<label>+42424:+104</label>
<imagelabel>+42424:+104</imagelabel>
</seclabel>
</domain>
volumeTemplate: |
{% set nodename = 'master-' + item|string %}
<volume>
<name>{{ nodename }}</name>
<allocation>0</allocation>
<capacity unit='G'>{{ flavors.master.rootSize }}</capacity>
<target>
<format type='qcow2'/>
</target>
</volume>
worker:
domainTemplate: |
{% set nodename = 'worker-' + item|string %}
<domain type="kvm">
<name>{{ nodename }}</name>
<uuid>{{ nodename | hash('md5') }}</uuid>
<metadata>
<vino:flavor>worker</vino:flavor>
<vino:creationTime>{{ ansible_date_time.date }}</vino:creationTime>
</metadata>
<memory unit="GiB">{{ flavors.worker.memory }}</memory>
{% if flavors.worker.hugepages is defined and flavors.worker.hugepages == true %}
<memoryBacking>
<hugepages>
<page size='1' unit='GiB' />
</hugepages>
</memoryBacking>
{% endif %}
<vcpu placement="static">{{ flavors.worker.vcpus }}</vcpu>
{% if node_core_map[nodename] is defined %}
# function to produce list of cpus, in same numa (controlled by bool), state will need to be tracked via file on hypervisor host. gotpl pseudo:
<cputune>
<shares>8192</shares>
{% for core in node_core_map[nodename] %}
<vcpupin vcpu="{{ loop.index0 }}" cpuset="{{ core }}"/>
{% endfor %}
<emulatorpin cpuset="{{ node_core_map[nodename]|join(',') }}"/>
</cputune>
{% endif %}
<resource>
<partition>/machine</partition>
</resource>
<os>
<type arch="x86_64" machine="pc-i440fx-xenial">hvm</type>
<boot dev="hd"/>
</os>
<features>
<acpi/>
<apic/>
</features>
<cpu mode="host-passthrough" />
<clock offset="utc">
<timer name="pit" tickpolicy="delay"/>
<timer name="rtc" tickpolicy="catchup"/>
<timer name="hpet" present="no"/>
</clock>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>destroy</on_crash>
<devices>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
# for each disk requested
<disk type='volume' device='disk'>
<driver name="qemu" type="qcow2" cache="none" discard="unmap"/>
<source pool='vino-default' volume='{{ nodename }}'/>
<target dev='vde' bus='virtio'/>
</disk>
<controller type="usb" index="0" model="piix3-uhci">
<alias name="usb"/>
<address type="pci" domain="0x0000" bus="0x00" slot="0x01" function="0x2"/>
</controller>
<controller type="pci" index="0" model="pci-root">
<alias name="pci.0"/>
</controller>
<controller type="ide" index="0">
<alias name="ide"/>
<address type="pci" domain="0x0000" bus="0x00" slot="0x01" function="0x1"/>
</controller>
# for each interface defined in vino, e.g.
<interface type='bridge'>
<mac address='52:54:00:83:e9:f9'/>
<source bridge='management'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
<serial type="pty">
<source path="/dev/pts/3"/>
<log file="/var/lib/vino/instances/{{ nodename }}.console.log" append="off"/>
<target type="isa-serial" port="0">
<model name="isa-serial"/>
</target>
<alias name="serial0"/>
</serial>
<console type="pty" tty="/dev/pts/3">
<source path="/dev/pts/3"/>
<log file="/var/lib/vino/instances/{{ nodename }}.console.log" append="off"/>
<target type="serial" port="0"/>
<alias name="serial0"/>
</console>
<memballoon model="virtio">
<stats period="10"/>
<alias name="balloon0"/>
<address type="pci" domain="0x0000" bus="0x00" slot="0x06" function="0x0"/>
</memballoon>
</devices>
<seclabel type="dynamic" model="dac" relabel="yes">
<label>+42424:+104</label>
<imagelabel>+42424:+104</imagelabel>
</seclabel>
</domain>
volumeTemplate: |
{% set nodename = 'worker-' + item|string %}
<volume>
<name>{{ nodename }}</name>
<allocation>0</allocation>
<capacity unit='G'>{{ flavors.worker.rootSize }}</capacity>
<target>
<format type='qcow2'/>
</target>
</volume>

View File

@ -0,0 +1,11 @@
flavors:
master:
vcpus: 4
memory: 4
hugepages: false
rootSize: 30
worker:
vcpus: 2
memory: 2
hugepages: false
rootSize: 10

View File

@ -6,4 +6,24 @@ configMapGenerator:
options:
disableNameSuffixHash: true
files:
- template=daemonset-template.yaml
- template=daemonset-template.yaml
- name: flavors
options:
disableNameSuffixHash: true
files:
- flavors.yaml
- name: flavor-templates
options:
disableNameSuffixHash: true
files:
- flavor-templates.yaml
- name: network-templates
options:
disableNameSuffixHash: true
files:
- network-templates.yaml
- name: storage-templates
options:
disableNameSuffixHash: true
files:
- storage-templates.yaml

View File

@ -0,0 +1,17 @@
libvirtNetworks:
- name: management
libvirtTemplate: |
<network>
<name>management</name>
<forward mode='route'/>
<bridge name='management' stp='off' delay='0'/>
<ip address='{{ networks[0].routes[0].gateway }}' netmask='255.255.240.0'>
<!-- <tftp root='/srv/tftp'/> -->
<dhcp>
<range start='{{ networks[0].allocationStart }}' end='{{ networks[0].allocationStop }}'/>
<bootp file=''/>
</dhcp>
</ip>
</network>
# - name: mobility-gn
# libvirtTemplate:

View File

@ -0,0 +1,14 @@
libvirtStorage:
- name: vino-default
libvirtTemplate: |
<pool type='dir'>
<name>vino-default</name>
<target>
<path>/var/lib/libvirt/vino</path>
<permissions>
<mode>0711</mode>
<owner>0</owner>
<group>0</group>
</permissions>
</target>
</pool>

View File

@ -2,7 +2,7 @@ apiVersion: airship.airshipit.org/v1
kind: Vino
metadata:
name: vino-test-cr
# labels: ...
labels: {}
spec:
nodeLabelKeysToCopy:
- "airshipit.org/server"
@ -11,9 +11,10 @@ spec:
matchLabels:
beta.kubernetes.io/os: linux
configuration:
cpuExclude: 0-4,54-60
redfishCredentialSecret:
name: redfishSecret
cpuExclude: 0-4,54-60
redfishCredentialSecret:
name: redfishSecret
namespace: airship-system
networks:
- name: management
subnet: 192.168.2.0/20
@ -23,7 +24,7 @@ spec:
routes:
- network: 10.0.0.0
netmask: 255.255.255.0
gateway: $vino.nodebridgegw # vino will need to populate this from the nodelabel value `airshipit.org/vino.nodebridgegw`
gateway: 192.168.2.1 # vino will need to populate this from the nodelabel value `airshipit.org/vino.nodebridgegw`
dns_servers: ["135.188.34.124"]
- name: external
subnet: 169.0.0.0/24
@ -35,16 +36,20 @@ spec:
allocationStart: 169.0.0.10
allocationStop: 169.0.0.254
macPrefix: "0A:00:00:00:00:00"
vmBridge: lo
nodes:
- name: "worker"
count: 3
- name: master
count: 1
bmhLabels:
airshipit.org/k8s-role: worker
airshipit.org/k8s-role: master
networkDataTemplate:
name: "test-template"
namespace: "default"
labels:
vmFlavor: master
# libvirtTemplate:
# name: libvirt-template-master
# namespace: vino-system
networkInterfaces:
- name: management
type: bridge
@ -63,7 +68,50 @@ spec:
bond_mode: 802.3ad
bond_xmit_hash_policy: layer3+4
bond_miimon: "100"
diskDrives:
- name: root
type: qcow2
path: /home/foobar/qemu.img
options:
sizeGb: 30
sparse: true
- name: worker
count: 4
bmhLabels:
airshipit.org/k8s-role: worker
networkDataTemplate:
name: "test-template"
namespace: "default"
labels:
vmFlavor: worker
# libvirtTemplate:
# name: libvirt-template-worker
# namespace: vino-system
networkInterfaces:
- name: management
type: bridge
network: management
mtu: 1500
options:
bridgeName: vminfra-bridge
- name: external
type: sriov-bond
network: external
mtu: 9100
options:
# this is an 'open-ended' set of k/v pairs, validation is performed by vino rather than crd schema.
pf: "[enp29s0f0,enp219s1f1]"
vlan: "100"
bond_mode: 802.3ad
bond_xmit_hash_policy: layer3+4
bond_miimon: "100"
diskDrives:
- name: root
type: qcow2
path: /home/foobar/qemu.img
options:
sizeGb: 10
sparse: true
bmcCredentials:
username: "admin"
password: "passw0rd"
username: admin
password: passw0rd

View File

@ -796,7 +796,7 @@ string
</em>
</td>
<td>
<p>Parameter for Node master or worker-standard</p>
<p>Parameter for Node master or worker</p>
</td>
</tr>
<tr>
@ -850,7 +850,7 @@ NamespacedName
<code>diskDrives</code><br>
<em>
<a href="#airship.airshipit.org/v1.DiskDrivesTemplate">
DiskDrivesTemplate
[]DiskDrivesTemplate
</a>
</em>
</td>

View File

@ -104,15 +104,15 @@ type VMRoutes struct {
//NodeSet node definitions
type NodeSet struct {
//Parameter for Node master or worker-standard
//Parameter for Node master or worker
Name string `json:"name,omitempty"`
Count int `json:"count,omitempty"`
// BMHLabels labels will be copied directly to BMHs that will be created
// These labels will override keys from k8s node, that are specified in vino.NodeLabelKeysToCopy
BMHLabels map[string]string `json:"bmhLabels,omitempty"`
LibvirtTemplateDefinition NamespacedName `json:"libvirtTemplate,omitempty"`
NetworkInterfaces []NetworkInterface `json:"networkInterfaces,omitempty"`
DiskDrives *DiskDrivesTemplate `json:"diskDrives,omitempty"`
BMHLabels map[string]string `json:"bmhLabels,omitempty"`
LibvirtTemplateDefinition NamespacedName `json:"libvirtTemplate,omitempty"`
NetworkInterfaces []NetworkInterface `json:"networkInterfaces,omitempty"`
DiskDrives []DiskDrivesTemplate `json:"diskDrives,omitempty"`
// NetworkDataTemplate must have a template key
NetworkDataTemplate NamespacedName `json:"networkDataTemplate,omitempty"`
}

View File

@ -324,8 +324,10 @@ func (in *NodeSet) DeepCopyInto(out *NodeSet) {
}
if in.DiskDrives != nil {
in, out := &in.DiskDrives, &out.DiskDrives
*out = new(DiskDrivesTemplate)
(*in).DeepCopyInto(*out)
*out = make([]DiskDrivesTemplate, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
out.NetworkDataTemplate = in.NetworkDataTemplate
}

View File

@ -320,7 +320,7 @@ func (r *VinoReconciler) ensureDaemonSet(ctx context.Context, vino *vinov1.Vino)
// controller should watch for changes in daemonset to reconcile if it breaks, and change status
// of the vino object
// controlleruti.SetControllerReference(vino, ds, r.scheme)
ctx, cancel := context.WithTimeout(ctx, time.Second*30)
ctx, cancel := context.WithTimeout(ctx, time.Second*180)
defer cancel()
return r.waitDaemonSet(ctx, ds)
@ -440,7 +440,7 @@ func (r *VinoReconciler) waitDaemonSet(ctx context.Context, ds *appsv1.DaemonSet
}
logger.Info("DaemonSet is not in ready status, rechecking in 2 seconds")
}
time.Sleep(2 * time.Second)
time.Sleep(10 * time.Second)
}
}
}

View File

@ -14,7 +14,8 @@ function vinoDebugInfo () {
server_label="airshipit.org/server=s1"
rack_label="airshipit.org/rack=r1"
copyLabel="airshipit.org/k8s-role=worker"
master_copy_label="airshipit.org/k8s-role=master"
worker_copy_label="airshipit.org/k8s-role=worker"
# Label all nodes with the same rack/label. We are ok with this for this simple test.
kubectl label node --overwrite=true --all $server_label $rack_label
@ -34,7 +35,7 @@ until [[ $(kubectl get vino vino-test-cr 2>/dev/null) ]]; do
fi
sleep 2
done
if ! kubectl wait --for=condition=Ready vino vino-test-cr --timeout=180s; then
if ! kubectl wait --for=condition=Ready vino vino-test-cr --timeout=600s; then
vinoDebugInfo
fi
@ -52,11 +53,17 @@ if ! kubectl -n vino-system rollout status ds default-vino-test-cr --timeout=10s
vinoDebugInfo
fi
bmhCount=$(kubectl get baremetalhosts -n vino-system -l "$server_label,$server_label,$copyLabel" -o name | wc -l)
masterCount=$(kubectl get baremetalhosts -n vino-system -l "$server_label,$server_label,$master_copy_label" -o name | wc -l)
# with this setup set up, exactly 3 BMHs must have been created by VINO controller
# with this setup set up, exactly 1 master must have been created by VINO controller
[[ "$bmhCount" -eq "3" ]]
[[ "$masterCount" -eq "1" ]]
workerCount=$(kubectl get baremetalhosts -n vino-system -l "$server_label,$server_label,$worker_copy_label" -o name | wc -l)
# with this setup set up, exactly 4 workers must have been created by VINO controller
[[ "$workerCount" -eq "4" ]]
kubectl get baremetalhosts -n vino-system --show-labels=true