Add ansible roles for sushy and libvirt

This commit adds simple roles to manage:
 - libvirt service, domains, volumes, networks and pools.
 - redfish-emulator role installs sushy-tools from pip, together
   with support packages
Please note that the libvirt roles are not meant to be completely
idempotent; their main job is to deploy temporary resources for CI and
gating purposes, to be torn down afterwards. The roles are
specifically made to be simple to debug, and don't contain any
complex logic to make them portable, flexible or idempotent.

Change-Id: I2ff0138b5c95bea3445e242a2e5061651498f1ab
This commit is contained in:
Kostiantyn Kalynovskyi 2020-01-02 23:53:38 +00:00
parent 05b05b0c3e
commit 68157859a7
46 changed files with 1551 additions and 7 deletions

View File

@ -0,0 +1,36 @@
---
# Deploy the libvirt networks used by the gate: a NAT-ed provisioning
# network (created) and an OOB network (rebuilt from scratch each run).
- hosts: primary
  roles:
    - role: libvirt-network
      become: true
      vars:
        network_action: create
        libvirt_network:
          name: provision-network
          spec:
            forward:
              mode: nat
              nat:
                port:
                  - start: 1024
                    end: 65535
            bridge:
              name: "prov-net-br"
              stp: 'on'
              delay: '0'
            ip:
              address: "172.22.0.1"
              netmask: "255.255.255.0"
    - role: libvirt-network
      become: true
      vars:
        network_action: rebuild
        libvirt_network:
          name: oob-net
          spec:
            bridge:
              name: oob-net
              stp: 'on'
              delay: '0'
            ip:
              address: "10.23.22.1"
              netmask: "255.255.255.0"

View File

@ -0,0 +1,8 @@
---
# Create the default storage pool consumed by the volume/domain playbooks.
- hosts: primary
  roles:
    - role: libvirt-pool
      become: true
      vars:
        libvirt_pool:
          path: /var/lib/libvirt/airship
          name: airship

View File

@ -0,0 +1,32 @@
---
# Deploy two test VMs backed by pre-created volumes and attached to the
# provisioning network.
- hosts: primary
  roles:
    - role: libvirt-domain
      become: true
      vars:
        libvirt_domain:
          state: running
          name: 'vm1'
          memory_mb: 512
          vcpus: 1
          volumes:
            - name: 'volume-1'
              device: 'disk'
              format: 'qcow2'
              pool: 'airship'
          interfaces:
            - network: 'provision-network'
    - role: libvirt-domain
      become: true
      vars:
        libvirt_domain:
          state: running
          name: 'vm2'
          memory_mb: 512
          vcpus: 1
          volumes:
            - name: 'volume-2'
              device: 'disk'
              format: 'qcow2'
              pool: 'airship'
          interfaces:
            - network: 'provision-network'

View File

@ -0,0 +1,22 @@
---
# Create the volumes used by the test VMs. Each item is handed to the
# libvirt-volume role through the `vol` loop variable.
- hosts: primary
  tasks:
    - name: Create defined volumes
      include_role:
        name: libvirt-volume
      with_items:
        - name: volume-1
          image: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
          size: 10G
          pool: airship
          action: create
        - name: volume-2
          image: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
          size: 10G
          pool: airship
          action: create
      vars:
        libvirt_volume: "{{ vol }}"
        volume_action: "{{ vol.action }}"
        ansible_become: true
      loop_control:
        loop_var: vol

View File

@ -0,0 +1,4 @@
---
# Install libvirt/qemu packages and start libvirtd on the primary node.
- hosts: primary
  roles:
    - role: libvirt-install

View File

@ -0,0 +1,3 @@
- hosts: primary
roles:
- role: redfish-emulator

View File

@ -0,0 +1,173 @@
---
# The default directory in which to store VM console logs, if a VM-specific log
# file path is not given.
libvirt_vm_default_console_log_dir: "/var/log/libvirt-consoles/"
# The default location for libvirt images
libvirt_volume_default_images_path: '/var/lib/libvirt/images'
# Default type for Libvirt volumes
libvirt_volume_default_type: volume
# The default format for Libvirt volumes.
libvirt_volume_default_format: qcow2
# The default device for Libvirt volumes.
libvirt_volume_default_device: disk
# CPU architecture.
libvirt_vm_arch: x86_64
# Virtualisation engine. If not set, the role will attempt to auto-detect the
# optimal engine to use.
libvirt_vm_engine:
# Path to emulator binary. If not set, the role will attempt to auto-detect the
# correct emulator to use.
libvirt_vm_emulator:
# Default value for clock syncing. The default (false) uses <clock sync="localtime">
# to configure the instances clock synchronisation. Change to a timezone to make
# configuration use <clock offset="specified offset">
libvirt_vm_clock_offset: false
# The VM specification to be created.
# For backwards compatibility, libvirt_domain defaults to the values of the
# deprecated libvirt_vm_* variables referenced below; callers are expected
# to override the whole mapping.
libvirt_domain:
  # State of the VM. May be 'present' or 'absent'.
  state: "{{ libvirt_vm_state }}"
  # Name of the VM.
  name: "{{ libvirt_vm_name }}"
  # Memory in MB.
  memory_mb: "{{ libvirt_vm_memory_mb }}"
  # Number of vCPUs.
  vcpus: "{{ libvirt_vm_vcpus }}"
  # Virtual machine type.
  machine: "{{ libvirt_vm_machine }}"
  # Virtual machine CPU mode.
  cpu_mode: "{{ libvirt_vm_cpu_mode | default(libvirt_cpu_mode_default, true) }}"
  # List of volumes.
  volumes: "{{ libvirt_vm_volumes }}"
  # What time should the clock be synced to on boot (utc/localtime/timezone/variable)
  clock_offset: "localtime"
  # List of network interfaces.
  interfaces: "{{ libvirt_vm_interfaces }}"
  # Path to console log file.
  console_log_path: "{{ libvirt_vm_console_log_path }}"
  # XML template file to source domain definition
  xml_file: vm.xml.j2
# Variables to add to the environment that is used to execute virsh commands
libvirt_vm_virsh_default_env: "{{ { 'LIBVIRT_DEFAULT_URI': libvirt_vm_uri } if libvirt_vm_uri else {} }}"
# Override for the libvirt connection uri. Leave unset to use the default.
libvirt_vm_uri: ""
# Default CPU mode if libvirt_vm_cpu_mode or vm.cpu_mode is undefined
libvirt_cpu_mode_default: "{{ 'host-passthrough' if libvirt_vm_engine == 'kvm' else 'host-model' }}"
# Fallback domain XML used when the caller does not supply libvirt_domain.xml.
# NOTE(review): duplicated in templates/vm.xml.j2 — keep the two in sync.
libvirt_domain_template_default: |
  <domain type='{{ libvirt_vm_engine }}'>
    <name>{{ libvirt_domain.name }}</name>
    <memory>{{ libvirt_domain.memory_mb | int * 1024 }}</memory>
    <vcpu>{{ libvirt_domain.vcpus }}</vcpu>
  {% if libvirt_domain.clock_offset | default(libvirt_vm_clock_offset) %}
    <clock offset="{{ libvirt_domain.clock_offset }}"/>
  {% else %}
    <clock sync="localtime"/>
  {% endif %}
    <on_poweroff>destroy</on_poweroff>
    <on_reboot>restart</on_reboot>
    <on_crash>destroy</on_crash>
    <os>
      <type arch='{{ libvirt_vm_arch }}'{% if machine is not none %} machine='{{ machine }}'{% endif %}>hvm</type>
      <bootmenu enable='no'/>
      <boot dev='hd'/>
      <boot dev='cdrom'/>
      <boot dev='network'/>
      <bios useserial='yes'/>
    </os>
    <features>
      <acpi/>
      <apic/>
      <pae/>
    </features>
  {% if cpu_mode %}
    <cpu mode='{{ cpu_mode }}'>
      <model fallback='allow'/>
    </cpu>
  {% endif %}
    <devices>
      <emulator>{{ libvirt_vm_emulator }}</emulator>
  {% for volume in volumes %}
      <disk type='{{ volume.type | default(libvirt_volume_default_type) }}' device='{{ volume.device | default(libvirt_volume_default_device) }}'>
        <driver name='qemu' type='{{ volume.format | default(libvirt_volume_default_format) }}'/>
  {% if volume.type | default(libvirt_volume_default_type) == 'file' %}
        <source file='{{ volume.file_path | default(libvirt_volume_default_images_path) }}/{{ volume.name }}'/>
  {% else %}
        <source pool='{{ volume.pool }}' volume='{{ volume.name }}'/>
  {% endif %}
  {% if volume.target is undefined %}
        <target dev='vd{{ 'abcdefghijklmnopqrstuvwxyz'[loop.index - 1] }}'/>
  {% else %}
        <target dev='{{ volume.target }}'/>
  {% endif %}
      </disk>
  {% endfor %}
  {% for interface in interfaces %}
  {% if interface.type is defined and interface.type == 'direct' %}
      <interface type='direct'>
        <source dev='{{ interface.source.dev }}' mode='{{ interface.source.mode | default('vepa') }}'/>
  {% elif interface.type is defined and interface.type == 'bridge' %}
      <interface type='bridge'>
        <source bridge='{{ interface.source.dev }}'/>
  {% elif interface.type is not defined or interface.type == 'network' %}
      <interface type='network'>
        <source network='{{ interface.network }}'/>
  {% endif %}
  {% if interface.mac is defined %}
        <mac address='{{ interface.mac }}'/>
  {% endif %}
  {# if the network configuration is invalid this can still appear in the xml #}
  {# (say you enter 'bond' instead of 'bridge' in your variables) #}
        <model type='virtio'/>
      </interface>
  {% endfor %}
  {% if console_log_enabled | bool %}
      <serial type='file'>
        <source path='{{ console_log_path }}'/>
      </serial>
      <serial type='pty'/>
      <console type='file'>
        <source path='{{ console_log_path }}'/>
        <target type='serial'/>
      </console>
  {% else %}
      <serial type='pty'>
        <target port='0'/>
      </serial>
      <console type='pty'>
        <target type='serial' port='0'/>
      </console>
  {% endif %}
  {% if enable_vnc | bool %}
      <graphics type='vnc' autoport='yes' listen='127.0.0.1'>
        <listen type='address' address='127.0.0.1'/>
      </graphics>
  {% endif %}
      <rng model="virtio"><backend model="random">/dev/urandom</backend></rng>
    </devices>
  </domain>

View File

@ -0,0 +1,65 @@
---
# Auto-detect the virtualisation engine (kvm/qemu) and the emulator binary,
# unless the caller has already set libvirt_vm_engine / libvirt_vm_emulator.
- name: Detect the virtualisation engine
  when: libvirt_vm_engine is none or libvirt_vm_engine | length == 0
  block:
    - name: Load the kvm kernel module
      modprobe:
        name: kvm
      become: true
      failed_when: false
    - name: Check for the KVM device
      stat:
        path: /dev/kvm
      register: stat_kvm
    - name: Set a fact containing the virtualisation engine
      set_fact:
        libvirt_vm_engine: >-
          {%- if ansible_architecture != libvirt_vm_arch -%}
          {# Virtualisation instructions are generally available only for the
             host architecture. Ideally we would test for virtualisation
             instructions, e.g. vt-d, as another architecture could support
             them even if the emulated cpu architecture is not the same. #}
          qemu
          {%- elif stat_kvm.stat.exists -%}
          kvm
          {%- else -%}
          qemu
          {%- endif -%}

- name: Detect the virtualisation emulator
  when: libvirt_vm_emulator is none or libvirt_vm_emulator | length == 0
  block:
    - when: libvirt_vm_engine == 'kvm'
      block:
        - name: Detect the KVM emulator binary path
          stat:
            path: "{{ item }}"
          register: kvm_emulator_result
          with_items:
            - /usr/bin/kvm
            - /usr/bin/qemu-kvm
            - /usr/libexec/qemu-kvm
        - name: Set a fact containing the KVM emulator binary path
          set_fact:
            libvirt_vm_emulator: "{{ item.item }}"
          with_items: "{{ kvm_emulator_result.results }}"
          when: item.stat.exists
    - when: libvirt_vm_engine == 'qemu'
      block:
        - name: Detect the QEMU emulator binary path
          shell: which qemu-system-{{ libvirt_vm_arch }}
          register: qemu_emulator_result
          changed_when: false
        - name: Set a fact containing the QEMU emulator binary path
          set_fact:
            libvirt_vm_emulator: "{{ qemu_emulator_result.stdout }}"
    - name: Fail if unable to detect the emulator
      fail:
        msg: Unable to detect emulator for engine {{ libvirt_vm_engine }}.
      # Also treat an empty string (e.g. blank `which` output) as undetected;
      # the original only checked `is none`, which can never match a set_fact
      # result.
      when: libvirt_vm_emulator is none or libvirt_vm_emulator | length == 0

View File

@ -0,0 +1,21 @@
---
# Validate a single interface definition (passed in as `interface`) before
# it is rendered into the domain XML.
- name: Check network interface has a network name
  fail:
    msg: >
      The interface definition {{ interface }} has type 'network', but does not have
      a network name defined.
  when:
    - interface.type is not defined or
      interface.type == 'network'
    - interface.network is not defined
- name: Check direct interface has an interface device name
  fail:
    msg: >
      The interface definition {{ interface }} has type 'direct', but does not have
      a host source device defined.
  when:
    - interface.type is defined
    - interface.type == 'direct'
    - interface.source is not defined or
      interface.source.dev is not defined

View File

@ -0,0 +1,28 @@
---
- name: Ensure the VM console log directory exists
  file:
    path: "{{ libvirt_domain.console_log_path | dirname }}"
    state: directory
    owner: "{{ libvirt_domain.libvirt_vm_log_owner }}"
    group: "{{ libvirt_domain.libvirt_vm_log_owner }}"
    recurse: true
    # Quoted to avoid the YAML octal-literal trap (bare 0770 would be
    # passed as the integer 504).
    mode: "0770"
  when: "libvirt_domain.console_log_enabled | default('false') | bool"
- name: Validate VM interfaces
  include_tasks: check-interface.yml
  vars:
    interface: "{{ item }}"
  with_items: "{{ libvirt_domain.interfaces }}"
- name: Ensure the VM is defined
  virt:
    name: "{{ libvirt_domain.name }}"
    command: define
    xml: "{{ libvirt_domain.xml | default(libvirt_domain_template_default) }}"
- name: Ensure the VM is started at boot
  virt:
    name: "{{ libvirt_domain.name }}"
    autostart: "{{ libvirt_domain.autostart | default(false) }}"
    state: "{{ libvirt_domain.state | default('running') }}"

View File

@ -0,0 +1,16 @@
---
# Entry point: detect engine/emulator, then define and start the domain.
- include_tasks: autodetect.yml
- include_tasks: domain.yml
  vars:
    console_log_enabled: "{{ libvirt_domain.console_log_enabled | default(false) }}"
    console_log_path: >-
      {{ libvirt_domain.console_log_path |
         default(libvirt_vm_default_console_log_dir + '/' + libvirt_domain.name + '-console.log', true) }}
    machine_default: "{{ none if libvirt_vm_engine == 'kvm' else 'pc-1.0' }}"
    machine: "{{ libvirt_domain.machine | default(machine_default, true) }}"
    cpu_mode: "{{ libvirt_domain.cpu_mode | default(libvirt_cpu_mode_default) }}"
    volumes: "{{ libvirt_domain.volumes | default([], true) }}"
    interfaces: "{{ libvirt_domain.interfaces | default([], true) }}"
    start: "{{ libvirt_domain.start | default(true) }}"
    autostart: "{{ libvirt_domain.autostart | default(true) }}"
    enable_vnc: "{{ libvirt_domain.enable_vnc | default(false) }}"

View File

@ -0,0 +1,91 @@
{# Libvirt domain XML for the libvirt-domain role. Expects libvirt_domain  #}
{# plus vars set in tasks/main.yml (machine, cpu_mode, volumes,            #}
{# interfaces, console_log_enabled, console_log_path, enable_vnc).         #}
{# NOTE(review): duplicated as libvirt_domain_template_default in          #}
{# defaults/main.yml — keep the two in sync.                               #}
<domain type='{{ libvirt_vm_engine }}'>
  <name>{{ libvirt_domain.name }}</name>
  <memory>{{ libvirt_domain.memory_mb | int * 1024 }}</memory>
  <vcpu>{{ libvirt_domain.vcpus }}</vcpu>
{% if libvirt_domain.clock_offset | default(libvirt_vm_clock_offset) %}
  <clock offset="{{ libvirt_domain.clock_offset }}"/>
{% else %}
  <clock sync="localtime"/>
{% endif %}
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>destroy</on_crash>
  <os>
    <type arch='{{ libvirt_vm_arch }}'{% if machine is not none %} machine='{{ machine }}'{% endif %}>hvm</type>
    <bootmenu enable='no'/>
    <boot dev='hd'/>
    <boot dev='cdrom'/>
    <boot dev='network'/>
    <bios useserial='yes'/>
  </os>
  <features>
    <acpi/>
    <apic/>
    <pae/>
  </features>
{% if cpu_mode %}
  <cpu mode='{{ cpu_mode }}'>
    <model fallback='allow'/>
  </cpu>
{% endif %}
  <devices>
    <emulator>{{ libvirt_vm_emulator }}</emulator>
{% for volume in volumes %}
    <disk type='{{ volume.type | default(libvirt_volume_default_type) }}' device='{{ volume.device | default(libvirt_volume_default_device) }}'>
      <driver name='qemu' type='{{ volume.format | default(libvirt_volume_default_format) }}'/>
{% if volume.type | default(libvirt_volume_default_type) == 'file' %}
      <source file='{{ volume.file_path | default(libvirt_volume_default_images_path) }}/{{ volume.name }}'/>
{% else %}
      <source pool='{{ volume.pool }}' volume='{{ volume.name }}'/>
{% endif %}
{% if volume.target is undefined %}
      <target dev='vd{{ 'abcdefghijklmnopqrstuvwxyz'[loop.index - 1] }}'/>
{% else %}
      <target dev='{{ volume.target }}'/>
{% endif %}
    </disk>
{% endfor %}
{% for interface in interfaces %}
{% if interface.type is defined and interface.type == 'direct' %}
    <interface type='direct'>
      <source dev='{{ interface.source.dev }}' mode='{{ interface.source.mode | default('vepa') }}'/>
{% elif interface.type is defined and interface.type == 'bridge' %}
    <interface type='bridge'>
      <source bridge='{{ interface.source.dev }}'/>
{% elif interface.type is not defined or interface.type == 'network' %}
    <interface type='network'>
      <source network='{{ interface.network }}'/>
{% endif %}
{% if interface.mac is defined %}
      <mac address='{{ interface.mac }}'/>
{% endif %}
{# if the network configuration is invalid this can still appear in the xml #}
{# (say you enter 'bond' instead of 'bridge' in your variables) #}
      <model type='virtio'/>
    </interface>
{% endfor %}
{% if console_log_enabled | bool %}
    <serial type='file'>
      <source path='{{ console_log_path }}'/>
    </serial>
    <serial type='pty'/>
    <console type='file'>
      <source path='{{ console_log_path }}'/>
      <target type='serial'/>
    </console>
{% else %}
    <serial type='pty'>
      <target port='0'/>
    </serial>
    <console type='pty'>
      <target type='serial' port='0'/>
    </console>
{% endif %}
{% if enable_vnc | bool %}
    <graphics type='vnc' autoport='yes' listen='127.0.0.1'>
      <listen type='address' address='127.0.0.1'/>
    </graphics>
{% endif %}
    <rng model="virtio"><backend model="random">/dev/urandom</backend></rng>
  </devices>
</domain>

View File

@ -0,0 +1,39 @@
---
# Functional test: install libvirt, create pool/volumes/domain, then assert
# the domain reached the expected state.
- name: Include test variables.
  include_vars:
    file: vars.yml
- name: install libvirt
  include_role:
    name: libvirt-install
- name: create pool
  include_role:
    name: libvirt-pool
  vars:
    ansible_become: true
- name: Create defined volumes
  include_role:
    name: libvirt-volume
  with_items: "{{ libvirt_volumes }}"
  vars:
    libvirt_volume: "{{ vol }}"
    volume_action: "{{ vol.action }}"
    ansible_become: true
  loop_control:
    loop_var: vol
- name: create libvirt domains
  include_role:
    name: libvirt-domain
  vars:
    ansible_become: true
- name: save information about domain
  virt:
    command: info
    name: "{{ libvirt_domain.name }}"
  register: domain_info
  become: true
- name: debug domain-info
  debug:
    var: domain_info
- name: make sure that vm is in correct state
  assert:
    that:
      - domain_info[libvirt_domain.name].state == libvirt_domain.state

View File

@ -0,0 +1,44 @@
---
# Test fixtures for the libvirt-domain functional test.
libvirt_pool:
  path: /var/lib/libvirt/airship
  name: airship
libvirt_volumes:
  - name: volume-1
    image: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
    size: 10G
    pool: "{{ libvirt_pool.name }}"
    action: create
  - name: volume-2
    size: 10G
    pool: "{{ libvirt_pool.name }}"
    action: create
libvirt_domain:
  state: running
  name: 'vm1'
  memory_mb: 2048
  vcpus: 1
  volumes:
    - name: 'volume-1'
      device: 'disk'
      format: 'qcow2'
      pool: 'airship'
  interfaces:
    - network: 'provision-network'
libvirt_network:
  name: provision-network
  spec:
    forward:
      mode: nat
      nat:
        port:
          - start: 1024
            end: 65535
    bridge:
      name: "prov-net-br"
      stp: 'on'
      delay: '0'
    ip:
      address: "172.22.0.1"
      netmask: "255.255.255.0"

View File

@ -0,0 +1,38 @@
---
# Install libvirt + qemu packages for the detected distro family and make
# sure libvirtd is running and enabled.
- block:
    - name: Ensuring Libvirt, Qemu and support packages are present
      become: true
      # ansible_distribution reports RHEL as 'RedHat', not
      # 'Red Hat Enterprise Linux', so the original condition could never
      # match on RHEL hosts.
      when: ansible_distribution == 'CentOS' or ansible_distribution == 'RedHat'
      yum:
        name:
          - libguestfs-tools
          - libvirt
          - libvirt-devel
          - libvirt-daemon-kvm
          - qemu-kvm
          - virt-install
        state: present
    - name: Ensuring Libvirt, Qemu and support packages are present
      become: true
      when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
      apt:
        name:
          # NOTE(review): libvirt-bin was dropped after Ubuntu 18.04 in
          # favour of libvirt-daemon-system/libvirt-clients — confirm the
          # targeted release.
          - qemu
          - libvirt-bin
          - libguestfs-tools
          - qemu-kvm
          - virtinst
          - python-lxml
          - python3-lxml
          - python3-libvirt
          - python-libvirt
          - dnsmasq
          - ebtables
        state: present
    - name: Start libvirtd
      service:
        name: libvirtd
        state: started
        enabled: true
      become: true

View File

@ -0,0 +1,153 @@
---
# Example inputs accepted by this role:
#
# libvirt_network:
#   name: provision-network
#   spec:
#     forward:
#       mode: nat
#       nat:
#         port:
#           - start: 1024
#             end: 65535
#     bridge:
#       name: "prov-net-br"
#       stp: 'on'
#       delay: '0'
#     ip:
#       address: "172.22.0.1"
#       netmask: "255.255.255.0"
#
# libvirt_network:
#   name: "{{ bm_net_name }}"
#   persistent: true
#   autostart: true
#   spec:
#     forward:
#       mode: nat
#       nat:
#         port:
#           - start: 1024
#             end: 65535
#     bridge:
#       name: "{{ bm_net_name }}"
#       stp: 'on'
#       delay: '0'
#     domain:
#       name: 'tests.baremetal.net'
#       localOnly: 'yes'
#     dns:
#       - forwarder:
#           domain: 'apps.tests.baremetal.net'
#           addr: '127.0.0.1'
#       - forwarder:
#           domain: 'services.tests.baremetal.net'
#           addr: '127.0.0.1'
#     ip: "{{ bm_net_0_ip_cfg }}"
#
# Fallback network XML used when the caller does not supply
# libvirt_network.xml. Renders net_yaml (spec merged with the name).
# NOTE(review): duplicated in templates/network.xml.j2 — keep in sync.
libvirt_network_template_default: |
  <network>
    <name>{{ net_yaml.name }}</name>
  {% if net_yaml.forward is defined %}
  {% if net_yaml.forward.mode is defined %}
    <forward mode='{{ net_yaml.forward.mode }}'>
  {% else %}
    <forward>
  {% endif %}
  {% if net_yaml.forward.nat is defined %}
      <nat>
  {% if net_yaml.forward.nat.port is defined %}
  {% for port in net_yaml.forward.nat.port %}
        <port start='{{ port.start | string }}' end='{{ port.end | string }}'/>
  {% endfor %}
  {% endif %}
      </nat>
  {% endif %}
    </forward>
  {% endif %}
  {% if net_yaml.bridge is defined %}
    <bridge
  {% if net_yaml.bridge.name is defined %}
      name='{{ net_yaml.bridge.name }}'
  {% endif %}
  {% if net_yaml.bridge.stp is defined %}
      stp='{{ net_yaml.bridge.stp | string }}'
  {% endif %}
  {% if net_yaml.bridge.delay is defined %}
      delay='{{ net_yaml.bridge.delay | string }}'
  {% endif %}
    />
  {% endif %}
  {% if net_yaml.mac is defined %}
    <mac
  {% if net_yaml.mac.address is defined %}
      address='{{ net_yaml.mac.address }}'
  {% endif %}
    />
  {% endif %}
  {% if net_yaml.domain is defined %}
    <domain
  {% if net_yaml.domain.name is defined %}
      name='{{ net_yaml.domain.name }}'
  {% endif %}
  {% if net_yaml.domain.localOnly is defined %}
      localOnly='{{ net_yaml.domain.localOnly | string }}'
  {% endif %}
    />
  {% endif %}
  {% if net_yaml.dns is defined %}
    <dns>
  {% if net_yaml.dns | list %}
  {% for dns_item in net_yaml.dns %}
  {% if dns_item.forwarder is defined %}
      <forwarder
  {% if dns_item.forwarder.domain is defined %}
        domain='{{ dns_item.forwarder.domain }}'
  {% endif %}
  {% if dns_item.forwarder.addr is defined %}
        addr='{{ dns_item.forwarder.addr }}'
  {% endif %}
      />
  {% endif %}
  {% endfor %}
  {% endif %}
    </dns>
  {% endif %}
  {% if net_yaml.ip is defined %}
    <ip
  {% if net_yaml.ip.address is defined %}
      address='{{ net_yaml.ip.address }}'
  {% endif %}
  {% if net_yaml.ip.netmask is defined %}
      netmask='{{ net_yaml.ip.netmask }}'
  {% endif %}
    >
  {% if net_yaml.ip.dhcp is defined %}
      <dhcp>
  {% for dhcp_item in net_yaml.ip.dhcp %}
  {% if dhcp_item.range is defined %}
        <range
  {% if dhcp_item.range.start is defined %}
          start='{{ dhcp_item.range.start }}'
  {% endif %}
  {% if dhcp_item.range.end is defined %}
          end='{{ dhcp_item.range.end }}'
  {% endif %}
        />
  {% endif %}
  {% if dhcp_item.host is defined %}
        <host
  {% if dhcp_item.host.mac is defined %}
          mac='{{ dhcp_item.host.mac }}'
  {% endif %}
  {% if dhcp_item.host.name is defined %}
          name='{{ dhcp_item.host.name }}'
  {% endif %}
  {% if dhcp_item.host.ip is defined %}
          ip='{{ dhcp_item.host.ip }}'
  {% endif %}
        />
  {% endif %}
  {% endfor %}
      </dhcp>
  {% endif %}
    </ip>
  {% endif %}
  </network>

View File

@ -0,0 +1,32 @@
---
# Description:
#   Add given hosts to an existing libvirt network.
#
# Inputs:
#   network_action: "add_dhcp_hosts"
#   network_args:
#     name: <name of network>
#     hosts:
#       - name:
#         mac:
#         ip:
- name: Validate input
  assert:
    that:
      - "network_args is defined"
      - "network_args.name is defined"
      - "network_args.hosts is defined"
      - "network_args.hosts | list"
- name: add dhcp hosts to network
  # NOTE(review): the original rendered the host *name* into the ip=
  # attribute and never set the name; fixed to use the ip field. Trailing
  # backslashes inside a `>-` folded scalar also survived as literal `\`
  # characters in the command, so the continuation backslashes are removed.
  shell: >-
    virsh net-update {{ network_args.name }}
    add --section ip-dhcp-host
    --xml "<host mac='{{ single_dhcp_host.mac }}' name='{{ single_dhcp_host.name }}' ip='{{ single_dhcp_host.ip }}'/>"
    --config --live
  loop: "{{ network_args.hosts }}"
  loop_control:
    loop_var: single_dhcp_host

View File

@ -0,0 +1,73 @@
---
# Description:
#   Creates a libvirt network. libvirt_network.spec is converted from YAML
#   to XML as-is, with no validation of the arguments; the caller must
#   ensure the YAML is formulated correctly.
#
# Inputs:
#   network_action: "create"
#   libvirt_network:
#     name: <name of network>
#     persistent: <boolean>
#     autostart: <boolean>
#     recreate: <boolean>
#     spec:
#       forward:
#         mode:
#         nat:
#           port:
#             - start:
#               end:
#       bridge:
#         name:
#         stp:
#         delay:
#       domain:
#         name:
#         localOnly:
#       dns:
#         forwarder:
#           domain:
#           addr:
#       mac:
#         address:
#       ip:
#         address:
#         netmask:
#         dhcp:
#           - range:
#               start:
#               end:
- name: Validate input
  assert:
    that:
      - "libvirt_network is defined"
      - "libvirt_network.name is defined"
      - "libvirt_network.spec is defined"
- name: Create yaml for template
  set_fact:
    net_yaml: >-
      {{
        libvirt_network.spec
        | combine({'name': libvirt_network.name}, recursive=True)
      }}
- name: "Define network"
  virt_net:
    command: define
    # If libvirt_network.xml is defined, spec will be ignored.
    xml: "{{ libvirt_network.xml | default(libvirt_network_template_default) }}"
    name: "{{ libvirt_network.name }}"
- name: "Start network"
  virt_net:
    state: active
    name: "{{ libvirt_network.name }}"
- name: "Autostart network"
  virt_net:
    name: "{{ libvirt_network.name }}"
    autostart: "{{ libvirt_network.autostart | default(true) }}"

View File

@ -0,0 +1 @@
# Dispatch to the task file named after the requested action
# (create.yml, rebuild.yml, add_dhcp_hosts.yml, ...).
- include_tasks: "{{ network_action }}.yml"

View File

@ -0,0 +1,17 @@
---
# Tear the network down completely, then re-create it via create.yml.
- name: "Remove network"
  virt_net:
    state: absent
    name: "{{ libvirt_network.name }}"
- name: Create yaml for template
  set_fact:
    net_yaml: >-
      {{
        libvirt_network.spec
        | combine({'name': libvirt_network.name}, recursive=True)
      }}
- name: "create network"
  # The original included "{{ network_action }}.yml" with network_action
  # overridden to 'create' — include create.yml directly instead.
  include_tasks: create.yml

View File

@ -0,0 +1,110 @@
{# Render a libvirt <network> XML definition from net_yaml                 #}
{# (libvirt_network.spec merged with the network name).                    #}
{# NOTE(review): duplicated as libvirt_network_template_default in         #}
{# defaults/main.yml — keep the two in sync.                               #}
<network>
  <name>{{ net_yaml.name }}</name>
{% if net_yaml.forward is defined %}
{% if net_yaml.forward.mode is defined %}
  <forward mode='{{ net_yaml.forward.mode }}'>
{% else %}
  <forward>
{% endif %}
{% if net_yaml.forward.nat is defined %}
    <nat>
{% if net_yaml.forward.nat.port is defined %}
{% for port in net_yaml.forward.nat.port %}
      <port start='{{ port.start | string }}' end='{{ port.end | string }}'/>
{% endfor %}
{% endif %}
    </nat>
{% endif %}
  </forward>
{% endif %}
{% if net_yaml.bridge is defined %}
  <bridge
{% if net_yaml.bridge.name is defined %}
    name='{{ net_yaml.bridge.name }}'
{% endif %}
{% if net_yaml.bridge.stp is defined %}
    stp='{{ net_yaml.bridge.stp | string }}'
{% endif %}
{% if net_yaml.bridge.delay is defined %}
    delay='{{ net_yaml.bridge.delay | string }}'
{% endif %}
  />
{% endif %}
{% if net_yaml.mac is defined %}
  <mac
{% if net_yaml.mac.address is defined %}
    address='{{ net_yaml.mac.address }}'
{% endif %}
  />
{% endif %}
{% if net_yaml.domain is defined %}
  <domain
{% if net_yaml.domain.name is defined %}
    name='{{ net_yaml.domain.name }}'
{% endif %}
{% if net_yaml.domain.localOnly is defined %}
    localOnly='{{ net_yaml.domain.localOnly | string }}'
{% endif %}
  />
{% endif %}
{% if net_yaml.dns is defined %}
  <dns>
{% if net_yaml.dns | list %}
{% for dns_item in net_yaml.dns %}
{% if dns_item.forwarder is defined %}
    <forwarder
{% if dns_item.forwarder.domain is defined %}
      domain='{{ dns_item.forwarder.domain }}'
{% endif %}
{% if dns_item.forwarder.addr is defined %}
      addr='{{ dns_item.forwarder.addr }}'
{% endif %}
    />
{% endif %}
{% endfor %}
{% endif %}
  </dns>
{% endif %}
{% if net_yaml.ip is defined %}
  <ip
{% if net_yaml.ip.address is defined %}
    address='{{ net_yaml.ip.address }}'
{% endif %}
{% if net_yaml.ip.netmask is defined %}
    netmask='{{ net_yaml.ip.netmask }}'
{% endif %}
  >
{% if net_yaml.ip.dhcp is defined %}
    <dhcp>
{% for dhcp_item in net_yaml.ip.dhcp %}
{% if dhcp_item.range is defined %}
      <range
{% if dhcp_item.range.start is defined %}
        start='{{ dhcp_item.range.start }}'
{% endif %}
{% if dhcp_item.range.end is defined %}
        end='{{ dhcp_item.range.end }}'
{% endif %}
      />
{% endif %}
{% if dhcp_item.host is defined %}
      <host
{% if dhcp_item.host.mac is defined %}
        mac='{{ dhcp_item.host.mac }}'
{% endif %}
{% if dhcp_item.host.name is defined %}
        name='{{ dhcp_item.host.name }}'
{% endif %}
{% if dhcp_item.host.ip is defined %}
        ip='{{ dhcp_item.host.ip }}'
{% endif %}
      />
{% endif %}
{% endfor %}
    </dhcp>
{% endif %}
  </ip>
{% endif %}
</network>

View File

@ -0,0 +1,139 @@
---
# Functional test: create networks, then verify their state, bridge
# addresses and DHCP via a veth pair enslaved to the oob-net bridge.
- name: Include test variables.
  include_vars:
    file: vars.yml
- name: install libvirt
  include_role:
    name: libvirt-install
- name: create networks
  include_role:
    name: libvirt-network
  with_items: "{{ libvirt_networks }}"
  loop_control:
    loop_var: libvirt_network
  vars:
    ansible_become: true
    network_action: "{{ libvirt_network.network_action }}"
- name: install required packages
  apt:
    name:
      - bridge-utils
    state: present
  become: true
- name: gather network info
  virt_net:
    command: info
  register: libvirt_networks_info
  become: true
- name: debug network list
  debug:
    var: libvirt_networks_info
- name: check if network is present
  assert:
    that:
      - "'oob-net' in libvirt_networks_info.networks"
      - "'provision-network' in libvirt_networks_info.networks"
# This is needed because dashes '-' are not processed the expected way
# in dotted attribute access.
- name: Assign networks to separate variables
  set_fact:
    oob_net: "{{ libvirt_networks_info.networks['oob-net'] }}"
    provision_network: "{{ libvirt_networks_info.networks['provision-network'] }}"
- name: Verify oob network is in correct state
  assert:
    that:
      - "oob_net.autostart == 'no'"
      - "oob_net.bridge == 'oob-net'"
      - "oob_net.state == 'active'"
- name: register ip address of the oob-net interface
  command: ip -4 a show dev oob-net
  register: oob_net_device
  changed_when: false
- name: debug oob-net interface
  debug:
    var: oob_net_device.stdout
- name: verify oob-net bridge has correct address
  assert:
    that: "'10.23.22.1/24' in oob_net_device.stdout"
- name: Verify provision-network is in correct state
  assert:
    that:
      - "provision_network.autostart == 'yes'"
      - "provision_network.bridge == 'prov-net-br'"
      - "provision_network.state == 'active'"
      - "provision_network.forward_mode == 'nat'"
- name: register ip address of the prov-net-br interface
  command: ip -4 a show dev prov-net-br
  register: prov_net_br_device
  changed_when: false
- name: debug prov-net-br interface
  debug:
    var: prov_net_br_device.stdout
- name: verify provision-network bridge has correct address
  assert:
    that: "'172.22.0.1/24' in prov_net_br_device.stdout"
- name: Create virtual ethernet interface
  command: ip link add name air02 type veth peer name air01
  become: true
  register: create_veth_command
  changed_when:
    - "create_veth_command.rc != 2"
    - "'RTNETLINK answers: File exists' not in (create_veth_command.stderr | default(''))"
  failed_when:
    - "create_veth_command.rc != 0"
    - "'RTNETLINK answers: File exists' not in (create_veth_command.stderr | default(''))"
- name: set interface up
  become: true
  command: ip link set up dev air02
  # This makes task never report to be changed, it is a workaround
  # because if device is already up there is no command output or different RC
  changed_when: false
- name: set interface up
  become: true
  command: ip link set up dev air01
  # This makes task never report to be changed, it is a workaround
  # because if device is already up there is no command output or different RC
  changed_when: false
- name: set interface already in bridge variable
  set_fact:
    already_in_bridge: device air02 is already a member of a bridge; can't enslave it to bridge oob-net.
- name: Add interface to libvirt managed linux bridge with dhcp
  become: true
  command: brctl addif oob-net air02
  register: add_if_command
  changed_when:
    - add_if_command.rc != 1
    # NOTE(review): original referenced the undefined 'dd_if_command' here.
    - already_in_bridge not in (add_if_command.stderr | default(''))
  failed_when:
    - add_if_command.rc != 0
    - already_in_bridge not in (add_if_command.stderr | default(''))
- name: send dhcp request over the interface
  become: true
  command: timeout 20s dhclient air01
  changed_when: false
- name: register ip address of the air01 interface
  command: ip -4 a show dev air01
  register: air01_device
  changed_when: false
# This simple test checks if an ip address is present in the interface
# description.
# TODO filter out the address, derive subnet and compare to expected subnet
- name: verify air01 interface has address in correct network
  assert:
    that:
      - "'10.23.22.' in air01_device.stdout"

View File

@ -0,0 +1,32 @@
---
# Test fixtures for the libvirt-network functional test.
libvirt_networks:
  - network_action: create
    autostart: false
    name: oob-net
    spec:
      bridge:
        name: oob-net
        stp: 'on'
        delay: '0'
      ip:
        address: "10.23.22.1"
        netmask: "255.255.255.0"
        dhcp:
          - range:
              start: 10.23.22.100
              end: 10.23.22.199
  - network_action: create
    name: provision-network
    spec:
      forward:
        mode: nat
        nat:
          port:
            - start: 1024
              end: 65535
      bridge:
        name: "prov-net-br"
        stp: 'on'
        delay: '0'
      ip:
        address: "172.22.0.1"
        netmask: "255.255.255.0"

View File

@ -0,0 +1,14 @@
---
# Default pool definition; callers normally override libvirt_pool.
libvirt_pool:
  name: airship
  path: "/var/lib/airship"
pool_action: create
# Fallback pool XML used when the caller does not supply libvirt_pool.xml.
# NOTE(review): duplicated in templates/pool.xml.j2 — keep the two in sync.
libvirt_pool_template_default: |
  <pool type="dir">
    <name>{{ libvirt_pool.name }}</name>
    {% if 'capacity' in libvirt_pool %}
    <capacity>{{ libvirt_pool.capacity }}</capacity>
    {% endif %}
    <target>
      <path>{{ libvirt_pool.path | default('placeholder_value') }}</path>
    </target>
  </pool>

View File

@ -0,0 +1,24 @@
---
# Create a directory-backed storage pool: make the directory, define the
# pool, activate it and enable autostart.
- name: Ensure libvirt dir storage pool directories exist
  file:
    path: "{{ libvirt_pool.path }}"
    owner: "{{ libvirt_pool.owner | default(omit) }}"
    group: "{{ libvirt_pool.group | default(omit) }}"
    mode: "{{ libvirt_pool.mode | default(omit) }}"
    state: directory
- name: Ensure libvirt storage pools are defined
  virt_pool:
    name: "{{ libvirt_pool.name }}"
    command: define
    xml: "{{ libvirt_pool.xml | default(libvirt_pool_template_default) }}"
- name: Ensure libvirt storage pools are active
  virt_pool:
    name: "{{ libvirt_pool.name }}"
    state: active
- name: Ensure libvirt storage pools are started on boot
  virt_pool:
    name: "{{ libvirt_pool.name }}"
    autostart: true

View File

@ -0,0 +1 @@
# Dispatch to the task file named after the requested pool_action
# (e.g. create.yml).
- include_tasks: "{{ pool_action }}.yml"

View File

@ -0,0 +1,9 @@
{# Directory-backed storage pool definition rendered from libvirt_pool.   #}
{# NOTE(review): duplicated as libvirt_pool_template_default in           #}
{# defaults/main.yml — keep the two in sync.                              #}
<pool type="dir">
  <name>{{ libvirt_pool.name }}</name>
{% if 'capacity' in libvirt_pool %}
  <capacity>{{ libvirt_pool.capacity }}</capacity>
{% endif %}
  <target>
    <path>{{ libvirt_pool.path | default('placeholder_value') }}</path>
  </target>
</pool>

View File

@ -0,0 +1,22 @@
---
# Functional test: create the pool and assert its path and running status.
- name: Include test variables.
  include_vars:
    file: vars.yml
- name: install libvirt
  include_role:
    name: libvirt-install
- name: create pool
  include_role:
    name: libvirt-pool
  vars:
    ansible_become: true
- name: get pool information
  virt_pool:
    command: info
  become: true
  register: storage_pools
- name: check if pool is available and is at given directory
  assert:
    that:
      - "storage_pools.pools.test_pool.path == '/var/lib/libvirt/my-pool'"
      - "storage_pools.pools.test_pool.status == 'running'"

View File

@ -0,0 +1,3 @@
# Test fixture for the libvirt-pool role: a dir-backed pool created
# (and asserted on) by tests/main.yml.
libvirt_pool:
  path: /var/lib/libvirt/my-pool
  name: test_pool

View File

@ -0,0 +1,5 @@
# Default variables for the libvirt-volume role.

# URL schemes treated as remote: matching volume images are fetched with
# get_url; any other scheme is copied from the Ansible control host.
libvirt_remote_scheme_list:
  - http
  - https
# NOTE(review): defined but not referenced by the cache cleanup task —
# confirm whether it was meant to gate cache removal.
libvirt_image_cleanup_cache: false
# Directory where downloaded/copied source images are cached.
libvirt_image_cache_path: /tmp/airship

View File

@ -0,0 +1,4 @@
# Remove the image cache directory created by the create action.
# NOTE(review): runs unconditionally; the libvirt_image_cleanup_cache
# default suggests it may have been intended to gate this — confirm.
- name: Clean up cache directory
  file:
    path: "{{ libvirt_image_cache_path }}"
    state: absent

View File

@ -0,0 +1,58 @@
---
# Create a libvirt volume and optionally populate it from a source image.
# Expects libvirt_volume with name, pool, size and optional
# image/checksum/format keys.

- name: Get image URL scheme
  set_fact:
    image_scheme: "{{ libvirt_volume.image | urlsplit('scheme') }}"
  when: "libvirt_volume.image is defined"

# Renamed from the duplicate "Get Scheme" task name.
- name: Set image destination path
  set_fact:
    image_dest: "{{ libvirt_image_cache_path }}/{{ libvirt_volume.image | basename }}"
  when: "libvirt_volume.image is defined"

- name: Ensure cache directories exist
  file:
    path: "{{ libvirt_image_cache_path }}"
    state: directory

# The "image is defined" guard must be evaluated first: image_scheme is
# only set when an image was given, so testing it first would raise an
# undefined-variable error for image-less volumes.
- name: Ensure remote images are downloaded
  get_url:
    url: "{{ libvirt_volume.image }}"
    dest: "{{ image_dest }}"
    checksum: "{{ libvirt_volume.checksum | default(omit) }}"
  when: "libvirt_volume.image is defined and image_scheme in libvirt_remote_scheme_list"

- name: Ensure local images are copied
  copy:
    src: "{{ libvirt_volume.image }}"
    dest: "{{ image_dest }}"
  when: "libvirt_volume.image is defined and image_scheme not in libvirt_remote_scheme_list"

# The command module does not invoke a shell, so the folded scalar must
# not contain backslash line-continuations: '>-' already joins the lines
# with spaces, and a trailing '\' would be passed to virsh as a literal
# argument.
- name: "Create volume"
  command: >-
    virsh vol-create-as "{{ libvirt_volume.pool }}"
    --name "{{ libvirt_volume.name }}"
    --capacity "{{ libvirt_volume.size }}"
    --format "{{ libvirt_volume.format | default('qcow2') }}"
  register: libvirt_create_volume
  # virsh writes errors to stderr, not stdout; tolerate a pre-existing
  # volume so the role can be re-run in CI.
  failed_when:
    - "libvirt_create_volume.rc != 0"
    - "'exists already' not in libvirt_create_volume.stderr"
  changed_when:
    - "libvirt_create_volume.rc == 0"

- name: "Upload volume from downloaded image"
  command: >-
    virsh vol-upload --pool "{{ libvirt_volume.pool }}" --vol "{{ libvirt_volume.name }}" --file "{{ image_dest }}"
  when:
    - "libvirt_volume.image is defined"
    - "libvirt_create_volume.rc == 0"

- name: "Resize volume after uploading from image"
  command: >-
    virsh vol-resize --vol "{{ libvirt_volume.name }}" --pool "{{ libvirt_volume.pool }}" --capacity "{{ libvirt_volume.size }}"
  when:
    - "libvirt_create_volume.rc == 0"
    - "libvirt_volume.image is defined"

View File

@ -0,0 +1 @@
- include_tasks: "{{ volume_action }}.yml"

View File

@ -0,0 +1,33 @@
# Functional test for the libvirt-volume role: create the volumes
# declared in vars.yml and verify each appears in the target pool.
- name: Include test variables.
  include_vars:
    file: vars.yml
- name: install libvirt
  include_role:
    name: libvirt-install
- name: create pool
  include_role:
    name: libvirt-pool
  vars:
    ansible_become: true
- name: Create defined volumes
  include_role:
    name: libvirt-volume
  with_items: "{{ libvirt_volumes }}"
  vars:
    # Each loop item supplies the role's expected variables.
    libvirt_volume: "{{ vol }}"
    volume_action: "{{ vol.action }}"
    ansible_become: true
  loop_control:
    loop_var: vol
- name: save volume list
  command: virsh vol-list --pool {{ libvirt_pool.name }}
  register: libvirt_pool_list
  # Read-only query; never report "changed".
  changed_when: false
  become: true
- name: verify volumes exist
  assert:
    that:
      # Substring match against the virsh table output.
      - "vol.name in libvirt_pool_list.stdout"
  with_items: "{{ libvirt_volumes }}"
  loop_control:
    loop_var: vol

View File

@ -0,0 +1,14 @@
# Test fixture for the libvirt-volume role.
libvirt_pool:
  path: /var/lib/libvirt/airship
  name: airship
libvirt_volumes:
  # Volume populated from a remote cloud image.
  - name: volume-1
    image: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
    size: 10G
    pool: "{{ libvirt_pool.name }}"
    action: create
  # Empty volume (no source image).
  - name: volume-2
    size: 10G
    pool: "{{ libvirt_pool.name }}"
    action: create

View File

@ -0,0 +1,3 @@
# Default variables for the redfish-emulator role.

# Task file to include from tasks/ (currently only "install").
redfish_action: install
# Address and port the sushy-emulator service binds to; also used when
# rendering the systemd unit template.
redfish_emulator_bind_ip: 127.0.0.1
redfish_emulator_bind_port: 8000

View File

@ -0,0 +1,11 @@
---
# Handlers for the redfish-emulator role, notified after the systemd
# unit file is (re)installed. Listed order is execution order: reload
# systemd first so the restart picks up the new unit.
- name: reload systemd configuration
  become: true
  systemd:
    # Canonical booleans instead of the ambiguous truthy "yes".
    daemon_reload: true

- name: restart sushy-emulator
  become: true
  service:
    name: sushy-tools
    state: restarted
    enabled: true
enabled: true

View File

@ -0,0 +1,37 @@
---
# Install sushy-tools (Redfish emulator) from pip together with the
# libvirt bindings it needs, and run it as a systemd service.
- block:
    # Renamed from a duplicate of the apt task's name.
    - name: Fail on unsupported distributions
      # The ansible_distribution fact for RHEL is 'RedHat', not the full
      # product name, so the original comparison could never match.
      when: ansible_distribution == 'CentOS' or ansible_distribution == 'RedHat'
      fail:
        msg: "CentOS or RHEL is not currently supported"

    - name: Ensuring python3-pip and support packages are present
      become: true
      when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
      apt:
        name:
          - python3-pip
          - python3-libvirt
          - python-libvirt
        state: present

    - name: Install sushy-tools
      pip:
        name: sushy-tools
        executable: pip3
      become: true

    - name: install systemd sushy service unit
      become: true
      template:
        src: sushy-tools.service.j2
        dest: /etc/systemd/system/sushy-tools.service
      notify:
        - reload systemd configuration
        - restart sushy-emulator

    - name: start sushy-emulator service
      become: true
      service:
        name: sushy-tools
        state: started
        enabled: true

View File

@ -0,0 +1 @@
- include_tasks: "{{ redfish_action }}.yml"

View File

@ -0,0 +1,15 @@
# This file is part of sushy-emulator (redfish).
#
# Rendered by the redfish-emulator role: listens on the configured bind
# address/port and talks to the system libvirt daemon.
[Unit]
Description=Sushy Libvirt emulator
After=syslog.target
[Service]
Type=simple
ExecStart=/usr/local/bin/sushy-emulator -i {{ redfish_emulator_bind_ip }} -p {{ redfish_emulator_bind_port }} --libvirt-uri "qemu:///system"
StandardOutput=syslog
StandardError=syslog
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,40 @@
---
# End-to-end test: bring up libvirt, a pool, volumes and a domain,
# install the sushy (Redfish) emulator, and verify the domain is exposed
# over the Redfish Systems collection.
- name: Include test variables.
  include_vars:
    file: vars.yml
- name: install libvirt
  include_role:
    name: libvirt-install
- name: create pool
  include_role:
    name: libvirt-pool
  vars:
    ansible_become: true
- name: Create defined volumes
  include_role:
    name: libvirt-volume
  with_items: "{{ libvirt_volumes }}"
  vars:
    libvirt_volume: "{{ vol }}"
    volume_action: "{{ vol.action }}"
    ansible_become: true
  loop_control:
    loop_var: vol
- name: create libvirt domains
  include_role:
    name: libvirt-domain
- name: install sushy-tools
  include_role:
    name: redfish-emulator
# Typo fix: "runnig" -> "running".
- name: query redfish to make sure it has running domains
  uri:
    url: http://localhost:8000/redfish/v1/Systems?format=json
    method: GET
    return_content: true
  register: sushy_response
- name: debug redfish machines
  debug:
    var: sushy_response
- name: verify that virtual machine is present in sushy tools
  assert:
    that:
      # vars.yml defines exactly one domain (vm1).
      - sushy_response.json["Members@odata.count"] == 1

View File

@ -0,0 +1,44 @@
# Test fixture for the redfish-emulator end-to-end test: one pool, two
# volumes, one domain and the network it attaches to.
libvirt_pool:
  path: /var/lib/libvirt/airship
  name: airship
libvirt_volumes:
  # Volume populated from a remote cloud image.
  - name: volume-1
    image: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
    size: 10G
    pool: "{{ libvirt_pool.name }}"
    action: create
  # Empty volume (no source image).
  - name: volume-2
    size: 10G
    pool: "{{ libvirt_pool.name }}"
    action: create
# Single domain: the Redfish test asserts Members@odata.count == 1.
libvirt_domain:
  state: running
  name: 'vm1'
  memory_mb: 2048
  vcpus: 1
  volumes:
    - name: 'volume-1'
      device: 'disk'
      format: 'qcow2'
      pool: 'airship'
  interfaces:
    - network: 'provision-network'
# NAT network the domain's interface attaches to.
libvirt_network:
  name: provision-network
  spec:
    forward:
      mode: nat
      nat:
        port:
          - start: 1024
            end: 65535
    bridge:
      name: "prov-net-br"
      stp: 'on'
      delay: '0'
    ip:
      address: "172.22.0.1"
      netmask: "255.255.255.0"

View File

@ -14,20 +14,19 @@
state: present
when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
- name: find files to lint
find:
paths:
- "{{ src_dir| default('../..') }}/playbooks"
- "{{ src_dir| default('../..') }}/roles"
- "{{ zuul.project.src_dir }}/playbooks"
- "{{ zuul.project.src_dir }}/roles"
patterns:
- "*.yaml"
- "*.yml"
recurse: true
register: files_to_lint
# TODO (kkalynovskyi) develop suitable ansible-lint configuration
- name: run ansible-lint against found files
command: "ansible-lint {{ item.path }}"
with_items: "{{ files_to_lint.files }}"
changed_when: false

View File

@ -0,0 +1,16 @@
---
# Entry playbook for role functional tests: runs each role's
# tests/main.yml, optionally limited via test_subject_roles
# (set per Zuul job).
- hosts: primary
  tasks:
    - name: set default roles
      set_fact:
        test_subject_roles_default:
          - libvirt-network
          - libvirt-pool
          - libvirt-volume
          - libvirt-domain
          - redfish-emulator
    - name: run tests against defined roles
      include_tasks: "../../roles/{{ role_name }}/tests/main.yml"
      with_items: "{{ test_subject_roles | default(test_subject_roles_default) }}"
      loop_control:
        loop_var: role_name

View File

@ -12,5 +12,16 @@
- job:
name: ansible-lint-airship
run: tests/ansible/lint.yaml
nodeset: ubuntu-single-airship
run: tests/ansible/lint.yml
nodeset: ubuntu-single-airship
- job:
name: zuul-airship-roles-test-libvirt
run: tests/ansible/role-test-runner.yml
vars:
test_subject_roles:
- libvirt-network
- libvirt-pool
- libvirt-volume
- libvirt-domain
nodeset: ubuntu-single-airship

View File

@ -2,6 +2,8 @@
check:
jobs:
- ansible-lint-airship
- zuul-airship-roles-test-libvirt
gate:
jobs:
- ansible-lint-airship
- zuul-airship-roles-test-libvirt