Moving roles from zuul-airship-roles.

This change is needed because we cannot declare dependencies on PRs from
other repositories such as zuul-airship-roles; moving the roles here allows
more robust development at the stage we are currently in. When there is less
activity on gating, the roles will be moved back to a separate repo.

Change-Id: I85c9bdd47b5aaba90df5458b20c90ff5c912c05f
This commit is contained in:
Kostiantyn Kalynovskyi 2020-02-12 22:08:27 +00:00
parent 1399c569f4
commit d7df2fb571
55 changed files with 1923 additions and 1 deletions

View File

@ -0,0 +1,98 @@
- name: set libvirt log dir
  set_fact:
    libvirt_log_dir: "{{ logs_dir }}/libvirt"
- name: ensure directory for libvirt logs exists
  file:
    state: directory
    path: "{{ libvirt_log_dir }}"
# Best-effort collection: a failure of any single dump must not abort the rest
# of the log gathering, hence ignore_errors on the whole block.
- name: collect all libvirt logs and resources
  ignore_errors: true
  block:
    - name: copy hypervisor logs to tmp directory
      command: "cp -r /var/log/libvirt {{ libvirt_log_dir }}/libvirt-hypervisor"
      become: true
    - name: copy console logs to tmp directory
      command: "cp -r /var/log/libvirt-consoles/ {{ libvirt_log_dir }}/libvirt-consoles"
      become: true
    - name: ensure libvirt resource dump directories exist
      file:
        state: directory
        path: "{{ libvirt_log_dir }}/{{ xml_dir }}"
      with_items:
        - xmls/domains
        - xmls/pools
        - xmls/vols
        - xmls/networks
      loop_control:
        loop_var: xml_dir
    - name: dump libvirt domain xmls
      shell: |-
        for dom in $(virsh list --all --name); do
          virsh dumpxml "${dom}" | tee "{{ libvirt_log_dir }}/xmls/domains/${dom}.xml"
        done
      args:
        executable: /bin/bash
    - name: dump list of defined domains
      shell: |-
        virsh list --all | tee "{{ libvirt_log_dir }}/xmls/domains/list"
      args:
        executable: /bin/bash
    - name: dump list of defined pools
      shell: |-
        virsh pool-list --all | tee "{{ libvirt_log_dir }}/xmls/pools/list"
      args:
        executable: /bin/bash
    # BUG FIX: 'virsh dumpxml' only accepts domains; storage pools must be
    # dumped with 'virsh pool-dumpxml'.
    - name: dump libvirt pools xmls
      shell: |-
        for pool in $(virsh pool-list --all --name); do
          virsh pool-dumpxml "${pool}" | tee "{{ libvirt_log_dir }}/xmls/pools/${pool}.xml"
        done
      args:
        executable: /bin/bash
    # BUG FIX: the per-pool volume list is now written under xmls/vols (which
    # is created above); the previous xmls/vol path was never created.
    - name: dump list of defined volumes
      shell: |-
        for pool in $(virsh pool-list --all --name); do
          virsh vol-list --all --pool "${pool}" | tee "{{ libvirt_log_dir }}/xmls/vols/pool-${pool}-list"
          for vol in $(virsh vol-list --pool "${pool}" | awk 'NR>2 {print $1}'); do
            virsh vol-dumpxml ${vol} --pool ${pool} | tee "{{ libvirt_log_dir }}/xmls/vols/pool-${pool}-${vol}.xml"
          done
        done
      args:
        executable: /bin/bash
    - name: dump libvirt network xmls
      shell: |-
        for net in $(virsh net-list --all --name); do
          virsh net-dumpxml "${net}" | tee "{{ libvirt_log_dir }}/xmls/networks/${net}.xml"
        done
      args:
        executable: /bin/bash
    - name: dump list of defined networks
      shell: |-
        virsh -d 0 net-list --all | tee "{{ libvirt_log_dir }}/xmls/networks/list"
      args:
        executable: /bin/bash
# BUG FIX: the file module requires 'path'; without it this task always fails.
# Ownership is handed to the CI user so the unprivileged synchronize below works.
- name: Change ownership of the logs dir
  file:
    path: "{{ logs_dir }}"
    state: directory
    recurse: true
    owner: "{{ ansible_user }}"
  become: true
- name: Downloads logs to executor
  synchronize:
    src: "{{ libvirt_log_dir }}"
    dest: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}"
    mode: pull

View File

@ -0,0 +1,23 @@
# Collect sushy-tools (Redfish emulator) journal output and pull it back to
# the Zuul executor. Collection is best-effort: missing unit or logs must not
# fail the post-run phase.
- name: set redfish log dir
  set_fact:
    redfish_log_dir: "{{ logs_dir }}/redfish"
- name: ensure directory for redfish logs exists
  file:
    path: "{{ redfish_log_dir }}"
    state: directory
- name: dump sushy-tool logs to directory
  become: true
  ignore_errors: true
  shell: |-
    journalctl --unit sushy-tools.service > "{{ redfish_log_dir }}/sushy-tools.log"
  args:
    executable: /bin/bash
- name: Downloads logs to executor
  ignore_errors: true
  synchronize:
    src: "{{ redfish_log_dir }}"
    dest: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}"
    mode: pull

View File

@ -0,0 +1,82 @@
airship_gate_names:
provision_network: air_prov
provision_network_bridge: "prov_br"
nat_network: "air_nat"
nat_network_bridge: "nat_br"
ephemeral_vm: air-ephemeral
target_vm_prefix: "air-target"
target_separator: "-"
target_volume_prefix: "vol_target"
ephemeral_volume: "vol_ephemeral"
pool: airship
airship_gate_ipam:
nat_network:
bridge_ip: "10.23.25.1"
dhcp_start: "10.23.25.100"
dhcp_end: "10.23.25.199"
provision_network:
bridge_ip: "10.23.24.1"
airship_gate_redfish:
port: 8000
bind_address: "127.0.0.1"
airship_gate_flavors:
small:
target_vm_memory_mb: 1024
target_vm_vcpus: 1
ephemeral_vm_memory_mb: 1024
ephemeral_vm_vcpus: 1
ephemeral_disk_size: 20G
target_disk_size: 10G
disk_format: qcow2
target_vms_count: 3
airship_gate_libvirt_pools:
- path: /var/lib/libvirt/airship
name: "{{ airship_gate_names.pool }}"
airship_gate_libvirt_domain:
state: running
name: 'vm1'
memory_mb: 2048
vcpus: 1
volumes:
- name: 'volume-1'
device: 'disk'
format: 'qcow2'
pool: 'airship'
interfaces:
- network: "{{ airship_gate_names.nat_network }}"
- network: "{{ airship_gate_names.provision_network }}"
airship_gate_libvirt_networks:
- network_action: create
autostart: false
name: "{{ airship_gate_names.nat_network }}"
spec:
forward:
mode: nat
nat:
port:
- start: 1024
end: 65535
bridge:
name: "{{ airship_gate_names.nat_network_bridge }}"
stp: 'on'
delay: '0'
ip:
address: "{{ airship_gate_ipam.nat_network.bridge_ip }}"
netmask: "255.255.255.0"
- network_action: create
autostart: false
name: "{{ airship_gate_names.provision_network }}"
spec:
bridge:
name: "{{ airship_gate_names.provision_network_bridge }}"
stp: 'on'
delay: '0'
ip:
address: "{{ airship_gate_ipam.provision_network.bridge_ip }}"
netmask: "255.255.255.0"

View File

@ -0,0 +1,99 @@
- name: verify that gate flavor is defined
assert:
that:
- gate_flavor is defined
- name: set flavor variables.
set_fact:
chosen_flavor: "{{ airship_gate_flavors[gate_flavor] }}"
- name: install libvirt
include_role:
name: libvirt-install
- name: create pool
include_role:
name: libvirt-pool
vars:
libvirt_pool: "{{ item }}"
with_items: "{{ airship_gate_libvirt_pools }}"
- name: create networks
include_role:
name: libvirt-network
with_items: "{{ airship_gate_libvirt_networks }}"
vars:
libvirt_network: "{{ item }}"
network_action: create
- name: Create ephemeral volume
include_role:
name: libvirt-volume
vars:
libvirt_volume:
name: "{{ airship_gate_names.ephemeral_volume }}"
size: "{{ chosen_flavor.ephemeral_disk_size }}"
pool: "{{ airship_gate_names.pool }}"
volume_action: create
- name: Create target volumes
include_role:
name: libvirt-volume
vars:
libvirt_volume:
name: "{{ airship_gate_names.target_volume_prefix }}-{{ vm_index }}"
size: "{{ chosen_flavor.target_disk_size }}"
pool: "{{ airship_gate_names.pool }}"
format: "{{ chosen_flavor.disk_format }}"
volume_action: create
loop_control:
loop_var: vm_index
with_sequence: "start=1 end={{ chosen_flavor.target_vms_count }}"
- name: Create target domains
include_role:
name: libvirt-domain
vars:
libvirt_domain:
state: shutdown
name: "{{ airship_gate_names.target_vm_prefix }}-{{ vm_index }}"
memory_mb: "{{ chosen_flavor.target_vm_memory_mb }}"
vcpus: "{{ chosen_flavor.target_vm_vcpus }}"
volumes:
- name: "{{ airship_gate_names.target_volume_prefix }}-{{ vm_index }}"
device: "disk"
format: "{{ chosen_flavor.disk_format }}"
pool: "{{ airship_gate_names.pool }}"
interfaces:
- network: "{{ airship_gate_names.nat_network }}"
- network: "{{ airship_gate_names.provision_network }}"
loop_control:
loop_var: vm_index
with_sequence: "start=1 end={{ chosen_flavor.target_vms_count }}"
- name: Create ephemeral domain
include_role:
name: libvirt-domain
vars:
libvirt_domain:
enable_vnc: true
console_log_enabled: true
state: shutdown
name: "{{ airship_gate_names.ephemeral_vm }}"
memory_mb: "{{ chosen_flavor.ephemeral_vm_memory_mb }}"
vcpus: "{{ chosen_flavor.ephemeral_vm_vcpus }}"
volumes:
- name: "{{ airship_gate_names.ephemeral_volume }}"
device: "disk"
format: "{{ chosen_flavor.disk_format }}"
pool: "{{ airship_gate_names.pool }}"
interfaces:
- network: "{{ airship_gate_names.nat_network }}"
- network: "{{ airship_gate_names.provision_network }}"
- name: install and start redfish emulator
include_role:
name: redfish-emulator
vars:
redfish_action: "install"
redfish_emulator_bind_ip: "{{ airship_gate_redfish.bind_address }}"
redfish_emulator_bind_port: "{{ airship_gate_redfish.port }}"

View File

@ -0,0 +1 @@
- include_tasks: "{{ gate_action }}.yml"

View File

@ -0,0 +1,41 @@
- name: Include test variables.
  include_vars:
    file: vars.yml
# Build the full gate infrastructure (pools, networks, volumes, domains,
# Redfish emulator) using the smallest flavor.
- name: deploy-gate
  include_role:
    name: airship-libvirt-gate
  vars:
    gate_flavor: small
    gate_action: build-infra
# Typo fix: "runnig" -> "running" in the task name.
- name: query redfish to make sure it has running domains
  uri:
    url: "http://{{ airship_gate_redfish.bind_address }}:{{ airship_gate_redfish.port }}/redfish/v1/Systems?format=json"
    method: GET
    return_content: true
  register: redfish_response
- name: debug redfish machines
  debug:
    var: redfish_response
# Fetch the detail document of every system the emulator reported.
- name: save ids to list
  uri:
    url: "http://{{ airship_gate_redfish.bind_address }}:{{ airship_gate_redfish.port }}{{ item.value }}?format=json"
    method: GET
    return_content: true
  with_dict: "{{ redfish_response.json.Members }}"
  register: systems_details
# Renamed for accuracy: this extracts the ephemeral VM's Redfish system id,
# it does not deploy anything. NOTE: json_query requires the jmespath Python
# package on the controller.
- name: extract ephemeral domain id
  set_fact:
    ephemeral_domain_id: "{{ systems_details | json_query(query_string) | join('') }}"
  vars:
    query_string: "results[?json.Name=='{{ airship_gate_names.ephemeral_vm }}'].json.Id"
- name: verify that id is not empty
  assert:
    that:
      - ephemeral_domain_id is defined
      - (ephemeral_domain_id | length) > 1

View File

@ -0,0 +1,15 @@
airship_gate_redfish:
port: 8000
bind_address: "127.0.0.1"
airship_gate_names:
provision_network: air_prov
provision_network_bridge: "prov_br"
nat_network: "air_nat"
nat_network_bridge: "nat_br"
ephemeral_vm: air-ephemeral
target_vm_prefix: "air-target"
target_separator: "-"
target_volume_prefix: "vol_target"
ephemeral_volume: "vol_ephemeral"
pool: airship

View File

@ -0,0 +1,3 @@
http_fileserver_serve_dir: /srv/http-server
http_fileserver_serve_port: 8099
http_server_action: install

View File

@ -0,0 +1,11 @@
# Handlers for the simple HTTP file server: pick up new/changed unit files,
# then (re)start and enable the per-instance service.
- name: reload systemd configuration
  become: true
  systemd:
    daemon_reload: true
- name: http file server restarted
  become: true
  service:
    name: "simple-http-{{ http_fileserver_name | default('default') }}.service"
    state: restarted
    enabled: true

View File

@ -0,0 +1,33 @@
- name: install simplehttpserver package
apt:
name:
- python3
state: present
become: true
- name: set http server systemd unit content
set_fact:
http_fileserver_unit_content: |
[Unit]
Description=Simple http server
After=syslog.target
[Service]
Type=simple
ExecStart=/usr/bin/python3 -m http.server {{ http_fileserver_serve_port }}
StandardOutput=syslog
StandardError=syslog
WorkingDirectory={{ http_fileserver_serve_dir }}
[Install]
WantedBy=multi-user.target
- name: Create systemd unit
copy:
content: "{{ http_fileserver_unit_content }}"
dest: /etc/systemd/system/simple-http-{{ http_fileserver_name | default('default') }}.service
notify:
- reload systemd configuration
- http file server restarted
become: true

View File

@ -0,0 +1 @@
- include_tasks: "{{ http_server_action }}.yml"

View File

@ -0,0 +1,6 @@
- name: ensure http service is stopped
service:
name: simple-http-{{ http_fileserver_name | default('default') }}.service
state: stopped
enabled: false
become: true

View File

@ -0,0 +1,30 @@
- name: Include test variables.
include_vars:
file: vars.yml
- name: ensure serve directory exists
file:
path: "{{ http_fileserver_serve_dir }}"
state: directory
mode: "0755"
become: true
- name: install http-fileserver
include_role:
name: http-fileserver
- name: trigger all saved handlers
meta: flush_handlers
- name: copy test file to serve directory
become: true
copy:
content: "{{ http_file_server_test_file_content }}"
dest: "{{ http_fileserver_serve_dir }}/{{ http_fileserver_test_file_name }}"
register: file_copy_command
- name: download test file
get_url:
url: "http://localhost:{{ http_fileserver_serve_port }}/{{ http_fileserver_test_file_name }}"
dest: "/tmp/{{ http_fileserver_test_file_name }}"
checksum: "sha1:{{ file_copy_command.checksum }}"

View File

@ -0,0 +1,4 @@
http_fileserver_serve_dir: /srv/test-dir
http_fileserver_serve_port: 8095
http_file_server_test_file_content: "something to test"
http_fileserver_test_file_name: "test-file"

View File

@ -0,0 +1,173 @@
---
# The default directory in which to store VM console logs, if a VM-specific log
# file path is not given.
libvirt_vm_default_console_log_dir: "/var/log/libvirt-consoles"
# The default location for libvirt images
libvirt_volume_default_images_path: '/var/lib/libvirt/images'
# Default type for Libvirt volumes
libvirt_volume_default_type: volume
# The default format for Libvirt volumes.
libvirt_volume_default_format: qcow2
# The default device for Libvirt volumes.
libvirt_volume_default_device: disk
# CPU architecture.
libvirt_vm_arch: x86_64
# Virtualisation engine. If not set, the role will attempt to auto-detect the
# optimal engine to use.
libvirt_vm_engine:
# Path to emulator binary. If not set, the role will attempt to auto-detect the
# correct emulator to use.
libvirt_vm_emulator:
# Default value for clock syncing. The default (false) uses <clock sync="localtime">
# to configure the instances clock synchronisation. Change to a timezone to make
# configuration use <clock offset="specified offset">
libvirt_vm_clock_offset: False
# A list of specifications of VMs to be created.
# For backwards compatibility, libvirt_vms defaults to a singleton list using
# the values of the deprecated variables below.
# See README.md or tasks/main.yml for these attributes' defaults.
libvirt_domain:
# State of the VM. May be 'present' or 'absent'.
state: "{{ libvirt_vm_state }}"
# Name of the VM.
name: "{{ libvirt_vm_name }}"
# Memory in MB.
memory_mb: "{{ libvirt_vm_memory_mb }}"
# Number of vCPUs.
vcpus: "{{ libvirt_vm_vcpus }}"
# Virtual machine type.
machine: "{{ libvirt_vm_machine }}"
# Virtual machine CPU mode.
cpu_mode: "{{ libvirt_vm_cpu_mode | default(libvirt_cpu_mode_default, true) }}"
# List of volumes.
volumes: "{{ libvirt_vm_volumes }}"
# What time should the clock be synced to on boot (utc/localtime/timezone/variable)
clock_offset: "localtime"
# List of network interfaces.
interfaces: "{{ libvirt_vm_interfaces }}"
# Path to console log file.
console_log_path: "{{ libvirt_vm_console_log_path }}"
# XML template file to source domain definition
xml_file: vm.xml.j2
# Variables to add to the enviroment that is used to execute virsh commands
libvirt_vm_virsh_default_env: "{{ { 'LIBVIRT_DEFAULT_URI': libvirt_vm_uri } if libvirt_vm_uri else {} }}"
# Override for the libvirt connection uri. Leave unset to use the default.
libvirt_vm_uri: ""
# Default CPU mode if libvirt_vm_cpu_mode or vm.cpu_mode is undefined
libvirt_cpu_mode_default: "{{ 'host-passthrough' if libvirt_vm_engine == 'kvm' else 'host-model' }}"
libvirt_domain_template_default: |
<domain type='{{ libvirt_vm_engine }}'>
<name>{{ libvirt_domain.name }}</name>
<memory>{{ libvirt_domain.memory_mb | int * 1024 }}</memory>
<vcpu>{{ libvirt_domain.vcpus }}</vcpu>
{% if libvirt_domain.clock_offset |default( libvirt_vm_clock_offset ) %}
<clock offset="{{ libvirt_domain.clock_offset }}"/>
{% else %}
<clock sync="localtime"/>
{% endif %}
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>destroy</on_crash>
<os>
<type arch='{{ libvirt_vm_arch }}'{% if machine is not none %} machine='{{ machine }}'{% endif %}>hvm</type>
<bootmenu enable='no'/>
<boot dev='hd'/>
<boot dev='cdrom'/>
<boot dev='network'/>
<bios useserial='yes'/>
</os>
<features>
<acpi/>
<apic/>
<pae/>
</features>
{% if cpu_mode %}
<cpu mode='{{ cpu_mode }}'>
<model fallback='allow'/>
</cpu>
{% endif %}
<devices>
<emulator>{{ libvirt_vm_emulator }}</emulator>
{% for volume in volumes %}
<disk type='{{ volume.type | default(libvirt_volume_default_type) }}' device='{{ volume.device | default(libvirt_volume_default_device) }}'>
<driver name='qemu' type='{{ volume.format | default(libvirt_volume_default_format) }}'/>
{% if volume.type | default(libvirt_volume_default_type) == 'file' %}
<source file='{{ volume.file_path |default(libvirt_volume_default_images_path) }}/{{ volume.name}}'/>
{% else %}
<source pool='{{ volume.pool }}' volume='{{ volume.name }}'/>
{% endif %}
{% if volume.target is undefined %}
<target dev='vd{{ 'abcdefghijklmnopqrstuvwxyz'[loop.index - 1] }}'/>
{% else %}
<target dev='{{ volume.target }}' />
{% endif %}
</disk>
{% endfor %}
{% for interface in interfaces %}
{% if interface.type is defined and interface.type == 'direct' %}
<interface type='direct'>
<source dev='{{ interface.source.dev }}' mode='{{ interface.source.mode | default('vepa') }}'/>
{% elif interface.type is defined and interface.type == 'bridge' %}
<interface type='bridge'>
<source bridge='{{ interface.source.dev }}'/>
{% elif interface.type is not defined or interface.type == 'network' %}
<interface type='network'>
<source network='{{ interface.network }}'/>
{% endif %}
{% if interface.mac is defined %}
<mac address='{{ interface.mac }}'/>
{% endif %}
{# if the network configuration is invalid this can still appear in the xml #}
{# (say you enter 'bond' instead of 'bridge' in your variables) #}
<model type='virtio'/>
</interface>
{% endfor %}
{% if console_log_enabled | bool %}
<serial type='file'>
<source path='{{ console_log_path }}'/>
</serial>
<serial type='pty'/>
<console type='file'>
<source path='{{ console_log_path }}'/>
<target type='serial'/>
</console>
{% else %}
<serial type='pty'>
<target port='0'/>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
{% endif %}
{% if enable_vnc |bool %}
<graphics type='vnc' autoport='yes' listen='0.0.0.0'>
<listen type='address' address='0.0.0.0'/>
</graphics>
{% endif %}
<rng model="virtio"><backend model="random">/dev/urandom</backend></rng>
</devices>
</domain>

View File

@ -0,0 +1,65 @@
---
- name: Detect the virtualisation engine
block:
- name: Load the kvm kernel module
modprobe:
name: kvm
become: true
failed_when: false
- name: Check for the KVM device
stat:
path: /dev/kvm
register: stat_kvm
- name: Set a fact containing the virtualisation engine
set_fact:
libvirt_vm_engine: >-
{%- if ansible_architecture != libvirt_vm_arch -%}
{# Virtualisation instructions are generally available only for the host
architecture. Ideally we would test for virtualisation instructions, eg. vt-d
as it is possible that another architecture could support these even
if the emulated cpu architecture is not the same. #}
qemu
{%- elif stat_kvm.stat.exists -%}
kvm
{%- else -%}
qemu
{%- endif -%}
when: libvirt_vm_engine is none or libvirt_vm_engine | length == 0
- name: Detect the virtualisation emulator
block:
- block:
- name: Detect the KVM emulator binary path
stat:
path: "{{ item }}"
register: kvm_emulator_result
with_items:
- /usr/bin/kvm
- /usr/bin/qemu-kvm
- /usr/libexec/qemu-kvm
- name: Set a fact containing the KVM emulator binary path
set_fact:
libvirt_vm_emulator: "{{ item.item }}"
with_items: "{{ kvm_emulator_result.results }}"
when: item.stat.exists
when: libvirt_vm_engine == 'kvm'
- block:
- name: Detect the QEMU emulator binary path
shell: which qemu-system-{{ libvirt_vm_arch }}
register: qemu_emulator_result
changed_when: false
- name: Set a fact containing the QEMU emulator binary path
set_fact:
libvirt_vm_emulator: "{{ qemu_emulator_result.stdout }}"
when: libvirt_vm_engine == 'qemu'
- name: Fail if unable to detect the emulator
fail:
msg: Unable to detect emulator for engine {{ libvirt_vm_engine }}.
when: libvirt_vm_emulator is none
when: libvirt_vm_emulator is none or libvirt_vm_emulator | length == 0

View File

@ -0,0 +1,21 @@
---
- name: Check network interface has a network name
fail:
msg: >
The interface definition {{ interface }} has type 'network', but does not have
a network name defined.
when:
- interface.type is not defined or
interface.type == 'network'
- interface.network is not defined
- name: Check direct interface has an interface device name
fail:
msg: >
The interface definition {{ interface }} has type 'direct', but does not have
a host source device defined.
when:
- interface.type is defined
- interface.type == 'direct'
- interface.source is not defined or
interface.source.dev is not defined

View File

@ -0,0 +1,27 @@
---
# Console log dir is only needed when console logging is requested for the
# domain. Mode is quoted ("0770") so YAML cannot reinterpret the octal
# literal as a decimal integer (ansible-lint risky-octal).
- name: Ensure the VM console log directory exists
  file:
    path: "{{ console_log_path | dirname }}"
    state: directory
    recurse: true
    mode: "0770"
  become: true
  when: "libvirt_domain.console_log_enabled | default(false)"
- name: Validate VM interfaces
  include_tasks: check-interface.yml
  vars:
    interface: "{{ item }}"
  with_items: "{{ libvirt_domain.interfaces }}"
# A caller-supplied libvirt_domain.xml takes precedence over the default
# domain template.
- name: Ensure the VM is defined
  virt:
    name: "{{ libvirt_domain.name }}"
    command: define
    xml: "{{ libvirt_domain.xml | default(libvirt_domain_template_default) }}"
- name: Ensure the VM is started at boot
  virt:
    name: "{{ libvirt_domain.name }}"
    autostart: "{{ libvirt_domain.autostart | default(false) }}"
    state: "{{ libvirt_domain.state | default('running') }}"

View File

@ -0,0 +1,16 @@
- include_tasks: autodetect.yml
- include_tasks: domain.yml
vars:
console_log_enabled: "{{ libvirt_domain.console_log_enabled | default(false) }}"
console_log_path: >-
{{ libvirt_domain.console_log_path |
default(libvirt_vm_default_console_log_dir + '/' + libvirt_domain.name + '-console.log', true) }}
machine_default: "{{ none if libvirt_vm_engine == 'kvm' else 'pc-1.0' }}"
machine: "{{ libvirt_domain.machine | default(machine_default, true) }}"
cpu_mode: "{{ libvirt_domain.cpu_mode | default(libvirt_cpu_mode_default) }}"
volumes: "{{ libvirt_domain.volumes | default([], true) }}"
interfaces: "{{ libvirt_domain.interfaces | default([], true) }}"
start: "{{ libvirt_domain.start | default(true) }}"
autostart: "{{ libvirt_domain.autostart | default(true) }}"
enable_vnc: "{{ libvirt_domain.enable_vnc | default(false) }}"

View File

@ -0,0 +1,91 @@
<domain type='{{ libvirt_vm_engine }}'>
<name>{{ libvirt_domain.name }}</name>
<memory>{{ libvirt_domain.memory_mb | int * 1024 }}</memory>
<vcpu>{{ libvirt_domain.vcpus }}</vcpu>
{% if libvirt_domain.clock_offset |default( libvirt_vm_clock_offset ) %}
<clock offset="{{ libvirt_domain.clock_offset }}"/>
{% else %}
<clock sync="localtime"/>
{% endif %}
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>destroy</on_crash>
<os>
<type arch='{{ libvirt_vm_arch }}'{% if machine is not none %} machine='{{ machine }}'{% endif %}>hvm</type>
<bootmenu enable='no'/>
<boot dev='hd'/>
<boot dev='cdrom'/>
<boot dev='network'/>
<bios useserial='yes'/>
</os>
<features>
<acpi/>
<apic/>
<pae/>
</features>
{% if cpu_mode %}
<cpu mode='{{ cpu_mode }}'>
<model fallback='allow'/>
</cpu>
{% endif %}
<devices>
<emulator>{{ libvirt_vm_emulator }}</emulator>
{% for volume in volumes %}
<disk type='{{ volume.type | default(libvirt_volume_default_type) }}' device='{{ volume.device | default(libvirt_volume_default_device) }}'>
<driver name='qemu' type='{{ volume.format | default(libvirt_volume_default_format) }}'/>
{% if volume.type | default(libvirt_volume_default_type) == 'file' %}
<source file='{{ volume.file_path |default(libvirt_volume_default_images_path) }}/{{ volume.name}}'/>
{% else %}
<source pool='{{ volume.pool }}' volume='{{ volume.name }}'/>
{% endif %}
{% if volume.target is undefined %}
<target dev='vd{{ 'abcdefghijklmnopqrstuvwxyz'[loop.index - 1] }}'/>
{% else %}
<target dev='{{ volume.target }}' />
{% endif %}
</disk>
{% endfor %}
{% for interface in interfaces %}
{% if interface.type is defined and interface.type == 'direct' %}
<interface type='direct'>
<source dev='{{ interface.source.dev }}' mode='{{ interface.source.mode | default('vepa') }}'/>
{% elif interface.type is defined and interface.type == 'bridge' %}
<interface type='bridge'>
<source bridge='{{ interface.source.dev }}'/>
{% elif interface.type is not defined or interface.type == 'network' %}
<interface type='network'>
<source network='{{ interface.network }}'/>
{% endif %}
{% if interface.mac is defined %}
<mac address='{{ interface.mac }}'/>
{% endif %}
{# if the network configuration is invalid this can still appear in the xml #}
{# (say you enter 'bond' instead of 'bridge' in your variables) #}
<model type='virtio'/>
</interface>
{% endfor %}
{% if console_log_enabled | bool %}
<serial type='file'>
<source path='{{ console_log_path }}'/>
</serial>
<serial type='pty'/>
<console type='file'>
<source path='{{ console_log_path }}'/>
<target type='serial'/>
</console>
{% else %}
<serial type='pty'>
<target port='0'/>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
{% endif %}
{% if enable_vnc |bool %}
<graphics type='vnc' autoport='yes' listen='127.0.0.1'>
<listen type='address' address='127.0.0.1'/>
</graphics>
{% endif %}
<rng model="virtio"><backend model="random">/dev/urandom</backend></rng>
</devices>
</domain>

View File

@ -0,0 +1,40 @@
- name: Include test variables.
include_vars:
file: vars.yml
- name: install libvirt
include_role:
name: libvirt-install
- name: create networks
include_role:
name: libvirt-network
vars:
network_action: "{{ item.network_action }}"
libvirt_network: "{{ item }}"
with_items: "{{ libvirt_networks }}"
- name: create pool
include_role:
name: libvirt-pool
- name: Create defined volumes
include_role:
name: libvirt-volume
with_items: "{{ libvirt_volumes }}"
vars:
libvirt_volume: "{{ vol }}"
volume_action: "{{ vol.action }}"
loop_control:
loop_var: vol
- name: create libvirt domains
include_role:
name: libvirt-domain
- name: save information about domain
virt:
command: info
name: "{{ libvirt_domain.name }}"
register: domain_info
- name: debug domain-info
debug:
var: domain_info
- name: make sure that vm is in correct state
assert:
that:
- domain_info[libvirt_domain.name].state == libvirt_domain.state

View File

@ -0,0 +1,60 @@
libvirt_pool:
path: /var/lib/libvirt/airship
name: airship
libvirt_volumes:
- name: volume-1
image: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
size: 10G
pool: "{{ libvirt_pool.name }}"
action: create
- name: volume-2
size: 10G
pool: "{{ libvirt_pool.name }}"
action: create
libvirt_domain:
state: running
name: 'vm1'
memory_mb: 2048
vcpus: 1
volumes:
- name: 'volume-1'
device: 'disk'
format: 'qcow2'
pool: 'airship'
interfaces:
- network: 'provision-network'
libvirt_networks:
- network_action: create
autostart: false
name: oob-net
spec:
bridge:
name: oob-net
stp: 'on'
delay: '0'
ip:
address: "10.23.22.1"
netmask: "255.255.255.0"
dhcp:
- range:
start: 10.23.22.100
end: 10.23.22.199
- network_action: create
name: provision-network
spec:
forward:
mode: nat
nat:
port:
- start: 1024
end: 65535
bridge:
name: "prov-net-br"
stp: 'on'
delay: '0'
ip:
address: "172.22.0.1"
netmask: "255.255.255.0"

View File

@ -0,0 +1,47 @@
---
# Install libvirt/QEMU and supporting packages for the detected distro family,
# grant the CI user libvirt access, and ensure libvirtd is running.
- block:
    - name: Ensuring Libvirt, Qemu and support packages are present
      become: true
      # NOTE(review): ansible_distribution typically reports 'RedHat' for RHEL;
      # confirm the 'Red Hat Enterprise Linux' comparison ever matches.
      when: ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux'
      yum:
        name:
          - libguestfs-tools
          - libvirt
          - libvirt-devel
          - libvirt-daemon-kvm
          - qemu-kvm
          - virt-install
        state: present
    - name: Ensuring Libvirt, Qemu and support packages are present
      become: true
      when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
      # NOTE(review): 'libvirt-bin' and the python2 packages (python-lxml,
      # python-libvirt) were dropped in newer Debian/Ubuntu releases —
      # verify against the target distro versions used by CI.
      apt:
        name:
          - qemu
          - libvirt-bin
          - libguestfs-tools
          - qemu-kvm
          - virtinst
          - python-lxml
          - python3-lxml
          - python3-libvirt
          - python-libvirt
          - dnsmasq
          - ebtables
        state: present
    # Membership in the libvirt group lets the CI user talk to libvirtd
    # without sudo.
    - name: Add user "{{ ansible_user }}" to libvirt group
      become: true
      user:
        name: "{{ ansible_user }}"
        groups:
          - libvirt
        append: yes
    # Group changes only apply to new sessions, so force a reconnect.
    - name: Reset ssh connection to allow user changes to affect "{{ ansible_user }}"
      meta: reset_connection
- name: Start libvirtd
  service:
    name: libvirtd
    state: started
    enabled: true
  become: true

View File

@ -0,0 +1,153 @@
# libvirt_network:
# name: provision-network
# spec:
# forward:
# mode: nat
# nat:
# port:
# - start: 1024
# end: 65535
# bridge:
# name: "prov-net-br"
# stp: 'on'
# delay: '0'
# ip:
# address: "172.22.0.1"
# netmask: "255.255.255.0"
# libvirt_network:
# name: "{{ bm_net_name }}"
# persistent: true
# autostart: true
# spec:
# forward:
# mode: nat
# nat:
# port:
# - start: 1024
# end: 65535
# bridge:
# name: "{{ bm_net_name }}"
# stp: 'on'
# delay: '0'
# domain:
# name: 'tests.baremetal.net'
# localOnly: 'yes'
# dns:
# - forwarder:
# domain: 'apps.tests.baremetal.net'
# addr: '127.0.0.1'
# - forwarder:
# domain: 'services.tests.baremetal.net'
# addr: '127.0.0.1'
# ip: "{{ bm_net_0_ip_cfg }}"
libvirt_network_template_default: |
<network>
<name>{{ net_yaml.name }}</name>
{% if net_yaml.forward is defined %}
{% if net_yaml.forward.mode is defined %}
<forward mode='{{ net_yaml.forward.mode }}'>
{% else %}
<forward>
{% endif %}
{% if net_yaml.forward.nat is defined %}
<nat>
{% if net_yaml.forward.nat.port is defined %}
{% for port in net_yaml.forward.nat.port %}
<port start='{{ port.start | string }}' end='{{ port.end | string }}'/>
{% endfor %}
{% endif %}
</nat>
{% endif %}
</forward>
{% endif %}
{% if net_yaml.bridge is defined %}
<bridge
{% if net_yaml.bridge.name is defined %}
name='{{ net_yaml.bridge.name }}'
{% endif %}
{% if net_yaml.bridge.stp is defined %}
stp='{{ net_yaml.bridge.stp | string }}'
{% endif %}
{% if net_yaml.bridge.delay is defined %}
delay='{{ net_yaml.bridge.delay | string }}'
{% endif %}
/>
{% endif %}
{% if net_yaml.mac is defined %}
<mac
{% if net_yaml.mac.address is defined %}
address='{{ net_yaml.mac.address }}'
{% endif %}
/>
{% endif %}
{% if net_yaml.domain is defined %}
<domain
{% if net_yaml.domain.name is defined %}
name='{{ net_yaml.domain.name }}'
{% endif %}
{% if net_yaml.domain.localOnly is defined %}
localOnly='{{ net_yaml.domain.localOnly | string }}'
{% endif %}
/>
{% endif %}
{% if net_yaml.dns is defined %}
<dns>
{% if net_yaml.dns | list %}
{% for dns_item in net_yaml.dns %}
{% if dns_item.forwarder is defined %}
<forwarder
{% if dns_item.forwarder.domain is defined %}
domain='{{ dns_item.forwarder.domain }}'
{% endif %}
{% if dns_item.forwarder.addr is defined %}
addr='{{ dns_item.forwarder.addr }}'
{% endif %}
/>
{% endif %}
{% endfor %}
{% endif %}
</dns>
{% endif %}
{% if net_yaml.ip is defined %}
<ip
{% if net_yaml.ip.address is defined %}
address='{{ net_yaml.ip.address }}'
{% endif %}
{% if net_yaml.ip.netmask is defined %}
netmask='{{ net_yaml.ip.netmask }}'
{% endif %}
>
{% if net_yaml.ip.dhcp is defined %}
<dhcp>
{% for dhcp_item in net_yaml.ip.dhcp %}
{% if dhcp_item.range is defined %}
<range
{% if dhcp_item.range.start is defined %}
start='{{ dhcp_item.range.start }}'
{% endif %}
{% if dhcp_item.range.end is defined %}
end='{{ dhcp_item.range.end }}'
{% endif %}
/>
{% endif %}
{% if dhcp_item.host is defined %}
<host
{% if dhcp_item.host.mac is defined %}
mac='{{ dhcp_item.host.mac }}'
{% endif %}
{% if dhcp_item.host.name is defined %}
name='{{ dhcp_item.host.name }}'
{% endif %}
{% if dhcp_item.host.ip is defined %}
ip='{{ dhcp_item.host.ip }}'
{% endif %}
/>
{% endif %}
{% endfor %}
</dhcp>
{% endif %}
</ip>
{% endif %}
</network>

View File

@ -0,0 +1,34 @@
# Description:
# Add given hosts to existing libvirt network
#
# Inputs:
# network_action: "add_dhcp_hosts"
# network_args:
#   name: <name of network>
#   hosts:
#     - name:
#       mac:
#       ip:
#     - name:
#       mac:
#       ip:
- name: Validate input
  assert:
    that:
      - "network_args is defined"
      - "network_args.name is defined"
      - "network_args.hosts is defined"
      - "network_args.hosts | list"
# BUG FIX: the ip= attribute previously received single_dhcp_host.name
# instead of .ip, producing an invalid DHCP host entry. The trailing
# backslashes were also removed: inside a '>-' folded scalar the lines are
# already joined with spaces, so literal backslashes would corrupt the
# shell command.
- name: add dhcp hosts to network
  environment:
    LIBVIRT_DEFAULT_URI: qemu:///system
  shell: >-
    virsh net-update {{ network_args.name }}
    add --section ip-dhcp-host
    --xml "<host mac='{{ single_dhcp_host.mac }}' name='{{ single_dhcp_host.name }}' ip='{{ single_dhcp_host.ip }}'/>"
    --config --live
  loop: "{{ network_args.hosts }}"
  loop_control:
    loop_var: single_dhcp_host

View File

@ -0,0 +1,73 @@
# Description:
#   Creates a libvirt network. libvirt_network are
#   exactly converted to XML from YAML so there
#   is no validation whether the arguments are
#   correct or not. Caller must ensure that yaml
#   is formulated correctly.
#
# Inputs:
#   network_action: "create"
#   libvirt_network:
#     name: <name of network>
#     persistent: <boolean>
#     autostart: <boolean>
#     recreate: <boolean>
#     spec:
#       forward:
#         mode:
#         nat:
#           port:
#             - start:
#               end:
#       bridge:
#         name:
#         stp:
#         delay:
#       domain:
#         name:
#         localOnly:
#       dns:
#         forwarder:
#           domain:
#           addr:
#       mac:
#         address:
#       ip:
#         address:
#         netmask:
#         dhcp:
#           - range:
#               start:
#               end:

# Fail fast if the caller did not supply the mandatory keys.
- name: Validate input
  assert:
    that:
      - "libvirt_network is defined"
      - "libvirt_network.name is defined"
      - "libvirt_network.spec is defined"

# net_yaml is the structure consumed by the network XML template
# (libvirt_network_template_default); the network name is folded into
# the spec so the template reads a single mapping.
- name: Create yaml for template
  set_fact:
    net_yaml: >-
      {{
        libvirt_network.spec
        | combine({'name': libvirt_network.name}, recursive=True)
      }}

- name: "Define network"
  virt_net:
    command: define
    # If libvirt_network.xml is defined, spec will be ignored.
    xml: "{{ libvirt_network.xml | default(libvirt_network_template_default) }}"
    name: "{{ libvirt_network.name }}"

- name: "Start network"
  virt_net:
    state: active
    name: "{{ libvirt_network.name }}"

# NOTE(review): 'persistent' and 'recreate' are documented in the header
# but not consumed in this file — confirm whether they are used elsewhere.
- name: "Autostart network"
  virt_net:
    name: "{{ libvirt_network.name }}"
    autostart: "{{ libvirt_network.autostart |default(true) }}"

View File

@ -0,0 +1 @@
# Dispatch to the task file matching the requested action
# (e.g. create.yml, recreate.yml, add_dhcp_hosts.yml).
- include_tasks: "{{ network_action }}.yml"

View File

@ -0,0 +1,17 @@
# Remove the network (if present) and build it again from
# libvirt_network.spec by re-entering create.yml.
- name: "Remove network"
  virt_net:
    state: absent
    name: "{{ libvirt_network.name }}"

# NOTE(review): create.yml sets net_yaml itself, so this fact looks
# redundant here — confirm before removing.
- name: Create yaml for template
  set_fact:
    net_yaml: >-
      {{
        libvirt_network.spec
        | combine({'name': libvirt_network.name}, recursive=True)
      }}

# Re-enter this role's create flow with the action overridden.
- name: "create network"
  include_tasks: "{{ network_action }}.yml"
  vars:
    network_action: create

View File

@ -0,0 +1,110 @@
{# Renders a libvirt <network> XML document from the net_yaml fact
   built in create.yml. Every element/attribute is emitted only when
   the corresponding key is defined, so the output mirrors exactly what
   the caller supplied; no validation is performed here. #}
<network>
<name>{{ net_yaml.name }}</name>
{% if net_yaml.forward is defined %}
{% if net_yaml.forward.mode is defined %}
<forward mode='{{ net_yaml.forward.mode }}'>
{% else %}
<forward>
{% endif %}
{% if net_yaml.forward.nat is defined %}
<nat>
{% if net_yaml.forward.nat.port is defined %}
{% for port in net_yaml.forward.nat.port %}
<port start='{{ port.start | string }}' end='{{ port.end | string }}'/>
{% endfor %}
{% endif %}
</nat>
{% endif %}
</forward>
{% endif %}
{% if net_yaml.bridge is defined %}
<bridge
{% if net_yaml.bridge.name is defined %}
name='{{ net_yaml.bridge.name }}'
{% endif %}
{% if net_yaml.bridge.stp is defined %}
stp='{{ net_yaml.bridge.stp | string }}'
{% endif %}
{% if net_yaml.bridge.delay is defined %}
delay='{{ net_yaml.bridge.delay | string }}'
{% endif %}
/>
{% endif %}
{% if net_yaml.mac is defined %}
<mac
{% if net_yaml.mac.address is defined %}
address='{{ net_yaml.mac.address }}'
{% endif %}
/>
{% endif %}
{% if net_yaml.domain is defined %}
<domain
{% if net_yaml.domain.name is defined %}
name='{{ net_yaml.domain.name }}'
{% endif %}
{% if net_yaml.domain.localOnly is defined %}
localOnly='{{ net_yaml.domain.localOnly | string }}'
{% endif %}
/>
{% endif %}
{% if net_yaml.dns is defined %}
<dns>
{# NOTE(review): dns is iterated as a list here, while the role header
   documents it as a mapping — confirm the expected shape. #}
{% if net_yaml.dns | list %}
{% for dns_item in net_yaml.dns %}
{% if dns_item.forwarder is defined %}
<forwarder
{% if dns_item.forwarder.domain is defined %}
domain='{{ dns_item.forwarder.domain }}'
{% endif %}
{% if dns_item.forwarder.addr is defined %}
addr='{{ dns_item.forwarder.addr }}'
{% endif %}
/>
{% endif %}
{% endfor %}
{% endif %}
</dns>
{% endif %}
{% if net_yaml.ip is defined %}
<ip
{% if net_yaml.ip.address is defined %}
address='{{ net_yaml.ip.address }}'
{% endif %}
{% if net_yaml.ip.netmask is defined %}
netmask='{{ net_yaml.ip.netmask }}'
{% endif %}
>
{% if net_yaml.ip.dhcp is defined %}
<dhcp>
{% for dhcp_item in net_yaml.ip.dhcp %}
{% if dhcp_item.range is defined %}
<range
{% if dhcp_item.range.start is defined %}
start='{{ dhcp_item.range.start }}'
{% endif %}
{% if dhcp_item.range.end is defined %}
end='{{ dhcp_item.range.end }}'
{% endif %}
/>
{% endif %}
{% if dhcp_item.host is defined %}
<host
{% if dhcp_item.host.mac is defined %}
mac='{{ dhcp_item.host.mac }}'
{% endif %}
{% if dhcp_item.host.name is defined %}
name='{{ dhcp_item.host.name }}'
{% endif %}
{% if dhcp_item.host.ip is defined %}
ip='{{ dhcp_item.host.ip }}'
{% endif %}
/>
{% endif %}
{% endfor %}
</dhcp>
{% endif %}
</ip>
{% endif %}
</network>

View File

@ -0,0 +1,137 @@
# Test for the libvirt-network role: creates the networks defined in
# vars.yml, then verifies bridge state, addresses and DHCP behaviour.
- name: Include test variables.
  include_vars:
    file: vars.yml

- name: install libvirt
  include_role:
    name: libvirt-install

- name: create networks
  include_role:
    name: libvirt-network
  with_items: "{{ libvirt_networks }}"
  loop_control:
    loop_var: libvirt_network
  vars:
    network_action: "{{ libvirt_network.network_action }}"

- name: install required packages
  apt:
    name:
      - bridge-utils
    state: present
  become: true

- name: gather network info
  virt_net:
    command: info
  register: libvirt_networks_info

- name: debug network list
  debug:
    var: libvirt_networks_info

- name: check if network is present
  assert:
    that:
      - "'oob-net' in libvirt_networks_info.networks"
      - "'provision-network' in libvirt_networks_info.networks"

## this is needed because dashes '-' are not processed in the expected way by ansible
- name: Assign networks to separate variables
  set_fact:
    oob_net: "{{ libvirt_networks_info.networks['oob-net'] }}"
    provision_network: "{{ libvirt_networks_info.networks['provision-network'] }}"

- name: Verify oob network is in correct state
  assert:
    that:
      - "oob_net.autostart == 'no'"
      - "oob_net.bridge == 'oob-net'"
      - "oob_net.state == 'active'"

- name: register ip address of the oob-net interface
  command: ip -4 a show dev oob-net
  register: oob_net_device
  changed_when: false

- name: debug oob-net interface
  debug:
    var: oob_net_device.stdout

- name: verify oob-net bridge has correct address
  assert:
    that: "'10.23.22.1/24' in oob_net_device.stdout"

- name: Verify provision-network is in correct state
  assert:
    that:
      - "provision_network.autostart == 'yes'"
      - "provision_network.bridge == 'prov-net-br'"
      - "provision_network.state == 'active'"
      - "provision_network.forward_mode == 'nat'"

# BUGFIX: task name said "oob-net" but this task inspects prov-net-br.
- name: register ip address of the prov-net-br interface
  command: ip -4 a show dev prov-net-br
  register: prov_net_br_device
  changed_when: false

- name: debug prov-net-br interface
  debug:
    var: prov_net_br_device.stdout

- name: verify provision-network bridge has correct address
  assert:
    that: "'172.22.0.1/24' in prov_net_br_device.stdout"

# veth pair: air02 gets enslaved to the oob-net bridge, air01 requests
# a DHCP lease through it.
- name: Create virtual ethernet interface
  command: ip link add name air02 type veth peer name air01
  become: true
  register: create_veth_command
  changed_when:
    - "create_veth_command.rc != 2"
    - "'RTNETLINK answers: File exists' not in (create_veth_command.stderr | default(''))"
  failed_when:
    - "create_veth_command.rc != 0"
    - "'RTNETLINK answers: File exists' not in (create_veth_command.stderr | default(''))"

- name: set interface up
  become: true
  command: ip link set up dev air02
  # This makes task never report to be changed, it is a workaround
  # because if device is already up there is no command output or different RC
  changed_when: false

- name: set interface up
  become: true
  command: ip link set up dev air01
  # This makes task never report to be changed, it is a workaround
  # because if device is already up there is no command output or different RC
  changed_when: false

- name: set interface already in bridge variable
  set_fact:
    already_in_bridge: device air02 is already a member of a bridge; can't enslave it to bridge oob-net.

- name: Add interface to libvirt managed linux bridge with dhcp
  become: true
  command: brctl addif oob-net air02
  register: add_if_command
  changed_when:
    - add_if_command.rc != 1
    - already_in_bridge not in (add_if_command.stderr | default(''))
  failed_when:
    - add_if_command.rc != 0
    - already_in_bridge not in add_if_command.stderr | default('')

- name: send dhcp request over the interface
  become: true
  command: timeout 20s dhclient air01
  changed_when: false

- name: register ip address of the air01 interface
  command: ip -4 a show dev air01
  register: air01_device
  changed_when: false

## this simple test checks if ip address is present in interface description
## TODO filter out the address, derive subnet and compare to expected subnet
# BUGFIX: task name said "air02" but the assertion checks air01.
- name: verify air01 interface has address in correct network
  assert:
    that:
      - "'10.23.22.' in air01_device.stdout"

View File

@ -0,0 +1,32 @@
# Test fixture: the two libvirt networks exercised by the
# libvirt-network role test.
libvirt_networks:
  # Isolated bridge with a DHCP range; autostart is disabled on purpose
  # (the test asserts autostart == 'no').
  - network_action: create
    autostart: false
    name: oob-net
    spec:
      bridge:
        name: oob-net
        stp: 'on'
        delay: '0'
      ip:
        address: "10.23.22.1"
        netmask: "255.255.255.0"
        dhcp:
          - range:
              start: 10.23.22.100
              end: 10.23.22.199
  # NAT-forwarded network; autostart is left at the role default.
  - network_action: create
    name: provision-network
    spec:
      forward:
        mode: nat
        nat:
          port:
            - start: 1024
              end: 65535
      bridge:
        name: "prov-net-br"
        stp: 'on'
        delay: '0'
      ip:
        address: "172.22.0.1"
        netmask: "255.255.255.0"

View File

@ -0,0 +1,14 @@
# Default pool definition and action for the libvirt-pool role.
libvirt_pool:
  name: airship
  path: "/var/lib/airship"
pool_action: create
# Default pool XML used when the caller does not provide
# libvirt_pool.xml. 'placeholder_value' is only emitted when
# libvirt_pool.path is unset.
libvirt_pool_template_default: |
  <pool type="dir">
    <name>{{ libvirt_pool.name }}</name>
  {% if 'capacity' in libvirt_pool %}
    <capacity>{{ libvirt_pool.capacity }}</capacity>
  {% endif %}
    <target>
      <path>{{ libvirt_pool.path | default('placeholder_value') }}</path>
    </target>
  </pool>

View File

@ -0,0 +1,23 @@
---
# Define, build, activate and autostart a libvirt storage pool.
- name: Ensure libvirt storage pools are defined
  virt_pool:
    name: "{{ libvirt_pool.name }}"
    command: define
    # Caller-provided XML wins over the role's default template.
    xml: "{{ libvirt_pool.xml | default(libvirt_pool_template_default) }}"
  register: pool_info

- name: Ensure libvirt storage pools are built
  virt_pool:
    name: "{{ libvirt_pool.name }}"
    command: build
  # Only build the backing storage when the pool was just defined.
  when: pool_info.changed

- name: Ensure libvirt storage pools are active
  virt_pool:
    name: "{{ libvirt_pool.name }}"
    state: active

- name: Ensure libvirt storage pools are started on boot
  virt_pool:
    name: "{{ libvirt_pool.name }}"
    # 'true' instead of 'yes' — canonical YAML boolean (yamllint truthy).
    autostart: true

View File

@ -0,0 +1 @@
# Dispatch to the task file matching the requested pool action
# (e.g. create.yml).
- include_tasks: "{{ pool_action }}.yml"

View File

@ -0,0 +1,9 @@
{# Dir-type storage pool XML rendered from libvirt_pool;
   'placeholder_value' is only emitted when libvirt_pool.path is unset. #}
<pool type="dir">
  <name>{{ libvirt_pool.name }}</name>
{% if 'capacity' in libvirt_pool %}
  <capacity>{{ libvirt_pool.capacity }}</capacity>
{% endif %}
  <target>
    <path>{{ libvirt_pool.path | default('placeholder_value') }}</path>
  </target>
</pool>

View File

@ -0,0 +1,19 @@
# Test for the libvirt-pool role: create the pool from vars.yml and
# verify it is running at the expected path.
- name: Include test variables.
  include_vars:
    file: vars.yml

- name: install libvirt
  include_role:
    name: libvirt-install

- name: create pool
  include_role:
    name: libvirt-pool

- name: get pool information
  virt_pool:
    command: info
  register: storage_pools

- name: check if pool is available and is at given directory
  assert:
    that:
      - "storage_pools.pools.test_pool.path == '/var/lib/libvirt/my-pool'"
      - "storage_pools.pools.test_pool.status == 'running'"

View File

@ -0,0 +1,3 @@
# Test fixture consumed by the libvirt-pool role test.
libvirt_pool:
  path: /var/lib/libvirt/my-pool
  name: test_pool

View File

@ -0,0 +1,5 @@
# Image URL schemes fetched with get_url; any other scheme is treated
# as a local file and copied instead (see tasks/create.yml).
libvirt_remote_scheme_list:
  - http
  - https
# When true, the image cache directory is removed (see tasks/cleanup.yml).
libvirt_image_cleanup_cache: false
# Where downloaded/copied images are cached on the target host.
libvirt_image_cache_path: /tmp/airship

View File

@ -0,0 +1,4 @@
# Remove the image cache directory populated by create.yml.
- name: Clean up cache directory
  file:
    path: "{{ libvirt_image_cache_path }}"
    state: absent

View File

@ -0,0 +1,75 @@
# Create a libvirt volume and optionally populate it from an image.
# The image may be remote (scheme in libvirt_remote_scheme_list) or a
# local file; either way it is cached under libvirt_image_cache_path
# before being uploaded into the volume.
- name: Get Scheme
  set_fact:
    image_scheme: "{{ libvirt_volume.image | urlsplit('scheme') }}"
  when: "libvirt_volume.image is defined"

# BUGFIX: this task was copy-paste named "Get Scheme" although it sets
# the cache destination path.
- name: Set image destination path
  set_fact:
    image_dest: "{{ libvirt_image_cache_path }}/{{ libvirt_volume.image | basename }}"
  when: "libvirt_volume.image is defined"

- name: Ensure cache directories exist
  file:
    path: "{{ libvirt_image_cache_path }}"
    state: directory

- name: Ensure remote images are downloaded
  get_url:
    url: "{{ libvirt_volume.image }}"
    dest: "{{ image_dest }}"
    checksum: "{{ libvirt_volume.checksum | default(omit) }}"
  when:
    - libvirt_volume.image is defined
    - image_scheme is defined
    - image_scheme in libvirt_remote_scheme_list

- name: Ensure local images are copied
  copy:
    src: "{{ libvirt_volume.image }}"
    dest: "{{ image_dest }}"
  when:
    - libvirt_volume.image is defined
    - image_scheme not in libvirt_remote_scheme_list

# NOTE: '>-' folds newlines into spaces, so backslash continuations are
# not needed (they would be passed as literal arguments to virsh).
- name: "Create volume"
  environment:
    LIBVIRT_DEFAULT_URI: qemu:///system
  command: >-
    virsh vol-create-as
    --pool "{{ libvirt_volume.pool }}"
    --name "{{ libvirt_volume.name }}"
    --capacity "{{ libvirt_volume.size }}"
    --format "{{ libvirt_volume.format | default('qcow2') }}"
  register: libvirt_create_volume
  # Idempotency: an already-existing volume is neither a failure nor a
  # change.
  failed_when:
    - "libvirt_create_volume.rc != 0"
    - "'exists already' not in libvirt_create_volume.stderr"
    - "'exists already' not in libvirt_create_volume.stdout"
  changed_when:
    - "libvirt_create_volume.rc != 1"
    - "'exists already' not in libvirt_create_volume.stderr"
    - "'exists already' not in libvirt_create_volume.stdout"

# Only a freshly created volume (rc == 0) is populated and resized.
- name: "Upload volume from downloaded image"
  environment:
    LIBVIRT_DEFAULT_URI: qemu:///system
  command: >-
    virsh vol-upload
    --pool "{{ libvirt_volume.pool }}"
    --vol "{{ libvirt_volume.name }}"
    --file "{{ image_dest }}"
  when:
    - "libvirt_volume.image is defined"
    - "libvirt_create_volume.rc == 0"

- name: "Resize volume after uploading from image"
  environment:
    LIBVIRT_DEFAULT_URI: qemu:///system
  command: >-
    virsh vol-resize
    --vol "{{ libvirt_volume.name }}"
    --pool "{{ libvirt_volume.pool }}"
    --capacity "{{ libvirt_volume.size }}"
  when:
    - "libvirt_create_volume.rc == 0"
    - "libvirt_volume.image is defined"

View File

@ -0,0 +1 @@
# Dispatch to the task file matching the requested volume action
# (e.g. create.yml, cleanup.yml).
- include_tasks: "{{ volume_action }}.yml"

View File

@ -0,0 +1 @@
primary

View File

@ -0,0 +1,32 @@
# Test for the libvirt-volume role: create a pool and the volumes from
# vars.yml, then assert each volume appears in the pool's volume list.
- name: Include test variables.
  include_vars:
    file: vars.yml

- name: install libvirt
  include_role:
    name: libvirt-install

- name: create pool
  include_role:
    name: libvirt-pool

- name: Create defined volumes
  include_role:
    name: libvirt-volume
  with_items: "{{ libvirt_volumes }}"
  vars:
    libvirt_volume: "{{ vol }}"
    volume_action: "{{ vol.action }}"
  loop_control:
    loop_var: vol

- name: save volume list
  environment:
    LIBVIRT_DEFAULT_URI: qemu:///system
  command: virsh vol-list --pool {{ libvirt_pool.name }}
  register: libvirt_pool_list
  changed_when: false

- name: verify volumes exist
  assert:
    that:
      - "vol.name in libvirt_pool_list.stdout"
  with_items: "{{ libvirt_volumes }}"
  loop_control:
    loop_var: vol

View File

@ -0,0 +1,14 @@
# Test fixture: a pool plus two volumes (one image-backed, one blank).
libvirt_pool:
  path: /var/lib/libvirt/airship
  name: airship
libvirt_volumes:
  - name: volume-1
    image: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
    size: 10G
    pool: "{{ libvirt_pool.name }}"
    action: create
  - name: volume-2
    size: 10G
    pool: "{{ libvirt_pool.name }}"
    action: create

View File

@ -0,0 +1,3 @@
# Default action for the redfish-emulator role (see tasks/main.yml).
redfish_action: install
# Address and port sushy-emulator listens on; templated into the
# systemd unit file.
redfish_emulator_bind_ip: 127.0.0.1
redfish_emulator_bind_port: 8000

View File

@ -0,0 +1,11 @@
# Handlers notified by install.yml after the unit file is (re)written.
- name: reload systemd configuration
  become: yes
  systemd:
    daemon_reload: yes

- name: restart sushy-emulator
  become: yes
  service:
    name: sushy-tools
    state: restarted
    enabled: true

View File

@ -0,0 +1,37 @@
# Install sushy-tools (a Redfish emulator backed by libvirt) and run it
# as a systemd service. Only Debian/Ubuntu hosts are supported.
- block:
    # BUGFIX: this guard task reused the apt task's name and the fail
    # message misspelled "CentOS".
    - name: Fail on unsupported distributions
      when: ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux'
      fail:
        msg: "CentOS or RHEL is not currently supported"

    - name: Ensuring python3-pip and support packages are present
      become: true
      when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
      apt:
        name:
          - python3-pip
          - python3-libvirt
          # NOTE(review): python-libvirt is the python2 binding — confirm
          # it is still needed/available on the target releases.
          - python-libvirt
        state: present

    - name: Install sushy-tools
      pip:
        name: sushy-tools
        executable: pip3
      become: true

    - name: install systemd sushy service unit
      become: true
      template:
        src: sushy-tools.service.j2
        dest: /etc/systemd/system/sushy-tools.service
      notify:
        - reload systemd configuration
        - restart sushy-emulator

    - name: start sushy-emulator service
      become: true
      service:
        name: sushy-tools
        state: started
        enabled: true

View File

@ -0,0 +1 @@
# Dispatch to the task file matching the requested action (install.yml).
- include_tasks: "{{ redfish_action }}.yml"

View File

@ -0,0 +1,15 @@
# This file is part of sushy-emulator (redfish).
#
[Unit]
Description=Sushy Libvirt emulator
After=syslog.target

[Service]
Type=simple
# Bind address/port come from the role defaults; the emulator talks to
# the system libvirt daemon.
ExecStart=/usr/local/bin/sushy-emulator -i {{ redfish_emulator_bind_ip }} -p {{ redfish_emulator_bind_port }} --libvirt-uri "qemu:///system"
StandardOutput=syslog
StandardError=syslog
# NOTE(review): no Restart= policy — the emulator stays down after a
# crash; confirm whether Restart=on-failure is wanted.

[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1 @@
primary

View File

@ -0,0 +1,37 @@
# Test for the redfish-emulator role: bring up libvirt with one domain,
# install sushy-tools, then verify the domain is visible over Redfish.
- name: Include test variables.
  include_vars:
    file: vars.yml

- name: install libvirt
  include_role:
    name: libvirt-install

- name: create pool
  include_role:
    name: libvirt-pool

- name: Create defined volumes
  include_role:
    name: libvirt-volume
  with_items: "{{ libvirt_volumes }}"
  vars:
    libvirt_volume: "{{ vol }}"
    volume_action: "{{ vol.action }}"
  loop_control:
    loop_var: vol

- name: create libvirt domains
  include_role:
    name: libvirt-domain

- name: install sushy-tools
  include_role:
    name: redfish-emulator

# BUGFIX: task name typo "runnig" -> "running".
- name: query redfish to make sure it has running domains
  uri:
    url: http://localhost:8000/redfish/v1/Systems?format=json
    method: GET
    return_content: yes
  register: sushy_response

- name: debug redfish machines
  debug:
    var: sushy_response

- name: verify that virtual machine is present in sushy tools
  assert:
    that:
      - sushy_response.json["Members@odata.count"] == 1

View File

@ -0,0 +1,44 @@
# Test fixture: pool, volumes, a single domain and the NAT network used
# by the redfish-emulator role test.
libvirt_pool:
  path: /var/lib/libvirt/airship
  name: airship
libvirt_volumes:
  - name: volume-1
    image: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
    size: 10G
    pool: "{{ libvirt_pool.name }}"
    action: create
  - name: volume-2
    size: 10G
    pool: "{{ libvirt_pool.name }}"
    action: create
# The single VM the test expects to see via Redfish.
libvirt_domain:
  state: running
  name: 'vm1'
  memory_mb: 2048
  vcpus: 1
  volumes:
    - name: 'volume-1'
      device: 'disk'
      format: 'qcow2'
      pool: 'airship'
  interfaces:
    - network: 'provision-network'
libvirt_network:
  name: provision-network
  spec:
    forward:
      mode: nat
      nat:
        port:
          - start: 1024
            end: 65535
    bridge:
      name: "prov-net-br"
      stp: 'on'
      delay: '0'
    ip:
      address: "172.22.0.1"
      netmask: "255.255.255.0"

View File

@ -0,0 +1 @@
# Directory on the remote host the manifests are synchronized into.
remote_work_dir: "/tmp/airship-manifests"

View File

@ -0,0 +1,28 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# The role is used to copy manifest directory to remote host.
# During tests in zuul, zuul copies the git repository to the target node;
# however, when running outside the zuul/opendev ci env, we want to copy
# manifests from the commit to the remote machine as well. `local_src_dir`
# should be set from the command line with the ansible-playbook -e option,
# eg: ansible-playbook -e local_src_dir="/home/ubuntu/airshipctl"
- name: sync repository
  synchronize:
    delete: true
    dest: "{{ remote_work_dir }}"
    recursive: true
    # trailing slash "/" is needed to copy contents of the directory not directory itself.
    src: "{{ local_src_dir }}/"
  # NOTE(review): no_log suppresses rsync error detail as well as output —
  # confirm this is intentional.
  no_log: true

View File

@ -39,7 +39,6 @@
attempts: 1
timeout: 3600
roles:
- zuul: airship/zuul-airship-roles
- zuul: openstack/openstack-helm-infra
pre-run:
- playbooks/airship-airshipctl-deploy-docker.yaml