tripleo-quickstart/roles/libvirt/setup/undercloud/tasks/main.yml

# We're going to try putting files in `local_working_dir`, so make
# sure it exists first.
- name: Ensure local working dir exists
  delegate_to: localhost
  file:
    path: "{{ local_working_dir }}"
    state: directory
# Generate MAC addresses for the undercloud node.
- name: get MACs for the undercloud
  generate_macs:
    nodes:
      - "{{ undercloud_node }}"
    networks: "{{ networks }}"
  register: undercloud_mac_map
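# `undercloud_mac_map` is expected to hold one generated MAC per
# (node, network) pair, roughly of the following shape (illustrative
# names and values only; the exact structure comes from the custom
# `generate_macs` module shipped with this repo):
#
#     {"undercloud": {"external": "52:54:00:xx:xx:xx", ...}}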
# Check if the undercloud volume exists. If not, we call out to
# [fetch_image.yml](fetch_image.yml.html) to download the image.
- name: Check if undercloud volume exists
  command: >
    virsh vol-info --pool '{{ libvirt_volume_pool }}'
    '{{ undercloud_node.name }}.qcow2'
  ignore_errors: true
  changed_when: false
  register: undercloud_vol_check
  environment:
    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
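# `virsh vol-info` exits non-zero when the volume does not exist, so a
# failed result here simply means we still need to build and upload the
# undercloud image; the tasks below are gated on that.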
- when: undercloud_vol_check|failed
  block:
    # Conditionally include a role for all the images specified in
    # options; it downloads, caches, and extracts (if tar-archived)
    # each image, but only if it isn't already in the volume pool.
    - name: Fetch the images
      include_role:
        name: fetch-images
    # Conditionally include a playbook that updates all the images
    # specified in options to the latest delorean version.
    - include: inject_repos.yml
      when: devmode|bool
    # Inject the gating repo generated by ansible-role-tripleo-gate.
    - include: inject_gating_repo.yml
      when: compressed_gating_repo is defined
    # Converts an overcloud-full.qcow2 into an undercloud.qcow2.
    - include: convert_image.yml
      when: overcloud_as_undercloud|bool
    # Update images after we have converted the overcloud-full to an
    # undercloud image when using devmode. This also clones tripleo-ci
    # on the undercloud image.
    - include: update_image.yml
      when: devmode|bool
    # Inject updated overcloud and ipa images into our converted
    # undercloud image.
    - name: Inject additional images
      command: >
        virt-customize -a {{ working_dir }}/undercloud.qcow2
        --upload {{ working_dir }}/{{ item }}:/home/stack/{{ item }}
        --run-command 'chown stack:stack /home/stack/{{ item }}'
      changed_when: true
      environment:
        LIBGUESTFS_BACKEND: direct
        LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
      with_items: "{{ inject_images | default([]) }}"
      when: overcloud_as_undercloud|bool
    # This copies the `instackenv.json` configuration file that we
    # generated in the overcloud setup role to the undercloud host.
    - name: Copy instackenv.json to appliance
      command: >
        virt-customize -a {{ working_dir }}/undercloud.qcow2
        --upload {{ working_dir }}/instackenv.json:/home/stack/instackenv.json
        --run-command 'chown stack:stack /home/stack/instackenv.json'
      environment:
        LIBGUESTFS_BACKEND: direct
        LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
    # Copy the undercloud public key to the virthost, because we're going
    # to inject it into the undercloud image in the next task.
    - name: Copy undercloud ssh public key to working dir
      copy:
        src: "{{ undercloud_key }}.pub"
        dest: "{{ working_dir }}/id_rsa_undercloud.pub"
    # Copy the virt host private key to `$HOME/.ssh/id_rsa_virt_power` so
    # that VirtualBMC is able to access the hypervisor where the VMs are
    # located.
    - name: Copy virt host ssh private key to working dir
      when: release not in ['liberty', 'mitaka', 'newton']
      copy:
        src: "{{ virt_power_key }}"
        dest: "{{ working_dir }}/id_rsa_virt_power"
    # Copy the public key to `$HOME/.ssh/authorized_keys` for the `root`
    # and `stack` user on the undercloud.
    - name: Inject undercloud ssh public key to appliance
      command: >
        virt-customize -a {{ working_dir }}/undercloud.qcow2
        --mkdir {{ item.homedir }}/.ssh/
        --upload '{{ working_dir }}/id_rsa_undercloud.pub:{{ item.homedir }}/.ssh/authorized_keys'
        --run-command 'chown -R {{ item.owner }}:{{ item.group }} {{ item.homedir }}/.ssh'
        --run-command 'chmod 0700 {{ item.homedir }}/.ssh'
        --run-command 'chmod 0600 {{ item.homedir }}/.ssh/authorized_keys'
      environment:
        LIBGUESTFS_BACKEND: direct
      with_items:
        - homedir: /root
          owner: root
          group: root
        - homedir: /home/stack
          owner: stack
          group: stack
    # This copies the `id_rsa_virt_power` private key that we generated
    # in the overcloud setup role to the undercloud host to be used by
    # VirtualBMC+libvirt to access the virthost.
    - name: Copy id_rsa_virt_power to appliance
      when: release not in ['liberty', 'mitaka', 'newton']
      command: >
        virt-customize -a {{ working_dir }}/undercloud.qcow2
        --upload '{{ working_dir }}/id_rsa_virt_power:/root/.ssh/id_rsa_virt_power'
        --run-command 'chown root:root /root/.ssh/id_rsa_virt_power'
        --run-command 'chmod 0600 /root/.ssh/id_rsa_virt_power'
      environment:
        LIBGUESTFS_BACKEND: direct
    - name: Create undercloud customize script
      template:
        src: "{{ undercloud_customize_script }}"
        dest: "{{ working_dir }}/undercloud-customize.sh"
        mode: 0755
      when: undercloud_customize_script is defined
    # This allows running a customization script on the undercloud
    # image, to cover any extra needs.
    - name: Perform extra undercloud customizations
      command: >
        virt-customize -a {{ working_dir }}/undercloud.qcow2
        --run '{{ working_dir }}/undercloud-customize.sh'
      environment:
        LIBGUESTFS_BACKEND: direct
      when: undercloud_customize_script is defined
    # This allows running a customization script on the overcloud
    # image, to cover any extra needs.
    - name: Perform extra overcloud customizations
      include: customize_overcloud.yml
      when: overcloud_customize_script is defined
    # Perform an SELinux relabel on the undercloud image to avoid problems
    # caused by bad labelling, since by default the undercloud runs in
    # enforcing mode.
    - name: Perform selinux relabel on undercloud image
      command: >
        virt-customize -a {{ working_dir }}/undercloud.qcow2
        --selinux-relabel
      environment:
        LIBGUESTFS_BACKEND: direct
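    # (Files written by virt-customize get no SELinux context, so without
    # this step the image would boot with mislabelled files;
    # `--selinux-relabel` relabels in place, or schedules an autorelabel
    # on first boot when in-place relabelling isn't possible.)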
# NOTE(trown) Nested blocks do not seem to work as expected so instead using
# conditionals with AND to simulate the same thing.
# Resize the undercloud image if it was not converted from an overcloud
# image.
- when:
    - undercloud_vol_check|failed
    - not overcloud_as_undercloud|bool
  block:
    - name: >
        Determine if the undercloud image is a whole disk image
        so we can resize it appropriately
      command: >
        virt-filesystems -a {{ working_dir }}/undercloud.qcow2
      environment:
        LIBGUESTFS_BACKEND: direct
      register: undercloud_partitions
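# `virt-filesystems` lists one device per filesystem it finds: a whole
# disk image carries a partition table, so its root filesystem shows up
# as `/dev/sda1`, while a partition image (a bare filesystem with no
# partition table) shows up as `/dev/sda`. The two blocks below branch
# on exactly that output.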
- when:
    - undercloud_vol_check|failed
    - not overcloud_as_undercloud|bool
    - undercloud_partitions.stdout == '/dev/sda1'
  block:
    # Handle the resize for the whole disk image case.
    - name: Resize undercloud image (create target image)
      command: >
        qemu-img create -f qcow2 -o preallocation=off
        '{{ working_dir }}/undercloud-resized.qcow2'
        '{{ flavors[undercloud_node.flavor].disk }}G'
    - name: Resize undercloud image (call virt-resize)
      command: >
        virt-resize --expand /dev/sda1
        '{{ working_dir }}/undercloud.qcow2'
        '{{ working_dir }}/undercloud-resized.qcow2'
      environment:
        LIBGUESTFS_BACKEND: direct
    - name: Rename resized image to original name
      command: >
        mv -f '{{ working_dir }}/undercloud-resized.qcow2'
        '{{ working_dir }}/undercloud.qcow2'
- when:
    - undercloud_vol_check|failed
    - not overcloud_as_undercloud|bool
    - undercloud_partitions.stdout == '/dev/sda'
  block:
    # Handle the resize for the partition image case.
    - name: Resize undercloud image (expand the image)
      command: >
        qemu-img resize
        '{{ working_dir }}/undercloud.qcow2'
        '{{ flavors[undercloud_node.flavor].disk }}G'
    - name: Resize undercloud image (expand the FS)
      command: >
        virt-customize -a '{{ working_dir }}/undercloud.qcow2'
        --run-command 'FS_TYPE=`findmnt -o FSTYPE -fn /`;
        if [ "$FS_TYPE" = "xfs" ]; then xfs_growfs /;
        elif [ "$FS_TYPE" = "ext4" ]; then resize2fs /dev/sda;
        else echo "ERROR: Unknown filesystem $FS_TYPE, cannot resize.";
        exit 1; fi'
      environment:
        LIBGUESTFS_BACKEND: direct
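    # (xfs_growfs operates on the mounted filesystem, hence the mount
    # point `/`, while resize2fs operates on the block device, hence
    # `/dev/sda`; both grow the filesystem to fill the space added by
    # `qemu-img resize`.)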
    # NOTE(trown) We use the overcloud-full initramfs and kernel as DIB
    # seems a bit smarter about extracting them than virt-get-kernel and
    # the partition image is simply a converted overcloud-full.
    - name: Extract the kernel and initramfs from the undercloud image
      command: >
        virt-copy-out -a '{{ working_dir }}/undercloud.qcow2'
        /home/stack/overcloud-full.vmlinuz
        /home/stack/overcloud-full.initrd
        '{{ working_dir }}'
      environment:
        LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
    # NOTE(trown): This is a bit of a hack to get the undercloud vm
    # template to use the external kernel and initrd. We should
    # instead use a different var for this and set it in the devmode
    # case as well.
    - name: Set overcloud_as_undercloud to true
      set_fact: overcloud_as_undercloud=true
- when: undercloud_vol_check|failed
  block:
    # Create a libvirt volume and upload the undercloud image to
    # libvirt.
    - name: Create undercloud volume
      command: >
        virsh vol-create-as {{ libvirt_volume_pool }}
        {{ undercloud_node.name }}.qcow2
        {{ flavors[undercloud_node.flavor].disk }}G --format qcow2
      environment:
        LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
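    # In the next task, `-k 0` disables the libvirt client keepalive so
    # the connection is not dropped during the long-running upload; the
    # async/poll pair gives the task up to 600 seconds to finish,
    # checking on it every 10 seconds.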
    - name: Upload undercloud volume to storage pool
      command: >
        virsh -k 0 vol-upload --pool '{{ libvirt_volume_pool }}'
        '{{ undercloud_node.name }}.qcow2'
        '{{ working_dir }}/undercloud.qcow2'
      environment:
        LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
      async: 600
      poll: 10
# Define (but do not start) the undercloud virtual machine.
- name: Define undercloud vm
  virt:
    name: "{{ undercloud_node.name }}"
    command: define
    xml: "{{ lookup('template', 'undercloudvm.xml.j2') }}"
    uri: "{{ libvirt_uri }}"
# Start the undercloud virtual machine.
- name: Start undercloud vm
  virt:
    name: "{{ undercloud_node.name }}"
    command: start
    state: running
    uri: "{{ libvirt_uri }}"
# Configure the undercloud virtual machine to be
# automatically started at boot.
- name: Configure undercloud vm to start at virthost boot
  virt:
    name: "{{ undercloud_node.name }}"
    command: autostart
    uri: "{{ libvirt_uri }}"
# Get the ip address of the undercloud. This will retry several times
# (`undercloud_ip_retries`) until the undercloud is ready. The script
# works by getting the MAC address of the first undercloud interface,
# and then looking that up in the kernel ARP table.
- name: Get undercloud vm ip address
  script: "get-undercloud-ip.sh {{ undercloud_node.name }}"
  register: undercloud_vm_ip_result
  until: undercloud_vm_ip_result|success
  retries: "{{ undercloud_ip_retries }}"
  delay: 10
  environment:
    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
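# A minimal sketch of that MAC-to-IP lookup (illustrative only; the
# real logic lives in `get-undercloud-ip.sh`):
#
#     # MAC of the domain's first interface, then its ARP entry
#     mac=$(virsh domiflist "$1" | awk 'NR==3 {print $5}')
#     ip neigh | awk -v mac="$mac" '$0 ~ mac {print $1}'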
- name: Set fact for undercloud ip
  set_fact:
    undercloud_ip: "{{ undercloud_vm_ip_result.stdout_lines[0] }}"
- name: Wait until ssh is available on undercloud node
  wait_for:
    host: "{{ undercloud_ip }}"
    state: started
    port: 22
    timeout: 600
# Add the undercloud to the in-memory inventory.
- name: Add undercloud vm to inventory
  add_host:
    name: undercloud
    groups: undercloud
    ansible_host: undercloud
    ansible_fqdn: undercloud
    ansible_user: stack
    ansible_private_key_file: "{{ undercloud_key }}"
    ansible_ssh_extra_args: '-F "{{ local_working_dir }}/ssh.config.ansible"'
    undercloud_ip: "{{ undercloud_ip }}"
- name: Generate ssh configuration
  delegate_to: localhost
  template:
    src: ssh.config.j2
    dest: "{{ local_working_dir }}/ssh.config.ansible"