lots and lots of comments (1/2)

the first of a series of patches that adds comments to virtually all
of our roles.

Change-Id: Id3d7409521013af5b11c5f9df328630b1a3845f5
Lars Kellogg-Stedman 2016-04-28 22:26:09 -04:00 committed by John Trowbridge
parent f85e5d456d
commit 1bc289f3e3
22 changed files with 211 additions and 20 deletions


@ -1,9 +1,18 @@
# Deploy an HA openstack environment.
#
# This will require (6144 * 4) == approx. 24GB for the overcloud
# nodes, plus another 8GB for the undercloud, for a total of around
# 32GB.
control_memory: 6144
compute_memory: 6144
undercloud_memory: 8192
# Giving the undercloud additional CPUs can greatly improve heat's
# performance (and result in a shorter deploy time).
undercloud_vcpu: 2
# Create three controller nodes and one compute node.
overcloud_nodes:
- name: control_0
flavor: control
@ -15,6 +24,9 @@ overcloud_nodes:
- name: compute_0
flavor: compute
# We don't need introspection in a virtual environment (because we are
# creating all the "hardware" ourselves, we already know the necessary
# information).
introspect: false
# Tell tripleo about our environment.


@ -1,7 +1,8 @@
# We set introspection to true and use only the minimal amount of nodes
# for this job, but test all defaults otherwise.
step_introspect: true
# Define a single controller node and a single compute node.
overcloud_nodes:
- name: control_0
flavor: control


@ -2,6 +2,7 @@
# for this job, but test all defaults otherwise
step_introspect: true
# Define a single controller node and a single compute node.
overcloud_nodes:
- name: control_0
flavor: control


@ -1,14 +1,20 @@
# This playbook is responsible for preparing a target host.
#
# We start by creating an inventory entry for the target host.
- name: Add virthost to inventory
hosts: localhost
roles:
- provision/local
# Next, we create a non-root user on the remote system.
- name: Create target user on virt host
hosts: virthost
roles:
- provision/teardown
- provision/remote
# Regenerate the inventory file for use in subsequent
# `ansible-playbook` runs.
- name: Rebuild inventory
hosts: localhost
roles:
@ -16,7 +22,7 @@
# We need to force-refresh facts because we are now connecting
# as a different user ('stack' instead of 'root'), which affects
# things like ansible_user_id and other facts.
- name: Tear down environment
hosts: virthost
pre_tasks:
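
Once the inventory has been rebuilt, later runs can reach the virthost as
the new non-root user. A minimal connectivity check (an illustrative
command, assuming the default local working directory of `~/.quickstart`
and a generated inventory file named `hosts`):

    ansible -i ~/.quickstart/hosts virthost -m ping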


@ -1,8 +1,14 @@
---
# This is the playbook used by the `quickstart.sh` script.
# The [provision.yml](provision.yml.html) playbook is responsible for
# creating an inventory entry for our `virthost` and for creating an
# unprivileged user on that host for use by our virtual environment.
- include: provision.yml
tags:
- provision
# The `environment/setup` role performs any tasks that require `root`
# access on the target host.
- name: Install libvirt packages and configure networks
hosts: virthost
tags:
@ -10,20 +16,25 @@
roles:
- environment/setup
# The `libvirt/setup` role creates the undercloud and overcloud
# virtual machines.
- name: Setup undercloud and overcloud vms
hosts: virthost
gather_facts: yes
roles:
- libvirt/teardown
- libvirt/setup
# Add the undercloud node to the generated
# inventory.
- name: Rebuild inventory
hosts: localhost
roles:
- rebuild-inventory
# DEPLOY ALL THE THINGS! Depending on the currently selected set of
# tags, this will deploy the undercloud, deploy the overcloud, and
# perform some validation tests on the overcloud.
- name: Install undercloud and deploy overcloud
hosts: undercloud
gather_facts: no
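
The stages above can be selected with tags when invoking this playbook.
A sketch of such invocations, assuming the generated inventory from the
provisioning step (only the `provision` tag is visible in this hunk;
other tag names vary):

    # full run
    ansible-playbook -i ~/.quickstart/hosts quickstart.yml
    # provisioning only
    ansible-playbook -i ~/.quickstart/hosts quickstart.yml --tags provision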


@ -103,6 +103,7 @@ ipv6: false
enable_vnc_console: false
# We have some version-specific behaviors, so we need a release variable.
#
# TODO(trown): It would be better to write a release into the image itself
# and set this variable from there.
release: mitaka


@ -1,2 +1,3 @@
# Include the `common` role as a dependency.
dependencies:
- common


@ -1,3 +1,5 @@
# Include the roles for installing KVM and libvirt (as well as
# anything required by our parent `environment` module).
dependencies:
- parts/kvm
- parts/libvirt


@ -1,3 +1,5 @@
# Create the global, root-managed libvirt networks to which we will
# attach the undercloud and overcloud virtual machines.
- name: Create libvirt networks
virt_net:
command: define
@ -22,7 +24,12 @@
with_items: "{{ networks }}"
become: true
# See: http://wiki.qemu.org/Features-Done/HelperNetworking
# Whitelist the bridges associated with these networks for
# access using qemu [helper networking][helper]. Later on we
# create virtual machines using an unprivileged `qemu:///session`
# connection, and we connect to the networks using the bridge names.
#
# [helper]: http://wiki.qemu.org/Features-Done/HelperNetworking
- name: Whitelist bridges for unprivileged access
lineinfile:
dest: "{{ qemu_bridge_conf }}"
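
The net effect, assuming `qemu_bridge_conf` points at the usual
`/etc/qemu/bridge.conf` and the networks define bridges named `brext`
and `brovc` (illustrative names), is a whitelist like:

    allow brext
    allow brovc

qemu-bridge-helper consults this file, which is what allows the
unprivileged `qemu:///session` virtual machines created later to attach
to these root-owned bridges.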


@ -1,5 +1,8 @@
# NB: We use "virsh" here instead of the "virt_net" module because
# these tasks may be called before the dependencies of the "virt"
# Tear down the virtual environment that was created by the
# `environment/setup` role.
#
# NB: We use `virsh` here instead of the `virt_net` module because
# these tasks may be called before the dependencies of the `virt_net`
# module are satisfied.
- name: Check if libvirt is available
@ -8,9 +11,11 @@
ignore_errors: true
register: libvirt_check
# If libvirt is not available, we can skip the rest of the tasks.
- when: libvirt_check|success
block:
# Check to see if the networks exist.
- name: Check libvirt networks
command: >
virsh net-uuid "{{ item.name }}"
@ -19,6 +24,8 @@
ignore_errors: true
become: true
# If the networks exist, stop them, undefine them, and remove the
# bridge devices from the qemu whitelist.
- name: Stop libvirt networks
command: >
virsh net-destroy "{{ item.item.name }}"


@ -1,13 +1,32 @@
# This is where we store generated artifacts (like ssh config files,
# keys, etc).
working_dir: "{{ ansible_user_dir }}/.quickstart"
# This is where we store the downloaded undercloud image.
image_cache_dir: "{{ ansible_user_dir }}/oooq_cache"
# If `use_cached_image` is `true`, use the cached undercloud image (if
# any) without checking for a more recent one. Otherwise, compare the
# remote MD5 against the cache and only reuse the cached version if
# they match.
use_cached_image: false
# Which image should we download?
release: mitaka
image_url: http://artifacts.ci.centos.org/rdo/images/{{ release }}/delorean/stable/undercloud.qcow2
# Which power manager should we use?
nova_power_manager: 'nova.virt.baremetal.virtual_power_driver.VirtualPowerManager'
# These are keys that we generate; `virt_power_key` is used *by the
# undercloud* to start/stop virtual machines on the virthost.
# `undercloud_key` is used to log in to the undercloud.
virt_power_key: "{{ local_working_dir }}/id_rsa_virt_power"
undercloud_key: "{{ local_working_dir }}/id_rsa_undercloud"
# Which libvirt URI should we use? Using `qemu:///session` does
# not require privileged access (but does require the setup performed by the
# `environment/setup` role).
libvirt_uri: qemu:///session
libvirt_volume_path: "{{ working_dir }}/pool"
libvirt_volume_pool: oooq_pool


@ -1,3 +1,5 @@
# Include the `common` role as a dependency. This makes sure the
# variables defined in that role are available here.
dependencies:
- common


@ -1,4 +1,14 @@
---
# Include settings from the libvirt role, and include all the
# `setup/*` roles. This means that when your playbook has:
#
# roles:
# - libvirt/setup
#
# You also get:
#
# - `libvirt/setup/user`
# - `libvirt/setup/overcloud`
# - `libvirt/setup/undercloud`
dependencies:
- role: libvirt
- role: setup/user


@ -1,2 +1,4 @@
# Include the `common` role as a dependency. This makes sure the
# variables defined in that role are available here.
dependencies:
- common


@ -1,8 +1,13 @@
# Create the volume pool directory if it doesn't already exist. This
# will be the target of the libvirt volume pool we create in the next
# task.
- name: Ensure volume pool directory exists
file:
path: "{{ libvirt_volume_path }}"
state: directory
# Create a libvirt volume pool. This is where we'll be creating
# images for the undercloud and overcloud.
- name: Define volume pool
virt_pool:
command: define
@ -27,12 +32,17 @@
- when: overcloud_nodes
block:
# Generate MAC addresses that we'll use for the overcloud nodes.
# By generating these in advance we can populate the
# `instackenv.json` file with MAC addresses without running
# introspection.
- name: get a list of MACs to use
generate_macs:
nodes: "{{ overcloud_nodes }}"
networks: "{{ networks }}"
register: node_mac_map
# Create libvirt volumes for the overcloud hosts.
- name: Check if overcloud volumes exist
command: >
virsh vol-info --pool '{{libvirt_volume_pool}}' '{{item.name}}.qcow2'
@ -52,6 +62,8 @@
when: item|failed
with_items: "{{ overcloud_vol_check.results }}"
# Define (but do not start) the overcloud nodes. These will be
# booted later by ironic during the provisioning process.
- name: Define overcloud vms
virt:
name: "{{ item.name }}"
@ -60,6 +72,10 @@
uri: "{{ libvirt_uri }}"
with_items: "{{ overcloud_nodes }}"
# Generate the `instackenv.json` configuration file. Note that this
# task *must* occur after the above overcloud tasks, because if
# `overcloud_nodes` is defined the template depends on the
# `node_mac_map` variable.
- name: Write instackenv script
template:
src: "{{ undercloud_instackenv_template }}"
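
As a quick sanity check (an illustrative command, assuming the
unprivileged `qemu:///session` URI used by this role), the overcloud
domains should now be defined but remain shut off until ironic powers
them on during the overcloud deploy:

    virsh -c qemu:///session list --all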


@ -1,5 +1,5 @@
# Fetching the undercloud images can take a long time. This
# tasklist caches images in `{{ image_cache_dir }}` if an image is
# (a) downloaded successfully and (b) successfully verifies against
# the checksum. Images are cached using the checksum as the filename,
# and subsequent playbook runs will use the cached copy rather than
@ -10,6 +10,8 @@
path: "{{ image_cache_dir }}"
state: directory
# This looks for the `latest.qcow2` symlink that may have been created
# by a previous run of this tasklist.
- name: Check if we have a latest.qcow2
command: >
test -f latest.qcow2
@ -19,6 +21,9 @@
register: latest_exists
changed_when: false
# If we want to use the most recent image in the local cache
# (`use_cached_image` is `true`) *and* such an image exists, point
# `image_cache_path` at `latest.qcow2`.
- when: use_cached_image|bool and latest_exists|success
block:
@ -27,9 +32,11 @@
image_cache_path: "{{ image_cache_dir }}/latest.qcow2"
when: latest_exists|success and use_cached_image|bool
# Otherwise, check if there's a new image available.
- when: not use_cached_image|bool or latest_exists|failed
block:
# Get the expected checksum for the remote image.
- name: Get undercloud image expected checksum
command: >
curl -sf {{ image_url }}.md5
@ -39,6 +46,7 @@
set_fact:
image_cache_path: "{{ image_cache_dir }}/{{ undercloud_md5_expected.stdout.split()[0] }}.qcow2"
# See if a matching image exists locally.
- name: Check for undercloud image in cache
command: >
test -f {{ image_cache_path }}
@ -48,9 +56,15 @@
register: image_exists
changed_when: false
# Looks like we're going to have to download the image after all.
- when: image_exists is defined and image_exists|failed
block:
# This task will download the image. We're using `curl` here
# rather than `wget` because while `wget` has built-in retry
# capabilities, it is unable to handle `file://` URLs. We instead
# use an ansible `until` loop, combined with curl's `-C-` option
# to continue interrupted downloads.
- name: Get undercloud image
command: >
curl -sf -C- -o _undercloud.qcow2 {{ image_url }}
@ -61,6 +75,7 @@
retries: 20
delay: 5
# Compute the md5 checksum of the image we just downloaded
- name: Get actual md5 checksum of undercloud image
command: >
md5sum _undercloud.qcow2
@ -68,6 +83,7 @@
chdir: "{{ image_cache_dir }}"
register: undercloud_md5_actual
# Verify that what we have is what we wanted.
- name: Verify undercloud image checksum
fail:
msg: undercloud image checksum does not match
@ -90,12 +106,17 @@
rescue:
# This is a workaround for ansible issue [15625][].
#
# [15625]: https://github.com/ansible/ansible/issues/15625
- name: Note that there was a failure.
set_fact:
image_fetch_failed: true
always:
# Ensure that even if there are failures we still clean up our
# temporary image file.
- name: Clean up temporary image file
file:
path: "{{ image_cache_dir }}/_undercloud.qcow2"
@ -105,6 +126,8 @@
fail:
when: image_fetch_failed|default(false)
# Take the image at `image_cache_path`, which was set by one of the
# above tasks, and copy it to `undercloud.qcow2` in our `{{ working_dir }}`.
- name: Get undercloud image from cache
command: >
cp {{ image_cache_path }} {{ working_dir }}/undercloud.qcow2
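
Taken together, the tasklist behaves roughly like the following shell
sketch (simplified; the variable names mirror the ansible variables):

    md5=$(curl -sf "$image_url.md5" | awk '{print $1}')
    if [ ! -f "$image_cache_dir/$md5.qcow2" ]; then
        # -C- resumes interrupted downloads; the real task retries up
        # to 20 times with a 5 second delay.
        curl -sf -C- -o "$image_cache_dir/_undercloud.qcow2" "$image_url"
        actual=$(md5sum "$image_cache_dir/_undercloud.qcow2" | awk '{print $1}')
        [ "$actual" = "$md5" ] || exit 1
        mv "$image_cache_dir/_undercloud.qcow2" "$image_cache_dir/$md5.qcow2"
        ln -sf "$md5.qcow2" "$image_cache_dir/latest.qcow2"
    fi
    cp "$image_cache_dir/$md5.qcow2" "$working_dir/undercloud.qcow2"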


@ -1,9 +1,12 @@
# We're going to try putting files in `local_working_dir`, so make
# sure it exists first.
- name: Ensure local working dir exists
delegate_to: localhost
file:
path: "{{ local_working_dir }}"
state: directory
# Generate MAC addresses for the undercloud node.
- name: get MACs for the undercloud
generate_macs:
nodes:
@ -11,6 +14,8 @@
networks: "{{ networks }}"
register: undercloud_mac_map
# Check if the undercloud volume exists. If not, we call out to
# [fetch_image.yml](fetch_image.yml.html) to download the image.
- name: Check if undercloud volume exists
command: >
virsh vol-info --pool '{{ libvirt_volume_pool }}'
@ -23,6 +28,8 @@
- include: fetch_image.yml
when: undercloud_vol_check|failed
# This copies the `instackenv.json` configuration file that we
# generated in the overcloud setup role to the undercloud host.
- name: Copy instackenv.json to appliance
command: >
virt-customize -a {{ working_dir }}/undercloud.qcow2
@ -32,12 +39,16 @@
LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
when: undercloud_vol_check|failed
# Copy the undercloud public key to the virthost, because we're going
# to inject it into the undercloud image in the next task.
- name: Copy undercloud ssh public key to working dir
copy:
src: "{{ undercloud_key }}.pub"
dest: "{{ working_dir }}/id_rsa_undercloud.pub"
when: undercloud_vol_check|failed
# Copy the public key to `$HOME/.ssh/authorized_keys` for the `root`
# and `stack` users on the undercloud.
- name: Inject undercloud ssh public key to appliance
command: >
virt-customize -a {{ working_dir }}/undercloud.qcow2
@ -57,6 +68,9 @@
group: stack
when: undercloud_vol_check|failed
# Perform an SELinux relabel on the undercloud image to avoid problems
# caused by bad labelling, since by default the undercloud runs in
# enforcing mode.
- name: Perform selinux relabel on undercloud image
command: >
virt-customize -a {{ working_dir }}/undercloud.qcow2
@ -65,6 +79,7 @@
LIBGUESTFS_BACKEND: direct
when: undercloud_vol_check|failed
# Resize the undercloud image to our desired size.
- name: Resize undercloud image (create target image)
command: >
qemu-img create -f qcow2 -o preallocation=off
@ -81,6 +96,8 @@
LIBGUESTFS_BACKEND: direct
when: undercloud_vol_check|failed
# Create a libvirt volume and upload the undercloud image to
# libvirt.
- name: Create undercloud volume
command: >
virsh vol-create-as {{ libvirt_volume_pool}}
@ -101,6 +118,7 @@
poll: 10
when: undercloud_vol_check|failed
# Define (but do not start) the undercloud virtual machine.
- name: Define undercloud vm
virt:
name: "{{ undercloud_node.name }}"
@ -108,6 +126,7 @@
xml: "{{ lookup('template', 'undercloudvm.xml.j2') }}"
uri: "{{ libvirt_uri }}"
# Start the undercloud virtual machine.
- name: Start undercloud vm
virt:
name: "{{ undercloud_node.name }}"
@ -115,6 +134,10 @@
state: running
uri: "{{ libvirt_uri }}"
# Get the ip address of the undercloud. This will retry several times
# (`undercloud_ip_retries`) until the undercloud is ready. The script
# works by getting the MAC address of the first undercloud interface,
# and then looking that up in the kernel ARP table.
- name: Get undercloud vm ip address
script: "scripts/get-undercloud-ip.sh {{ undercloud_node.name }}"
register: undercloud_vm_ip_result
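
The script itself is not part of this diff, but a minimal sketch of the
approach described above (read the MAC of the domain's first interface,
then look it up in the kernel ARP table) could look like this
(illustrative, not the actual `get-undercloud-ip.sh`):

    mac=$(virsh -c qemu:///session domiflist "$1" | awk 'NR>2 {print $5; exit}')
    awk -v mac="$mac" '$4 == mac {print $1}' /proc/net/arp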
@ -135,6 +158,7 @@
port: 22
timeout: 300
# Add the undercloud to the in-memory inventory.
- name: Add undercloud vm to inventory
add_host:
name: undercloud
@ -151,8 +175,8 @@
src: ssh.config.j2
dest: "{{ local_working_dir }}/ssh.config.ansible"
# Ironic defaults to using `qemu:///system`. When running libvirtd
# unprivileged we need to use `qemu:///session`. This allows us to pass
# the value of libvirt_uri into /etc/ironic/ironic.conf.
- name: Configure Ironic pxe_ssh driver
delegate_to: undercloud


@ -1,14 +1,21 @@
# We're going to want to store things in `working_dir` so ensure it
# exists first. `working_dir` is a directory on the target host.
- name: Ensure remote working dir exists
file:
path: "{{ working_dir }}"
state: directory
# Also make sure `local_working_dir` exists. This is a directory on
# the ansible control host.
- name: Ensure local working dir exists
delegate_to: localhost
file:
path: "{{ local_working_dir }}"
state: directory
# Create ssh keypairs. `virt_power_key` is used by ironic on the
# undercloud to control libvirt on the physical host, and
# `undercloud_key` is used to log in to the undercloud.
- name: Generate ssh keys
delegate_to: localhost
command: >


@ -1,4 +1,13 @@
---
# Include settings from the libvirt role, and include all the
# `teardown/*` roles. This means that when your playbook has:
#
# roles:
# - libvirt/teardown
#
# You also get:
#
# - `libvirt/teardown/nodes`
# - `libvirt/teardown/user`
dependencies:
- role: libvirt
- role: teardown/nodes


@ -1,5 +1,5 @@
# NB: We use "virsh" here instead of the "virt" module because
# these tasks may be called before the dependencies of the "virt"
# NB: We use `virsh` here instead of the `virt` module because
# these tasks may be called before the dependencies of the `virt`
# module are satisfied.
- name: Check if libvirt is available
@ -10,9 +10,11 @@
environment:
LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
# If libvirt isn't available we can skip everything else.
- when: libvirt_check|success
block:
# Check if the overcloud nodes exist.
- name: Check overcloud vms
command: >
virsh domid "{{ item.name }}"
@ -22,6 +24,7 @@
environment:
LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
# Destroy and undefine the overcloud nodes.
- name: Destroy overcloud vms
command:
virsh destroy "{{ item.item.name }}"
@ -39,6 +42,7 @@
environment:
LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
# Do the same thing to the undercloud node.
- name: Check undercloud vm
command: >
virsh domid "{{ undercloud_node.name }}"
@ -62,8 +66,10 @@
environment:
LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
# The `virsh vol-dumpxml ... > /dev/null` is here (and elsewhere) due to
# [1293804].
#
# [1293804]: https://bugzilla.redhat.com/show_bug.cgi?id=1293804
- name: Delete baremetal vm storage
shell: |
virsh vol-dumpxml --pool '{{ libvirt_volume_pool }}' \


@ -1,3 +1,5 @@
# This removes the `virt_power_key` from the remote `authorized_keys`
# file.
- name: Remove virt_power_key from remote authorized_keys
authorized_key:
user: "{{ ansible_user_id }}"


@ -1,12 +1,17 @@
# Grab CPU flags from `/proc/cpuinfo` and put the result into the
# `cpu_flags_cmd` variable.
- name: get cpu flags
command: >
awk -F: '/^flags/ {print $2; exit}' /proc/cpuinfo
register: cpu_flags_cmd
# Extract the flags into a list variable named `cpu_flags`.
- name: set cpu flags fact
set_fact:
cpu_flags: "{{ cpu_flags_cmd.stdout.split() }}"
# Identify the processor manufacturer by looking for "Intel" or "AMD"
# in `/proc/cpuinfo`.
- name: check if Intel processor
command: grep -q Intel /proc/cpuinfo
ignore_errors: true
@ -17,12 +22,27 @@
ignore_errors: true
register: is_amd
# Enable [nested virtualization][nested]. Set the `nested_virt` fact
# to `true` if we have Intel processors and the `vmx` flag or we have
# AMD processors and the `svm` flag.
#
# [nested]: https://www.kernel.org/doc/Documentation/virtual/kvm/nested-vmx.txt
- name: set nested_virt fact
set_fact:
nested_virt: >-
{{ ((is_intel and 'vmx' in cpu_flags)
or (is_amd and 'svm' in cpu_flags))|bool }}
# Configure appropriate options for the vendor-specific kvm module
# in `/etc/modprobe.d/kvm.conf`. This will result in either:
#
# options kvm_intel nested=1
#
# Or:
#
# options kvm_amd nested=1
#
# (Depending on the processor vendor)
- name: configure kvm module
copy:
dest: /etc/modprobe.d/kvm.conf
@ -32,6 +52,7 @@
when: "{{ nested_virt }}"
become: true
# Immediately load the appropriate kvm vendor module.
- name: load kvm module [intel]
command: modprobe kvm_intel
when: is_intel|success
@ -42,6 +63,9 @@
when: is_amd|success
become: true
# List the appropriate kvm vendor module in
# `/etc/modules-load.d/ooo_kvm.conf`. This will cause the module
# to be loaded automatically next time the system boots.
- name: arrange for kvm module to load at boot [intel]
copy:
content: kvm_intel
@ -55,5 +79,3 @@
dest: /etc/modules-load.d/ooo_kvm.conf
when: is_amd|success
become: true
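
After these tasks run, nested support can be confirmed by reading the
module parameter back (which path applies depends on the vendor module
that was loaded):

    cat /sys/module/kvm_intel/parameters/nested   # Intel hosts
    cat /sys/module/kvm_amd/parameters/nested     # AMD hosts

A value of Y (or 1) means nested virtualization is enabled.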