Lint fixes for playbooks and roles

This normalizes the formatting of files in the playbooks and roles
folders in order to allow further hardening of the linting rules.

Fixing all the lint issues was too big a change to make in a single
commit: it would involve too many files to review and could cause merge
conflicts with existing changes. Doing it in a few chunks makes it
manageable.

The original full change is at https://review.openstack.org/#/c/627545/
and will be the last one to merge.
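
Most hunks below apply the same small set of yamllint/ansible-lint
normalizations: truthy values spelled as true/false, spaces inside
Jinja2 braces, no spaces padding flow mappings, and trailing blank
lines removed. A representative before/after sketch (a hypothetical
play, not an excerpt from any single file):

# Before:
- name: Example play
  hosts: virthost
  gather_facts: yes
  roles:
    - { role: "image-build", working_dir: "/var/lib/oooq-images" }
  vars:
    image_dest: "{{ working_dir}}/image.qcow2"

# After:
- name: Example play
  hosts: virthost
  gather_facts: true
  roles:
    - {role: "image-build", working_dir: "/var/lib/oooq-images"}
  vars:
    image_dest: "{{ working_dir }}/image.qcow2"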

Change-Id: I73abb9ea78b8bea8d73eb95c52daacdb45f407b8
Sorin Sbarnea
2018-12-28 10:02:39 +00:00
parent d868cbee68
commit 3d02514778
82 changed files with 589 additions and 598 deletions
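
Beyond pure formatting, a few hunks also rewrite constructs that
stricter lint rules reject. Condensed sketches of those patterns,
drawn from hunks in this commit:

# Boolean results: test directly instead of comparing against true
# Before:
when: net_autostart.changed != true
# After:
when: not net_autostart.changed

# Empty-string checks: use the length filter instead of == ""
# Before:
when: virthost|default("") == ""
# After:
when: virthost|default("") | length == 0

# local_action shorthand expanded to delegate_to plus full module syntax
# Before:
- name: Copy generated password to file
  local_action: copy content={{ hash.stdout }} dest="{{ working_dir }}/pwtemp" mode=0600
# After:
- name: Copy generated password to file
  delegate_to: localhost
  copy:
    content: "{{ hash.stdout }}"
    dest: "{{ working_dir }}/pwtemp"
    mode: 0600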

View File

@@ -1,4 +1,3 @@
---
- import_playbook: build-images.yml
- import_playbook: quickstart-extras.yml

View File

@@ -6,13 +6,13 @@
- name: Add the virthost node to the generated inventory
hosts: localhost
gather_facts: yes
gather_facts: true
roles:
- tripleo-inventory
- name: Setup the virthost to build images then build them
- name: Setup the virthost to build images then build them
hosts: virthost
gather_facts: yes
gather_facts: true
roles:
- parts/kvm
- parts/libvirt
@@ -22,5 +22,4 @@
# with a different meaning. We should namespace the
# variables better so that they can both be set separately
# in a config file or extra-vars passed to ansible.
- { role: "image-build", working_dir: "/var/lib/oooq-images" }
- {role: "image-build", working_dir: "/var/lib/oooq-images"}

View File

@@ -4,16 +4,16 @@
---
- name: Destroy previous setup
hosts: virthost
gather_facts: yes
gather_facts: true
vars:
- libvirt_nodepool: true
roles:
- libvirt/teardown/nodes
become: true
- name: Setup undercloud and baremetal vms and networks in libvirt
- name: Setup undercloud and baremetal vms and networks in libvirt
hosts: virthost
gather_facts: yes
gather_facts: true
vars:
- libvirt_nodepool: true
roles:
@@ -21,14 +21,13 @@
environment:
SUPERMIN_KERNEL_VERSION: "{{ lookup('env', 'SUPERMIN_KERNEL_VERSION') }}"
SUPERMIN_KERNEL: "{{ lookup('env', 'SUPERMIN_KERNEL') }}"
SUPERMIN_MODULES: "{{ lookup('env', 'SUPERMIN_MODULES') }}"
SUPERMIN_MODULES: "{{ lookup('env', 'SUPERMIN_MODULES') }}"
LIBGUESTFS_BACKEND: "{{ lookup('env', 'LIBGUESTFS_BACKEND') }}"
LIBGUESTFS_BACKEND_SETTINGS: "{{ lookup('env', 'LIBGUESTFS_BACKEND_SETTINGS') }}"
become: true
- name: Add nodes to the generated inventory
hosts: localhost
gather_facts: yes
gather_facts: true
roles:
- tripleo-inventory

View File

@@ -1,13 +1,12 @@
---
- name: Setup undercloud and baremetal vms and networks in libvirt
- name: Setup undercloud and baremetal vms and networks in libvirt
hosts: virthost
gather_facts: yes
gather_facts: true
roles:
- libvirt/setup
- name: Add nodes to the generated inventory
hosts: localhost
gather_facts: yes
gather_facts: true
roles:
- tripleo-inventory

View File

@@ -1,7 +1,6 @@
---
- name: Teardown previous libvirt setup
- name: Teardown previous libvirt setup
hosts: virthost
gather_facts: no
gather_facts: false
roles:
- libvirt/teardown

View File

@@ -5,4 +5,3 @@
hosts: localhost
tasks:
- debug: msg="noop"

View File

@@ -25,7 +25,7 @@
- name: Add the virthost node to the generated inventory
hosts: localhost
gather_facts: yes
gather_facts: true
roles:
- tripleo-inventory
tags:
@@ -51,4 +51,3 @@
tasks:
- name: Force-refresh facts
setup:

View File

@@ -7,7 +7,7 @@
tasks:
- name: Add virthost
add_host:
name: "{{virthost}}"
name: "{{ virthost }}"
groups: "virthost"
ansible_fqdn: "{{ virthost }}"
ansible_user: "root"
@@ -36,7 +36,7 @@
# access on the target host.
- name: Install libvirt packages and configure networks
hosts: virthost
gather_facts: yes
gather_facts: true
tags:
- environment
roles:
@@ -44,9 +44,9 @@
# The `libvirt/setup` role creates the undercloud and overcloud
# virtual machines.
- name: Setup undercloud, overcloud, and supplemental vms
- name: Setup undercloud, overcloud, and supplemental vms
hosts: virthost
gather_facts: yes
gather_facts: true
tags:
- libvirt
roles:
@@ -55,7 +55,7 @@
- name: Add the undercloud node to the generated inventory
hosts: localhost
gather_facts: yes
gather_facts: true
tags:
- undercloud-inventory
roles:
@@ -65,7 +65,7 @@
# This must be done after inventory is run for the first time
- name: Create the virtual BMC
hosts: undercloud
gather_facts: yes
gather_facts: true
tags:
- libvirt
roles:

View File

@@ -1,7 +1,7 @@
# This will optionally setup yum repos on the virthost
- name: setup yum repos on virthost
hosts: virthost
gather_facts: yes
gather_facts: true
roles:
- repo-setup
tags:

View File

@@ -6,4 +6,3 @@
tags:
- teardown-environment
- teardown-all

View File

@@ -1,11 +1,10 @@
# This teardown role will destroy all vms defined in the overcloud_nodes
# key, and the undercloud
- name: Tear down undercloud and overcloud vms
- name: Tear down undercloud and overcloud vms
hosts: virthost
gather_facts: yes
gather_facts: true
roles:
- libvirt/teardown
tags:
- teardown-nodes
- teardown-all

View File

@@ -7,4 +7,3 @@
tags:
- teardown-provision
- teardown-all

View File

@@ -1,8 +1,8 @@
# This teardown role will destroy all vms defined in the overcloud_nodes
# key, and the undercloud
- name: Teardown undercloud and overcloud vms
- name: Teardown undercloud and overcloud vms
hosts: virthost
gather_facts: yes
gather_facts: true
roles:
- libvirt/teardown
tags:
@@ -27,4 +27,3 @@
tags:
- teardown-provision
- teardown-all

View File

@@ -19,7 +19,7 @@ working_dir: "/home/{{ undercloud_user }}"
# undercloud image.
image_cache_dir: "/var/cache/tripleo-quickstart/images/{{ release }}"
image_fetch_dir: "{{ working_dir}}"
image_fetch_dir: "{{ working_dir }}"
# This determines whether to download a pre-built undercloud.qcow2 or
# whether to instead use an overcloud-full.qcow2 and convert it on
@@ -63,36 +63,36 @@ undercloud_vcpu: 6
# the pre-defined flavors created by `openstack install undercloud`.
flavors:
compute:
memory: '{{compute_memory|default(default_memory)}}'
disk: '{{compute_disk|default(default_disk)}}'
vcpu: '{{compute_vcpu|default(default_vcpu)}}'
memory: '{{ compute_memory|default(default_memory) }}'
disk: '{{ compute_disk|default(default_disk) }}'
vcpu: '{{ compute_vcpu|default(default_vcpu) }}'
control:
memory: '{{control_memory|default(default_memory)}}'
disk: '{{control_disk|default(default_disk)}}'
vcpu: '{{control_vcpu|default(default_vcpu)}}'
memory: '{{ control_memory|default(default_memory) }}'
disk: '{{ control_disk|default(default_disk) }}'
vcpu: '{{ control_vcpu|default(default_vcpu) }}'
ceph:
memory: '{{ceph_memory|default(default_memory)}}'
disk: '{{ceph_disk|default(default_disk)}}'
vcpu: '{{ceph_vcpu|default(default_vcpu)}}'
memory: '{{ ceph_memory|default(default_memory) }}'
disk: '{{ ceph_disk|default(default_disk) }}'
vcpu: '{{ ceph_vcpu|default(default_vcpu) }}'
extradisks: true
blockstorage:
memory: '{{block_memory|default(default_memory)}}'
disk: '{{block_disk|default(default_disk)}}'
vcpu: '{{block_vcpu|default(default_vcpu)}}'
memory: '{{ block_memory|default(default_memory) }}'
disk: '{{ block_disk|default(default_disk) }}'
vcpu: '{{ block_vcpu|default(default_vcpu) }}'
objectstorage:
memory: '{{objectstorage_memory|default(default_memory)}}'
disk: '{{objectstorage_disk|default(default_disk)}}'
vcpu: '{{objectstorage_vcpu|default(default_vcpu)}}'
memory: '{{ objectstorage_memory|default(default_memory) }}'
disk: '{{ objectstorage_disk|default(default_disk) }}'
vcpu: '{{ objectstorage_vcpu|default(default_vcpu) }}'
extradisks: true
undercloud:
memory: '{{undercloud_memory|default(undercloud_memory)}}'
disk: '{{undercloud_disk|default(undercloud_disk)}}'
vcpu: '{{undercloud_vcpu|default(undercloud_vcpu)}}'
memory: '{{ undercloud_memory|default(undercloud_memory) }}'
disk: '{{ undercloud_disk|default(undercloud_disk) }}'
vcpu: '{{ undercloud_vcpu|default(undercloud_vcpu) }}'
# We create a single undercloud node.
undercloud_node:
@@ -149,7 +149,7 @@ networks:
- 1024
- 65535
#Enable network isolation with single-nic-vlans for virtualized deployments
# Enable network isolation with single-nic-vlans for virtualized deployments
undercloud_network_cidr: 192.168.24.0/24
undercloud_external_network_cidr: >-
{%- if overcloud_ipv6|bool %}2001:db8:fd00:1000::/64{% else %}10.0.0.1/24{% endif -%}
@@ -222,12 +222,12 @@ non_root_chown: false
enable_port_forward_for_tripleo_ui: false
# This enables the run of several tripleo-validations tests through Mistral
run_tripleo_validations: False
run_tripleo_validations: false
# This enables the run of tripleo-validations negative tests through shell
# scripts
run_tripleo_validations_negative_tests: False
run_tripleo_validations_negative_tests: false
# Exit tripleo-quickstart on validations failures
exit_on_validations_failure: False
exit_on_validations_failure: false
# Update undercloud and overcloud images with the repos provided via the
# release config.

View File

@@ -22,4 +22,3 @@ not needing to maintain/host a specific undercloud image.
set for the root user on the overcloud-full image. The
resulting overcloud and undercloud instances will have
the password set.

View File

@@ -1,4 +1,3 @@
# Include the `common` role as a dependency.
dependencies:
- common

View File

@@ -1,11 +1,11 @@
- name: generate convert script
template:
src: "{{ convert_image_template }}"
dest: "{{ convert_image_working_dir}}/convert_image.sh"
dest: "{{ convert_image_working_dir }}/convert_image.sh"
- name: check if we have an overcloud-full.qcow2 to start from
stat:
path: "{{ convert_image_working_dir}}/overcloud-full.qcow2"
path: "{{ convert_image_working_dir }}/overcloud-full.qcow2"
register: overcloud_full_qcow2
# This tasks is not be used in CI or on any public systems
@@ -13,7 +13,7 @@
- name: set root password for image
command: >
virt-customize --smp 2 -m {{ convert_image_host_memory }}
-a {{ convert_image_working_dir}}/overcloud-full.qcow2
-a {{ convert_image_working_dir }}/overcloud-full.qcow2
--root-password password:{{ overcloud_full_root_pwd }}
when:
- overcloud_full_root_pwd is defined
@@ -21,8 +21,8 @@
- name: copy overcloud-full.qcow2 to undercloud.qcow2
command: >
cp {{ convert_image_working_dir}}/overcloud-full.qcow2
{{ convert_image_working_dir}}/undercloud.qcow2
cp {{ convert_image_working_dir }}/overcloud-full.qcow2
{{ convert_image_working_dir }}/undercloud.qcow2
changed_when: true
when: overcloud_full_qcow2.stat.exists and overcloud_as_undercloud|bool
@@ -54,4 +54,3 @@
environment:
LIBGUESTFS_BACKEND: direct
LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"

View File

@@ -55,4 +55,3 @@ echo "/swapfile swap swap defaults 0 0" >> /etc/fstab
sed -i 's/SELINUX=enforcing/SELINUX=permissive/' /etc/selinux/config
{% endif %}

View File

@@ -1,3 +1,2 @@
dependencies:
- common

View File

@@ -4,4 +4,3 @@ dependencies:
- parts/kvm
- parts/libvirt
- environment

View File

@@ -48,7 +48,7 @@
# There is a bug w/ virt_net and RHEL where the network xml
# file is not written to /etc/libvirt/qemu/networks/ This causes
# network to be considered transient.
- when: net_autostart.changed != true
- when: not net_autostart.changed
block:
- name: Check if "virsh net-autostart" was successful
@@ -66,14 +66,14 @@
# copy the xml to a file
- name: copy network-xml to file
copy: content={{ item.get_xml }} dest=/tmp/network-{{item.item.name}}.xml
copy: content={{ item.get_xml }} dest=/tmp/network-{{ item.item.name }}.xml
with_items: "{{ net_xml.results }}"
become: true
# redefine the network w/ virsh, this will write the xml file to
# /etc/libvirt/qemu/networks/ and it will no longer be transient
- name: redefine the libvirt networks so the config is written to /etc/libvirt
command: virsh net-define /tmp/network-{{item.name}}.xml
command: virsh net-define /tmp/network-{{ item.name }}.xml
with_items: "{{ networks }}"
become: true
@@ -94,7 +94,7 @@
- name: Whitelist bridges for unprivileged access
lineinfile:
dest: "{{ qemu_bridge_conf }}"
line: "allow {{item.bridge}}"
line: "allow {{ item.bridge }}"
with_items: "{{ networks }}"
become: true

View File

@@ -31,4 +31,3 @@
</ip>
{% endif %}
</network>

View File

@@ -7,4 +7,3 @@
- name: Include version specific vars
include_vars: "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower }}.yml"
ignore_errors: true

View File

@@ -1,4 +1,3 @@
---
dependencies:
- environment

View File

@@ -16,7 +16,7 @@
- when: libvirt_check is success
block:
# Check to see if the networks exist.
# Check to see if the networks exist.
- name: Check libvirt networks
command: >
virsh net-uuid "{{ item.name }}"
@@ -46,7 +46,7 @@
- name: Remove bridge whitelisting from qemu bridge helper
lineinfile:
dest: "{{ qemu_bridge_conf }}"
line: "allow {{item.bridge}}"
line: "allow {{ item.bridge }}"
state: absent
with_items: "{{ networks }}"
become: true

View File

@@ -1,2 +1 @@
qemu_bridge_conf: /etc/qemu-kvm/bridge.conf

View File

@@ -1,2 +1 @@
qemu_bridge_conf: /etc/qemu/bridge.conf

View File

@@ -1,2 +1 @@
qemu_bridge_conf: /etc/qemu/bridge.conf

View File

@@ -1,2 +1 @@
qemu_bridge_conf: /etc/qemu-kvm/bridge.conf

View File

@@ -1,3 +1,2 @@
dependencies:
- common

View File

@@ -27,7 +27,7 @@
# If we want to use the most recent image in the local cache
# (`_force_cached_image` is `true`) *and* such an image exists, point
# `image_cache_path` at `latest-{{image.name}}.qcow2`.
# `image_cache_path` at `latest-{{ image.name }}.qcow2`.
- name: Set path to cached image [local]
set_fact:
image_cache_path: "{{ _latest }}"
@@ -109,7 +109,7 @@
# to continue interrupted downloads.
- name: Get image
command: >
curl -skfL -C- -o _{{ image.name }}.{{ image.type}} {{ image.url }}
curl -skfL -C- -o _{{ image.name }}.{{ image.type }} {{ image.url }}
args:
chdir: "{{ image_cache_dir }}"
register: curl_result
@@ -120,7 +120,7 @@
# Compute the md5 checksum of the image we just downloaded
- name: Get actual md5 checksum of image
command: >
md5sum -b _{{ image.name }}.{{ image.type}}
md5sum -b _{{ image.name }}.{{ image.type }}
args:
chdir: "{{ image_cache_dir }}"
register: md5_actual
@@ -140,7 +140,7 @@
- name: Cache image by checksum
command: >
mv _{{ image.name }}.{{ image.type}} {{ image_cache_path }}
mv _{{ image.name }}.{{ image.type }} {{ image_cache_path }}
args:
chdir: "{{ image_cache_dir }}"
@@ -189,9 +189,9 @@
- name: Get tar images from cache
unarchive:
src: "{{ image_cache_path }}"
copy: no
copy: false
dest: "{{ image_fetch_dir }}"
list_files: yes
list_files: true
when: image.type == "tar"
- name: Clean image cache directory

View File

@@ -35,4 +35,3 @@
vars:
image: "{{ item }}"
with_items: "{{ images }}"

View File

@@ -11,9 +11,9 @@ force_cached_images: false
# You can also control the caching behavior per-image by setting the
# `force_cached` key.
images:
- name: undercloud
url: "{{ undercloud_image_url }}"
type: qcow2
- name: undercloud
url: "{{ undercloud_image_url }}"
type: qcow2
# These are keys that we generate; `virt_power_key` is used *by the
# undercloud* to start/stop virtual machines on the virthost.

View File

@@ -2,4 +2,3 @@
# variables defined in that role are available here.
dependencies:
- common

View File

@@ -11,8 +11,8 @@
# - `libvirt/setup/undercloud`
# - `libvirt/setup/supplemental`
dependencies:
- { role: libvirt }
- { role: setup/user }
- { role: setup/overcloud }
- { role: setup/undercloud }
- { role: setup/supplemental, when: deploy_supplemental_node|bool }
- {role: libvirt}
- {role: setup/user}
- {role: setup/overcloud}
- {role: setup/undercloud}
- {role: setup/supplemental, when: deploy_supplemental_node|bool}

View File

@@ -23,4 +23,3 @@ if [ -z "$ip" ]; then
fi
echo $ip

View File

@@ -2,3 +2,12 @@ dependencies:
- libvirt
- common
- libvirt/setup/common
galaxy_info:
author: Red Hat, Inc.
license: Apache
description: libvirt setup overcloud
platforms:
- name: CentOS
versions:
- 7
min_ansible_version: 2.4

View File

@@ -51,7 +51,11 @@
register: hash
- name: Copy generated password to file
local_action: copy content={{ hash.stdout }} dest="{{ working_dir }}/pwtemp" mode=0600
delegate_to: localhost
copy:
content: "{{ hash.stdout }}"
dest: "{{ working_dir }}/pwtemp"
mode: 0600
- name: Inject password into the image
command: >
@@ -123,22 +127,22 @@
- environment:
LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
block:
# Create libvirt volumes and upload them to libvirt.
- name: Create libvirt nodepool volumes
command: >
virsh vol-create-as {{ libvirt_volume_pool}}
{{ item.name }}.qcow2
{{ flavors[item.flavor].disk }}G --format qcow2
with_items: "{{ overcloud_nodes }}"
# Create libvirt volumes and upload them to libvirt.
- name: Create libvirt nodepool volumes
command: >
virsh vol-create-as {{ libvirt_volume_pool }}
{{ item.name }}.qcow2
{{ flavors[item.flavor].disk }}G --format qcow2
with_items: "{{ overcloud_nodes }}"
- name: Upload the volume to storage pool
command: >
virsh -k 0 vol-upload --pool '{{ libvirt_volume_pool }}'
'{{ item.name }}.qcow2'
'{{ local_working_dir }}/undercloud.qcow2'
async: 600
poll: 10
with_items: "{{ overcloud_nodes }}"
- name: Upload the volume to storage pool
command: >
virsh -k 0 vol-upload --pool '{{ libvirt_volume_pool }}'
'{{ item.name }}.qcow2'
'{{ local_working_dir }}/undercloud.qcow2'
async: 600
poll: 10
with_items: "{{ overcloud_nodes }}"
- name: Start libvirt nodepool nodes
virt:

View File

@@ -8,7 +8,7 @@
LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
block:
#ensure python-netaddr is installed for next task
# ensure python-netaddr is installed for next task
- name: ensure python-netaddr
become: true
package:
@@ -28,7 +28,7 @@
# Create libvirt volumes for the overcloud hosts.
- name: Check if overcloud volumes exist
command: >
virsh vol-info --pool '{{libvirt_volume_pool}}' '{{item.name}}.qcow2'
virsh vol-info --pool '{{ libvirt_volume_pool }}' '{{ item.name }}.qcow2'
register: overcloud_vol_check
ignore_errors: true
with_items: "{{ overcloud_nodes }}"

View File

@@ -1,2 +1,11 @@
dependencies:
- common
galaxy_info:
author: Red Hat, Inc.
license: Apache
description: libvirt setup supplemental
platforms:
- name: CentOS
versions:
- 7
min_ansible_version: 2.4

View File

@@ -45,41 +45,41 @@
- when: supplemental_vol_check is failed
block:
# TODO(hrybacki): Update fetch-images role to handle supplemental images
- name: Fetch centos image for ipa
get_url:
url: '{{ supplemental_base_image_url }}'
dest: '{{ image_cache_dir }}/supplemental_base.qcow2'
# TODO(hrybacki): Update fetch-images role to handle supplemental images
- name: Fetch centos image for ipa
get_url:
url: '{{ supplemental_base_image_url }}'
dest: '{{ image_cache_dir }}/supplemental_base.qcow2'
- name: Ensure virt-manager in installed on virthost
package:
name: 'virt-install'
state: 'present'
become: true
- name: Ensure virt-manager in installed on virthost
package:
name: 'virt-install'
state: 'present'
become: true
- name: Prepare TLS everywhere provisoner script
template:
src: tls_everywhere_provisioner.sh.j2
dest: '~/tls_everywhere_provisioner.sh'
mode: 0700
when: enable_tls_everywhere|bool
- name: Prepare TLS everywhere provisoner script
template:
src: tls_everywhere_provisioner.sh.j2
dest: '~/tls_everywhere_provisioner.sh'
mode: 0700
when: enable_tls_everywhere|bool
- name: Execute tls everywhere provisioner script
shell: 'bash ~/tls_everywhere_provisioner.sh &> ~/tls_everywhere_provisioner.log'
when: enable_tls_everywhere|bool
- name: Execute tls everywhere provisioner script
shell: 'bash ~/tls_everywhere_provisioner.sh &> ~/tls_everywhere_provisioner.log'
when: enable_tls_everywhere|bool
- when: supplemental_provisioning_script is defined and not enable_tls_everywhere|bool
block:
- name: Move scripts to virthost
copy:
src: '{{ supplemental_provisioning_script }}'
dest: '~/supplemental_node_provisioner.sh'
mode: 0744
- name: Provision script execution
shell: >
'bash ~/supplemental_node_provisioner.sh'
tags:
- skip_ansible_lint
- name: Move scripts to virthost
copy:
src: '{{ supplemental_provisioning_script }}'
dest: '~/supplemental_node_provisioner.sh'
mode: 0744
- name: Provision script execution
shell: >
'bash ~/supplemental_node_provisioner.sh'
tags:
- skip_ansible_lint
# Start the supplemental node virtual machine.
- name: Start supplemental node vm
@@ -87,7 +87,7 @@
name: '{{ supplemental_node.name }}'
command: start
state: running
autostart: True
autostart: true
uri: '{{ libvirt_uri }}'
- name: Wait for VM to come online

View File

@@ -23,4 +23,3 @@ if [ -z "$ip" ]; then
fi
echo $ip

View File

@@ -1,3 +1,12 @@
dependencies:
- common
- libvirt/setup/common
galaxy_info:
author: Red Hat, Inc.
license: Apache
description: libvirt setup undercloud
platforms:
- name: CentOS
versions:
- 7
min_ansible_version: 2.4

View File

@@ -1,5 +1,4 @@
---
- name: indirect role include (workaround to https://github.com/ansible/ansible/issues/19472)
include_role:
name: convert-image

View File

@@ -8,28 +8,27 @@
LIBGUESTFS_BACKEND: direct
LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
block:
# We need to extract the overcloud image, if it's not already extracted.
# so we can inject the gating repo into it.
- name: check if overcloud image is already extracted
stat:
path: '{{ working_dir }}/overcloud-full.qcow2'
register: overcloud_image_stat_for_customize
# We need to extract the overcloud image, if it's not already extracted.
# so we can inject the gating repo into it.
- name: check if overcloud image is already extracted
stat:
path: '{{ working_dir }}/overcloud-full.qcow2'
register: overcloud_image_stat_for_customize
- name: Extract overcloud-full image
command: >
virt-copy-out -a {{ working_dir }}/undercloud.qcow2
/home/{{ undercloud_user }}/overcloud-full.qcow2 {{ working_dir }}
when: not overcloud_image_stat_for_customize.stat.exists
- name: Extract overcloud-full image
command: >
virt-copy-out -a {{ working_dir }}/undercloud.qcow2
/home/{{ undercloud_user }}/overcloud-full.qcow2 {{ working_dir }}
when: not overcloud_image_stat_for_customize.stat.exists
# only customize overcloud-full image if that is not going to be
# used as undercloud
- name: Perform extra overcloud customizations
command: >
virt-customize -a {{ working_dir}}/overcloud-full.qcow2
--run '{{ working_dir}}/overcloud-customize.sh'
- name: Copy updated overcloud-full image back to undercloud
command: >
virt-copy-in -a {{ working_dir }}/undercloud.qcow2
{{ working_dir }}/overcloud-full.qcow2 /home/{{ undercloud_user }}/
# only customize overcloud-full image if that is not going to be
# used as undercloud
- name: Perform extra overcloud customizations
command: >
virt-customize -a {{ working_dir }}/overcloud-full.qcow2
--run '{{ working_dir }}/overcloud-customize.sh'
- name: Copy updated overcloud-full image back to undercloud
command: >
virt-copy-in -a {{ working_dir }}/undercloud.qcow2
{{ working_dir }}/overcloud-full.qcow2 /home/{{ undercloud_user }}/

View File

@@ -8,36 +8,35 @@
LIBGUESTFS_BACKEND: direct
LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
block:
# We need to extract the overcloud image, if it's not already extracted.
# so we can inject the gating repo into it.
- name: check if overcloud image is already extracted
stat:
path: '{{ working_dir }}/overcloud-full.qcow2'
register: overcloud_image_stat
# We need to extract the overcloud image, if it's not already extracted.
# so we can inject the gating repo into it.
- name: check if overcloud image is already extracted
stat:
path: '{{ working_dir }}/overcloud-full.qcow2'
register: overcloud_image_stat
- name: Extract overcloud-full image
command: >
virt-copy-out -a {{ working_dir }}/undercloud.qcow2
/home/{{ undercloud_user }}/overcloud-full.qcow2 {{ working_dir }}
register: overcloud_image_extracted
when: not overcloud_image_stat.stat.exists
- name: Extract overcloud-full image
command: >
virt-copy-out -a {{ working_dir }}/undercloud.qcow2
/home/{{ undercloud_user }}/overcloud-full.qcow2 {{ working_dir }}
register: overcloud_image_extracted
when: not overcloud_image_stat.stat.exists
- name: Inject the gating repo (overcloud-full)
command: >
virt-customize -a {{ working_dir }}/overcloud-full.qcow2
--upload {{ compressed_gating_repo }}:/tmp/gating_repo.tar.gz
--run '{{ working_dir }}/inject_gating_repo.sh'
- name: Inject the gating repo (overcloud-full)
command: >
virt-customize -a {{ working_dir }}/overcloud-full.qcow2
--upload {{ compressed_gating_repo }}:/tmp/gating_repo.tar.gz
--run '{{ working_dir }}/inject_gating_repo.sh'
- name: Copy updated overcloud-full image back to undercloud
command: >
virt-copy-in -a {{ working_dir }}/undercloud.qcow2
{{ working_dir }}/overcloud-full.qcow2 /home/{{ undercloud_user }}/
when: overcloud_image_extracted is defined and overcloud_image_extracted|changed
- name: Inject the gating repo (undercloud)
command: >
virt-customize -a {{ working_dir }}/undercloud.qcow2
--upload {{ compressed_gating_repo }}:/tmp/gating_repo.tar.gz
--run '{{ working_dir }}/inject_gating_repo.sh'
when: not overcloud_as_undercloud|bool
- name: Copy updated overcloud-full image back to undercloud
command: >
virt-copy-in -a {{ working_dir }}/undercloud.qcow2
{{ working_dir }}/overcloud-full.qcow2 /home/{{ undercloud_user }}/
when: overcloud_image_extracted is defined and overcloud_image_extracted|changed
- name: Inject the gating repo (undercloud)
command: >
virt-customize -a {{ working_dir }}/undercloud.qcow2
--upload {{ compressed_gating_repo }}:/tmp/gating_repo.tar.gz
--run '{{ working_dir }}/inject_gating_repo.sh'
when: not overcloud_as_undercloud|bool

View File

@@ -14,4 +14,3 @@
repo_setup_dir: "{{ working_dir }}"
with_items: "{{ qcow_images.files | default([]) }}"
changed_when: true

View File

@@ -31,136 +31,136 @@
LIBGUESTFS_BACKEND: direct
LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
block:
# Conditionally include a playbook for all the images specified
# in options that downloads, cache and extract if tar archived
# only if the images aren't already in volume pool
- name: Fetch the images
include_role:
name: fetch-images
# Conditionally include a playbook for all the images specified
# in options that downloads, cache and extract if tar archived
# only if the images aren't already in volume pool
- name: Fetch the images
include_role:
name: fetch-images
# Conditionally include a playbook for all the images specified
# in options that updates images with the repos provided via the
# release config.
- include_tasks: inject_repos.yml
when: update_images|bool or devmode|bool
# Conditionally include a playbook for all the images specified
# in options that updates images with the repos provided via the
# release config.
- include_tasks: inject_repos.yml
when: update_images|bool or devmode|bool
# inject the gating repo generated by ansible-role-tripleo-gate
- include_tasks: inject_gating_repo.yml
when: compressed_gating_repo is defined and compressed_gating_repo
# inject the gating repo generated by ansible-role-tripleo-gate
- include_tasks: inject_gating_repo.yml
when: compressed_gating_repo is defined and compressed_gating_repo
# Converts an overcloud-full.qcow2 into a undercloud.qcow2
- include_tasks: convert_image.yml
when: overcloud_as_undercloud|bool or baseos_as_undercloud|bool
# Converts an overcloud-full.qcow2 into a undercloud.qcow2
- include_tasks: convert_image.yml
when: overcloud_as_undercloud|bool or baseos_as_undercloud|bool
# Update images after we have converted the overcloud-full to an
# undercloud image when using devmode. This also clones tripleo-ci
# on the undercloud image.
- include_tasks: update_image.yml
when: devmode|bool
# Update images after we have converted the overcloud-full to an
# undercloud image when using devmode. This also clones tripleo-ci
# on the undercloud image.
- include_tasks: update_image.yml
when: devmode|bool
# Inject updated overcloud and ipa images into our converted undercloud
# image
- name: Inject additional images
command: >
virt-customize -a {{ working_dir }}/undercloud.qcow2
--upload {{ working_dir }}/{{ item }}:/home/{{ undercloud_user }}/{{ item }}
--run-command 'chown {{ undercloud_user }}:{{ undercloud_user }} /home/{{ undercloud_user }}/{{ item }}'
changed_when: true
with_items: "{{ inject_images | default('') }}"
when:
- overcloud_as_undercloud|bool or use_external_images|bool
- inject_images|length > 0
# Inject updated overcloud and ipa images into our converted undercloud
# image
- name: Inject additional images
command: >
virt-customize -a {{ working_dir }}/undercloud.qcow2
--upload {{ working_dir }}/{{ item }}:/home/{{ undercloud_user }}/{{ item }}
--run-command 'chown {{ undercloud_user }}:{{ undercloud_user }} /home/{{ undercloud_user }}/{{ item }}'
changed_when: true
with_items: "{{ inject_images | default('') }}"
when:
- overcloud_as_undercloud|bool or use_external_images|bool
- inject_images|length > 0
# This copies the `instackenv.json` configuration file that we
# generated in the overcloud setup role to the undercloud host.
- name: Copy instackenv.json to appliance
command: >
virt-customize -a {{ working_dir }}/undercloud.qcow2
--upload {{ working_dir }}/instackenv.json:/home/{{ undercloud_user }}/instackenv.json
--run-command 'chown {{ undercloud_user }}:{{ undercloud_user }} /home/{{ undercloud_user }}/instackenv.json'
when: inject_instackenv|bool
# This copies the `instackenv.json` configuration file that we
# generated in the overcloud setup role to the undercloud host.
- name: Copy instackenv.json to appliance
command: >
virt-customize -a {{ working_dir }}/undercloud.qcow2
--upload {{ working_dir }}/instackenv.json:/home/{{ undercloud_user }}/instackenv.json
--run-command 'chown {{ undercloud_user }}:{{ undercloud_user }} /home/{{ undercloud_user }}/instackenv.json'
when: inject_instackenv|bool
# Copy the undercloud public key to the virthost, because we're going
# to inject it into the undercloud image in the next task.
- name: Copy undercloud ssh public key to working dir
copy:
src: "{{ undercloud_key }}.pub"
dest: "{{ working_dir }}/id_rsa_undercloud.pub"
# Copy the undercloud public key to the virthost, because we're going
# to inject it into the undercloud image in the next task.
- name: Copy undercloud ssh public key to working dir
copy:
src: "{{ undercloud_key }}.pub"
dest: "{{ working_dir }}/id_rsa_undercloud.pub"
# Copy the virt host private key to `$HOME/.ssh/id_rsa_virt_power` for
# VirtualBMC be able to access the hypervisor where the VMs are located
- name: Copy virt host ssh private key to working dir
when: release not in ['newton']
copy:
src: "{{ virt_power_key }}"
dest: "{{ working_dir }}/id_rsa_virt_power"
# Copy the virt host private key to `$HOME/.ssh/id_rsa_virt_power` for
# VirtualBMC be able to access the hypervisor where the VMs are located
- name: Copy virt host ssh private key to working dir
when: release not in ['newton']
copy:
src: "{{ virt_power_key }}"
dest: "{{ working_dir }}/id_rsa_virt_power"
# When using qemu:///system, the vbmc will need to ssh back to the virthost
# as the root user to perform power operations
- name: Add virt power key to root authorized keys if using qemu:///system
authorized_key:
user: root
key: "{{ lookup('file', virt_power_key|quote + '.pub')|default('') }}"
when: libvirt_uri == "qemu:///system"
become: true
# When using qemu:///system, the vbmc will need to ssh back to the virthost
# as the root user to perform power operations
- name: Add virt power key to root authorized keys if using qemu:///system
authorized_key:
user: root
key: "{{ lookup('file', virt_power_key|quote + '.pub')|default('') }}"
when: libvirt_uri == "qemu:///system"
become: true
# Copy the public key to `$HOME/.ssh/authorized_keys` for the `root`
# and `undercloud_user` user on the undercloud.
- name: Inject undercloud ssh public key to appliance
command: >
virt-customize -a {{ working_dir }}/undercloud.qcow2
--mkdir {{item.homedir}}/.ssh/
--upload '{{ working_dir }}/id_rsa_undercloud.pub:{{item.homedir}}/.ssh/authorized_keys'
--run-command 'chown -R {{item.owner}}:{{item.group}} {{item.homedir}}/.ssh'
--run-command 'chmod 0700 {{item.homedir}}/.ssh'
--run-command 'chmod 0600 {{item.homedir}}/.ssh/authorized_keys'
with_items:
- homedir: /root
owner: root
group: root
- homedir: '/home/{{ undercloud_user }}'
owner: '{{ undercloud_user }}'
group: '{{ undercloud_user }}'
# Copy the public key to `$HOME/.ssh/authorized_keys` for the `root`
# and `undercloud_user` user on the undercloud.
- name: Inject undercloud ssh public key to appliance
command: >
virt-customize -a {{ working_dir }}/undercloud.qcow2
--mkdir {{ item.homedir }}/.ssh/
--upload '{{ working_dir }}/id_rsa_undercloud.pub:{{ item.homedir }}/.ssh/authorized_keys'
--run-command 'chown -R {{ item.owner }}:{{ item.group }} {{ item.homedir }}/.ssh'
--run-command 'chmod 0700 {{ item.homedir }}/.ssh'
--run-command 'chmod 0600 {{ item.homedir }}/.ssh/authorized_keys'
with_items:
- homedir: /root
owner: root
group: root
- homedir: '/home/{{ undercloud_user }}'
owner: '{{ undercloud_user }}'
group: '{{ undercloud_user }}'
# This copies the `id_rsa_virt_power` private key that we generated
# in the overcloud setup role to the undercloud host to be used by
# VirtualBMC+libvirt to access the virthost.
- name: Copy id_rsa_virt_power to appliance
when: release not in ['newton']
command: >
virt-customize -a {{ working_dir }}/undercloud.qcow2
--upload '{{ working_dir }}/id_rsa_virt_power:/root/.ssh/id_rsa_virt_power'
--run-command 'chown root:root /root/.ssh/id_rsa_virt_power'
--run-command 'chmod 0600 /root/.ssh/id_rsa_virt_power'
# This copies the `id_rsa_virt_power` private key that we generated
# in the overcloud setup role to the undercloud host to be used by
# VirtualBMC+libvirt to access the virthost.
- name: Copy id_rsa_virt_power to appliance
when: release not in ['newton']
command: >
virt-customize -a {{ working_dir }}/undercloud.qcow2
--upload '{{ working_dir }}/id_rsa_virt_power:/root/.ssh/id_rsa_virt_power'
--run-command 'chown root:root /root/.ssh/id_rsa_virt_power'
--run-command 'chmod 0600 /root/.ssh/id_rsa_virt_power'
- name: Create undercloud customize script
template:
src: "{{ undercloud_customize_script }}"
dest: "{{ working_dir}}/undercloud-customize.sh"
mode: 0755
when: undercloud_customize_script is defined
- name: Create undercloud customize script
template:
src: "{{ undercloud_customize_script }}"
dest: "{{ working_dir }}/undercloud-customize.sh"
mode: 0755
when: undercloud_customize_script is defined
# This allows to run a customization script on the
# undercloud image, to cover any extra needs.
- name: Perform extra undercloud customizations
command: >
virt-customize -a {{ working_dir }}/undercloud.qcow2
--run '{{ working_dir }}/undercloud-customize.sh'
when: undercloud_customize_script is defined
# This allows to run a customization script on the
# undercloud image, to cover any extra needs.
- name: Perform extra undercloud customizations
command: >
virt-customize -a {{ working_dir }}/undercloud.qcow2
--run '{{ working_dir }}/undercloud-customize.sh'
when: undercloud_customize_script is defined
# This allows to run a customization script on the
# overcloud image, to cover any extra needs.
- name: Perform extra overcloud customizations
include_tasks: customize_overcloud.yml
when: overcloud_customize_script is defined
# This allows to run a customization script on the
# overcloud image, to cover any extra needs.
- name: Perform extra overcloud customizations
include_tasks: customize_overcloud.yml
when: overcloud_customize_script is defined
# Perform an SELinux relabel on the undercloud image to avoid problems
# caused by bad labelling, since by default the undercloud runs in
# enforcing mode.
- name: Perform selinux relabel on undercloud image
command: >
virt-customize -a {{ working_dir }}/undercloud.qcow2
--selinux-relabel
# Perform an SELinux relabel on the undercloud image to avoid problems
# caused by bad labelling, since by default the undercloud runs in
# enforcing mode.
- name: Perform selinux relabel on undercloud image
command: >
virt-customize -a {{ working_dir }}/undercloud.qcow2
--selinux-relabel
# NOTE(trown) Nested blocks do not seem to work as expected so instead using
# conditionals with AND to simulate the same thing.
@@ -170,129 +170,129 @@
- undercloud_vol_check is failed
- not overcloud_as_undercloud|bool
block:
- name: >
Determine if the undercloud image is a whole disk image
so we can resize it appropriately
command: >
virt-filesystems -a {{ working_dir }}/undercloud.qcow2
environment:
LIBGUESTFS_BACKEND: direct
register: undercloud_partitions
- name: >
Determine if the undercloud image is a whole disk image
so we can resize it appropriately
command: >
virt-filesystems -a {{ working_dir }}/undercloud.qcow2
environment:
LIBGUESTFS_BACKEND: direct
register: undercloud_partitions
- when:
- undercloud_vol_check is failed
- not overcloud_as_undercloud|bool
- undercloud_partitions.stdout=='/dev/sda1'
block:
# Handle the resize for the whole disk image case
- name: Resize undercloud image (create target image)
command: >
qemu-img create -f qcow2 -o preallocation=off
'{{ working_dir }}/undercloud-resized.qcow2'
'{{ flavors[undercloud_node.flavor].disk }}G'
# Handle the resize for the whole disk image case
- name: Resize undercloud image (create target image)
command: >
qemu-img create -f qcow2 -o preallocation=off
'{{ working_dir }}/undercloud-resized.qcow2'
'{{ flavors[undercloud_node.flavor].disk }}G'
- name: Resize undercloud image (call virt-resize)
command: >
virt-resize --expand /dev/sda1
'{{ working_dir }}/undercloud.qcow2'
'{{ working_dir }}/undercloud-resized.qcow2'
environment:
LIBGUESTFS_BACKEND: direct
LIBGUESTFS_DEBUG: 1
LIBGUESTFS_TRACE: 1
- name: Resize undercloud image (call virt-resize)
command: >
virt-resize --expand /dev/sda1
'{{ working_dir }}/undercloud.qcow2'
'{{ working_dir }}/undercloud-resized.qcow2'
environment:
LIBGUESTFS_BACKEND: direct
LIBGUESTFS_DEBUG: 1
LIBGUESTFS_TRACE: 1
- name: Rename resized image to original name
command: >
mv -f '{{ working_dir }}/undercloud-resized.qcow2'
'{{ working_dir }}/undercloud.qcow2'
- name: Rename resized image to original name
command: >
mv -f '{{ working_dir }}/undercloud-resized.qcow2'
'{{ working_dir }}/undercloud.qcow2'
- when:
- undercloud_vol_check is failed
- not overcloud_as_undercloud|bool
- undercloud_partitions.stdout=='/dev/sda'
block:
# Handle the resize for the partition image case
- name: Resize undercloud image (expand the image)
command: >
qemu-img resize
'{{ working_dir }}/undercloud.qcow2'
'{{ flavors[undercloud_node.flavor].disk }}G'
# Handle the resize for the partition image case
- name: Resize undercloud image (expand the image)
command: >
qemu-img resize
'{{ working_dir }}/undercloud.qcow2'
'{{ flavors[undercloud_node.flavor].disk }}G'
- name: Resize undercloud image (expand the FS)
command: >
virt-customize -a '{{ working_dir }}/undercloud.qcow2'
--run-command 'FS_TYPE=`findmnt -o FSTYPE -fn /`;
if [ "$FS_TYPE" = "xfs" ]; then xfs_growfs /;
elif [ "$FS_TYPE" = "ext4" ]; then resize2fs /dev/sda;
else echo "ERROR: Unknown filesystem $FSTYPE, cannot resize.";
exit 1; fi'
environment:
LIBGUESTFS_BACKEND: direct
LIBGUESTFS_DEBUG: 1
LIBGUESTFS_TRACE: 1
- name: Resize undercloud image (expand the FS)
command: >
virt-customize -a '{{ working_dir }}/undercloud.qcow2'
--run-command 'FS_TYPE=`findmnt -o FSTYPE -fn /`;
if [ "$FS_TYPE" = "xfs" ]; then xfs_growfs /;
elif [ "$FS_TYPE" = "ext4" ]; then resize2fs /dev/sda;
else echo "ERROR: Unknown filesystem $FSTYPE, cannot resize.";
exit 1; fi'
environment:
LIBGUESTFS_BACKEND: direct
LIBGUESTFS_DEBUG: 1
LIBGUESTFS_TRACE: 1
- name: Set libvirt environment when using root to run tasks
set_fact:
libvirt_environment:
LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
LIBGUESTFS_BACKEND: "direct"
cacheable: true
when: ssh_user == "root"
- name: Set libvirt environment when using root to run tasks
set_fact:
libvirt_environment:
LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
LIBGUESTFS_BACKEND: "direct"
cacheable: true
when: ssh_user == "root"
- name: Set libvirt environment when not using root to run tasks
set_fact:
libvirt_environment:
LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
cacheable: true
when: ssh_user != "root"
- name: Set libvirt environment when not using root to run tasks
set_fact:
libvirt_environment:
LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
cacheable: true
when: ssh_user != "root"
# NOTE(trown) We use the overcloud-full initramfs and kernel as DIB
# seems a bit smarter about extracting them than virt-get-kernel and
# the partition image is simply a converted overcloud-full
- name: Extract the kernel and initramfs from the undercloud image
command: >
virt-copy-out -a '{{ working_dir }}/undercloud.qcow2'
'/home/{{ undercloud_user }}/overcloud-full.vmlinuz'
'/home/{{ undercloud_user }}/overcloud-full.initrd'
'{{ working_dir }}'
environment: "{{ libvirt_environment }}"
when: not undercloud_use_custom_boot_images|bool
# NOTE(trown) We use the overcloud-full initramfs and kernel as DIB
# seems a bit smarter about extracting them than virt-get-kernel and
# the partition image is simply a converted overcloud-full
- name: Extract the kernel and initramfs from the undercloud image
command: >
virt-copy-out -a '{{ working_dir }}/undercloud.qcow2'
'/home/{{ undercloud_user }}/overcloud-full.vmlinuz'
'/home/{{ undercloud_user }}/overcloud-full.initrd'
'{{ working_dir }}'
environment: "{{ libvirt_environment }}"
when: not undercloud_use_custom_boot_images|bool
- when:
- not undercloud_use_custom_boot_images|bool
- not overcloud_as_undercloud|bool
block:
# NOTE(ykarel) This is required to get the undercloud specific
# kernel when not using overcloud_as_undercloud.
- name: Extract the kernel and initramfs from the undercloud image
command: >
virt-get-kernel -a '{{ working_dir }}/undercloud.qcow2' --unversioned-names
--output '{{ working_dir }}'
environment:
LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
# NOTE(ykarel) This is required to get the undercloud specific
# kernel when not using overcloud_as_undercloud.
- name: Extract the kernel and initramfs from the undercloud image
command: >
virt-get-kernel -a '{{ working_dir }}/undercloud.qcow2' --unversioned-names
--output '{{ working_dir }}'
environment:
LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
# NOTE(trown) The undercloudvm template expects this to be
# named overcloud-full.vmlinuz. We can update the devmode case
# to not require this step
- name: rename undercloud kernel
command: >
mv '{{ working_dir }}/vmlinuz'
'{{ working_dir }}/overcloud-full.vmlinuz'
# NOTE(trown) The undercloudvm template expects this to be
# named overcloud-full.vmlinuz. We can update the devmode case
# to not require this step
- name: rename undercloud kernel
command: >
mv '{{ working_dir }}/vmlinuz'
'{{ working_dir }}/overcloud-full.vmlinuz'
# NOTE(trown) The undercloudvm template expects this to be
# named overcloud-full.initrd. We can update the devmode case
# to not require this step
- name: rename undercloud initramfs
command: >
mv '{{ working_dir }}/initramfs'
'{{ working_dir }}/overcloud-full.initrd'
# NOTE(trown) The undercloudvm template expects this to be
# named overcloud-full.initrd. We can update the devmode case
# to not require this step
- name: rename undercloud initramfs
command: >
mv '{{ working_dir }}/initramfs'
'{{ working_dir }}/overcloud-full.initrd'
# NOTE(trown): This is a bit of a hack to get the undercloud vm
# template to use the external kernel and initrd. We should
# instead use a different var for this and set it in the devmode
# case as well.
- name: Set overcloud_as_undercloud to true
set_fact:
# NOTE(trown): This is a bit of a hack to get the undercloud vm
# template to use the external kernel and initrd. We should
# instead use a different var for this and set it in the devmode
# case as well.
- name: Set overcloud_as_undercloud to true
set_fact:
overcloud_as_undercloud: true
cacheable: true
@@ -300,21 +300,21 @@
environment:
LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
block:
# Create a libvirt volume and upload the undercloud image to
# libvirt.
- name: Create undercloud volume
command: >
virsh vol-create-as {{ libvirt_volume_pool}}
{{ undercloud_node.name }}.qcow2
{{ flavors[undercloud_node.flavor].disk }}G --format qcow2
# Create a libvirt volume and upload the undercloud image to
# libvirt.
- name: Create undercloud volume
command: >
virsh vol-create-as {{ libvirt_volume_pool }}
{{ undercloud_node.name }}.qcow2
{{ flavors[undercloud_node.flavor].disk }}G --format qcow2
- name: Upload undercloud volume to storage pool
command: >
virsh -k 0 vol-upload --pool '{{ libvirt_volume_pool }}'
'{{ undercloud_node.name }}.qcow2'
'{{ working_dir }}/undercloud.qcow2'
async: 600
poll: 10
- name: Upload undercloud volume to storage pool
command: >
virsh -k 0 vol-upload --pool '{{ libvirt_volume_pool }}'
'{{ undercloud_node.name }}.qcow2'
'{{ working_dir }}/undercloud.qcow2'
async: 600
poll: 10
# Define (but do no start) the undercloud virtual machine.
- name: Define undercloud vm
@@ -331,7 +331,7 @@
owner: "{{ non_root_user }}"
group: "{{ non_root_user }}"
mode: "a+x"
recurse: yes
recurse: true
state: 'directory'
when: non_root_chown|bool
@@ -399,7 +399,7 @@
ansible_fqdn: undercloud
ansible_user: '{{ undercloud_user }}'
ansible_private_key_file: "{{ undercloud_key }}"
ansible_ssh_extra_args: '-F "{{local_working_dir}}/ssh.config.ansible"'
ansible_ssh_extra_args: '-F "{{ local_working_dir }}/ssh.config.ansible"'
undercloud_ip: "{{ undercloud_ip }}"
- name: Generate ssh configuration
@@ -411,51 +411,51 @@
- when: enable_port_forward_for_tripleo_ui|bool
block:
# TO-DO weshayutin
# In the upcoming release of ansible 2.4 this should be moved to
# iptables_raw
# - name: ensure the required tcp ports are open on the virthost
- name: configure iptables
iptables:
table: filter
chain: INPUT
action: insert
protocol: tcp
match: tcp
ctstate: NEW
jump: ACCEPT
destination_port: "{{ item }}"
become: true
with_items:
- 6385
- 5000
- 5050
- 8004
- 8080
- 9000
- 8989
- 8774
- 3000
- 8181
- 8443
- 443
# TO-DO weshayutin
# In the upcoming release of ansible 2.4 this should be moved to
# iptables_raw
# - name: ensure the required tcp ports are open on the virthost
- name: configure iptables
iptables:
table: filter
chain: INPUT
action: insert
protocol: tcp
match: tcp
ctstate: NEW
jump: ACCEPT
destination_port: "{{ item }}"
become: true
with_items:
- 6385
- 5000
- 5050
- 8004
- 8080
- 9000
- 8989
- 8774
- 3000
- 8181
- 8443
- 443
- name: Create ssh tunnel systemd service
template:
src: "{{ ssh_tunnel_service_file }}"
dest: "/etc/systemd/system/ssh-tunnel.service"
mode: 0644
become: true
- name: Create ssh tunnel systemd service
template:
src: "{{ ssh_tunnel_service_file }}"
dest: "/etc/systemd/system/ssh-tunnel.service"
mode: 0644
become: true
- name: reload the systemctl daemon after file update
shell: systemctl daemon-reload
become: true
tags:
- skip_ansible_lint
- name: reload the systemctl daemon after file update
shell: systemctl daemon-reload
become: true
tags:
- skip_ansible_lint
- name: Enable ssh tunnel service
service:
name: ssh-tunnel
enabled: true
state: restarted
become: true
- name: Enable ssh tunnel service
service:
name: ssh-tunnel
enabled: true
state: restarted
become: true

View File

@@ -8,7 +8,7 @@
- name: generate image specific update script
template:
src: update_image.sh.j2
dest: "{{ working_dir}}/update_image-{{ item.inode }}.sh"
dest: "{{ working_dir }}/update_image-{{ item.inode }}.sh"
with_items: "{{ qcow_images.files | default([]) }}"
- name: run update
@@ -22,4 +22,3 @@
LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
with_items: "{{ qcow_images.files | default([]) }}"
changed_when: true

View File

@@ -22,4 +22,3 @@ EOF
{% if gating_repo_enabled|bool %}
{{ ansible_pkg_mgr }} --disablerepo="*" --enablerepo="gating-repo" update -y
{% endif %}

View File

@@ -31,4 +31,3 @@ timeout -s 15 -k 1200 900 bash << EOS
{% endif %}
EOS

View File

@@ -1,3 +1,11 @@
dependencies:
- libvirt
galaxy_info:
author: Red Hat, Inc.
license: Apache
description: libvirt setup overcloud
platforms:
- name: CentOS
versions:
- 7
min_ansible_version: 2.4

View File

@@ -33,7 +33,7 @@
mode: 0600
- name: Read virt_power private key
no_log: True
no_log: true
set_fact:
virt_power_key_pvt: "{{ lookup('file', virt_power_key)|default('') }}"
cacheable: true
@@ -43,4 +43,3 @@
user: "{{ ansible_user_id }}"
key: "{{ lookup('file', virt_power_key|quote + '.pub')|default('') }}"
manage_dir: true

View File

@@ -12,4 +12,3 @@ dependencies:
- role: libvirt
- role: teardown/nodes
- role: teardown/user

View File

@@ -4,7 +4,7 @@
- name: Include vars for libvirt-nodepool
include_vars:
file: ../roles/libvirt/setup/overcloud/tasks/vars/libvirt_nodepool_vars.yml
file: ../roles/libvirt/setup/overcloud/tasks/vars/libvirt_nodepool_vars.yml
when: libvirt_nodepool|default(false)
- name: Check if libvirt is available
@@ -25,62 +25,62 @@
- when: overcloud_nodes
block:
# Check if the overcloud nodes exist.
- name: Check overcloud vms
command: >
virsh domid "{{ item.name }}"
with_items: "{{ overcloud_nodes }}"
ignore_errors: true
register: overcloud_check
# Check if the overcloud nodes exist.
- name: Check overcloud vms
command: >
virsh domid "{{ item.name }}"
with_items: "{{ overcloud_nodes }}"
ignore_errors: true
register: overcloud_check
# Destroy and undefine the overcloud nodes.
- name: Destroy overcloud vms
command:
virsh destroy "{{ item.item.name }}"
when: item is success
with_items: "{{ overcloud_check.results }}"
ignore_errors: true
# Destroy and undefine the overcloud nodes.
- name: Destroy overcloud vms
command:
virsh destroy "{{ item.item.name }}"
when: item is success
with_items: "{{ overcloud_check.results }}"
ignore_errors: true
- name: Undefine overcloud vms
command:
virsh undefine "{{ item.item.name }}"
when: item is success
with_items: "{{ overcloud_check.results }}"
- name: Undefine overcloud vms
command:
virsh undefine "{{ item.item.name }}"
when: item is success
with_items: "{{ overcloud_check.results }}"
# The `virsh vol-dumpxml ... > /dev/null` is here (and elsewhere) due to
# [1293804].
#
# [1293804]: https://bugzilla.redhat.com/show_bug.cgi?id=1293804
- name: Delete baremetal vm storage
shell: |
virsh vol-dumpxml --pool '{{ libvirt_volume_pool }}' \
'{{ item.name }}'.qcow2 2>&1 > /dev/null
virsh vol-delete --pool '{{ libvirt_volume_pool }}' \
'{{ item.name }}'.qcow2
with_items: "{{ overcloud_nodes }}"
ignore_errors: true
# The `virsh vol-dumpxml ... > /dev/null` is here (and elsewhere) due to
# [1293804].
#
# [1293804]: https://bugzilla.redhat.com/show_bug.cgi?id=1293804
- name: Delete baremetal vm storage
shell: |
virsh vol-dumpxml --pool '{{ libvirt_volume_pool }}' \
'{{ item.name }}'.qcow2 2>&1 > /dev/null
virsh vol-delete --pool '{{ libvirt_volume_pool }}' \
'{{ item.name }}'.qcow2
with_items: "{{ overcloud_nodes }}"
ignore_errors: true
# Do the same thing to the supplemental node.
- environment:
LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
block:
- name: Check undercloud vm
command: >
virsh domid "{{ supplemental_node.name|default('') }}"
ignore_errors: true
register: supplemental_check
- name: Check undercloud vm
command: >
virsh domid "{{ supplemental_node.name|default('') }}"
ignore_errors: true
register: supplemental_check
- name: Destroy supplemental vm
command: >
virsh destroy "{{ supplemental_node.name|default('') }}"
when: supplemental_check is success
ignore_errors: true
- name: Destroy supplemental vm
command: >
virsh destroy "{{ supplemental_node.name|default('') }}"
when: supplemental_check is success
ignore_errors: true
- name: Undefine supplemental vm
command: >
virsh undefine "{{ supplemental_node.name|default('') }}" --remove-all-storage
when: supplemental_check is success
ignore_errors: true
- name: Undefine supplemental vm
command: >
virsh undefine "{{ supplemental_node.name|default('') }}" --remove-all-storage
when: supplemental_check is success
ignore_errors: true
# Do the same thing to the undercloud node.
- name: Check undercloud vm
@@ -150,4 +150,3 @@
path: "/run/user/{{ pool_uid.stdout }}/libvirt/storage/run/{{ libvirt_volume_pool }}.xml"
state: absent
when: pool_check is success

View File

@@ -9,4 +9,3 @@
with_file:
- "{{ virt_power_key }}.pub"
ignore_errors: true

View File

@@ -1,2 +1 @@
nested: true

View File

@@ -79,4 +79,3 @@
fail:
msg: "Cannot change the state of nested virtualization. Please shut down any running VMs."
when: nested|bool != cpu_nested_enabled|bool

View File

@@ -3,5 +3,4 @@
virt_host_key: "{{ local_working_dir }}/id_rsa_virt_host"
# Exit the playbook when a non suported linux distro is found on the virthost
supported_distro_check: yes
supported_distro_check: true

View File

@@ -1,3 +1,2 @@
dependencies:
- provision

View File

@@ -2,7 +2,7 @@
- name: Check that virthost is set
fail:
msg: "You need to set virthost before running these playbooks."
when: virthost|default("") == ""
when: virthost|default("") | length == 0
- name: Get current user group for localhost
command: "id -gn"
@@ -17,23 +17,23 @@
- provision
- block:
- name: Ensure local working dir exists
file:
path: "{{ local_working_dir }}"
state: directory
owner: "{{ ansible_env.USER }}"
group: "{{ current_group_local }}"
- name: Ensure local working dir exists
file:
path: "{{ local_working_dir }}"
state: directory
owner: "{{ ansible_env.USER }}"
group: "{{ current_group_local }}"
rescue:
# if it fails we try again as with become, become must be fallback because
# otherwise first attempt will fail on local machines without sudo, a
# use case that we want to support.
- name: Ensure local working dir exists
file:
path: "{{ local_working_dir }}"
state: directory
owner: "{{ ansible_env.USER }}"
group: "{{ current_group_local }}"
become: true
# if it fails we try again as with become, become must be fallback because
# otherwise first attempt will fail on local machines without sudo, a
# use case that we want to support.
- name: Ensure local working dir exists
file:
path: "{{ local_working_dir }}"
state: directory
owner: "{{ ansible_env.USER }}"
group: "{{ current_group_local }}"
become: true
# This file needs to exist because it will later be referenced in some
# ssh command lines.
@@ -46,9 +46,8 @@
# written out to disk unless you call the `tripleo-inventory` role.
- name: Add the virthost to the inventory
add_host:
name: "{{virthost}}"
name: "{{ virthost }}"
groups: "virthost"
ansible_fqdn: "{{ virthost }}"
ansible_user: "root"
ansible_host: "{{ virthost }}"

View File

@@ -1,3 +1,2 @@
dependencies:
- common

View File

@@ -1,3 +1,2 @@
dependencies:
- provision

View File

@@ -29,7 +29,7 @@
- name: Retrieve current tuned profile
command: tuned-adm active
register: tuned
changed_when: False
changed_when: false
- name: Set tuned profile if not already set
command: tuned-adm profile "{{ tuned_profile }}"
@@ -43,7 +43,7 @@
name: "{{ non_root_user }}"
state: present
shell: /bin/bash
create_home: yes
create_home: true
become: true
- name: Get the non-root user UID
@@ -139,4 +139,3 @@
owner: "{{ non_root_user }}"
group: "{{ non_root_group }}"
become: true

View File

@@ -1,3 +1,2 @@
dependencies:
- provision

View File

@@ -5,6 +5,5 @@
please see https://github.com/openstack/tripleo-quickstart .
To run quickstart with out this check set 'quickstart.sh -e supported_distro_check=false'"
failed_when: supported_distro_check == true
failed_when: supported_distro_check
when: not (ansible_distribution == 'CentOS' or ansible_distribution == 'RedHat')

View File

@@ -1,3 +1,2 @@
dependencies:
- provision

View File

@@ -63,4 +63,3 @@
with_fileglob:
- /run/user/{{ non_root_uid.stdout }}/*
become: true

View File

@@ -1,3 +1,2 @@
dependencies:
- provision

View File

@@ -16,4 +16,3 @@
create_home: true
become: true
when: not chrooted|bool

View File

@@ -86,7 +86,7 @@ parameters) is always nice for users too:
---
- name: Run repo setup
hosts: undercloud
gather_facts: no
gather_facts: false
roles:
- repo-setup

View File

@@ -58,4 +58,3 @@ known_hash_tags:
- current-tripleo-rdo
- current-tripleo-rdo-internal
- current-passed-ci

View File

@@ -28,7 +28,7 @@
cacheable: true
# TODO: Fix for fedora
- when: overcloud_release is defined and overcloud_release != ""
- when: overcloud_release is defined and (overcloud_release | length != 0)
block:
- name: Get DLRN overcloud hash

View File

@@ -5,4 +5,4 @@
| tee -a {{ repo_setup_dir }}/{{ repo_setup_log }}
become: true
register: result
no_log: result.rc == 0
no_log: result.rc == 0

View File

@@ -1,4 +1,12 @@
# Include the `common` role as a dependency.
dependencies:
- { role: common }
- {role: common}
galaxy_info:
author: Red Hat, Inc.
license: Apache
description: libvirt setup supplemental
platforms:
- name: CentOS
versions:
- 7
min_ansible_version: 2.4

View File

@@ -2,14 +2,14 @@
- when: inventory == 'all'
block:
#required for liberty based deployments
# required for liberty based deployments
- name: copy get-overcloud-nodes.py to undercloud
template:
src: 'get-overcloud-nodes.py.j2'
dest: '{{ working_dir }}/get-overcloud-nodes.py'
mode: 0755
#required for liberty based deployments
# required for liberty based deployments
- name: fetch overcloud node names and IPs
shell: >
source {{ working_dir }}/stackrc;
@@ -23,7 +23,7 @@
fetch:
src: '{{ working_dir }}/.ssh/id_rsa'
dest: '{{ overcloud_key }}'
flat: yes
flat: true
mode: 0400
# add host to the ansible group formed from its type
@@ -55,7 +55,7 @@
ansible_user: "{{ lookup('env','USER') }}"
ansible_private_key_file: "/etc/nodepool/id_rsa"
#required for regeneration of ssh.config.ansible
# required for regeneration of ssh.config.ansible
- name: set_fact for undercloud ip
set_fact:
undercloud_ip: "{{ hostvars['undercloud'].undercloud_ip }}"
@@ -75,7 +75,7 @@
ansible_fqdn: supplemental
ansible_user: '{{ supplemental_user }}'
ansible_private_key_file: '{{ local_working_dir }}/id_rsa_supplemental'
ansible_ssh_extra_args: '-F "{{local_working_dir}}/ssh.config.ansible"'
ansible_ssh_extra_args: '-F "{{ local_working_dir }}/ssh.config.ansible"'
supplemental_node_ip: "{{ supplemental_node_ip }}"
when: supplemental_node_ip is defined
@@ -85,7 +85,7 @@
cacheable: true
when: hostvars['supplemental'] is defined and hostvars['supplemental'].supplemental_node_ip is defined
#readd the undercloud to reset the ansible_ssh parameters set in quickstart
# readd the undercloud to reset the ansible_ssh parameters set in quickstart
- name: Add undercloud vm to inventory
add_host:
name: undercloud
@@ -97,7 +97,7 @@
undercloud_ip: "{{ undercloud_ip }}"
when: not virthost_with_private_key and undercloud_ip is defined
#required for regeneration of ssh.config.ansible
# required for regeneration of ssh.config.ansible
- name: set undercloud ssh proxy command
set_fact:
undercloud_ssh_proxy_command: "ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
@@ -107,7 +107,7 @@
cacheable: true
when: virthost_with_private_key and undercloud_ip is defined
#required for regeneration of ssh.config.ansible
# required for regeneration of ssh.config.ansible
- name: set undercloud ssh proxy command
set_fact:
undercloud_ssh_proxy_command: "ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
@@ -159,7 +159,7 @@
set_fact:
id_rsa_virt_power_exists: true
cacheable: true
when: undercloud_ip is not defined and result_stat_id_rsa_virt_power.stat.exists == True
when: undercloud_ip is not defined and result_stat_id_rsa_virt_power.stat.exists
- name: regenerate ssh config, if no undercloud has been launched.
delegate_to: localhost

View File

@@ -4,7 +4,7 @@
setup:
filter: "*"
delegate_to: localhost
delegate_facts: True
delegate_facts: true
when: hostvars['localhost'].ansible_user_dir is not defined
- include_tasks: inventory.yml

View File

@@ -18,30 +18,29 @@
# The `libvirt/setup` role creates the undercloud and overcloud
# virtual machines.
- name: Setup undercloud and overcloud vms
- name: Setup undercloud and overcloud vms
hosts: virthost
gather_facts: yes
gather_facts: true
roles:
- libvirt/teardown
- libvirt/setup
# Add the undercloud node to the generated
# inventory.
- name: Inventory the undercloud
- name: Inventory the undercloud
hosts: undercloud
gather_facts: no
gather_facts: false
vars:
inventory: undercloud
inventory: undercloud
roles:
- tripleo-inventory
# DEPLOY ALL THE THINGS! Depending on the currently selected set of
# tags, this will deploy the undercloud, deploy the overcloud, and
# perform some validation tests on the overcloud.
- name: Install undercloud and deploy overcloud
- name: Install undercloud and deploy overcloud
hosts: undercloud
gather_facts: no
gather_facts: false
roles:
- tripleo/undercloud
- tripleo/overcloud

View File

@@ -3,4 +3,3 @@
remote_user: root
roles:
- tripleo-inventory

View File

@@ -1,3 +1,4 @@
---
- name: deprecation message
vars:
deprecation_warning_msg: |
@@ -14,4 +15,3 @@
- name: include the tripleo-quickstart-extras undercloud-deploy
include_role:
name: undercloud-deploy

View File

@@ -1,3 +1,3 @@
# Use the vbmc
# moved to common
# enable_vbmc: true
# enable_vbmc: true

View File

@@ -1,2 +1,3 @@
---
dependencies:
- common