tripleo-quickstart-extras/roles/overcloud-prep-images/templates/overcloud-prep-images.sh.j2

#!/bin/bash
set -eux
### --start_docs
## Prepare images for deploying the overcloud
## ==========================================
## Prepare Your Environment
## ------------------------
## * Source in the undercloud credentials.
## ::
source {{ working_dir }}/stackrc
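# stackrc exports the OS_* credentials (e.g. OS_USERNAME, OS_AUTH_URL) that
# the openstack/ironic/glance clients below read from the environment.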
{% if download_overcloud_image|bool %}
## * Download specific Overcloud images
## ::
if [ -f overcloud-full.qcow2 ]; then
    sudo rm -rf overcloud-full*
fi
curl -L -O "{{ overcloud_image_url }}"
tar xvfp {{ overcloud_full_tar_name }}
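# The tarball is expected to unpack the overcloud image set: typically
# overcloud-full.qcow2 plus its kernel and ramdisk (overcloud-full.vmlinuz,
# overcloud-full.initrd); exact contents depend on how the images were built.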
{% endif %}
{% if step_overcloud_image|bool %}
## * Upload images to glance.
## ::
openstack overcloud image upload {% if bash_deploy_ramdisk|bool %}--old-deploy-image{% endif %} {% if whole_disk_images|bool %}--whole-disk{% endif %}
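# Illustrative check (not run here): after the upload the images should be
# visible in glance, e.g.
#   openstack image list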
{% endif %}
{% if step_glance_upload|bool %}
## * Create and upload an image to glance; this step is specific to nodepool-based deployments.
## ::
rpm -q qemu-img || sudo yum install -y qemu-img
qemu-img create -f qcow2 overcloud-full.qcow2 1G
glance image-create --container-format bare \
                    --disk-format qcow2 \
                    --name overcloud-full \
                    --file overcloud-full.qcow2
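# Note: the 1G qcow2 created above is a freshly-created blank image, so this
# step only provides a glance record named overcloud-full (as needed by the
# nodepool-based jobs), not a bootable overcloud image.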
{% endif %}
{% if step_register|bool %}
## * Register nodes with Ironic.
## ::
{% if release in ['mitaka'] %}
openstack baremetal import --json instackenv.json
openstack baremetal configure boot
{% elif step_introspect|bool %}
openstack overcloud node import instackenv.json
{% else %}
openstack overcloud node import instackenv.json --provide
{% endif %}
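# Illustrative check (not run here): registered nodes should now show up in
#   ironic node-list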
{% endif %}
{% if step_root_device_size|bool %}
## * Get node UUIDs
## ::
export items="$( ironic node-list | awk '/power/ {print $2}' )"
## * Find each node's disk size from instackenv.json
## ::
export DISK_SIZE="$( jq '.["nodes"][]["disk"] | tonumber' instackenv.json )"
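# jq emits one disk size (in GB) per node; the whitespace-separated values
# are split into ARRAY_DISK_SIZE below and paired positionally with the
# node UUIDs collected above.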
## * Update nodes with a disk size hint
## ::
count=0
ARRAY_DISK_SIZE=($(echo $DISK_SIZE))
ROOT_DEVICE_SIZE={{ disk_root_device_size }}
for item in $items; do
    if [ $ROOT_DEVICE_SIZE -ge ${ARRAY_DISK_SIZE[$count]} ]; then
        declare -i ADS
        ADS=${ARRAY_DISK_SIZE[$count]}
        ironic node-update $item add properties/root_device='{"size": '$ADS'}'
    fi
    count=$((count+1))
done
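# Illustrative check (not run here): the hint lands in the node's properties,
# e.g.
#   ironic node-show <uuid> --fields properties
# should include root_device={"size": <disk_gb>}.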
# Temporary step to avoid introspection failure
# Workaround for: https://bugs.launchpad.net/tripleo/+bug/1649350
sudo mistral-db-manage populate
{% endif %}
{% if step_root_device_hints|bool %}
## * Get node UUIDs
## ::
export ironic_nodes="$( ironic node-list | awk '/power/ {print $2}' )"
for ironic_node in $ironic_nodes; do
    # extract IP from ironic node details
    ip_address=$(ironic node-show $ironic_node --fields driver_info | sed -n "s/.*ipmi_address': u'\([0-9\.]*\)'.*/\1/p")
    # get information for the matching template
{% for node in root_device_hints %}
    NODE_IP="{{ node['ip'] }}"
    if [ "$NODE_IP" == "$ip_address" ]; then
        # set property
        ironic node-update $ironic_node add properties/root_device='{"{{ node['key'] }}": "{{ node['value'] }}"}'
    fi
{% endfor %}
done
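# Note: the Jinja for-loop above expands into one if-block per entry in
# root_device_hints, so each node is matched by its IPMI address and tagged
# with its own hint (e.g. a "serial" or "wwn" key).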
# Temporary step to avoid introspection failure
# Workaround for: https://bugs.launchpad.net/tripleo/+bug/1649350
sudo mistral-db-manage populate
{% endif %}
{% if step_introspect|bool %}
## * Introspect hardware attributes of nodes.
## ::
{% if release in ['mitaka'] %}
openstack baremetal introspection bulk start
{% else %}
openstack overcloud node introspect --all-manageable
openstack overcloud node provide --all-manageable
{% endif %}
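# Illustrative check (not run here), assuming the ironic-inspector client
# plugin is installed:
#   openstack baremetal introspection status <uuid>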
{% endif %}
{% if step_introspect_with_retry|bool %}
## * Introspect all manageable nodes with a caller-provided timeout,
## then move all nodes that power off after a successful introspection
## back to available so we don't introspect them again. This is useful
## for large deployments (think 10+ nodes) where bulk introspection
## can be troublesome. It's also useful in environments where connection
## problems may spuriously fail a deployment. Related-Bug: #1651127
## ::
introspect()
{
    # Kick off introspection on every node currently in "manageable" state.
    for node in `ironic --json node-list | jq -r '.[]| select(.["provision_state"] == "manageable")| .["uuid"]'`; do
        openstack baremetal introspection start $node
        sleep 30s
    done
    # Wait for introspection to actually begin: at least one node powers on,
    # or no manageable nodes remain.
    manageable_count=1
    on_count=0
    while [ $on_count -eq 0 ] && [ $manageable_count -gt 0 ]; do
        manageable_count=$(ironic --json node-list | jq -r '.[]| select(.["provision_state"] == "manageable")| .["uuid"]' | wc -l)
        on_count=$(ironic --json node-list|jq -r '.[]| select(.["power_state"] == "power on")| .["uuid"]' | wc -l)
        sleep 30
    done
    # Wait (up to the caller-provided timeout) for every node to power back
    # off, which signals that its introspection run has finished.
    set +e
    timeout $1 bash -c -- 'source $HOME/stackrc; \
        on_count=$(ironic --json node-list|jq -r ".[]| select(.[\"power_state\"] == \"power on\")| .[\"uuid\"]" | wc -l) ; \
        while [ $on_count -gt 0 ]; do \
            sleep 30s; \
            on_count=$(ironic --json node-list|jq -r ".[]| select(.[\"power_state\"] == \"power on\")| .[\"uuid\"]" | wc -l) ; \
        done'
    set -e
    # Nodes that powered off and are still manageable finished introspection;
    # move them to "available" so later passes skip them.
    for node in `ironic --json node-list | jq -r '.[]| select(.["power_state"] == "power off")| select(.["provision_state"] == "manageable")|.["uuid"]'`; do
        ironic node-set-provision-state $node provide
    done
}
## * Introspect hardware attributes of nodes in a robust manner,
## retrying up to three times on any given node. This should
## only be used in cases where deployment using bulk introspection
## has reliability issues.
## ::
for node in `ironic --json node-list | jq -r '.[]| select(.["provision_state"] == "available")| .["uuid"]'`; do
    ironic node-set-provision-state $node manage
done
introspect 15m
introspect 30m
introspect 60m
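# Each pass only re-introspects nodes still in "manageable" state; nodes that
# completed a previous pass were already moved to available, so the growing
# timeouts simply give stragglers more room on each retry.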
{% endif %}
### --stop_docs