Remove unused scripts

All these scripts are replaced by python-tripleoclient or manual docs
(in the tempest case). For better or worse, I don't think a deprecation
period is necessary here: we are well past that point, and in reality
these scripts no longer work anyway.

Change-Id: I9d4f0e055a19bf662661f47ea328ec919e03ddda
This commit is contained in:
James Slagle 2016-01-05 14:52:57 -05:00 committed by Dmitry Tantsur
parent c18e09fbbc
commit c2d5c0f862
7 changed files with 0 additions and 1465 deletions

View File

@ -1,340 +0,0 @@
#!/bin/bash
# Build the instack-undercloud deploy/discovery ramdisks and overcloud images
# with diskimage-builder. Usage: build-images [image_build]; "default" builds
# the standard image set, any other value is eval'd as a build function name.
set -eux
set -o pipefail
export image_build=${1:-"default"}
# Attempt to be smart about detecting the path to the instack-undercloud
# elements
INSTACKUNDERCLOUDELEMENTS=/usr/share/instack-undercloud
if [ ! -d $INSTACKUNDERCLOUDELEMENTS ]; then
INSTACKUNDERCLOUDELEMENTS=$(dirname $0)/../elements
fi
TRIPLEOPUPPETELEMENTS=/usr/share/tripleo-puppet-elements
if [ ! -d $TRIPLEOPUPPETELEMENTS ]; then
# Assume it's checked out in the current directory
TRIPLEOPUPPETELEMENTS=$PWD/tripleo-puppet-elements/elements
fi
export ELEMENTS_PATH=${ELEMENTS_PATH:-"\
$TRIPLEOPUPPETELEMENTS:\
$INSTACKUNDERCLOUDELEMENTS:\
/usr/share/tripleo-image-elements:\
/usr/share/diskimage-builder/elements:\
/usr/share/openstack-heat-templates/software-config/elements"}
# Override TMP_DIR for image build.
# It defaults /tmp. But, /tmp is usually tmpfs mounted on Fedora, and dib will
# use a tmpfs on its own if there is enough free RAM.
export TMP_DIR=${TMP_DIR:-/var/tmp}
# We need to define this here, see:
# https://review.openstack.org/179502
export DIB_DEFAULT_INSTALLTYPE=${DIB_DEFAULT_INSTALLTYPE:-package}
export NODE_ARCH=${NODE_ARCH:-amd64}
export USE_DELOREAN_TRUNK=${USE_DELOREAN_TRUNK:-0}
export DELOREAN_TRUNK_REPO=${DELOREAN_TRUNK_REPO:-"http://trunk.rdoproject.org/kilo/centos7/latest-RDO-kilo-CI/"}
export DELOREAN_REPO_FILE=${DELOREAN_REPO_FILE:-"delorean-kilo.repo"}
export NODE_DIST=${NODE_DIST:-""}
# Attempt to detect $NODE_DIST, exit 1 if we can't.
# Run grep directly as the condition instead of wrapping it in $(...):
# the old `if $(grep -q ...)` form relied on bash's handling of an empty
# expansion's exit status (shellcheck SC2091).
if [ -z "$NODE_DIST" ]; then
if grep -Eqs 'Red Hat Enterprise Linux' /etc/redhat-release; then
export NODE_DIST=rhel7
elif grep -Eqs 'CentOS' /etc/redhat-release; then
export NODE_DIST=centos7
elif grep -Eqs 'Fedora' /etc/redhat-release; then
export NODE_DIST=fedora
else
# Diagnostics go to stderr so callers capturing stdout are not polluted.
echo "Could not detect distribution from /etc/redhat-release!" >&2
exit 1
fi
fi
# Set specific variables based on $NODE_DIST
# NOTE(review): these branches pre-seed repo and element defaults; later
# statements append to DIB_COMMON_ELEMENTS, so statement order matters here.
if [ "$NODE_DIST" = "rhel7" ]; then
export REG_METHOD=${REG_METHOD:-disable}
# EPEL/rdo get enabled when RHOS=0
export RHOS=${RHOS:-"0"}
# But we still actually need it to enable base RHEL repos.
export RUN_RHOS_RELEASE=${RUN_RHOS_RELEASE:-"0"}
if [ "${RUN_RHOS_RELEASE}" = "1" ]; then
export RHOS_RELEASE=${RHOS_RELEASE:-6}
export DIB_COMMON_ELEMENTS=rhos-release
fi
export DELOREAN_REPO_URL=$DELOREAN_TRUNK_REPO
elif [ "$NODE_DIST" = "centos7" ]; then
export DELOREAN_REPO_URL=$DELOREAN_TRUNK_REPO
# SELinux permissive for CentOS for now
export DIB_COMMON_ELEMENTS="selinux-permissive centos-cloud-repo"
# lshw has moved from EPEL to the CentOS cr repo. However, at this time adding
# the cr repo to the overcloud images results in openvswitch crashes, so we can
# only add it to the discovery image.
export DISCOVERY_IMAGE_ELEMENT="delorean-rdo-management ironic-discoverd-ramdisk-instack centos-cr"
fi
export DEPLOY_IMAGE_ELEMENT=${DEPLOY_IMAGE_ELEMENT:-deploy-ironic}
export DEPLOY_NAME=${DEPLOY_NAME:-deploy-ramdisk-ironic}
# Include delorean-rdo-management with the discovery ramdisk build so that we
# can install python-hardware from somewhere.
# (If the centos7 branch above already set DISCOVERY_IMAGE_ELEMENT, the
# :- default below is a no-op.)
if [ "${RHOS:-0}" = "0" ]; then
export DISCOVERY_IMAGE_ELEMENT=${DISCOVERY_IMAGE_ELEMENT:-"delorean-rdo-management ironic-discoverd-ramdisk-instack"}
else
export DISCOVERY_IMAGE_ELEMENT=${DISCOVERY_IMAGE_ELEMENT:-"ironic-discoverd-ramdisk-instack"}
fi
export DISCOVERY_NAME=${DISCOVERY_NAME:-discovery-ramdisk}
# Always add the manifest and network-gateway elements to whatever
# dist-specific elements were accumulated above.
export DIB_COMMON_ELEMENTS=${DIB_COMMON_ELEMENTS:-""}
export DIB_COMMON_ELEMENTS="$DIB_COMMON_ELEMENTS \
element-manifest \
network-gateway \
"
if [[ "rhel7 centos7" =~ "$NODE_DIST" ]]; then
# Default filesystem type is XFS for RHEL 7
export FS_TYPE=${FS_TYPE:-xfs}
fi
if [ "$NODE_DIST" = "rhel7" ]; then
export RHOS_RELEASE=${RHOS_RELEASE:-"0"}
if [ "$RHOS" = "0" ]; then
export RDO_RELEASE=kilo
export DIB_COMMON_ELEMENTS="$DIB_COMMON_ELEMENTS epel rdo-release"
elif [ ! "$RHOS_RELEASE" = "0" ]; then
export DIB_COMMON_ELEMENTS="$DIB_COMMON_ELEMENTS rhos-release"
fi
elif [ "$NODE_DIST" = "centos7" ]; then
export RDO_RELEASE=kilo
export DIB_COMMON_ELEMENTS="$DIB_COMMON_ELEMENTS epel rdo-release"
fi
# PACKAGES=1 (default) installs from packages rather than from source.
export PACKAGES=${PACKAGES:-"1"}
if [ "$PACKAGES" = "1" ]; then
export DIB_COMMON_ELEMENTS="$DIB_COMMON_ELEMENTS undercloud-package-install pip-and-virtualenv-override"
fi
# Puppet overcloud-specific configuration
export PUPPET_COMMON_ELEMENTS="\
sysctl \
hosts \
baremetal \
dhcp-all-interfaces \
os-collect-config \
heat-config-puppet \
heat-config-script \
puppet-modules \
hiera \
os-net-config \
stable-interface-names \
grub2-deprecated \
-p python-psutil,python-debtcollector \
"
if [ "$USE_DELOREAN_TRUNK" = "1" ]; then
export PUPPET_COMMON_ELEMENTS="$PUPPET_COMMON_ELEMENTS delorean-repo"
fi
# Element lists passed to disk-image-create for each overcloud image flavor.
export OVERCLOUD_FULL_DIB_EXTRA_ARGS=${OVERCLOUD_FULL_DIB_EXTRA_ARGS:-"\
$PUPPET_COMMON_ELEMENTS \
overcloud-full \
overcloud-controller \
overcloud-compute \
overcloud-ceph-storage \
"}
# There are new deps on python-psutil and python-debtcollector but the Nova and Cinder
# packages are not updated yet to actually require them.
export OVERCLOUD_CONTROL_DIB_EXTRA_ARGS=${OVERCLOUD_CONTROL_DIB_EXTRA_ARGS:-"\
$PUPPET_COMMON_ELEMENTS
overcloud-controller \
"}
export OVERCLOUD_COMPUTE_DIB_EXTRA_ARGS=${OVERCLOUD_COMPUTE_DIB_EXTRA_ARGS:-"\
$PUPPET_COMMON_ELEMENTS
overcloud-compute
"}
export OVERCLOUD_CEPHSTORAGE_DIB_EXTRA_ARGS=${OVERCLOUD_CEPHSTORAGE_DIB_EXTRA_ARGS:-"\
$PUPPET_COMMON_ELEMENTS
overcloud-ceph-storage \
"}
# The cinder and swift images are not puppet-based; they get explicit
# element lists instead of $PUPPET_COMMON_ELEMENTS.
export OVERCLOUD_CINDER_DIB_EXTRA_ARGS=${OVERCLOUD_CINDER_DIB_EXTRA_ARGS:-"\
baremetal \
base \
cinder-lio \
common-venv \
dhcp-all-interfaces \
hosts \
ntp \
os-collect-config \
pip-cache \
pypi-openstack \
snmpd \
stable-interface-names \
use-ephemeral \
sysctl \
"}
export OVERCLOUD_SWIFT_DIB_EXTRA_ARGS=${OVERCLOUD_SWIFT_DIB_EXTRA_ARGS:-"\
pip-cache \
pypi-openstack \
swift-storage \
os-collect-config \
baremetal \
base \
common-venv \
dhcp-all-interfaces \
hosts \
ntp \
snmpd \
stable-interface-names \
use-ephemeral \
os-refresh-config-reboot \
sysctl \
"}
function deploy-ramdisk {
    # Build the Ironic deploy ramdisk (kernel + initramfs) unless both
    # artifacts already exist in the current directory.
    # Uses two [ ] tests joined with || instead of the obsolescent
    # [ ... -o ... ] form (shellcheck SC2166).
    if [ ! -f $DEPLOY_NAME.initramfs ] || [ ! -f $DEPLOY_NAME.kernel ]; then
        ramdisk-image-create \
            -a $NODE_ARCH \
            -o $DEPLOY_NAME \
            --ramdisk-element dracut-ramdisk \
            $NODE_DIST $DEPLOY_IMAGE_ELEMENT \
            $DIB_COMMON_ELEMENTS \
            2>&1 | tee dib-deploy.log
    fi
}
function discovery-ramdisk {
    # Build the node-discovery ramdisk unless both artifacts already exist.
    # Uses || instead of the obsolescent [ ... -o ... ] test (SC2166).
    if [ ! -f $DISCOVERY_NAME.initramfs ] || [ ! -f $DISCOVERY_NAME.kernel ]; then
        ramdisk-image-create \
            -a $NODE_ARCH \
            -o $DISCOVERY_NAME \
            --ramdisk-element dracut-ramdisk \
            $NODE_DIST $DISCOVERY_IMAGE_ELEMENT \
            $DIB_COMMON_ELEMENTS \
            2>&1 | tee dib-discovery.log
    fi
}
function overcloud-control {
    # Build the overcloud-control qcow2 image; a pre-existing image is
    # treated as up to date and left alone.
    if [ -f overcloud-control.qcow2 ]; then
        return 0
    fi
    disk-image-create \
        -a $NODE_ARCH \
        -o overcloud-control \
        $NODE_DIST \
        $OVERCLOUD_CONTROL_DIB_EXTRA_ARGS \
        $DIB_COMMON_ELEMENTS \
        2>&1 | tee dib-overcloud-control.log
}
function overcloud-compute {
    # Build the overcloud-compute qcow2 image; skip when it already exists.
    if [ -f overcloud-compute.qcow2 ]; then
        return 0
    fi
    disk-image-create \
        -a $NODE_ARCH \
        -o overcloud-compute \
        $NODE_DIST \
        $OVERCLOUD_COMPUTE_DIB_EXTRA_ARGS \
        $DIB_COMMON_ELEMENTS \
        2>&1 | tee dib-overcloud-compute.log
}
function overcloud-ceph-storage {
    # Build the overcloud-ceph-storage qcow2 image; skip when it already exists.
    if [ -f overcloud-ceph-storage.qcow2 ]; then
        return 0
    fi
    disk-image-create \
        -a $NODE_ARCH \
        -o overcloud-ceph-storage \
        $NODE_DIST \
        $OVERCLOUD_CEPHSTORAGE_DIB_EXTRA_ARGS \
        $DIB_COMMON_ELEMENTS \
        2>&1 | tee dib-overcloud-ceph-storage.log
}
function overcloud-cinder-volume {
    # Build the overcloud-cinder-volume qcow2 image; skip when it already exists.
    if [ -f overcloud-cinder-volume.qcow2 ]; then
        return 0
    fi
    disk-image-create \
        -a $NODE_ARCH \
        -o overcloud-cinder-volume \
        $NODE_DIST \
        $OVERCLOUD_CINDER_DIB_EXTRA_ARGS \
        $DIB_COMMON_ELEMENTS \
        2>&1 | tee dib-overcloud-cinder-volume.log
}
function overcloud-swift-storage {
    # Build the overcloud-swift-storage qcow2 image; skip when it already exists.
    if [ -f overcloud-swift-storage.qcow2 ]; then
        return 0
    fi
    disk-image-create \
        -a $NODE_ARCH \
        -o overcloud-swift-storage \
        $NODE_DIST \
        $OVERCLOUD_SWIFT_DIB_EXTRA_ARGS \
        $DIB_COMMON_ELEMENTS \
        2>&1 | tee dib-overcloud-swift-storage.log
}
function fedora-user {
    # Provide fedora-user.qcow2: reuse the diskimage-builder download cache
    # when present, otherwise download the Fedora cloud image.
    if [ ! -f fedora-user.qcow2 ]; then
        if [ -f ~/.cache/image-create/fedora-21.x86_64.qcow2 ]; then
            # Just copy the already downloaded Fedora cloud image as fedora-user.qcow2
            cp ~/.cache/image-create/fedora-21.x86_64.qcow2 fedora-user.qcow2
        else
            # Download the image. -f makes curl fail on HTTP errors instead of
            # silently saving the server's error page as the "image", which
            # would previously let the build continue with a corrupt file.
            curl -f -o fedora-user.qcow2 -L http://cloud.fedoraproject.org/fedora-21.x86_64.qcow2
        fi
        # The perms always seem to be wrong when copying out of the cache, so
        # fix them
        chmod 644 fedora-user.qcow2
    fi
}
function overcloud-full {
    # Build the all-in-one overcloud-full qcow2 image; skip when it exists.
    if [ -f overcloud-full.qcow2 ]; then
        return 0
    fi
    disk-image-create \
        -a $NODE_ARCH \
        -o overcloud-full \
        $NODE_DIST \
        $OVERCLOUD_FULL_DIB_EXTRA_ARGS \
        $DIB_COMMON_ELEMENTS \
        $PUPPET_COMMON_ELEMENTS \
        2>&1 | tee dib-overcloud-full.log
}
function os-disk-config {
    # Super basic image including os-disk-config for demonstrating its
    # functionality; skipped when the image already exists.
    if [ -f os-disk-config.qcow2 ]; then
        return 0
    fi
    # Deliberately drop the common elements so the demo image stays minimal.
    unset DIB_COMMON_ELEMENTS
    disk-image-create \
        -a $NODE_ARCH \
        -o os-disk-config \
        $NODE_DIST \
        os-disk-config baremetal \
        2>&1 | tee dib-os-disk-config.log
}
# Dispatch: "default" builds the standard image set; any other argument is
# eval'd, letting the caller name one of the build functions above.
case "$image_build" in
    default)
        fedora-user
        deploy-ramdisk
        discovery-ramdisk
        overcloud-full
        echo "Successfully built all necessary images."
        ;;
    *)
        eval "$image_build"
        echo "Successfully built $image_build image."
        ;;
esac

View File

@ -1,525 +0,0 @@
#!/bin/bash
# Deploy an overcloud Heat stack from the undercloud. Requires a sourced
# undercloud stackrc (checked via OS_AUTH_URL below).
set -eux
set -o pipefail
SCRIPT_NAME=$(basename $0)
OS_AUTH_URL=${OS_AUTH_URL:-""}
if [ -z "$OS_AUTH_URL" ]; then
echo "You must source a stackrc file for the Undercloud."
exit 1
fi
# Option flags, filled in by the getopt loop below.
TUSKAR=
TEMPEST=
# Default stack create timeout, in minutes
# Note heat defaults to 60mins, which may not be enough when
# creating large overclouds, 240 aligns with the undercloud keystone
# token expiry, which is increased from the default 1 hour to 4.
TIMEOUT=240
function show_options {
    # Print usage to stdout, then exit with the status code given in $1.
    cat <<USAGE
Usage: $SCRIPT_NAME [options]

Deploys an Overcloud.

Options:
 --tuskar -- will use tuskar for building the heat Template
 --timeout -- create timeout in minutes, default $TIMEOUT
 --tempest -- run tempest tests after deployment

USAGE
    exit "$1"
}
# Parse options with getopt(1): short -h plus the long options listed below.
TEMP=$(getopt -o ,h -l,tuskar,timeout:,tempest,help -n $SCRIPT_NAME -- "$@")
if [ $? != 0 ]; then
echo "Terminating..." >&2;
exit 1;
fi
# Note the quotes around `$TEMP': they are essential!
eval set -- "$TEMP"
while true ; do
case "$1" in
--tuskar) TUSKAR="1"; shift 1;;
--timeout) TIMEOUT="$2"; shift 2;;
--tempest) TEMPEST="1"; shift 1;;
-h | --help) show_options 0;;
--) shift ; break ;;
*) echo "Error: unsupported option $1." ; exit 1 ;;
esac
done
# setup-baremetal requires this to be set
export TRIPLEO_ROOT=.
export INSTACK_ROOT=${INSTACK_ROOT:-"/usr/share"}
export NODES_JSON=${NODES_JSON:-"instackenv.json"}
export USE_IRONIC=1
export ROOT_DISK=${ROOT_DISK:-10}
# Must wait for baremetal nodes to register as nova hypervisors
# Sum expected node/memory/vcpu totals from the instackenv file and poll
# (up to 180 x 1s) until the hypervisor stats match.
expected_nodes=$(jq ".nodes | length" $NODES_JSON)
expected_memory=$(jq ".nodes | map(.memory | tonumber) | add" $NODES_JSON)
expected_vcpus=$(jq ".nodes | map(.cpu | tonumber) | add" $NODES_JSON)
tripleo wait_for 180 1 wait_for_hypervisor_stats $expected_nodes $expected_memory $expected_vcpus
# Create the stack on first run, update it on subsequent runs.
if heat stack-show overcloud > /dev/null; then
HEAT_OP=${HEAT_OP:-stack-update}
else
HEAT_OP=${HEAT_OP:-stack-create}
fi
# -o regenerates passwords on create; updates reuse the existing file.
if [ "$HEAT_OP" = "stack-create" ]; then
tripleo setup-overcloud-passwords -o tripleo-overcloud-passwords
else
tripleo setup-overcloud-passwords tripleo-overcloud-passwords
fi
source tripleo-overcloud-passwords
# NOTE(bnemec): Hopefully this script will eventually be converted to
# Python and then we can kill this sort of hackery.
# NOTE(review): uses the python2-only ConfigParser module name (renamed
# configparser in python3), so this line requires a python2 interpreter.
UNDERCLOUD_CEILOMETER_SNMPD_PASSWORD=$(python -c "import ConfigParser; p = ConfigParser.ConfigParser(); p.read('undercloud-passwords.conf'); print(p.get('auth', 'undercloud_ceilometer_snmpd_password'))")
# Neutron networking defaults for the overcloud.
NeutronFlatNetworks=${NeutronFlatNetworks:-'datacentre'}
NeutronPhysicalBridge=${NeutronPhysicalBridge:-'br-ex'}
NeutronBridgeMappings=${NeutronBridgeMappings:-'datacentre:br-ex'}
# Define the interface that will be bridged onto the Neutron defined
# network.
NeutronPublicInterface=${NeutronPublicInterface:-nic1}
HypervisorNeutronPublicInterface=${HypervisorNeutronPublicInterface:-$NeutronPublicInterface}
NEUTRON_NETWORK_TYPE=${NEUTRON_NETWORK_TYPE:-gre}
NEUTRON_TUNNEL_TYPES=${NEUTRON_TUNNEL_TYPES:-gre}
# Define the overcloud libvirt type for virtualization. kvm for
# baremetal, qemu for an overcloud running in vm's.
OVERCLOUD_LIBVIRT_TYPE=${OVERCLOUD_LIBVIRT_TYPE:-qemu}
NtpServer=${NtpServer:-""}
# Node counts per role.
CONTROLSCALE=${CONTROLSCALE:-1}
COMPUTESCALE=${COMPUTESCALE:-1}
CEPHSTORAGESCALE=${CEPHSTORAGESCALE:-0}
BLOCKSTORAGESCALE=${BLOCKSTORAGESCALE:-0}
SWIFTSTORAGESCALE=${SWIFTSTORAGESCALE:-0}
# Default all image parameters to use overcloud-full
OVERCLOUD_CONTROLLER_IMAGE=${OVERCLOUD_CONTROLLER_IMAGE:-overcloud-full}
OVERCLOUD_COMPUTE_IMAGE=${OVERCLOUD_COMPUTE_IMAGE:-overcloud-full}
OVERCLOUD_BLOCKSTORAGE_IMAGE=${OVERCLOUD_BLOCKSTORAGE_IMAGE:-overcloud-full}
OVERCLOUD_SWIFTSTORAGE_IMAGE=${OVERCLOUD_SWIFTSTORAGE_IMAGE:-overcloud-full}
OVERCLOUD_CEPHSTORAGE_IMAGE=${OVERCLOUD_CEPHSTORAGE_IMAGE:-overcloud-full}
# Default flavor parameters
export OVERCLOUD_CONTROL_FLAVOR=${OVERCLOUD_CONTROL_FLAVOR:-"baremetal"}
export OVERCLOUD_COMPUTE_FLAVOR=${OVERCLOUD_COMPUTE_FLAVOR:-"baremetal"}
export OVERCLOUD_CEPHSTORAGE_FLAVOR=${OVERCLOUD_CEPHSTORAGE_FLAVOR:-"baremetal"}
# Even though we are not deploying nodes with these roles, the templates will
# still validate that a flavor exists, so just use the baremetal_compute flavor
# for now.
export OVERCLOUD_BLOCKSTORAGE_FLAVOR=${OVERCLOUD_BLOCKSTORAGE_FLAVOR:-"baremetal"}
export OVERCLOUD_SWIFTSTORAGE_FLAVOR=${OVERCLOUD_SWIFTSTORAGE_FLAVOR:-"baremetal"}
# Satellite parameters
export REG_ACTIVATION_KEY=${REG_ACTIVATION_KEY:-"''"}
export REG_ORG=${REG_ORG:-"''"}
export REG_FORCE=${REG_FORCE:-"1"}
export REG_SAT_URL=${REG_SAT_URL:-"''"}
export REG_METHOD=${REG_METHOD:-"''"}
export OVERCLOUD_RESOURCE_REGISTRY=${OVERCLOUD_RESOURCE_REGISTRY:-"/usr/share/openstack-tripleo-heat-templates/overcloud-resource-registry-puppet.yaml"}
# Look up the UUID of the undercloud provisioning network.
NeutronControlPlaneID=$(neutron net-show ctlplane | grep ' id ' | awk '{print $4}')
# $OVERCLOUD_EXTRA is for passing any additional arbitrary options to heat
# stack-{create,update}
export OVERCLOUD_EXTRA=${OVERCLOUD_EXTRA:-""}
# Satellite registration: build (or update) a JSON environment file carrying
# the rhel_reg_* parameter_defaults and wire in the registration templates.
if [ "$REG_METHOD" = "satellite" ]; then
export OVERCLOUD_SATELLITE_ENV=${OVERCLOUD_SATELLITE_ENV:-overcloud-env-satellite.json}
if [ -f $OVERCLOUD_SATELLITE_ENV ]; then
SATELLITE_ENV_JSON=$(cat $OVERCLOUD_SATELLITE_ENV)
else
SATELLITE_ENV_JSON="{}"
fi
# Fold each registration setting into the JSON document one jq pass at a time.
SATELLITE_ENV_JSON=$(jq ".parameter_defaults.rhel_reg_activation_key = \"$REG_ACTIVATION_KEY\"" <<< $SATELLITE_ENV_JSON)
SATELLITE_ENV_JSON=$(jq ".parameter_defaults.rhel_reg_sat_url = \"$REG_SAT_URL\"" <<< $SATELLITE_ENV_JSON)
SATELLITE_ENV_JSON=$(jq ".parameter_defaults.rhel_reg_org = \"$REG_ORG\"" <<< $SATELLITE_ENV_JSON)
SATELLITE_ENV_JSON=$(jq ".parameter_defaults.rhel_reg_method = \"$REG_METHOD\"" <<< $SATELLITE_ENV_JSON)
SATELLITE_ENV_JSON=$(jq ".parameter_defaults.rhel_reg_force = \"$REG_FORCE\"" <<< $SATELLITE_ENV_JSON)
echo $SATELLITE_ENV_JSON > $OVERCLOUD_SATELLITE_ENV
export OVERCLOUD_EXTRA="$OVERCLOUD_EXTRA -e /usr/share/openstack-tripleo-heat-templates/extraconfig/post_deploy/rhel-registration/rhel-registration-resource-registry.yaml"
export OVERCLOUD_EXTRA="$OVERCLOUD_EXTRA -e /usr/share/openstack-tripleo-heat-templates/extraconfig/post_deploy/rhel-registration/environment-rhel-registration.yaml"
export OVERCLOUD_EXTRA="$OVERCLOUD_EXTRA -e $OVERCLOUD_SATELLITE_ENV"
fi
# NOTE(gfidente): Taken from
# https://github.com/ceph/ceph-deploy/blob/master/ceph_deploy/new.py#L21
function create_cephx_key {
    # Generate a cephx secret: 16 random bytes prefixed with a binary header
    # (version 1, creation time, zero id, key length), base64-encoded.
    # .decode() keeps the output a plain string on both python2 and python3;
    # without it python3 would print the b'...' bytes repr.
    # local and the assignment are split so a python failure is not masked.
    local KEY
    KEY=$(python -c 'import base64,os,struct,time;key = os.urandom(16);header = struct.pack("<hiih", 1, int(time.time()), 0, len(key));print(base64.b64encode(header + key).decode())')
    echo $KEY
}
if [ -n "$TUSKAR" ]; then
# Recreate the overcloud plan from scratch: delete any pre-existing plan,
# create a fresh one, then attach every known role to it.
PLAN_ID=$(tuskar plan-show overcloud | awk '$2=="uuid" {print $4}')
if [ -n "$PLAN_ID" ]; then
tuskar plan-delete $PLAN_ID
fi
tuskar plan-create overcloud
PLAN_ID=$(tuskar plan-show overcloud | awk '$2=="uuid" {print $4}')
# Map role names to their IDs by parsing the role-list table output.
CONTROLLER_ID=$(tuskar role-list | awk '$4=="Controller" {print $2}')
COMPUTE_ID=$(tuskar role-list | awk '$4=="Compute" {print $2}')
SWIFT_ID=$(tuskar role-list | awk '$4=="Swift-Storage" {print $2}')
CINDER_ID=$(tuskar role-list | awk '$4=="Cinder-Storage" {print $2}')
CEPH_ID=$(tuskar role-list | awk '$4=="Ceph-Storage" {print $2}')
tuskar plan-add-role $PLAN_ID -r $CONTROLLER_ID
tuskar plan-add-role $PLAN_ID -r $COMPUTE_ID
tuskar plan-add-role $PLAN_ID -r $SWIFT_ID
tuskar plan-add-role $PLAN_ID -r $CINDER_ID
tuskar plan-add-role $PLAN_ID -r $CEPH_ID
# Per-role plan attributes passed to `tuskar plan-update`.
# FIX: Ceph-Storage-1::Image previously used ${OVERCLOUD_BLOCKSTORAGE_IMAGE}
# (copy-paste error); it now uses ${OVERCLOUD_CEPHSTORAGE_IMAGE}. Both
# default to overcloud-full, so default behavior is unchanged.
export TUSKAR_PARAMETERS=${TUSKAR_PARAMETERS:-"
-A NeutronControlPlaneID=${NeutronControlPlaneID}
-A Controller-1::AdminPassword=${OVERCLOUD_ADMIN_PASSWORD}
-A Controller-1::AdminToken=${OVERCLOUD_ADMIN_TOKEN}
-A Compute-1::AdminPassword=${OVERCLOUD_ADMIN_PASSWORD}
-A Controller-1::SnmpdReadonlyUserPassword=${UNDERCLOUD_CEILOMETER_SNMPD_PASSWORD}
-A Cinder-Storage-1::SnmpdReadonlyUserPassword=${UNDERCLOUD_CEILOMETER_SNMPD_PASSWORD}
-A Swift-Storage-1::SnmpdReadonlyUserPassword=${UNDERCLOUD_CEILOMETER_SNMPD_PASSWORD}
-A Compute-1::SnmpdReadonlyUserPassword=${UNDERCLOUD_CEILOMETER_SNMPD_PASSWORD}
-A Controller-1::CeilometerPassword=${OVERCLOUD_CEILOMETER_PASSWORD}
-A Controller-1::CeilometerMeteringSecret=${OVERCLOUD_CEILOMETER_SECRET}
-A Compute-1::CeilometerPassword=${OVERCLOUD_CEILOMETER_PASSWORD}
-A Compute-1::CeilometerMeteringSecret=${OVERCLOUD_CEILOMETER_SECRET}
-A Controller-1::CinderPassword=${OVERCLOUD_CINDER_PASSWORD}
-A Controller-1::GlancePassword=${OVERCLOUD_GLANCE_PASSWORD}
-A Controller-1::HeatPassword=${OVERCLOUD_HEAT_PASSWORD}
-A Controller-1::NeutronPassword=${OVERCLOUD_NEUTRON_PASSWORD}
-A Compute-1::NeutronPassword=${OVERCLOUD_NEUTRON_PASSWORD}
-A Controller-1::NovaPassword=${OVERCLOUD_NOVA_PASSWORD}
-A Compute-1::NovaPassword=${OVERCLOUD_NOVA_PASSWORD}
-A Controller-1::SwiftHashSuffix=${OVERCLOUD_SWIFT_HASH}
-A Controller-1::SwiftPassword=${OVERCLOUD_SWIFT_PASSWORD}
-A Controller-1::CinderISCSIHelper=lioadm
-A Cinder-Storage-1::CinderISCSIHelper=lioadm
-A Controller-1::CloudName=overcloud
-A Controller-1::NeutronPublicInterface=$NeutronPublicInterface
-A Controller-1::NeutronBridgeMappings=$NeutronBridgeMappings
-A Compute-1::NeutronBridgeMappings=$NeutronBridgeMappings
-A Controller-1::NeutronFlatNetworks=$NeutronFlatNetworks
-A Compute-1::NeutronFlatNetworks=$NeutronFlatNetworks
-A Compute-1::NeutronPhysicalBridge=$NeutronPhysicalBridge
-A Compute-1::NeutronPublicInterface=$NeutronPublicInterface
-A Compute-1::NovaComputeLibvirtType=$OVERCLOUD_LIBVIRT_TYPE
-A Controller-1::NtpServer=${NtpServer}
-A Compute-1::NtpServer=${NtpServer}
-A Controller-1::NeutronNetworkType=${NEUTRON_NETWORK_TYPE}
-A Compute-1::NeutronNetworkType=${NEUTRON_NETWORK_TYPE}
-A Controller-1::NeutronTunnelTypes=${NEUTRON_TUNNEL_TYPES}
-A Compute-1::NeutronTunnelTypes=${NEUTRON_TUNNEL_TYPES}
-A Controller-1::count=${CONTROLSCALE}
-A Compute-1::count=${COMPUTESCALE}
-A Swift-Storage-1::count=${SWIFTSTORAGESCALE}
-A Cinder-Storage-1::count=${BLOCKSTORAGESCALE}
-A Ceph-Storage-1::count=${CEPHSTORAGESCALE}
-A Cinder-Storage-1::Flavor=${OVERCLOUD_BLOCKSTORAGE_FLAVOR}
-A Compute-1::Flavor=${OVERCLOUD_COMPUTE_FLAVOR}
-A Controller-1::Flavor=${OVERCLOUD_CONTROL_FLAVOR}
-A Swift-Storage-1::Flavor=${OVERCLOUD_SWIFTSTORAGE_FLAVOR}
-A Ceph-Storage-1::Flavor=${OVERCLOUD_CEPHSTORAGE_FLAVOR}
-A Swift-Storage-1::Image=${OVERCLOUD_SWIFTSTORAGE_IMAGE}
-A Cinder-Storage-1::Image=${OVERCLOUD_BLOCKSTORAGE_IMAGE}
-A Ceph-Storage-1::Image=${OVERCLOUD_CEPHSTORAGE_IMAGE}
-A Controller-1::Image=${OVERCLOUD_CONTROLLER_IMAGE}
-A Compute-1::Image=${OVERCLOUD_COMPUTE_IMAGE}
"}
# With more than one controller, enable Neutron L3 HA and disable agent
# failover (they are mutually exclusive).
if [ $CONTROLSCALE -gt 1 ]; then
export TUSKAR_PARAMETERS="$TUSKAR_PARAMETERS
-A Controller-1::NeutronL3HA=True
-A Controller-1::NeutronAllowL3AgentFailover=False
-A Compute-1::NeutronL3HA=True
-A Compute-1::NeutronAllowL3AgentFailover=False
"
fi
# unsure if these should be present with puppet
#export TUSKAR_PARAMETERS="$TUSKAR_PARAMETERS
#-A cinder-storage-1::CinderPassword=${OVERCLOUD_CINDER_PASSWORD}
#-A swift-storage-1::NeutronNetworkType=${NEUTRON_NETWORK_TYPE}
#-A cinder-storage-1::AdminPassword=${OVERCLOUD_ADMIN_PASSWORD}
#"
# When Ceph nodes are requested, generate cluster credentials and switch
# Cinder/Glance/Nova to the RBD backends.
if [ $CEPHSTORAGESCALE -gt 0 ]; then
FSID=$(uuidgen)
MON_KEY=$(create_cephx_key)
ADMIN_KEY=$(create_cephx_key)
CINDER_ISCSI=${CINDER_ISCSI:-0}
export TUSKAR_PARAMETERS="$TUSKAR_PARAMETERS
-A Controller-1::CinderEnableRbdBackend=True
-A Controller-1::GlanceBackend=rbd
-A CephClusterFSID=$FSID
-A CephMonKey=$MON_KEY
-A CephAdminKey=$ADMIN_KEY
-A Compute-1::NovaEnableRbdBackend=True
"
if [ $CINDER_ISCSI -eq 0 ]; then
export TUSKAR_PARAMETERS="$TUSKAR_PARAMETERS
-A Controller-1::CinderEnableIscsiBackend=false
"
else
# (this is the default parameter value)
export TUSKAR_PARAMETERS="$TUSKAR_PARAMETERS
-A Controller-1::CinderEnableIscsiBackend=true
"
fi
fi
# These attributes can't be changed in Tuskar-UI, so this is the only
# difference to deploying through UI
# -A NeutronDnsmasqOptions=dhcp-option-force=26,1400
# -A NeutronNetworkType=${NEUTRON_NETWORK_TYPE}
# -A NeutronTunnelTypes=${NEUTRON_TUNNEL_TYPES}
tuskar plan-update $TUSKAR_PARAMETERS $PLAN_ID
# Heat environment file: reuse it on updates, start empty on creates.
HEAT_ENV=${HEAT_ENV:-"overcloud-env.json"}
if [ -f $HEAT_ENV -a "$HEAT_OP" = "stack-update" ]; then
ENV_JSON=$(cat $HEAT_ENV)
else
ENV_JSON='{"parameters":{}}'
fi
# Pretty-print/validate the JSON back into the env file; it holds keystone
# key material, hence the 0600 mode.
jq . > "${HEAT_ENV}" <<< $ENV_JSON
chmod 0600 "${HEAT_ENV}"
if [ "$HEAT_OP" = "stack-create" ]; then
generate-keystone-pki --heatenv $HEAT_ENV
fi
# Extract the generated PKI material so it can be pushed into the plan.
KeystoneCACertificate=$(os-apply-config -m $HEAT_ENV --key parameters.KeystoneCACertificate --type raw)
KeystoneSigningCertificate=$(os-apply-config -m $HEAT_ENV --key parameters.KeystoneSigningCertificate --type raw)
KeystoneSigningKey=$(os-apply-config -m $HEAT_ENV --key parameters.KeystoneSigningKey --type raw)
# Sending the Certificates one by one, otherwise there are problems with escaping
tuskar plan-update -A Controller-1::KeystoneCACertificate="${KeystoneCACertificate}" $PLAN_ID
tuskar plan-update -A Controller-1::KeystoneSigningCertificate="${KeystoneSigningCertificate}" $PLAN_ID
tuskar plan-update -A Controller-1::KeystoneSigningKey="${KeystoneSigningKey}" $PLAN_ID
# Get templates from tuskar
tuskar plan-templates -O tuskar_templates $PLAN_ID
OVERCLOUD_YAML_PATH="tuskar_templates/plan.yaml"
ENVIRONMENT_YAML_PATH="tuskar_templates/environment.yaml"
heat $HEAT_OP -t $TIMEOUT -f $OVERCLOUD_YAML_PATH \
-e $ENVIRONMENT_YAML_PATH \
$OVERCLOUD_EXTRA \
overcloud
else
OVERCLOUD_YAML_PATH="$INSTACK_ROOT/openstack-tripleo-heat-templates/overcloud-without-mergepy.yaml"
# Heat -P parameters for the non-tuskar deployment path.
# FIX: CephStorageImage previously used ${OVERCLOUD_SWIFTSTORAGE_IMAGE}
# (copy-paste error); it now uses ${OVERCLOUD_CEPHSTORAGE_IMAGE}. Both
# default to overcloud-full, so default behavior is unchanged.
export OVERCLOUD_PARAMETERS=${OVERCLOUD_PARAMETERS:-"\
-P AdminPassword=${OVERCLOUD_ADMIN_PASSWORD} \
-P AdminToken=${OVERCLOUD_ADMIN_TOKEN} \
-P CeilometerPassword=${OVERCLOUD_CEILOMETER_PASSWORD} \
-P CeilometerMeteringSecret=${OVERCLOUD_CEILOMETER_SECRET} \
-P CinderPassword=${OVERCLOUD_CINDER_PASSWORD} \
-P CinderISCSIHelper=lioadm \
-P CloudName=overcloud \
-P GlancePassword=${OVERCLOUD_GLANCE_PASSWORD} \
-P HeatPassword=${OVERCLOUD_HEAT_PASSWORD} \
-P NeutronControlPlaneID=$NeutronControlPlaneID \
-P NeutronDnsmasqOptions=dhcp-option-force=26,1400 \
-P NeutronPassword=${OVERCLOUD_NEUTRON_PASSWORD} \
-P NeutronPublicInterface=$NeutronPublicInterface \
-P NeutronBridgeMappings=$NeutronBridgeMappings \
-P NeutronFlatNetworks=$NeutronFlatNetworks \
-P HypervisorNeutronPhysicalBridge=$NeutronPhysicalBridge \
-P HypervisorNeutronPublicInterface=$HypervisorNeutronPublicInterface \
-P NovaComputeLibvirtType=$OVERCLOUD_LIBVIRT_TYPE \
-P NovaPassword=${OVERCLOUD_NOVA_PASSWORD} \
-P SwiftHashSuffix=${OVERCLOUD_SWIFT_HASH} \
-P SwiftPassword=${OVERCLOUD_SWIFT_PASSWORD} \
-P NeutronNetworkType=${NEUTRON_NETWORK_TYPE} \
-P NeutronTunnelTypes=${NEUTRON_TUNNEL_TYPES} \
-P SnmpdReadonlyUserPassword=${UNDERCLOUD_CEILOMETER_SNMPD_PASSWORD} \
-P OvercloudControlFlavor=${OVERCLOUD_CONTROL_FLAVOR} \
-P OvercloudComputeFlavor=${OVERCLOUD_COMPUTE_FLAVOR} \
-P OvercloudBlockStorageFlavor=${OVERCLOUD_BLOCKSTORAGE_FLAVOR} \
-P OvercloudSwiftStorageFlavor=${OVERCLOUD_SWIFTSTORAGE_FLAVOR} \
-P OvercloudCephStorageFlavor=${OVERCLOUD_CEPHSTORAGE_FLAVOR} \
-P NtpServer=${NtpServer} \
-P controllerImage=${OVERCLOUD_CONTROLLER_IMAGE} \
-P NovaImage=${OVERCLOUD_COMPUTE_IMAGE} \
-P BlockStorageImage=${OVERCLOUD_BLOCKSTORAGE_IMAGE} \
-P SwiftStorageImage=${OVERCLOUD_SWIFTSTORAGE_IMAGE} \
-P CephStorageImage=${OVERCLOUD_CEPHSTORAGE_IMAGE} \
-P Debug=True \
"}
# With more than one controller, enable Neutron L3 HA and disable agent
# failover (they are mutually exclusive).
if [ $CONTROLSCALE -gt 1 ]; then
export OVERCLOUD_PARAMETERS="$OVERCLOUD_PARAMETERS \
-P NeutronL3HA=True \
-P NeutronAllowL3AgentFailover=False \
"
fi
# When Ceph nodes are requested, generate cluster credentials and switch
# Cinder/Glance/Nova to the RBD backends.
if [ $CEPHSTORAGESCALE -gt 0 ]; then
FSID=$(uuidgen)
MON_KEY=$(create_cephx_key)
ADMIN_KEY=$(create_cephx_key)
CINDER_ISCSI=${CINDER_ISCSI:-0}
export OVERCLOUD_PARAMETERS="$OVERCLOUD_PARAMETERS \
-P CephClusterFSID=$FSID \
-P CephMonKey=$MON_KEY \
-P CephAdminKey=$ADMIN_KEY \
-P CinderEnableRbdBackend=True \
-P NovaEnableRbdBackend=True \
-P GlanceBackend=rbd \
"
if [ $CINDER_ISCSI -eq 0 ]; then
export OVERCLOUD_PARAMETERS="$OVERCLOUD_PARAMETERS \
-P CinderEnableIscsiBackend=False
"
else
# (this is the default parameter value)
export OVERCLOUD_PARAMETERS="$OVERCLOUD_PARAMETERS \
-P CinderEnableIscsiBackend=True
"
fi
fi
# Heat environment file: reuse it on updates, start empty on creates.
HEAT_ENV=${HEAT_ENV:-"overcloud-env.json"}
if [ -f $HEAT_ENV -a "$HEAT_OP" = "stack-update" ]; then
ENV_JSON=$(cat $HEAT_ENV)
else
ENV_JSON='{"parameters":{}}'
fi
RESOURCE_REGISTRY="-e $OVERCLOUD_RESOURCE_REGISTRY"
# Inject per-role node counts into the environment's parameters section.
ENV_JSON=$(jq '.parameters = .parameters + {
"ControllerCount": '${CONTROLSCALE}',
"ComputeCount": '${COMPUTESCALE}',
"CephStorageCount": '${CEPHSTORAGESCALE}',
"BlockStorageCount": '${BLOCKSTORAGESCALE}',
"ObjectStorageCount": '${SWIFTSTORAGESCALE}'
}' <<< $ENV_JSON)
# Env file carries keystone key material, hence the 0600 mode.
jq . > "${HEAT_ENV}" <<< $ENV_JSON
chmod 0600 "${HEAT_ENV}"
if [ "$HEAT_OP" = "stack-create" ]; then
generate-keystone-pki --heatenv $HEAT_ENV
fi
HEAT_ENVIRONMENT="-e ${HEAT_ENV}"
heat $HEAT_OP -t $TIMEOUT -f $OVERCLOUD_YAML_PATH \
$RESOURCE_REGISTRY \
$OVERCLOUD_PARAMETERS \
$HEAT_ENVIRONMENT \
$OVERCLOUD_EXTRA \
overcloud
fi
# Block (up to 220 x 10s) until the stack reaches a ready state.
tripleo wait_for_stack_ready 220 10 overcloud
if [ "$HEAT_OP" = "stack-create" ]; then
echo "Overcloud CREATE_COMPLETE"
else
# Post-deploy initialization below only applies to fresh creates.
echo "Overcloud UPDATE_COMPLETE"
exit 0
fi
# Read the overcloud keystone endpoint from the stack outputs (strip the
# surrounding JSON quotes) and derive the host IP from the URL.
OVERCLOUD_ENDPOINT=$(heat output-show overcloud KeystoneURL|sed 's/^"\(.*\)"$/\1/')
export OVERCLOUD_IP=$(echo $OVERCLOUD_ENDPOINT | awk -F '[/:]' '{print $4}')
# Drop any stale SSH host key for the (possibly re-used) overcloud IP.
touch ~/.ssh/known_hosts
chmod 600 ~/.ssh/known_hosts
ssh-keygen -R $OVERCLOUD_IP
# Record the overcloud credentials/endpoint back into the nodes JSON so it
# can serve as the TE_DATAFILE for later tooling.
NEW_JSON=$(jq '.overcloud.password="'${OVERCLOUD_ADMIN_PASSWORD}'" | .overcloud.endpoint="'${OVERCLOUD_ENDPOINT}'" | .overcloud.endpointhost="'${OVERCLOUD_IP}'"' $NODES_JSON)
echo $NEW_JSON > $NODES_JSON
export TE_DATAFILE=$NODES_JSON
# We're done with TRIPLEO_ROOT at this point, and the value set above doesn't
# work for creating the overcloudrc file.
unset TRIPLEO_ROOT
instack-create-overcloudrc
source ~/overcloudrc
# Initialize overcloud keystone (endpoint, admin user, default roles).
init-keystone -o $OVERCLOUD_IP -t $OVERCLOUD_ADMIN_TOKEN \
-e admin.example.com -p $OVERCLOUD_ADMIN_PASSWORD -u heat-admin \
${SSLBASE:+-s $PUBLIC_API_URL}
REGISTER_SERVICE_OPTS=""
openstack role create swiftoperator
openstack role create ResellerAdmin
tripleo setup-endpoints $OVERCLOUD_IP \
--cinder-password $OVERCLOUD_CINDER_PASSWORD \
--glance-password $OVERCLOUD_GLANCE_PASSWORD \
--heat-password $OVERCLOUD_HEAT_PASSWORD \
--neutron-password $OVERCLOUD_NEUTRON_PASSWORD \
--nova-password $OVERCLOUD_NOVA_PASSWORD \
--swift-password $OVERCLOUD_SWIFT_PASSWORD \
--ceilometer-password $OVERCLOUD_CEILOMETER_PASSWORD \
$REGISTER_SERVICE_OPTS \
${SSLBASE:+--ssl $PUBLIC_API_URL}
openstack role create heat_stack_user
# setup-neutron "" "" 10.0.0.0/8 "" "" "" "" 192.0.2.45 192.0.2.64 192.0.2.0/24
# Build the initial overcloud network layout (tenant float net + external
# floating-IP net) as a JSON description for setup-neutron.
NETWORK_CIDR=${NETWORK_CIDR:-"10.0.0.0/8"}
FLOATING_IP_START=${FLOATING_IP_START:-"192.0.2.45"}
FLOATING_IP_END=${FLOATING_IP_END:-"192.0.2.64"}
FLOATING_IP_CIDR=${FLOATING_IP_CIDR:-"192.0.2.0/24"}
OVERCLOUD_NAMESERVER="8.8.8.8"
BM_NETWORK_GATEWAY=${BM_NETWORK_GATEWAY:-"192.0.2.1"}
NETWORK_JSON=$(mktemp)
jq "." <<EOF > $NETWORK_JSON
{
"float": {
"cidr": "$NETWORK_CIDR",
"name": "default-net",
"nameserver": "$OVERCLOUD_NAMESERVER"
},
"external": {
"name": "ext-net",
"cidr": "$FLOATING_IP_CIDR",
"allocation_start": "$FLOATING_IP_START",
"allocation_end": "$FLOATING_IP_END",
"gateway": "$BM_NETWORK_GATEWAY"
}
}
EOF
setup-neutron -n $NETWORK_JSON
rm $NETWORK_JSON
# Small demo flavor so users can boot an instance immediately.
nova flavor-create m1.demo auto 512 10 1
echo "Overcloud Deployed"
# Optionally run the tempest smoke/validation scripts against the new cloud.
if [ -n "$TEMPEST" ]; then
TEMPEST_ARGS=${TEMPEST_ARGS:-} $(dirname $0)/instack-tempest-test-overcloud
$(dirname $0)/instack-test-overcloud
fi

View File

@ -1,404 +0,0 @@
#!/bin/bash
# Register, discover, configure and deploy baremetal nodes via Ironic.
# Requires a sourced undercloud stackrc (checked via OS_AUTH_URL below).
set -eu
set -o pipefail
SCRIPT_NAME=$(basename $0)
OS_AUTH_URL=${OS_AUTH_URL:-""}
if [ -z "$OS_AUTH_URL" ]; then
echo "You must source a stackrc file for the Undercloud."
exit 1
fi
# NOTE(dtantsur): last working API version
export IRONIC_API_VERSION="1.6"
function show_options {
    # Print usage to stdout, then exit with the status code given in $1.
    cat <<USAGE
Usage: $SCRIPT_NAME [options]

Deploys instances via Ironic in preparation for an Overcloud deployment.

Options:
 --register-nodes -- register nodes from a nodes json file
 --nodes-json -- nodes json file containing node data
 for registration.
 Default: nodes.json in the current directory
 --discover-nodes -- Perform discovery of registered nodes.
 Powers on registered nodes to complete
 the discovery process.
 --configure-nodes -- Configure BIOS and RAID volumes of 
 registered nodes.
 --setup-flavors -- Setup Nova flavors to match discovered
 profiles
 --show-profile -- Show matching profile of nodes
 --deploy-nodes -- Deploy nodes
 --check-ssh -- Check the root ssh connection to each
 deployed node.
 --delete-stack -- Wait until the stack has deleted.
 --delete-nodes -- Delete all nodes.
 -x -- enable tracing
 --help, -h -- Print this help message.

USAGE
    exit "$1"
}
# Parse options with getopt(1).
# NOTE(review): -o is given twice (-o ,h and -o,x,h); the later value
# appears intended to add -x, but confirm how the installed getopt treats
# a repeated -o before relying on it.
TEMP=$(getopt -o ,h -l,register-nodes,nodes-json:,discover-nodes,configure-nodes,deploy-nodes,help,setup-flavors,show-profile,check-ssh,delete-stack,delete-nodes,config-tools-provision -o,x,h -n $SCRIPT_NAME -- "$@")
if [ $? != 0 ]; then
echo "Terminating..." >&2;
exit 1;
fi
# Note the quotes around `$TEMP': they are essential!
eval set -- "$TEMP"
# Action flags, set by the option loop below; empty means "not requested".
REGISTER_NODES=
NODES_JSON=
DISCOVER_NODES=
CONFIGURE_NODES=
DEPLOY_NODES=
SETUP_FLAVORS=
SHOW_PROFILE=
CHECK_SSH=
DELETE_STACK=
DELETE_NODES=
STDERR=/dev/null
# NOTE(bnemec): Can be removed once Ironic is updated to fix the
# novaclient.v1_1 deprecation warning
export PYTHONWARNINGS="ignore"
DEPLOY_NAME=${DEPLOY_NAME:-"ironic-discover"}
# Look up the Ironic endpoint and strip any trailing slash.
IRONIC=$(openstack endpoint show baremetal | grep publicurl | awk '{ print $4; }')
IRONIC=${IRONIC%/}
# Raise retry time to 60 seconds
# NOTE(review): 24 retries x 5s interval is 120s, not 60 — the comment
# above looks stale; verify the intended total.
export IRONIC_MAX_RETRIES=${IRONIC_MAX_RETRIES:-24}
export IRONIC_RETRY_INTERVAL=${IRONIC_RETRY_INTERVAL:-5}
# Consume the normalized option list produced by getopt above.
while true ; do
case "$1" in
--register-nodes) REGISTER_NODES="1"; shift 1;;
--nodes-json) NODES_JSON="$2"; shift 2;;
--discover-nodes) DISCOVER_NODES="1"; shift 1;;
--configure-nodes) CONFIGURE_NODES="1"; shift 1;;
--show-profile) SHOW_PROFILE="1"; shift 1;;
--deploy-nodes) DEPLOY_NODES="1"; shift 1;;
--setup-flavors) SETUP_FLAVORS="1"; shift 1;;
--check-ssh) CHECK_SSH="1"; shift 1;;
--delete-stack) DELETE_STACK="1"; shift 1;;
--delete-nodes) DELETE_NODES="1"; shift 1;;
-x) set -x; STDERR=/dev/stderr; shift 1;;
-h | --help) show_options 0;;
--) shift ; break ;;
*) echo "Error: unsupported option $1." ; exit 1 ;;
esac
done
# Register baremetal nodes with Ironic from a JSON inventory file.
# Globals: NODES_JSON (read) - inventory path, defaults to "nodes.json".
#          NULL_STATS (read) - when "1", blank out memory/disk/arch/cpu in a
#          temp copy so introspection can fill in real values later.
# Side effects: creates Ironic nodes, then sets boot_option:local plus the
# deploy kernel/ramdisk on each, and prints the resulting node table.
function register_nodes {
NODES_JSON=${NODES_JSON:-"nodes.json"}
NULL_STATS=${NULL_STATS:-0}
tmp_json=$NODES_JSON
if [ "$NULL_STATS" = "1" ]; then
tmp_json=$(mktemp)
jq '.nodes[].memory=null | .nodes[].disk=null | .nodes[].arch=null | .nodes[].cpu=null' $NODES_JSON > $tmp_json
fi
echo " Registering nodes from $NODES_JSON"
register-nodes --service-host undercloud --nodes <(jq '.nodes' $tmp_json) 1>/dev/null
if [ "$NULL_STATS" = "1" ]; then
rm -f $tmp_json
fi
# Look up the previously-uploaded deploy images by name.
deploy_kernel_id=$(glance image-show bm-deploy-kernel | awk ' / id / {print $4}')
deploy_ramdisk_id=$(glance image-show bm-deploy-ramdisk | awk ' / id / {print $4}')
# Add the local boot capability and deploy_{kernel, ramdisk} parameters
# tail/head strip the ASCII table border; awk pulls the UUID column.
node_list=$(ironic node-list --limit 0)
node_ids=$(echo "$node_list" | tail -n +4 | head -n -1 | awk -F "| " '{print $2}')
for node_id in $node_ids; do
ironic node-update $node_id add properties/capabilities="boot_option:local" driver_info/deploy_ramdisk=$deploy_ramdisk_id driver_info/deploy_kernel=$deploy_kernel_id 1> /dev/null
done
echo " Nodes registered."
echo
echo "$node_list"
echo
}
# Run hardware introspection (ironic-discoverd) on every registered node.
# Moves nodes to MANAGEABLE, starts introspection with a small stagger,
# polls until every node finishes, then marks all nodes AVAILABLE.
function discover_nodes {
echo " Discovering nodes."
node_ids=$(ironic node-list | tail -n +4 | head -n -1 | awk -F "| " '{print $2}')
for node_id in $node_ids; do
# NOTE(dtantsur): ||true is in case nodes are already MANAGEABLE
ironic node-set-provision-state $node_id manage 2> /dev/null || true
echo -n " Sending node ID $node_id to discoverd for discovery ... "
openstack baremetal introspection start $node_id
# small sleep to avoid too many nodes DHCP'ing at once
sleep 5
echo "DONE."
done
echo " Polling discoverd for discovery results ... "
for node_id in $node_ids; do
echo -n " Result for node $node_id is ... "
# Poll every 15s until discoverd reports finished; an error result is
# reported but does not abort the remaining nodes.
while true; do
finished=$(openstack baremetal introspection status $node_id -f value -c finished)
if [ "$finished" = "True" ]; then
error=$(openstack baremetal introspection status $node_id -f value -c error)
if [ "$error" = "None" ]; then
echo "DISCOVERED."
else
echo "ERROR: $error"
fi
break
fi
sleep 15
done
done
echo " Setting node states to AVAILABLE... "
for node_id in $node_ids; do
ironic node-set-provision-state $node_id provide
done
echo
}
# Push BIOS settings to every DRAC-managed (pxe_drac) node via the
# configure_bios_settings vendor passthru; nodes with any other driver
# are skipped.
function configure_bios {
  echo " Configuring BIOS."
  uuids=$(ironic node-list | tail -n +4 | head -n -1 | awk -F "| " '{print $2}')
  for uuid in $uuids; do
    # The driver name lives in column 3 of the node-show table; xargs trims it.
    drv=$(ironic node-show $uuid | grep 'driver[ \t]' | awk -F "|" '{print $3}' | xargs)
    [ "$drv" == "pxe_drac" ] || continue
    echo -n " Configuring BIOS for node ID $uuid ... "
    ironic node-vendor-passthru $uuid --http_method POST configure_bios_settings
    echo "DONE."
  done
  # NOTE(ifarkas): wait until Ironic processes the request
  sleep 15
  echo
}
# Ask the DRAC driver to build RAID volumes on every pxe_drac node.
# Arguments: $1 - "true"/"false": create the root volume
#            $2 - "true"/"false": create the non-root volumes
# Nodes with any other driver are skipped.
function configure_raid_volumes {
create_root_volume=$1
create_nonroot_volumes=$2
echo " Configuring RAID volumes."
node_ids=$(ironic node-list | tail -n +4 | head -n -1 | awk -F "| " '{print $2}')
for node_id in $node_ids; do
# Column 3 of the node-show table is the driver name; xargs trims whitespace.
driver=$(ironic node-show $node_id | grep 'driver[ \t]' | awk -F "|" '{print $3}' | xargs)
if [ "$driver" == "pxe_drac" ]; then
echo -n " Configuring RAID volumes for node ID $node_id ... "
ironic node-vendor-passthru $node_id --http_method POST create_raid_configuration \
create_root_volume=$create_root_volume \
create_nonroot_volumes=$create_nonroot_volumes
echo "DONE."
fi
done
# NOTE(ifarkas): wait until Ironic processes the request
sleep 15
echo
}
# Block until every pxe_drac node reports no unfinished DRAC config jobs.
# Polls the list_unfinished_jobs vendor passthru every 30s per node; there
# is no overall timeout, so a stuck job hangs the script here.
function wait_for_drac_config_jobs {
echo " Waiting for DRAC config jobs to finish ... "
node_ids=$(ironic node-list | tail -n +4 | head -n -1 | awk -F "| " '{print $2}')
for node_id in $node_ids; do
driver=$(ironic node-show $node_id | grep 'driver[ \t]' | awk -F "|" '{print $3}' | xargs)
if [ "$driver" == "pxe_drac" ]; then
echo -n " Waiting for node $node_id ... "
while true; do
jobs=$(ironic node-vendor-passthru $node_id --http_method GET list_unfinished_jobs)
# Substring match on the raw response; an empty list means all jobs done.
if [[ $jobs == *"'unfinished_jobs': []"* ]]; then
break
fi
sleep 30
done
echo "DONE."
fi
done
echo
}
# Set the Ironic power state of every registered node.
# Arguments: $1 - target state, e.g. "on", "off" or "reboot".
function change_power_state {
  target=$1
  echo " Changing power states."
  for node in $(ironic node-list | tail -n +4 | head -n -1 | awk -F "| " '{print $2}'); do
    echo -n " Changing power state for node ID $node ... "
    ironic node-set-power-state $node $target
    echo "DONE."
  done
  # NOTE(ifarkas): wait until Ironic processes the request
  sleep 15
  echo
}
# Block until key-based root ssh works on every deployed node.
# IPs are scraped from column 12 of `nova list` ("<net>=<ip>" style);
# each host gets up to 300 attempts, 1s apart.
function wait_for_ssh {
  echo " Waiting for ssh as root to be enabled ... "
  echo
  for addr in $(nova list | tail -n +4 | head -n -1 | awk '{print $12}' | cut -d= -f2); do
    echo -n " checking $addr ... "
    # Succeeds once a trivial command runs as root without a password prompt.
    tripleo wait_for 300 1 ssh -o "PasswordAuthentication=no" -o "StrictHostKeyChecking=no" root@$addr ls
    echo "DONE."
  done
  echo
}
# Deploy the overcloud as a Heat stack of Ironic-backed instances.
# Waits for nova hypervisor stats first so the scheduler can see the
# nodes, then blocks until the stack completes and ssh is reachable.
# Globals: DEPLOY_HEAT_TEMPLATE, CONTROL_COUNT, COMPUTE_COUNT (read,
# with defaults) and DEPLOY_NAME (read).
function deploy_nodes {
wait_for_hypervisor_stats
DEPLOY_HEAT_TEMPLATE=${DEPLOY_HEAT_TEMPLATE:-"/usr/share/instack-undercloud/heat-templates/ironic-deployment.yaml"}
CONTROL_COUNT=${CONTROL_COUNT:-"1"}
COMPUTE_COUNT=${COMPUTE_COUNT:-"3"}
echo " Creating heat stack ... "
heat stack-create $DEPLOY_NAME -f $DEPLOY_HEAT_TEMPLATE -P "control_count=$CONTROL_COUNT" -P "compute_count=$COMPUTE_COUNT"
echo " Created."
echo
echo -n " Waiting for stack to finish ... "
echo
# 60 polls, 10s apart.
tripleo wait_for_stack_ready 60 10 $DEPLOY_NAME
echo "DONE."
heat stack-show $DEPLOY_NAME
heat stack-list
wait_for_ssh
echo
}
# Ensure a "baremetal" Nova flavor exists (4096 MB RAM, 40 GB disk, 1
# vcpu) and carries the extra-spec keys the scheduler matches against.
# Globals: STDERR (read) - where to send the existence-check's stderr.
function setup_flavors {
if ! nova flavor-show baremetal 2>$STDERR 1>/dev/null; then
echo " Creating baremetal flavor ... "
nova flavor-create baremetal auto 4096 40 1
else
echo " baremetal flavor already exists."
fi
echo
nova flavor-list
echo
echo " Setting baremetal flavor keys ... "
# boot_option:local must match the capability set on nodes at
# registration time, or scheduling fails.
nova flavor-key baremetal set \
"cpu_arch"="x86_64" \
"capabilities:boot_option"="local"
nova flavor-show baremetal
}
# Print each node's capabilities string (which holds any matched profile)
# by querying the Ironic REST API directly with a fresh keystone token.
# Globals: IRONIC (read) - baremetal API endpoint without trailing slash.
function show_profile {
node_ids=$(ironic node-list | tail -n +4 | head -n -1 | awk -F "| " '{print $2}')
token=$(openstack token issue | grep ' id ' | awk '{print $4}')
echo " Querying assigned profiles ... "
echo
for node_id in $node_ids; do
echo " $node_id"
echo -n " "
curl -s -H "x-auth-token: $token" $IRONIC/v1/nodes/$node_id | jq '.properties.capabilities'
echo
done
echo
echo " DONE."
echo
}
# Block until Nova's hypervisor stats reflect every Ironic node's
# resources; deploying before they converge makes the scheduler reject
# otherwise-valid nodes, so deploy_nodes calls this first.
# Globals: IRONIC (read) - baremetal API endpoint without trailing slash.
function wait_for_hypervisor_stats {
node_ids=$(ironic node-list | tail -n +4 | head -n -1 | awk -F "| " '{print $2}')
expected_nodes=$(echo $node_ids | wc -w)
expected_memory=0
expected_vcpus=0
token=$(openstack token issue | grep ' id ' | awk '{print $4}')
# Fix: message previously read "Wating".
echo -n " Waiting for nova hypervisor stats ... "
# Sum memory_mb/cpus over all nodes straight from the Ironic API
# (one fetch per node instead of the previous two).
for node_id in $node_ids; do
node_json=$(curl -s -H "x-auth-token: $token" $IRONIC/v1/nodes/$node_id)
mem=$(echo "$node_json" | jq '.properties.memory_mb | tonumber')
vcpu=$(echo "$node_json" | jq '.properties.cpus | tonumber')
expected_memory=$(($expected_memory + $mem))
expected_vcpus=$(($expected_vcpus + $vcpu))
done
# Poll (180 tries, 1s apart) until the stats match; the last argument
# here is the external wait_for_hypervisor_stats helper shipped with
# tripleo-incubator, not this shell function — confirm if reused.
tripleo wait_for 180 1 wait_for_hypervisor_stats $expected_nodes $expected_memory $expected_vcpus
echo "DONE."
echo
}
# Request deletion of the deployment stack, then block (up to 90 polls,
# 2s apart) until `heat stack-show` reports it gone.
function delete_stack {
  heat stack-delete "$DEPLOY_NAME"
  tripleo wait_for 90 2 ! heat stack-show "$DEPLOY_NAME"
}
# Remove every registered node from Ironic, one node-delete per UUID.
function delete_nodes {
  ironic node-list | tail -n +4 | head -n -1 | awk '{print $2}' | \
    while read -r uuid; do
      ironic node-delete "$uuid"
    done
}
echo "Preparing for deployment..."
# Run the requested phases in a fixed order, regardless of the order the
# options were given on the command line.
if [ "$REGISTER_NODES" = 1 ]; then
register_nodes
fi
if [ "$DISCOVER_NODES" = 1 ]; then
discover_nodes
fi
# --configure-nodes is a two-pass DRAC setup: BIOS plus the root RAID
# volume first, then (after a reboot and re-discovery) the non-root
# volumes. The reboots let the DRAC run its queued config jobs; nodes
# are powered back off once the jobs complete.
if [ "$CONFIGURE_NODES" = 1 ]; then
configure_bios
create_root_volume=true
create_nonroot_volumes=false
configure_raid_volumes $create_root_volume $create_nonroot_volumes
change_power_state reboot
wait_for_drac_config_jobs
change_power_state off
discover_nodes
create_root_volume=false
create_nonroot_volumes=true
configure_raid_volumes $create_root_volume $create_nonroot_volumes
change_power_state reboot
wait_for_drac_config_jobs
change_power_state off
fi
if [ "$SETUP_FLAVORS" = 1 ]; then
setup_flavors
fi
if [ "$SHOW_PROFILE" = 1 ]; then
show_profile
fi
if [ "$CHECK_SSH" = 1 ]; then
wait_for_ssh
fi
if [ "$DELETE_STACK" = 1 ]; then
delete_stack
fi
if [ "$DELETE_NODES" = 1 ]; then
delete_nodes
fi
echo "Prepared."
# Deployment runs last so all preparation phases have taken effect.
if [ "$DEPLOY_NODES" = 1 ]; then
echo "Deploying..."
deploy_nodes
echo "Deployed."
fi

View File

@ -1,54 +0,0 @@
#!/bin/bash
# Load the overcloud deploy/discovery images into Glance and the
# undercloud's HTTP boot directory. Requires an undercloud stackrc.
set -eux
# Directory containing the built images; defaults to the current dir.
IMAGE_PATH=${IMAGE_PATH:-"."}
export DEPLOY_NAME=${DEPLOY_NAME:-deploy-ramdisk-ironic}
export DISCOVERY_NAME=${DISCOVERY_NAME:-discovery-ramdisk}
HTTP_ROOT=${HTTP_ROOT:-/httpboot}
# Pull in the undercloud credentials even when run as a non-root user.
source <(sudo cat /root/stackrc)
OS_AUTH_URL=${OS_AUTH_URL:-""}
if [ -z "$OS_AUTH_URL" ]; then
echo "You must source a stackrc file for the Undercloud."
exit 1
fi
# Exit with a message unless $IMAGE_PATH/$1 exists as a regular file.
function check_image {
  local path="$IMAGE_PATH/$1"
  if [ ! -f "$path" ]; then
    echo "$path does not exist."
    exit 1
  fi
}
# Upload image $IMAGE_PATH/$1 into Glance via tripleo's load-image
# helper. NOTE(review): -d presumably replaces any existing image of
# the same name — confirm against tripleo-incubator docs.
function load_image {
  tripleo load-image -d "$IMAGE_PATH/$1"
}
# Fail fast if any required image artifact is missing.
check_image $DEPLOY_NAME.initramfs
check_image $DEPLOY_NAME.kernel
check_image $DISCOVERY_NAME.initramfs
check_image $DISCOVERY_NAME.kernel
check_image overcloud-full.qcow2
load_image overcloud-full.qcow2
# Re-create the deploy kernel/ramdisk in Glance; delete first so repeat
# runs do not accumulate duplicates (ignore "not found" on first run).
glance image-delete bm-deploy-kernel 2>/dev/null || :
glance image-create --name bm-deploy-kernel --is-public true \
--disk-format aki < $IMAGE_PATH/$DEPLOY_NAME.kernel
glance image-delete bm-deploy-ramdisk 2>/dev/null || :
glance image-create --name bm-deploy-ramdisk --is-public true \
--disk-format ari < $IMAGE_PATH/$DEPLOY_NAME.initramfs
# NOTE(review): the two *_id variables below are not used later in this
# script — possibly vestigial; confirm before removing.
deploy_kernel_id=$(glance image-show bm-deploy-kernel | awk ' / id / {print $4}')
deploy_ramdisk_id=$(glance image-show bm-deploy-ramdisk | awk ' / id / {print $4}')
# The discovery kernel/ramdisk are PXE-served straight from the HTTP
# root rather than stored in Glance.
sudo cp -f "$IMAGE_PATH/$DISCOVERY_NAME.kernel" "$HTTP_ROOT/discovery.kernel"
sudo cp -f "$IMAGE_PATH/$DISCOVERY_NAME.initramfs" "$HTTP_ROOT/discovery.ramdisk"

View File

@ -1,31 +0,0 @@
#!/bin/bash
# Configure and run Tempest against a deployed overcloud.
# Requires ~/overcloudrc; optionally set TEMPEST_ARGS to limit the
# test selection.
set -eux
# Fix: without pipefail, the `| tee` below masked run_tempest.sh's exit
# status and the script reported success even when tests failed.
set -o pipefail
source ~/overcloudrc
TEMPEST_ARGS=${TEMPEST_ARGS:-}
# e.g. TEMPEST_ARGS='tempest.api.compute.flavors tempest.api.network.admin'
mkdir -p ~/tempest
cd ~/tempest
# Populate ~/tempest with the packaged Tempest tree, then generate
# tempest.conf from the live cloud.
/usr/share/openstack-tempest-kilo/tools/configure-tempest-directory
./tools/config_tempest.py --out etc/tempest.conf --debug --create \
identity.uri $OS_AUTH_URL \
compute.allow_tenant_isolation true \
object-storage.operator_role SwiftOperator \
identity.admin_password $OS_PASSWORD \
compute.build_timeout 500 \
compute.image_ssh_user cirros \
compute.ssh_user cirros \
network.build_timeout 500 \
volume.build_timeout 500 \
scenario.ssh_user cirros
FULL_TEMPEST_ARGS="--no-virtual-env"
if [ -n "$TEMPEST_ARGS" ]; then
# Everything after "--" is passed through to the test runner.
FULL_TEMPEST_ARGS="$FULL_TEMPEST_ARGS -- $TEMPEST_ARGS"
fi
# Keep a full log alongside terminal output.
./run_tempest.sh $FULL_TEMPEST_ARGS 2>&1 | tee ~/tempest/tempest-run.log

View File

@ -1,105 +0,0 @@
#!/bin/bash
# End-to-end smoke test of a deployed overcloud: boot a Nova instance,
# attach and exercise a Cinder volume over ssh, and round-trip an object
# through Swift. Requires ~/overcloudrc and fedora-user.qcow2 under
# IMAGE_PATH.
set -eux
source ~/overcloudrc
# TODO(bnemec): Hard-coding this to . for now because it's tricky to extract
# the value from the new conf file when not using oslo.config.
IMAGE_PATH='.'
# tripleo os-adduser -p $OVERCLOUD_DEMO_PASSWORD demo demo@example.com
# Upload the test image once. Fix: redirects were ordered `2>&1
# 1>/dev/null`, which sent stderr to the terminal instead of /dev/null.
if ! glance image-show user >/dev/null 2>&1; then
glance image-create --name user --is-public True --disk-format qcow2 \
--container-format bare --file $IMAGE_PATH/fedora-user.qcow2
fi
# Wait for an enabled+up nova-compute and an alive OVS agent on a
# compute host before booting anything.
tripleo wait_for 30 10 nova service-list --binary nova-compute 2\>/dev/null \| grep 'enabled.*\ up\ '
tripleo wait_for 30 10 neutron agent-list -f csv -c alive -c agent_type -c host \| grep "\":-).*Open vSwitch agent.*compute\""
# source $TRIPLEO_ROOT/overcloudrc-user
NET_ID=$(neutron net-list -f csv --quote none | grep default-net | cut -d, -f1)
if ! nova keypair-show default 2>/dev/null; then
tripleo user-config
fi
nova boot --poll --key-name default --flavor m1.demo --image user --nic net-id=$NET_ID demo
sleep 3
# Scrape the fixed IP from nova list, find its port, then attach a
# floating IP from ext-net to that port.
PRIVATEIP=$(nova list | grep demo | awk -F"default-net=" '{print $2}' | awk '{print $1}')
tripleo wait_for 10 5 neutron port-list -f csv -c id --quote none \| grep id
PORT=$(neutron port-list | grep $PRIVATEIP | cut -d'|' -f2)
FLOATINGIP=$(neutron floatingip-create ext-net --port-id "${PORT//[[:space:]]/}" | awk '$2=="floating_ip_address" {print $4}')
SECGROUPID=$(nova secgroup-list | grep default | cut -d ' ' -f2)
# Allow ping and ssh in; ignore "already exists" errors on re-runs.
neutron security-group-rule-create $SECGROUPID --protocol icmp \
--direction ingress --port-range-min 8 || true
neutron security-group-rule-create $SECGROUPID --protocol tcp \
--direction ingress --port-range-min 22 --port-range-max 22 || true
# Must use sudo when calling ping
# See https://bugzilla.redhat.com/show_bug.cgi?id=1144149
tripleo wait_for 30 10 sudo -E ping -c 1 $FLOATINGIP
tripleo wait_for 10 10 nova list \| grep ACTIVE
# Drop any stale host key for a reused floating IP, then wait for ssh
# and for cloud-init to have finished on the guest.
ssh-keygen -R $FLOATINGIP
tripleo wait_for 30 10 ssh -o BatchMode=yes -o StrictHostKeyChecking=no fedora@$FLOATINGIP ls
tripleo wait_for 30 10 ssh -o BatchMode=yes -o StrictHostKeyChecking=no -tt fedora@$FLOATINGIP systemctl status cloud-final
echo Compute test successful!
# Cinder: create a 1 GB volume, attach it, then partition, format and
# mount it from inside the guest via scripted fdisk input.
CINDER_VOLUME_ID=$(cinder create 1 | grep " id " | awk '{print $4}')
tripleo wait_for 10 3 cinder list \| grep available
nova volume-attach demo $CINDER_VOLUME_ID
tripleo wait_for 30 10 ssh -o StrictHostKeyChecking=no fedora@$FLOATINGIP ls /dev/vdb
ssh -tt fedora@$FLOATINGIP sudo fdisk /dev/vdb <<EOF
o
w
EOF
ssh -tt fedora@$FLOATINGIP sudo fdisk /dev/vdb <<EOF
n
p
1
w
EOF
ssh -tt fedora@$FLOATINGIP sudo mkfs.ext4 /dev/vdb1
ssh -tt fedora@$FLOATINGIP sudo mount /dev/vdb1 /mnt
ssh -tt fedora@$FLOATINGIP sudo umount /mnt
echo Cinder test successful!
# Swift: upload a marker file, download it back, compare contents.
tmpfile=$(mktemp)
echo SWIFTTEST > $tmpfile
swift upload test $tmpfile
swiftfile=$(swift list test)
swift download --output $tmpfile-1 test $swiftfile
if [ ! "$(cat $tmpfile-1)" == "SWIFTTEST" ]; then
# Fix: previously this only printed a message and the script still
# exited 0; fail loudly instead.
echo Swift test failed! >&2
exit 1
fi
swift delete test
echo Swift test successful!

View File

@ -23,14 +23,8 @@ packages =
instack_undercloud
scripts =
scripts/instack-build-images
scripts/instack-create-overcloudrc
scripts/instack-deploy-overcloud
scripts/instack-install-undercloud
scripts/instack-ironic-deployment
scripts/instack-prepare-for-overcloud
scripts/instack-tempest-test-overcloud
scripts/instack-test-overcloud
scripts/instack-virt-setup
data_files =