Add support for baremetal hypervisor

New files for baremetal driver:
- lib/baremetal
- files/apts/baremetal

Adds two dependencies:
- Google's shell-in-a-box
- diskimage-builder

Enable by setting both:
  VIRT_DRIVER=baremetal
  ENABLED_SERVICES="$ENABLED_SERVICES,baremetal"

Change-Id: Ibf6fe1671a759a449c9eb0df47751d1b31ade591
Devananda van der Veen 2012-11-12 17:58:38 -08:00 committed by Dean Troyer
parent 796342c06e
commit f35cf91a1d
5 changed files with 522 additions and 11 deletions

files/apts/baremetal (new file, 9 lines)

@@ -0,0 +1,9 @@
busybox
dnsmasq
gcc
ipmitool
make
open-iscsi
qemu-kvm
syslinux
tgt

lib/baremetal (new file, 403 lines)

@@ -0,0 +1,403 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This file provides devstack with the environment and utilities to
# control nova-compute's baremetal driver.
# It sets reasonable defaults to run within a single host,
# using virtual machines in place of physical hardware.
# However, by changing just a few options, devstack+baremetal can in fact
# control physical hardware resources on the same network, if you know
# the MAC address(es) and IPMI credentials.
#
# At a minimum, to enable the baremetal driver, you must set these in localrc:
# VIRT_DRIVER=baremetal
# ENABLED_SERVICES="$ENABLED_SERVICES,baremetal"
#
#
# We use diskimage-builder to create a deploy ramdisk, and the
# baremetal driver then uses it to push a disk image onto the node(s).
#
# Below we define various defaults which control the behavior of the
# baremetal compute service, and inform it of the hardware it will control.
#
# Below that, various functions are defined, which are called by devstack
# in the following order:
#
# before nova-cpu starts:
# - prepare_baremetal_toolchain
# - configure_baremetal_nova_dirs
#
# after nova and glance have started:
# - upload_baremetal_deploy $token
# - create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID
# - upload_baremetal_image $url $token
# - add_baremetal_node <first_mac> <second_mac>
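#
# As an illustration (not executed here), the expected call sequence from
# stack.sh looks roughly like this; $token, $image_url and the MAC addresses
# are placeholders:
#
#   if is_service_enabled nova && is_baremetal; then
#       prepare_baremetal_toolchain
#       configure_baremetal_nova_dirs
#   fi
#   # ... after nova and glance have started ...
#   upload_baremetal_deploy $token
#   create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID
#   upload_baremetal_image $image_url $token
#   add_baremetal_node 00:11:22:33:44:55 00:11:22:33:44:56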
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace
# Sub-driver settings
# -------------------
# sub-driver to use for kernel deployment
# - nova.virt.baremetal.pxe.PXE
# - nova.virt.baremetal.tilera.TILERA
BM_DRIVER=${BM_DRIVER:-nova.virt.baremetal.pxe.PXE}
# sub-driver to use for remote power management
# - nova.virt.baremetal.fake.FakePowerManager, for manual power control
# - nova.virt.baremetal.ipmi.Ipmi, for remote IPMI
# - nova.virt.baremetal.tilera_pdu.Pdu, for TilePro hardware
BM_POWER_MANAGER=${BM_POWER_MANAGER:-nova.virt.baremetal.fake.FakePowerManager}
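# Note: stack.sh copies these two values into nova.conf as the
# "baremetal_driver" and "power_manager" options, respectively.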
# These should be customized to your environment and hardware
# -----------------------------------------------------------
# BM_DNSMASQ_* options must be changed to suit your network environment
BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-$PUBLIC_INTERFACE}
BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-eth0}
BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-192.0.2.32,192.0.2.48}
# BM_FIRST_MAC *must* be set to the MAC address of the node you will boot.
# This is passed to dnsmasq along with the kernel/ramdisk to
# deploy via PXE.
BM_FIRST_MAC=${BM_FIRST_MAC:-}
# BM_SECOND_MAC is only important if the host has >1 NIC.
BM_SECOND_MAC=${BM_SECOND_MAC:-}
# Hostname for the baremetal nova-compute node, if not run on this host
BM_HOSTNAME=${BM_HOSTNAME:-$(hostname -f)}
# BM_PM_* options are only necessary if BM_POWER_MANAGER=...IPMI
BM_PM_ADDR=${BM_PM_ADDR:-0.0.0.0}
BM_PM_USER=${BM_PM_USER:-user}
BM_PM_PASS=${BM_PM_PASS:-pass}
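# As a rough sketch, a localrc that drives one physical node over IPMI might
# set the following; the interface, range, MAC and credentials below are
# placeholders, not working values:
#
#   BM_POWER_MANAGER=nova.virt.baremetal.ipmi.Ipmi
#   BM_DNSMASQ_IFACE=eth2
#   BM_DNSMASQ_RANGE=192.0.2.33,192.0.2.63
#   BM_FIRST_MAC=00:11:22:33:44:55
#   BM_PM_ADDR=192.0.2.9
#   BM_PM_USER=admin
#   BM_PM_PASS=password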
# BM_FLAVOR_* options are arbitrary and not necessarily related to physical
# hardware capacity. These can be changed if you are testing
# BaremetalHostManager with multiple nodes and different flavors.
BM_CPU_ARCH=${BM_CPU_ARCH:-x86_64}
BM_FLAVOR_CPU=${BM_FLAVOR_CPU:-1}
BM_FLAVOR_RAM=${BM_FLAVOR_RAM:-1024}
BM_FLAVOR_ROOT_DISK=${BM_FLAVOR_ROOT_DISK:-10}
BM_FLAVOR_EPHEMERAL_DISK=${BM_FLAVOR_EPHEMERAL_DISK:-0}
BM_FLAVOR_SWAP=${BM_FLAVOR_SWAP:-1}
BM_FLAVOR_NAME=${BM_FLAVOR_NAME:-bm.small}
BM_FLAVOR_ID=${BM_FLAVOR_ID:-11}
BM_FLAVOR_ARCH=${BM_FLAVOR_ARCH:-$BM_CPU_ARCH}
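# With the defaults above, create_baremetal_flavor (below) effectively runs
# "nova flavor-create bm.small 11 1024 10 1" and then tags the flavor with
# cpu_arch, deploy_kernel_id and deploy_ramdisk_id extra specs.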
# Below this, we set some path and filenames.
# Defaults are probably sufficient.
BM_IMAGE_BUILD_DIR=${BM_IMAGE_BUILD_DIR:-$DEST/diskimage-builder}
BM_HOST_CURRENT_KERNEL=$(uname -r)
BM_DEPLOY_RAMDISK=${BM_DEPLOY_RAMDISK:-bm-deploy-$BM_HOST_CURRENT_KERNEL-initrd}
BM_DEPLOY_KERNEL=${BM_DEPLOY_KERNEL:-bm-deploy-$BM_HOST_CURRENT_KERNEL-vmlinuz}
# If you need to add any extra flavors to the deploy ramdisk image
# eg, specific network drivers, specify them here
BM_DEPLOY_FLAVOR=${BM_DEPLOY_FLAVOR:-}
# set URL and version for google shell-in-a-box
BM_SHELL_IN_A_BOX=${BM_SHELL_IN_A_BOX:-http://shellinabox.googlecode.com/files/shellinabox-2.14.tar.gz}
# Functions
# ---------
# Check if baremetal is properly enabled
# Returns false if VIRT_DRIVER is not baremetal, or if ENABLED_SERVICES
# does not contain "baremetal"
function is_baremetal() {
if [[ "$ENABLED_SERVICES" =~ 'baremetal' && "$VIRT_DRIVER" = 'baremetal' ]]; then
return 0
fi
return 1
}
# Install diskimage-builder and shell-in-a-box
# so that we can build the deployment kernel & ramdisk
function prepare_baremetal_toolchain() {
git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH
local shellinabox_basename=$(basename $BM_SHELL_IN_A_BOX)
if [[ ! -e $DEST/$shellinabox_basename ]]; then
cd $DEST
wget $BM_SHELL_IN_A_BOX
fi
if [[ ! -d $DEST/${shellinabox_basename%%.tar.gz} ]]; then
cd $DEST
tar xzf $shellinabox_basename
fi
if [[ ! $(which shellinaboxd) ]]; then
cd $DEST/${shellinabox_basename%%.tar.gz}
./configure
make
sudo make install
fi
}
# prepare various directories needed by baremetal hypervisor
function configure_baremetal_nova_dirs() {
# ensure /tftpboot is prepared
sudo mkdir -p /tftpboot
sudo mkdir -p /tftpboot/pxelinux.cfg
sudo cp /usr/lib/syslinux/pxelinux.0 /tftpboot/
sudo chown -R `whoami`:libvirtd /tftpboot
# ensure $NOVA_STATE_PATH/baremetal is prepared
sudo mkdir -p $NOVA_STATE_PATH/baremetal
sudo mkdir -p $NOVA_STATE_PATH/baremetal/console
sudo mkdir -p $NOVA_STATE_PATH/baremetal/dnsmasq
sudo touch $NOVA_STATE_PATH/baremetal/dnsmasq/dnsmasq-dhcp.host
sudo chown -R `whoami` $NOVA_STATE_PATH/baremetal
# ensure dnsmasq is installed but not running
# because baremetal driver will reconfigure and restart this as needed
if ! is_package_installed dnsmasq; then
install_package dnsmasq
fi
stop_service dnsmasq
}
# build deploy kernel+ramdisk, then upload them to glance
# this function sets BM_DEPLOY_KERNEL_ID and BM_DEPLOY_RAMDISK_ID
function upload_baremetal_deploy() {
token=$1
if [ ! -e $TOP_DIR/files/$BM_DEPLOY_KERNEL -a -e /boot/vmlinuz-$BM_HOST_CURRENT_KERNEL ]; then
sudo cp /boot/vmlinuz-$BM_HOST_CURRENT_KERNEL $TOP_DIR/files/$BM_DEPLOY_KERNEL
sudo chmod a+r $TOP_DIR/files/$BM_DEPLOY_KERNEL
fi
if [ ! -e $TOP_DIR/files/$BM_DEPLOY_RAMDISK ]; then
$BM_IMAGE_BUILD_DIR/bin/ramdisk-image-create $BM_DEPLOY_FLAVOR deploy \
-o $TOP_DIR/files/$BM_DEPLOY_RAMDISK -k $BM_HOST_CURRENT_KERNEL
fi
# load them into glance
BM_DEPLOY_KERNEL_ID=$(glance \
--os-auth-token $token \
--os-image-url http://$GLANCE_HOSTPORT \
image-create \
--name $BM_DEPLOY_KERNEL \
--public --disk-format=aki \
< $TOP_DIR/files/$BM_DEPLOY_KERNEL | grep ' id ' | get_field 2)
BM_DEPLOY_RAMDISK_ID=$(glance \
--os-auth-token $token \
--os-image-url http://$GLANCE_HOSTPORT \
image-create \
--name $BM_DEPLOY_RAMDISK \
--public --disk-format=ari \
< $TOP_DIR/files/$BM_DEPLOY_RAMDISK | grep ' id ' | get_field 2)
}
# create a basic baremetal flavor, associated with deploy kernel & ramdisk
#
# Usage: create_baremetal_flavor <aki_uuid> <ari_uuid>
function create_baremetal_flavor() {
aki=$1
ari=$2
nova flavor-create $BM_FLAVOR_NAME $BM_FLAVOR_ID \
$BM_FLAVOR_RAM $BM_FLAVOR_ROOT_DISK $BM_FLAVOR_CPU
nova-manage instance_type set_key \
--name=$BM_FLAVOR_NAME --key cpu_arch --value $BM_FLAVOR_ARCH
nova-manage instance_type set_key \
--name=$BM_FLAVOR_NAME --key deploy_kernel_id --value $aki
nova-manage instance_type set_key \
--name=$BM_FLAVOR_NAME --key deploy_ramdisk_id --value $ari
}
# pull run-time kernel/ramdisk out of disk image and load into glance
# note that $file is currently expected to be in qcow2 format
# Sets KERNEL_ID and RAMDISK_ID
#
# Usage: extract_and_upload_k_and_r_from_image $token $file
function extract_and_upload_k_and_r_from_image() {
token=$1
file=$2
image_name=$(basename "$file" ".qcow2")
# this call returns the file names as "$kernel,$ramdisk"
out=$($BM_IMAGE_BUILD_DIR/bin/disk-image-get-kernel \
-x -d $TOP_DIR/files -o bm-deploy -i $file)
if [ $? -ne 0 ]; then
die "Failed to get kernel and ramdisk from $file"
fi
XTRACE=$(set +o | grep xtrace)
set +o xtrace
out=$(echo "$out" | tail -1)
$XTRACE
OUT_KERNEL=${out%%,*}
OUT_RAMDISK=${out##*,}
# load them into glance
KERNEL_ID=$(glance \
--os-auth-token $token \
--os-image-url http://$GLANCE_HOSTPORT \
image-create \
--name $image_name-kernel \
--public --disk-format=aki \
< $TOP_DIR/files/$OUT_KERNEL | grep ' id ' | get_field 2)
RAMDISK_ID=$(glance \
--os-auth-token $token \
--os-image-url http://$GLANCE_HOSTPORT \
image-create \
--name $image_name-initrd \
--public --disk-format=ari \
< $TOP_DIR/files/$OUT_RAMDISK | grep ' id ' | get_field 2)
}
# Re-implementation of devstack's "upload_image" function
#
# Takes the same parameters, but also extracts the kernel and ramdisk from
# the image, uploads them to glance separately, and links them to the disk
# image via its kernel_id and ramdisk_id properties. Keeping this separate
# was easier than complicating the logic of the existing function.
function upload_baremetal_image() {
local image_url=$1
local token=$2
# Create a directory for the downloaded image tarballs.
mkdir -p $FILES/images
# Downloads the image (uec ami+aki style), then extracts it.
IMAGE_FNAME=`basename "$image_url"`
if [[ ! -f $FILES/$IMAGE_FNAME || \
"$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then
wget -c $image_url -O $FILES/$IMAGE_FNAME
if [[ $? -ne 0 ]]; then
echo "Not found: $image_url"
return
fi
fi
local KERNEL=""
local RAMDISK=""
local DISK_FORMAT=""
local CONTAINER_FORMAT=""
case "$IMAGE_FNAME" in
*.tar.gz|*.tgz)
# Extract ami and aki files
[ "${IMAGE_FNAME%.tar.gz}" != "$IMAGE_FNAME" ] &&
IMAGE_NAME="${IMAGE_FNAME%.tar.gz}" ||
IMAGE_NAME="${IMAGE_FNAME%.tgz}"
xdir="$FILES/images/$IMAGE_NAME"
rm -Rf "$xdir";
mkdir "$xdir"
tar -zxf $FILES/$IMAGE_FNAME -C "$xdir"
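# Pick the first matching file that exists: a top-level *-vmlinuz*,
# *-initrd* or *.img from a UEC-style tarball, or the image file inside
# an aki-*/ari-*/ami-* subdirectory.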
KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do
[ -f "$f" ] && echo "$f" && break; done; true)
RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do
[ -f "$f" ] && echo "$f" && break; done; true)
IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do
[ -f "$f" ] && echo "$f" && break; done; true)
if [[ -z "$IMAGE_NAME" ]]; then
IMAGE_NAME=$(basename "$IMAGE" ".img")
fi
DISK_FORMAT=ami
CONTAINER_FORMAT=ami
;;
*.qcow2)
IMAGE="$FILES/${IMAGE_FNAME}"
IMAGE_NAME=$(basename "$IMAGE" ".qcow2")
DISK_FORMAT=qcow2
CONTAINER_FORMAT=bare
;;
*) echo "Do not know what to do with $IMAGE_FNAME"; false;;
esac
if [ "$CONTAINER_FORMAT" = "bare" ]; then
extract_and_upload_k_and_r_from_image $token $IMAGE
elif [ "$CONTAINER_FORMAT" = "ami" ]; then
KERNEL_ID=$(glance \
--os-auth-token $token \
--os-image-url http://$GLANCE_HOSTPORT \
image-create \
--name "$IMAGE_NAME-kernel" --public \
--container-format aki \
--disk-format aki < "$KERNEL" | grep ' id ' | get_field 2)
RAMDISK_ID=$(glance \
--os-auth-token $token \
--os-image-url http://$GLANCE_HOSTPORT \
image-create \
--name "$IMAGE_NAME-ramdisk" --public \
--container-format ari \
--disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
else
# TODO(deva): add support for other image types
return
fi
glance \
--os-auth-token $token \
--os-image-url http://$GLANCE_HOSTPORT \
image-create \
--name "${IMAGE_NAME%.img}" --public \
--container-format $CONTAINER_FORMAT \
--disk-format $DISK_FORMAT \
${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \
${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
# override DEFAULT_IMAGE_NAME so that tempest can find the image
# that we just uploaded in glance
DEFAULT_IMAGE_NAME="${IMAGE_NAME%.img}"
}
function clear_baremetal_of_all_nodes() {
list=$(nova-baremetal-manage node list | tail -n +2 | awk '{print $1}' )
for node in $list
do
nova-baremetal-manage node delete $node
done
list=$(nova-baremetal-manage interface list | tail -n +2 | awk '{print $1}' )
for iface in $list
do
nova-baremetal-manage interface delete $iface
done
}
# inform nova-baremetal about nodes, MACs, etc
# Defaults to using BM_FIRST_MAC and BM_SECOND_MAC if parameters not specified
#
# Usage: add_baremetal_node <first_mac> <second_mac>
function add_baremetal_node() {
mac_1=${1:-$BM_FIRST_MAC}
mac_2=${2:-$BM_SECOND_MAC}
id=$(nova-baremetal-manage node create \
--host=$BM_HOSTNAME --prov_mac=$mac_1 \
--cpus=$BM_FLAVOR_CPU --memory_mb=$BM_FLAVOR_RAM \
--local_gb=$BM_FLAVOR_ROOT_DISK --terminal_port=0 \
--pm_address=$BM_PM_ADDR --pm_user=$BM_PM_USER --pm_password=$BM_PM_PASS \
)
[ $? -eq 0 ] || [ "$id" ] || die "Error adding baremetal node"
id2=$(nova-baremetal-manage interface create \
--node_id=$id --mac_address=$mac_2 --datapath_id=0 --port_no=0 \
)
[ $? -eq 0 ] || [ "$id2" ] || die "Error adding interface to baremetal node $id"
}
# Restore xtrace
$XTRACE

lib/nova

@@ -214,6 +214,11 @@ function configure_nova() {
fi
fi
# Prepare directories and packages for baremetal driver
if is_baremetal; then
configure_baremetal_nova_dirs
fi
if is_service_enabled quantum && is_quantum_ovs_base_plugin "$Q_PLUGIN" && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF ; then
# Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces
cat <<EOF | sudo tee -a $QEMU_CONF
@@ -356,6 +361,10 @@ function create_nova_conf() {
local dburl
database_connection_url dburl nova
add_nova_opt "sql_connection=$dburl"
if is_baremetal; then
database_connection_url dburl nova_bm
add_nova_opt "baremetal_sql_connection=$dburl"
fi
add_nova_opt "libvirt_type=$LIBVIRT_TYPE"
add_nova_opt "libvirt_cpu_mode=none"
add_nova_opt "instance_name_template=${INSTANCE_NAME_PREFIX}%08x"
@@ -441,12 +450,23 @@ function init_nova() {
# (Re)create nova database
$NOVA_BIN_DIR/nova-manage db sync
# (Re)create nova baremetal database
if is_baremetal; then
recreate_database nova_bm latin1
$NOVA_BIN_DIR/nova-baremetal-manage db sync
fi
fi
# Create cache dir
sudo mkdir -p $NOVA_AUTH_CACHE_DIR
sudo chown `whoami` $NOVA_AUTH_CACHE_DIR
rm -f $NOVA_AUTH_CACHE_DIR/*
# Create the keys folder
sudo mkdir -p ${NOVA_STATE_PATH}/keys
# make sure we own NOVA_STATE_PATH and all subdirs
sudo chown -R `whoami` ${NOVA_STATE_PATH}
}
# install_novaclient() - Collect source and prepare

stack.sh

@@ -322,6 +322,7 @@ source $TOP_DIR/lib/ceilometer
source $TOP_DIR/lib/heat
source $TOP_DIR/lib/quantum
source $TOP_DIR/lib/tempest
source $TOP_DIR/lib/baremetal
# Set the destination directories for OpenStack projects
HORIZON_DIR=$DEST/horizon
@@ -393,6 +394,13 @@ if [ "$VIRT_DRIVER" = 'xenserver' ]; then
# Allow ``build_domU.sh`` to specify the flat network bridge via kernel args
FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u)
GUEST_INTERFACE_DEFAULT=eth1
elif [ "$VIRT_DRIVER" = 'baremetal' ]; then
PUBLIC_INTERFACE_DEFAULT=eth0
FLAT_NETWORK_BRIDGE_DEFAULT=br100
FLAT_INTERFACE=${FLAT_INTERFACE:-eth0}
FORCE_DHCP_RELEASE=${FORCE_DHCP_RELEASE:-False}
NET_MAN=${NET_MAN:-FlatManager}
STUB_NETWORK=${STUB_NETWORK:-False}
else
PUBLIC_INTERFACE_DEFAULT=br100
FLAT_NETWORK_BRIDGE_DEFAULT=br100
@@ -404,6 +412,7 @@ NET_MAN=${NET_MAN:-FlatDHCPManager}
EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST}
FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT}
VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
FORCE_DHCP_RELEASE=${FORCE_DHCP_RELEASE:-True}
# Test floating pool and range are used for testing. They are defined
# here until the admin APIs can replace nova-manage
@@ -1009,9 +1018,9 @@ if is_service_enabled n-net q-dhcp; then
# Delete traces of nova networks from prior runs
sudo killall dnsmasq || true
clean_iptables
rm -rf $NOVA_STATE_PATH/networks
mkdir -p $NOVA_STATE_PATH/networks
rm -rf ${NOVA_STATE_PATH}/networks
sudo mkdir -p ${NOVA_STATE_PATH}/networks
sudo chown -R ${USER} ${NOVA_STATE_PATH}/networks
# Force IP forwarding on, just in case
sudo sysctl -w net.ipv4.ip_forward=1
fi
@@ -1092,6 +1101,10 @@ if is_service_enabled nova; then
# Need to avoid crash due to new firewall support
XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"}
add_nova_opt "firewall_driver=$XEN_FIREWALL_DRIVER"
# OpenVZ
# ------
elif [ "$VIRT_DRIVER" = 'openvz' ]; then
echo_summary "Using OpenVZ virtualization driver"
# TODO(deva): OpenVZ driver does not yet work if compute_driver is set here.
@@ -1100,6 +1113,25 @@ if is_service_enabled nova; then
add_nova_opt "connection_type=openvz"
LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER"
# Bare Metal
# ----------
elif [ "$VIRT_DRIVER" = 'baremetal' ]; then
echo_summary "Using BareMetal driver"
add_nova_opt "compute_driver=nova.virt.baremetal.driver.BareMetalDriver"
LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"}
add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER"
add_nova_opt "baremetal_driver=$BM_DRIVER"
add_nova_opt "baremetal_tftp_root=/tftpboot"
add_nova_opt "instance_type_extra_specs=cpu_arch:$BM_CPU_ARCH"
add_nova_opt "power_manager=$BM_POWER_MANAGER"
add_nova_opt "scheduler_host_manager=nova.scheduler.baremetal_host_manager.BaremetalHostManager"
add_nova_opt "scheduler_default_filters=AllHostsFilter"
# Default
# -------
else
echo_summary "Using libvirt virtualization driver"
add_nova_opt "compute_driver=libvirt.LibvirtDriver"
@@ -1108,6 +1140,12 @@ if is_service_enabled nova; then
fi
fi
# Extra things to prepare nova for baremetal, before nova starts
if is_service_enabled nova && is_baremetal; then
echo_summary "Preparing for nova baremetal"
prepare_baremetal_toolchain
configure_baremetal_nova_dirs
fi
# Launch Services
# ===============
@@ -1227,19 +1265,56 @@ fi
# * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz
if is_service_enabled g-reg; then
TOKEN=$(keystone token-get | grep ' id ' | get_field 2)
if is_baremetal; then
echo_summary "Creating and uploading baremetal images"
# build and upload separate deploy kernel & ramdisk
upload_baremetal_deploy $TOKEN
# upload images, separating out the kernel & ramdisk for PXE boot
for image_url in ${IMAGE_URLS//,/ }; do
upload_baremetal_image $image_url $TOKEN
done
else
echo_summary "Uploading images"
# Option to upload legacy ami-tty, which works with xenserver
if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then
IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz"
fi
for image_url in ${IMAGE_URLS//,/ }; do
upload_image $image_url $TOKEN
done
fi
fi
# If we are running nova with baremetal driver, there are a few
# last-mile configuration bits to attend to, which must happen
# after n-api and n-sch have started.
# Also, creating the baremetal flavor must happen after images
# are loaded into glance, though just knowing the IDs is sufficient here
if is_service_enabled nova && is_baremetal; then
# create special flavor for baremetal if we know what images to associate
[[ -n "$BM_DEPLOY_KERNEL_ID" ]] && [[ -n "$BM_DEPLOY_RAMDISK_ID" ]] && \
create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID
# otherwise user can manually add it later by calling nova-baremetal-manage
[[ -n "$BM_FIRST_MAC" ]] && add_baremetal_node
# NOTE: we do this here to ensure that our copy of dnsmasq is running
sudo pkill dnsmasq || true
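# Flags: --conf-file= skips the system dnsmasq.conf, --port=0 disables DNS,
# --enable-tftp/--tftp-root serve /tftpboot so nodes can fetch pxelinux.0
# (--dhcp-boot), and addresses are handed out only on $BM_DNSMASQ_IFACE
# within $BM_DNSMASQ_RANGE.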
sudo dnsmasq --conf-file= --port=0 --enable-tftp --tftp-root=/tftpboot \
--dhcp-boot=pxelinux.0 --bind-interfaces --pid-file=/var/run/dnsmasq.pid \
--interface=$BM_DNSMASQ_IFACE --dhcp-range=$BM_DNSMASQ_RANGE
# ensure callback daemon is running
sudo pkill nova-baremetal-deploy-helper || true
screen_it baremetal "nova-baremetal-deploy-helper"
fi
# Configure Tempest last to ensure that the runtime configuration of
# the various OpenStack services can be queried.

stackrc

@@ -111,6 +111,10 @@ HEATCLIENT_BRANCH=master
RYU_REPO=https://github.com/osrg/ryu.git
RYU_BRANCH=master
# diskimage-builder
BM_IMAGE_BUILD_REPO=https://github.com/stackforge/diskimage-builder.git
BM_IMAGE_BUILD_BRANCH=master
# Nova hypervisor configuration. We default to libvirt with **kvm** but will
# drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can
# also install an **LXC** or **OpenVZ** based system.