devstack/stackrc
Sean Dague, cc52406a78: use released library versions by default
This patch provides a new path for installing libraries in devstack so
that it's possible to either test with upstream released libraries, or
with git versions of individual libraries.

Libraries are added by name to three associative arrays: GITREPO,
GITBRANCH, and GITDIR. When we get to the library install phase we
inspect LIBS_FROM_GIT and look up libraries by name (e.g. "oslo.config");
if a library is listed there we clone and install it from git. Otherwise
we don't, and just let pip pull it in as a dependency when it needs it.

This patch provides the conversion of the oslo libraries, including
pbr.

Devstack-gate jobs for these libraries will need to change to support
actually forward testing their content.

Change-Id: I6161fa3194dbe8fbc25b6ee0e2fe3cc722a1cea4
2014-10-01 16:20:22 -04:00
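
To make the mechanism above concrete, here is a minimal sketch of the kind
of check the library install phase performs; the helper name and the
clone/install calls are simplified for illustration and are not a verbatim
copy of devstack's code:

    # Succeed if library $1 (e.g. "oslo.config") is listed in LIBS_FROM_GIT
    function use_library_from_git {
        local name=$1
        [[ ,${LIBS_FROM_GIT}, =~ ,${name}, ]]
    }

    # Only libraries explicitly named in LIBS_FROM_GIT are cloned and
    # installed from git; everything else is left for pip to resolve as a
    # normal released dependency.
    if use_library_from_git "oslo.config"; then
        git_clone ${GITREPO["oslo.config"]} ${GITDIR["oslo.config"]} ${GITBRANCH["oslo.config"]}
        setup_develop ${GITDIR["oslo.config"]}
    fi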

# stackrc
#
# Find the other rc files
RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd)
# Destination path for installation
DEST=/opt/stack
# Destination for working data
DATA_DIR=${DEST}/data
# Destination for status files
SERVICE_DIR=${DEST}/status
# Determine stack user
if [[ $EUID -eq 0 ]]; then
    STACK_USER=stack
else
    STACK_USER=$(whoami)
fi
# Specify region name
REGION_NAME=${REGION_NAME:-RegionOne}
# Specify which services to launch. These generally correspond to
# screen tabs. To change the default list, use the ``enable_service`` and
# ``disable_service`` functions in ``local.conf``.
# For example, to enable Swift add this to ``local.conf``:
# enable_service s-proxy s-object s-container s-account
# To enable Neutron (a single-node setup), add the following
# settings in ``localrc``:
# disable_service n-net
# enable_service q-svc
# enable_service q-agt
# enable_service q-dhcp
# enable_service q-l3
# enable_service q-meta
# # Optional, to enable tempest configuration as part of devstack
# enable_service tempest
# this allows us to pass ENABLED_SERVICES
if [[ -z "$ENABLED_SERVICES" ]]; then
# core compute (glance / keystone / nova (+ nova-network))
ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,n-sch,n-novnc,n-xvnc,n-cauth
# cinder
ENABLED_SERVICES+=,c-sch,c-api,c-vol
# heat
ENABLED_SERVICES+=,h-eng,h-api,h-api-cfn,h-api-cw
# dashboard
ENABLED_SERVICES+=,horizon
# additional services
ENABLED_SERVICES+=,rabbit,tempest,mysql
fi
# SQLAlchemy supports multiple database drivers for each database server
# type. For example, a deployer may use MySQLdb, MySQLConnector, or oursql
# to access a MySQL database.
#
# When defined, this variable controls which database driver is used to
# connect to the database server. Otherwise the default driver defined for
# each database type is used.
#
# You can find the list of currently supported drivers for each database
# type at: http://docs.sqlalchemy.org/en/rel_0_9/core/engines.html
# SQLALCHEMY_DATABASE_DRIVER="mysqldb"
# Global toggle for enabling services under mod_wsgi. If this is set to
# ``True``, all services that use HTTPD + mod_wsgi as their preferred method
# of deployment will be deployed under Apache. If this is set to ``False``,
# all services will rely on the local toggle variable (e.g. ``KEYSTONE_USE_MOD_WSGI``)
ENABLE_HTTPD_MOD_WSGI_SERVICES=True
# Tell Tempest which services are available. The default is set here as
# Tempest falls late in the configuration sequence. This differs from
# ``ENABLED_SERVICES`` in that the project names are used here rather than
# the service names, i.e.: TEMPEST_SERVICES="key,glance,nova"
TEMPEST_SERVICES=""
# Set the default Nova APIs to enable
NOVA_ENABLED_APIS=ec2,osapi_compute,metadata
# Configure Identity API version: 2.0, 3
IDENTITY_API_VERSION=2.0
# Whether to use 'dev mode' for screen windows. Dev mode works by
# stuffing text into the screen windows so that a developer can use
# ctrl-c, up-arrow, enter to restart the service. Starting services
# this way is slightly unreliable, and a bit slower, so this can
# be disabled for automated testing by setting this value to False.
USE_SCREEN=True
# allow local overrides of env variables, including repo config
if [[ -f $RC_DIR/localrc ]]; then
    # Old-style user-supplied config
    source $RC_DIR/localrc
elif [[ -f $RC_DIR/.localrc.auto ]]; then
    # New-style user-supplied config extracted from local.conf
    source $RC_DIR/.localrc.auto
fi
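# For example (illustrative values only, not defaults), an extracted
# ``.localrc.auto`` might contain something like:
#
#   ADMIN_PASSWORD=secretadmin
#   LIBS_FROM_GIT=oslo.config
#   OSLOCFG_BRANCH=master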
# This can be used to turn database query logging on and off
# (currently only implemented for MySQL backend)
DATABASE_QUERY_LOGGING=$(trueorfalse True $DATABASE_QUERY_LOGGING)
# Set a timeout for git operations. If git is still running when the
# timeout expires, the command will be retried up to 3 times. This is
# in the format for timeout(1);
#
# DURATION is a floating point number with an optional suffix: 's'
# for seconds (the default), 'm' for minutes, 'h' for hours or 'd'
# for days.
#
# Zero disables timeouts
GIT_TIMEOUT=${GIT_TIMEOUT:-0}
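# For example (commented out, not a default), to give up on any single git
# operation after two minutes and let it be retried:
# GIT_TIMEOUT=2m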
# Repositories
# ------------
# Base GIT Repo URL
# Another option is http://review.openstack.org/p
GIT_BASE=${GIT_BASE:-git://git.openstack.org}
# metering service
CEILOMETER_REPO=${CEILOMETER_REPO:-${GIT_BASE}/openstack/ceilometer.git}
CEILOMETER_BRANCH=${CEILOMETER_BRANCH:-master}
# ceilometer client library
CEILOMETERCLIENT_REPO=${CEILOMETERCLIENT_REPO:-${GIT_BASE}/openstack/python-ceilometerclient.git}
CEILOMETERCLIENT_BRANCH=${CEILOMETERCLIENT_BRANCH:-master}
# volume service
CINDER_REPO=${CINDER_REPO:-${GIT_BASE}/openstack/cinder.git}
CINDER_BRANCH=${CINDER_BRANCH:-master}
# volume client
CINDERCLIENT_REPO=${CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-cinderclient.git}
CINDERCLIENT_BRANCH=${CINDERCLIENT_BRANCH:-master}
# diskimage-builder
DIB_REPO=${DIB_REPO:-${GIT_BASE}/openstack/diskimage-builder.git}
DIB_BRANCH=${DIB_BRANCH:-master}
# image catalog service
GLANCE_REPO=${GLANCE_REPO:-${GIT_BASE}/openstack/glance.git}
GLANCE_BRANCH=${GLANCE_BRANCH:-master}
GLANCE_STORE_REPO=${GLANCE_STORE_REPO:-${GIT_BASE}/openstack/glance_store.git}
GLANCE_STORE_BRANCH=${GLANCE_STORE_BRANCH:-master}
# python glance client library
GLANCECLIENT_REPO=${GLANCECLIENT_REPO:-${GIT_BASE}/openstack/python-glanceclient.git}
GLANCECLIENT_BRANCH=${GLANCECLIENT_BRANCH:-master}
# heat service
HEAT_REPO=${HEAT_REPO:-${GIT_BASE}/openstack/heat.git}
HEAT_BRANCH=${HEAT_BRANCH:-master}
# python heat client library
HEATCLIENT_REPO=${HEATCLIENT_REPO:-${GIT_BASE}/openstack/python-heatclient.git}
HEATCLIENT_BRANCH=${HEATCLIENT_BRANCH:-master}
# heat-cfntools server agent
HEAT_CFNTOOLS_REPO=${HEAT_CFNTOOLS_REPO:-${GIT_BASE}/openstack/heat-cfntools.git}
HEAT_CFNTOOLS_BRANCH=${HEAT_CFNTOOLS_BRANCH:-master}
# heat example templates and elements
HEAT_TEMPLATES_REPO=${HEAT_TEMPLATES_REPO:-${GIT_BASE}/openstack/heat-templates.git}
HEAT_TEMPLATES_BRANCH=${HEAT_TEMPLATES_BRANCH:-master}
# django powered web control panel for openstack
HORIZON_REPO=${HORIZON_REPO:-${GIT_BASE}/openstack/horizon.git}
HORIZON_BRANCH=${HORIZON_BRANCH:-master}
# django openstack_auth library
HORIZONAUTH_REPO=${HORIZONAUTH_REPO:-${GIT_BASE}/openstack/django_openstack_auth.git}
HORIZONAUTH_BRANCH=${HORIZONAUTH_BRANCH:-master}
# baremetal provisioning service
IRONIC_REPO=${IRONIC_REPO:-${GIT_BASE}/openstack/ironic.git}
IRONIC_BRANCH=${IRONIC_BRANCH:-master}
IRONIC_PYTHON_AGENT_REPO=${IRONIC_PYTHON_AGENT_REPO:-${GIT_BASE}/openstack/ironic-python-agent.git}
IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-master}
# ironic client
IRONICCLIENT_REPO=${IRONICCLIENT_REPO:-${GIT_BASE}/openstack/python-ironicclient.git}
IRONICCLIENT_BRANCH=${IRONICCLIENT_BRANCH:-master}
# unified auth system (manages accounts/tokens)
KEYSTONE_REPO=${KEYSTONE_REPO:-${GIT_BASE}/openstack/keystone.git}
KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-master}
# python keystone client library
KEYSTONECLIENT_REPO=${KEYSTONECLIENT_REPO:-${GIT_BASE}/openstack/python-keystoneclient.git}
KEYSTONECLIENT_BRANCH=${KEYSTONECLIENT_BRANCH:-master}
# keystone middleware
KEYSTONEMIDDLEWARE_REPO=${KEYSTONEMIDDLEWARE_REPO:-${GIT_BASE}/openstack/keystonemiddleware.git}
KEYSTONEMIDDLEWARE_BRANCH=${KEYSTONEMIDDLEWARE_BRANCH:-master}
# compute service
NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git}
NOVA_BRANCH=${NOVA_BRANCH:-master}
# python client library to nova that horizon (and others) use
NOVACLIENT_REPO=${NOVACLIENT_REPO:-${GIT_BASE}/openstack/python-novaclient.git}
NOVACLIENT_BRANCH=${NOVACLIENT_BRANCH:-master}
# os-apply-config configuration template tool
OAC_REPO=${OAC_REPO:-${GIT_BASE}/openstack/os-apply-config.git}
OAC_BRANCH=${OAC_BRANCH:-master}
# os-collect-config configuration agent
OCC_REPO=${OCC_REPO:-${GIT_BASE}/openstack/os-collect-config.git}
OCC_BRANCH=${OCC_BRANCH:-master}
# consolidated openstack python client
OPENSTACKCLIENT_REPO=${OPENSTACKCLIENT_REPO:-${GIT_BASE}/openstack/python-openstackclient.git}
OPENSTACKCLIENT_BRANCH=${OPENSTACKCLIENT_BRANCH:-master}
# os-refresh-config configuration run-parts tool
ORC_REPO=${ORC_REPO:-${GIT_BASE}/openstack/os-refresh-config.git}
ORC_BRANCH=${ORC_BRANCH:-master}
# cliff command line framework
GITREPO["cliff"]=${CLIFF_REPO:-${GIT_BASE}/openstack/cliff.git}
GITBRANCH["cliff"]=${CLIFF_BRANCH:-master}
# oslo.concurrency
GITREPO["oslo.concurrency"]=${OSLOCON_REPO:-${GIT_BASE}/openstack/oslo.concurrency.git}
GITBRANCH["olso.concurrency"]=${OSLOCON_BRANCH:-master}
# oslo.config
GITREPO["oslo.config"]=${OSLOCFG_REPO:-${GIT_BASE}/openstack/oslo.config.git}
GITBRANCH["oslo.config"]=${OSLOCFG_BRANCH:-master}
# oslo.db
GITREPO["olso.db"]=${OSLODB_REPO:-${GIT_BASE}/openstack/oslo.db.git}
GITBRANCH["olso.db"]=${OSLODB_BRANCH:-master}
# oslo.i18n
GITREPO["olso.i18n"]=${OSLOI18N_REPO:-${GIT_BASE}/openstack/oslo.i18n.git}
GITBRANCH["olso.i18n"]=${OSLOI18N_BRANCH:-master}
# oslo.log
GITREPO["olso.log"]=${OSLOLOG_REPO:-${GIT_BASE}/openstack/oslo.log.git}
GITBRANCH["olso.log"]=${OSLOLOG_BRANCH:-master}
# oslo.messaging
GITREPO["olso.messaging"]=${OSLOMSG_REPO:-${GIT_BASE}/openstack/oslo.messaging.git}
GITBRANCH["olso.messaging"]=${OSLOMSG_BRANCH:-master}
# oslo.middleware
GITREPO["oslo.middleware"]=${OSLOMID_REPO:-${GIT_BASE}/openstack/oslo.middleware.git}
GITBRANCH["oslo.middleware"]=${OSLOMID_BRANCH:-master}
# oslo.rootwrap
GITREPO["olso.rootwrap"]=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git}
GITBRANCH["olso.rootwrap"]=${OSLORWRAP_BRANCH:-master}
# oslo.serialization
GITREPO["olso.serialization"]=${OSLOSERIALIZATION_REPO:-${GIT_BASE}/openstack/oslo.serialization.git}
GITBRANCH["olso.serialization"]=${OSLOSERIALIZATION_BRANCH:-master}
# oslo.utils
GITREPO["olso.utils"]=${OSLOUTILS_REPO:-${GIT_BASE}/openstack/oslo.utils.git}
GITBRANCH["olso.utils"]=${OSLOUTILS_BRANCH:-master}
# oslo.vmware
GITREPO["olso.vmware"]=${OSLOVMWARE_REPO:-${GIT_BASE}/openstack/oslo.vmware.git}
GITBRANCH["olso.vmware"]=${OSLOVMWARE_BRANCH:-master}
# pycadf auditing library
GITREPO["pycadf"]=${PYCADF_REPO:-${GIT_BASE}/openstack/pycadf.git}
GITBRANCH["pycadf"]=${PYCADF_BRANCH:-master}
# stevedore plugin manager
GITREPO["stevedore"]=${STEVEDORE_REPO:-${GIT_BASE}/openstack/stevedore.git}
GITBRANCH["stevedore"]=${STEVEDORE_BRANCH:-master}
# taskflow library for task and flow execution
GITREPO["taskflow"]=${TASKFLOW_REPO:-${GIT_BASE}/openstack/taskflow.git}
GITBRANCH["taskflow"]=${TASKFLOW_BRANCH:-master}
# pbr drives the setuptools configs
GITREPO["pbr"]=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git}
GITBRANCH["pbr"]=${PBR_BRANCH:-master}
# neutron service
NEUTRON_REPO=${NEUTRON_REPO:-${GIT_BASE}/openstack/neutron.git}
NEUTRON_BRANCH=${NEUTRON_BRANCH:-master}
# neutron client
NEUTRONCLIENT_REPO=${NEUTRONCLIENT_REPO:-${GIT_BASE}/openstack/python-neutronclient.git}
NEUTRONCLIENT_BRANCH=${NEUTRONCLIENT_BRANCH:-master}
# consolidated openstack requirements
REQUIREMENTS_REPO=${REQUIREMENTS_REPO:-${GIT_BASE}/openstack/requirements.git}
REQUIREMENTS_BRANCH=${REQUIREMENTS_BRANCH:-master}
# storage service
SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git}
SWIFT_BRANCH=${SWIFT_BRANCH:-master}
SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/stackforge/swift3.git}
SWIFT3_BRANCH=${SWIFT3_BRANCH:-master}
# python swift client library
SWIFTCLIENT_REPO=${SWIFTCLIENT_REPO:-${GIT_BASE}/openstack/python-swiftclient.git}
SWIFTCLIENT_BRANCH=${SWIFTCLIENT_BRANCH:-master}
# Tempest test suite
TEMPEST_REPO=${TEMPEST_REPO:-${GIT_BASE}/openstack/tempest.git}
TEMPEST_BRANCH=${TEMPEST_BRANCH:-master}
TEMPEST_LIB_REPO=${TEMPEST_LIB_REPO:-${GIT_BASE}/openstack/tempest-lib.git}
TEMPEST_LIB_BRANCH=${TEMPEST_LIB_BRANCH:-master}
# Tripleo elements for diskimage-builder images
TIE_REPO=${TIE_REPO:-${GIT_BASE}/openstack/tripleo-image-elements.git}
TIE_BRANCH=${TIE_BRANCH:-master}
# a websockets/html5 or flash powered VNC console for vm instances
NOVNC_REPO=${NOVNC_REPO:-https://github.com/kanaka/noVNC.git}
NOVNC_BRANCH=${NOVNC_BRANCH:-master}
# ryu service
RYU_REPO=${RYU_REPO:-https://github.com/osrg/ryu.git}
RYU_BRANCH=${RYU_BRANCH:-master}
# a websockets/html5 or flash powered SPICE console for vm instances
SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git}
SPICE_BRANCH=${SPICE_BRANCH:-master}
# trove service
TROVE_REPO=${TROVE_REPO:-${GIT_BASE}/openstack/trove.git}
TROVE_BRANCH=${TROVE_BRANCH:-master}
# python trove client library
TROVECLIENT_REPO=${TROVECLIENT_REPO:-${GIT_BASE}/openstack/python-troveclient.git}
TROVECLIENT_BRANCH=${TROVECLIENT_BRANCH:-master}
# stackforge libraries that are used by OpenStack core services
# wsme
WSME_REPO=${WSME_REPO:-${GIT_BASE}/stackforge/wsme.git}
WSME_BRANCH=${WSME_BRANCH:-master}
# pecan
PECAN_REPO=${PECAN_REPO:-${GIT_BASE}/stackforge/pecan.git}
PECAN_BRANCH=${PECAN_BRANCH:-master}
# sqlalchemy-migrate
SQLALCHEMY_MIGRATE_REPO=${SQLALCHEMY_MIGRATE_REPO:-${GIT_BASE}/stackforge/sqlalchemy-migrate.git}
SQLALCHEMY_MIGRATE_BRANCH=${SQLALCHEMY_MIGRATE_BRANCH:-master}
# Nova hypervisor configuration. We default to libvirt with **kvm** but will
# drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can
# also install an **LXC**, **OpenVZ** or **XenAPI** based system. If xenserver-core
# is installed, the default will be XenAPI
DEFAULT_VIRT_DRIVER=libvirt
is_package_installed xenserver-core && DEFAULT_VIRT_DRIVER=xenserver
VIRT_DRIVER=${VIRT_DRIVER:-$DEFAULT_VIRT_DRIVER}
case "$VIRT_DRIVER" in
ironic|libvirt)
LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm}
if [[ "$os_VENDOR" =~ (Debian) ]]; then
LIBVIRT_GROUP=libvirt
else
LIBVIRT_GROUP=libvirtd
fi
;;
fake)
NUMBER_FAKE_NOVA_COMPUTE=${NUMBER_FAKE_NOVA_COMPUTE:-1}
;;
xenserver)
# Xen config common to nova and neutron
XENAPI_USER=${XENAPI_USER:-"root"}
# This user will be used for dom0 - domU communication
# should be able to log in to dom0 without a password
# will be used to install the plugins
DOMZERO_USER=${DOMZERO_USER:-"domzero"}
;;
*)
;;
esac
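# For example (commented out, not defaults), a ``local.conf`` that forces
# plain qemu even on a host where the kvm module loads could set:
# VIRT_DRIVER=libvirt
# LIBVIRT_TYPE=qemu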
# Images
# ------
# Specify a comma-separated list of images to download and install into glance.
# Supported urls here are:
# * "uec-style" images:
# If the file ends in .tar.gz, uncompress the tarball and select the first
# .img file inside it as the image. If present, use "*-vmlinuz*" as the kernel
# and "*-initrd*" as the ramdisk
# example: http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64.tar.gz
# * disk image (*.img,*.img.gz)
# if the file ends in .img, it will be uploaded to glance and registered
# as a disk image. If it ends in .gz, it is uncompressed first.
# example:
# http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-armel-disk1.img
# http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz
# * OpenVZ image:
# OpenVZ uses its own format of image, and does not support UEC style images
#IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image
#IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image
CIRROS_VERSION=${CIRROS_VERSION:-"0.3.2"}
CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"}
# Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of
# which may be set in ``localrc``. Also allow ``DEFAULT_IMAGE_NAME`` and
# ``IMAGE_URLS`` to be set directly in ``localrc``.
case "$VIRT_DRIVER" in
openvz)
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-12.04-x86_64}
IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-12.04-x86_64.tar.gz"};;
libvirt)
case "$LIBVIRT_TYPE" in
lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs}
IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz"};;
*) # otherwise, use the uec style image (with kernel, ramdisk, disk)
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec}
IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec.tar.gz"};;
esac
;;
vsphere)
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.2-i386-disk.vmdk}
IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/cirros-0.3.2-i386-disk.vmdk"};;
xenserver)
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-disk}
IMAGE_URLS=${IMAGE_URLS:-"https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz"}
IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz";;
ironic)
# Ironic can do both partition and full disk images, depending on the driver
if [[ "$IRONIC_DEPLOY_DRIVER" == "agent_ssh" ]]; then
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-x86_64-disk}
else
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-x86_64-uec}
fi
IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz"}
IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-disk.img";;
*) # Default to Cirros with kernel, ramdisk and disk image
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec}
IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec.tar.gz"};;
esac
# Use 64bit fedora image if heat is enabled
if [[ "$ENABLED_SERVICES" =~ 'h-api' ]]; then
case "$VIRT_DRIVER" in
libvirt|baremetal|ironic)
HEAT_CFN_IMAGE_URL=${HEAT_CFN_IMAGE_URL:-"http://dl.fedoraproject.org/pub/alt/openstack/20/x86_64/Fedora-x86_64-20-20140618-sda.qcow2"}
IMAGE_URLS+=",$HEAT_CFN_IMAGE_URL"
;;
*)
;;
esac
fi
# Trove needs a custom image for its work
if [[ "$ENABLED_SERVICES" =~ 'tr-api' ]]; then
case "$VIRT_DRIVER" in
libvirt|baremetal|ironic|xenapi)
TROVE_GUEST_IMAGE_URL=${TROVE_GUEST_IMAGE_URL:-"http://tarballs.openstack.org/trove/images/ubuntu_mysql.qcow2/ubuntu_mysql.qcow2"}
IMAGE_URLS+=",${TROVE_GUEST_IMAGE_URL}"
;;
*)
;;
esac
fi
# Staging area for new images. Keep them here for at least 24 hours so that
# nodepool can cache them; otherwise the failure rates in the gate are too high.
PRECACHE_IMAGES=$(trueorfalse False $PRECACHE_IMAGES)
if [[ "$PRECACHE_IMAGES" == "True" ]]; then
# staging in update for nodepool
IMAGE_URL="http://dl.fedoraproject.org/pub/alt/openstack/20/x86_64/Fedora-x86_64-20-20140618-sda.qcow2"
if ! [[ "$IMAGE_URLS" =~ "$IMAGE_URL" ]]; then
IMAGE_URLS+=",$IMAGE_URL"
fi
fi
# 10GB default volume backing file size
VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-10250M}
# Prefixes for volume and instance names
VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
# Set default port for nova-objectstore
S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333}
# Common network names
PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"}
PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"public"}
# Compatibility until it's eradicated from CI
USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN}
# Set default screen name
SCREEN_NAME=${SCREEN_NAME:-stack}
# Do not install packages tagged with 'testonly' by default
INSTALL_TESTONLY_PACKAGES=${INSTALL_TESTONLY_PACKAGES:-False}
# Undo requirements changes by global requirements
UNDO_REQUIREMENTS=${UNDO_REQUIREMENTS:-True}
# Allow the use of an alternate protocol (such as https) for service endpoints
SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http}
# Sets the maximum number of workers for most services to reduce
# the memory used where there are a large number of CPUs present
# (the default number of workers for many services is the number of CPUs)
# Also sets the minimum number of workers to 2.
API_WORKERS=${API_WORKERS:=$(( ($(nproc)/2)<2 ? 2 : ($(nproc)/2) ))}
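# For example, with the formula above an 8-CPU host gets 4 API workers by
# default, a 4-CPU host gets 2, and 1- or 2-CPU hosts are clamped up to the
# minimum of 2.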
# Local variables:
# mode: shell-script
# End: