diff --git a/lib/cinder b/lib/cinder
index ec491dda72..250c0291ff 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -3,6 +3,7 @@
 
 # Dependencies:
 # - functions
+# - DEST, DATA_DIR must be defined
 # - KEYSTONE_AUTH_* must be defined
 # - SERVICE_{TENANT_NAME|PASSWORD} must be defined
 
@@ -25,14 +26,17 @@ set -o xtrace
 
 # set up default directories
 CINDER_DIR=$DEST/cinder
-if [ -d $CINDER_DIR/bin ] ; then
+CINDERCLIENT_DIR=$DEST/python-cinderclient
+CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder}
+CINDER_CONF_DIR=/etc/cinder
+CINDER_CONF=$CINDER_CONF_DIR/cinder.conf
+
+# Support entry-point installation of console scripts
+if [[ -d $CINDER_DIR/bin ]]; then
     CINDER_BIN_DIR=$CINDER_DIR/bin
 else
     CINDER_BIN_DIR=/usr/local/bin
 fi
-CINDERCLIENT_DIR=$DEST/python-cinderclient
-CINDER_CONF_DIR=/etc/cinder
-CINDER_CONF=$CINDER_CONF_DIR/cinder.conf
 
 # Name of the lvm volume group to use/create for iscsi volumes
 VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
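
The ${VAR:=default} form used for the new CINDER_STATE_PATH default assigns the fallback back to the variable when it is unset or empty (unlike :-, which only substitutes). A minimal sketch of the resolution, with an illustrative DATA_DIR:

    DATA_DIR=/opt/stack/data    # illustrative; stack.sh defines the real value
    unset CINDER_STATE_PATH
    CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder}
    echo $CINDER_STATE_PATH     # /opt/stack/data/cinder

    CINDER_STATE_PATH=/mnt/big-disk/cinder
    CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder}
    echo $CINDER_STATE_PATH     # /mnt/big-disk/cinder (user override wins)
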
@@ -112,6 +116,7 @@ function configure_cinder() {
     iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI
     iniset $CINDER_CONF DEFAULT root_helper "sudo ${CINDER_ROOTWRAP}"
     iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.openstack.volume.contrib.standard_extensions
+    iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH
 
     if is_service_enabled qpid ; then
         iniset $CINDER_CONF DEFAULT rpc_backend cinder.openstack.common.rpc.impl_qpid
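
For reference, the added state_path iniset lands in /etc/cinder/cinder.conf roughly as follows (the path assumes an illustrative DATA_DIR of /opt/stack/data; exact whitespace depends on iniset):

    # /etc/cinder/cinder.conf excerpt after configure_cinder (illustrative)
    [DEFAULT]
    state_path = /opt/stack/data/cinder
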
@@ -162,7 +167,7 @@ function init_cinder() {
             if ! sudo vgs $VOLUME_GROUP; then sudo vgcreate $VOLUME_GROUP $DEV; fi
         fi
 
-        mkdir -p $CINDER_DIR/volumes
+        mkdir -p $CINDER_STATE_PATH/volumes
 
         if sudo vgs $VOLUME_GROUP; then
             if [[ "$os_PACKAGE" = "rpm" ]]; then
diff --git a/lib/n-vol b/lib/n-vol
index a9d1c7d12e..99b8cb17cd 100644
--- a/lib/n-vol
+++ b/lib/n-vol
@@ -3,7 +3,9 @@
 
 # Dependencies:
 # - functions
+# - DATA_DIR must be defined
 # - KEYSTONE_AUTH_* must be defined
+# - NOVA_DIR, NOVA_BIN_DIR, NOVA_STATE_PATH must be defined
 # - SERVICE_{TENANT_NAME|PASSWORD} must be defined
 # _configure_tgt_for_config_d() from lib/cinder
 
@@ -64,7 +66,7 @@ function init_nvol() {
         if ! sudo vgs $VOLUME_GROUP; then sudo vgcreate $VOLUME_GROUP $DEV; fi
     fi
 
-    mkdir -p $NOVA_DIR/volumes
+    mkdir -p $NOVA_STATE_PATH/volumes
 
     if sudo vgs $VOLUME_GROUP; then
         if [[ "$os_PACKAGE" = "rpm" ]]; then
@@ -97,7 +99,7 @@ function start_nvol() {
     if [[ ! -f /etc/tgt/conf.d/nova.conf ]]; then
         _configure_tgt_for_config_d
        sudo mkdir -p /etc/tgt/conf.d
-       echo "include $NOVA_DIR/volumes/*" | sudo tee /etc/tgt/conf.d/nova.conf
+       echo "include $NOVA_STATE_PATH/volumes/*" | sudo tee /etc/tgt/conf.d/nova.conf
     fi
 
     if [[ "$os_PACKAGE" = "deb" ]]; then
@@ -109,7 +111,7 @@ function start_nvol() {
         restart_service tgtd
     fi
 
-    screen_it n-vol "cd $NOVA_DIR && $NOVA_DIR/bin/nova-volume"
+    screen_it n-vol "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-volume"
 }
 
 # stop_nvol() - Stop running processes (non-screen)
diff --git a/stack.sh b/stack.sh
index c352a2752c..fa1bf46efd 100755
--- a/stack.sh
+++ b/stack.sh
@@ -317,11 +317,7 @@ source $TOP_DIR/lib/heat
 source $TOP_DIR/lib/quantum
 
 # Set the destination directories for OpenStack projects
-NOVA_DIR=$DEST/nova
 HORIZON_DIR=$DEST/horizon
-GLANCE_DIR=$DEST/glance
-GLANCECLIENT_DIR=$DEST/python-glanceclient
-NOVACLIENT_DIR=$DEST/python-novaclient
 OPENSTACKCLIENT_DIR=$DEST/python-openstackclient
 NOVNC_DIR=$DEST/noVNC
 SWIFT_DIR=$DEST/swift
@@ -330,6 +326,33 @@ SWIFTCLIENT_DIR=$DEST/python-swiftclient
 QUANTUM_DIR=$DEST/quantum
 QUANTUM_CLIENT_DIR=$DEST/python-quantumclient
 
+# Nova defaults
+NOVA_DIR=$DEST/nova
+NOVACLIENT_DIR=$DEST/python-novaclient
+NOVA_STATE_PATH=${NOVA_STATE_PATH:=$DATA_DIR/nova}
+# INSTANCES_PATH is the previous name for this and is still honored if set
+NOVA_INSTANCES_PATH=${NOVA_INSTANCES_PATH:=${INSTANCES_PATH:=$NOVA_STATE_PATH/instances}}
+
+# Support entry-point installation of console scripts
+if [[ -d $NOVA_DIR/bin ]]; then
+    NOVA_BIN_DIR=$NOVA_DIR/bin
+else
+    NOVA_BIN_DIR=/usr/local/bin
+fi
+
+# Glance defaults
+GLANCE_DIR=$DEST/glance
+GLANCECLIENT_DIR=$DEST/python-glanceclient
+GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache}
+GLANCE_IMAGE_DIR=${GLANCE_IMAGE_DIR:=$DATA_DIR/glance/images}
+
+# Support entry-point installation of console scripts
+if [[ -d $GLANCE_DIR/bin ]]; then
+    GLANCE_BIN_DIR=$GLANCE_DIR/bin
+else
+    GLANCE_BIN_DIR=/usr/local/bin
+fi
+
 # Default Quantum Plugin
 Q_PLUGIN=${Q_PLUGIN:-openvswitch}
 # Default Quantum Port
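
The nested expansion above keeps the legacy INSTANCES_PATH spelling working; the resolution order, sketched with illustrative values:

    # Precedence: NOVA_INSTANCES_PATH, then legacy INSTANCES_PATH,
    # then $NOVA_STATE_PATH/instances
    NOVA_STATE_PATH=/opt/stack/data/nova    # illustrative
    unset NOVA_INSTANCES_PATH INSTANCES_PATH
    NOVA_INSTANCES_PATH=${NOVA_INSTANCES_PATH:=${INSTANCES_PATH:=$NOVA_STATE_PATH/instances}}
    echo $NOVA_INSTANCES_PATH   # /opt/stack/data/nova/instances

    unset NOVA_INSTANCES_PATH
    INSTANCES_PATH=/mnt/instances
    NOVA_INSTANCES_PATH=${NOVA_INSTANCES_PATH:=${INSTANCES_PATH:=$NOVA_STATE_PATH/instances}}
    echo $NOVA_INSTANCES_PATH   # /mnt/instances (legacy name honored)
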
@@ -1062,13 +1085,11 @@ if is_service_enabled g-reg; then
     fi
     sudo chown `whoami` $GLANCE_CONF_DIR
 
-    GLANCE_IMAGE_DIR=$DEST/glance/images
     # Delete existing images
     rm -rf $GLANCE_IMAGE_DIR
     mkdir -p $GLANCE_IMAGE_DIR
 
-    GLANCE_CACHE_DIR=$DEST/glance/cache
-    # Delete existing images
+    # Delete existing cache
     rm -rf $GLANCE_CACHE_DIR
     mkdir -p $GLANCE_CACHE_DIR
 
@@ -1144,7 +1165,7 @@ if is_service_enabled g-reg; then
     GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json
     cp $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON
 
-    $GLANCE_DIR/bin/glance-manage db_sync
+    $GLANCE_BIN_DIR/glance-manage db_sync
 
 fi
 
@@ -1613,15 +1634,15 @@ EOF'
     # ~~~~~~~~~~~~~~~~
 
     # Nova stores each instance in its own directory.
-    mkdir -p $NOVA_DIR/instances
+    mkdir -p $NOVA_INSTANCES_PATH
 
     # You can specify a different disk to be mounted and used for backing the
     # virtual machines.  If there is a partition labeled nova-instances we
     # mount it (ext filesystems can be labeled via e2label).
     if [ -L /dev/disk/by-label/nova-instances ]; then
-        if ! mount -n | grep -q $NOVA_DIR/instances; then
-            sudo mount -L nova-instances $NOVA_DIR/instances
-            sudo chown -R `whoami` $NOVA_DIR/instances
+        if ! mount -n | grep -q $NOVA_INSTANCES_PATH; then
+            sudo mount -L nova-instances $NOVA_INSTANCES_PATH
+            sudo chown -R `whoami` $NOVA_INSTANCES_PATH
         fi
     fi
 
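
As the comment in this hunk notes, an ext filesystem can be given the nova-instances label via e2label; an illustrative example (the device name is a placeholder):

    # Label an existing ext3/ext4 partition so the -L check above finds it
    sudo e2label /dev/sdb1 nova-instances
    # udev creates the by-label symlink that the test looks for
    ls -l /dev/disk/by-label/nova-instances
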
@@ -1640,15 +1661,15 @@ EOF'
     sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d " " -f2 | sudo iscsiadm --mode node --op delete || true
 
     # Clean out the instances directory.
-    sudo rm -rf $NOVA_DIR/instances/*
+    sudo rm -rf $NOVA_INSTANCES_PATH/*
 fi
 
 if is_service_enabled n-net q-dhcp; then
     # Delete traces of nova networks from prior runs
     sudo killall dnsmasq || true
     clean_iptables
-    rm -rf $NOVA_DIR/networks
-    mkdir -p $NOVA_DIR/networks
+    rm -rf $NOVA_STATE_PATH/networks
+    mkdir -p $NOVA_STATE_PATH/networks
 
     # Force IP forwarding on, just in case
     sudo sysctl -w net.ipv4.ip_forward=1
@@ -1918,13 +1939,6 @@ elif is_service_enabled n-vol; then
     init_nvol
 fi
 
-# Support entry points installation of console scripts
-if [ -d $NOVA_DIR/bin ] ; then
-    NOVA_BIN_DIR=$NOVA_DIR/bin
-else
-    NOVA_BIN_DIR=/usr/local/bin
-fi
-
 NOVA_CONF=nova.conf
 function add_nova_opt {
     echo "$1" >> $NOVA_CONF_DIR/$NOVA_CONF
@@ -2016,8 +2030,11 @@ elif [ -n "$RABBIT_HOST" ] &&  [ -n "$RABBIT_PASSWORD" ]; then
 fi
 add_nova_opt "glance_api_servers=$GLANCE_HOSTPORT"
 add_nova_opt "force_dhcp_release=True"
-if [ -n "$INSTANCES_PATH" ]; then
-    add_nova_opt "instances_path=$INSTANCES_PATH"
+if [ -n "$NOVA_STATE_PATH" ]; then
+    add_nova_opt "state_path=$NOVA_STATE_PATH"
+fi
+if [ -n "$NOVA_INSTANCES_PATH" ]; then
+    add_nova_opt "instances_path=$NOVA_INSTANCES_PATH"
 fi
 if [ "$MULTI_HOST" != "False" ]; then
     add_nova_opt "multi_host=True"
@@ -2124,12 +2141,12 @@ fi
 
 # Launch the glance registry service
 if is_service_enabled g-reg; then
-    screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf"
+    screen_it g-reg "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf"
 fi
 
 # Launch the glance api and wait for it to answer before continuing
 if is_service_enabled g-api; then
-    screen_it g-api "cd $GLANCE_DIR; bin/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
+    screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
     echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..."
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then
       echo "g-api did not start"