25f84277ea
This change fixes the empty value set for the gid option in rsyncd.conf, which
was caused by a reference to the invalid USER_GROUP variable, and ensures the
option is set to the group to which STACK_USER belongs. It also fixes a
duplicate declaration of the local user_group variable.

Closes-Bug: #1940742
Change-Id: Ifd0a5ef0bc5f3647f43b169df1f7176393971853
#!/bin/bash
#
# lib/swift
# Functions to control the configuration and operation of the **Swift** service

# Dependencies:
#
# - ``functions`` file
# - ``apache`` file
# - ``DEST``, ``SWIFT_HASH`` must be defined
# - ``STACK_USER`` must be defined
# - ``SWIFT_DATA_DIR`` or ``DATA_DIR`` must be defined
# - ``lib/keystone`` file
#
# ``stack.sh`` calls the entry points in this order:
#
# - install_swift
# - _config_swift_apache_wsgi
# - configure_swift
# - init_swift
# - start_swift
# - stop_swift
# - cleanup_swift
# - _cleanup_swift_apache_wsgi

# Save trace setting
_XTRACE_LIB_SWIFT=$(set +o | grep xtrace)
set +o xtrace


# Defaults
# --------

if is_service_enabled tls-proxy; then
    SWIFT_SERVICE_PROTOCOL="https"
fi

# Set up default directories
GITDIR["python-swiftclient"]=$DEST/python-swiftclient
SWIFT_DIR=$DEST/swift

# Swift virtual environment
if [[ ${USE_VENV} = True ]]; then
    PROJECT_VENV["swift"]=${SWIFT_DIR}.venv
    SWIFT_BIN_DIR=${PROJECT_VENV["swift"]}/bin
else
    SWIFT_BIN_DIR=$(get_python_exec_prefix)
fi

SWIFT_APACHE_WSGI_DIR=${SWIFT_APACHE_WSGI_DIR:-/var/www/swift}

SWIFT_SERVICE_PROTOCOL=${SWIFT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
SWIFT_DEFAULT_BIND_PORT=${SWIFT_DEFAULT_BIND_PORT:-8080}
SWIFT_DEFAULT_BIND_PORT_INT=${SWIFT_DEFAULT_BIND_PORT_INT:-8081}
SWIFT_SERVICE_LOCAL_HOST=${SWIFT_SERVICE_LOCAL_HOST:-$SERVICE_LOCAL_HOST}
SWIFT_SERVICE_LISTEN_ADDRESS=${SWIFT_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)}

# TODO: add logging to a different location.

# Set ``SWIFT_DATA_DIR`` to the location of swift drives and objects.
# Default is the common DevStack data directory.
SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DATA_DIR}/swift}
SWIFT_DISK_IMAGE=${SWIFT_DATA_DIR}/drives/images/swift.img

# Set ``SWIFT_CONF_DIR`` to the location of the configuration files.
# Default is ``/etc/swift``.
SWIFT_CONF_DIR=${SWIFT_CONF_DIR:-/etc/swift}

if is_service_enabled s-proxy && is_service_enabled s3api; then
    # If we are using ``s3api``, we can default the S3 port to swift instead
    # of nova-objectstore
    S3_SERVICE_PORT=${S3_SERVICE_PORT:-$SWIFT_DEFAULT_BIND_PORT}
fi

if is_service_enabled g-api; then
    # The minimum Cinder volume size is 1G, so if the Swift backend for
    # Glance is only 1G we cannot upload a volume to an image.
    # Increase the Swift disk size to 2G.
    SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=2G
    SWIFT_MAX_FILE_SIZE_DEFAULT=1073741824 # 1G
else
    # DevStack will create a loop-back disk formatted as XFS to store the
    # swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the desired disk size
    # (e.g. ``1G``).
    # Default is 1 gigabyte.
    SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=1G
    SWIFT_MAX_FILE_SIZE_DEFAULT=536870912 # 512M
fi

# If tempest is enabled, the default size is 6 gigabytes.
if is_service_enabled tempest; then
    SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=${SWIFT_LOOPBACK_DISK_SIZE:-6G}
    SWIFT_MAX_FILE_SIZE_DEFAULT=5368709122 # Swift default 5G
fi

SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-$SWIFT_LOOPBACK_DISK_SIZE_DEFAULT}

# Set ``SWIFT_EXTRAS_MIDDLEWARE`` to extra middlewares.
# Default is ``staticweb, formpost``
SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-formpost staticweb}

# Set ``SWIFT_EXTRAS_MIDDLEWARE_LAST`` to extra middlewares that need to be at
# the end of the pipeline.
SWIFT_EXTRAS_MIDDLEWARE_LAST=${SWIFT_EXTRAS_MIDDLEWARE_LAST:-}

# Set ``SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH`` to extra middlewares that need to be at
# the beginning of the pipeline, before the authentication middlewares.
SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH=${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH:-crossdomain}

# The ring uses a configurable number of bits from a path's MD5 hash as
# a partition index that designates a device. The number of bits kept
# from the hash is known as the partition power, and 2 to the partition
# power indicates the partition count. Partitioning the full MD5 hash
# ring allows other parts of the cluster to work in batches of items at
# once, which ends up either more efficient or at least less complex than
# working with each item separately or the entire cluster all at once.
# By default we use a partition power of 9 (which means 512 partitions).
SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9}

# Set ``SWIFT_REPLICAS`` to configure how many replicas are to be
# configured for your Swift cluster. By default we are configuring
# only one replica since this is way less CPU and memory intensive. If
# you are planning to test swift replication you may want to set this
# to 3.
SWIFT_REPLICAS=${SWIFT_REPLICAS:-1}
SWIFT_REPLICAS_SEQ=$(seq ${SWIFT_REPLICAS})

# Set ``SWIFT_START_ALL_SERVICES`` to control whether all Swift
# services (including the *-auditor, *-replicator, *-reconstructor, etc.
# daemons) should be started.
SWIFT_START_ALL_SERVICES=$(trueorfalse True SWIFT_START_ALL_SERVICES)

# Set ``SWIFT_LOG_TOKEN_LENGTH`` to configure how many characters of an auth
# token should be placed in the logs. When keystone is used with PKI tokens,
# the token values can be huge, seemingly larger than 2K at the least. We
# restrict it here to a default of 12 characters, which should be enough to
# trace through the logs when looking for its use.
SWIFT_LOG_TOKEN_LENGTH=${SWIFT_LOG_TOKEN_LENGTH:-12}

# Set ``SWIFT_MAX_HEADER_SIZE`` to configure the maximum length of headers in
# the Swift API.
SWIFT_MAX_HEADER_SIZE=${SWIFT_MAX_HEADER_SIZE:-16384}

# Set ``SWIFT_MAX_FILE_SIZE`` to configure the maximum file size in the Swift API.
# Default is 512 MB (1 GB when Glance is enabled) because the loopback file
# used for swift is only 1 or 2 GB.
SWIFT_MAX_FILE_SIZE=${SWIFT_MAX_FILE_SIZE:-$SWIFT_MAX_FILE_SIZE_DEFAULT}

# Set ``OBJECT_PORT_BASE``, ``CONTAINER_PORT_BASE``, ``ACCOUNT_PORT_BASE``.
# These port bases are used in the port number calculation for the service
# "nodes": node 1 uses the base port itself, and each additional node uses
# base_port + 10 * (node_num - 1).
OBJECT_PORT_BASE=${OBJECT_PORT_BASE:-6613}
CONTAINER_PORT_BASE=${CONTAINER_PORT_BASE:-6611}
ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6612}
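# As an illustration of the calculation above: with the default bases and
# three replicas, the object servers bind to 6613, 6623 and 6633, the
# container servers to 6611, 6621 and 6631, and the account servers to
# 6612, 6622 and 6632.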

# Enable tempurl feature
SWIFT_ENABLE_TEMPURLS=${SWIFT_ENABLE_TEMPURLS:-False}
SWIFT_TEMPURL_KEY=${SWIFT_TEMPURL_KEY:-}

# Toggle for deploying Swift under HTTPD + mod_wsgi
SWIFT_USE_MOD_WSGI=${SWIFT_USE_MOD_WSGI:-False}

# A space-separated list of storage node IPs that
# should be used to create the Swift rings
SWIFT_STORAGE_IPS=${SWIFT_STORAGE_IPS:-}


# Functions
# ---------

# Test if any Swift services are enabled
# is_swift_enabled
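# For example, with ENABLED_SERVICES containing "s-proxy,s-object,s-container,s-account"
# this returns 0 (enabled), while listing "swift" in DISABLED_SERVICES forces a
# return of 1 regardless of the s-* entries.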
function is_swift_enabled {
    [[ ,${DISABLED_SERVICES} =~ ,"swift" ]] && return 1
    [[ ,${ENABLED_SERVICES} =~ ,"s-" ]] && return 0
    return 1
}

# cleanup_swift() - Remove residual data files
function cleanup_swift {
    rm -f ${SWIFT_CONF_DIR}/{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz}
    if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
        sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
    fi
    if [[ -e ${SWIFT_DISK_IMAGE} ]]; then
        rm ${SWIFT_DISK_IMAGE}
    fi
    rm -rf ${SWIFT_DATA_DIR}/run/
    if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then
        _cleanup_swift_apache_wsgi
    fi
}

# _cleanup_swift_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
function _cleanup_swift_apache_wsgi {
    sudo rm -f $SWIFT_APACHE_WSGI_DIR/*.wsgi
    disable_apache_site proxy-server
    local node_number type
    for node_number in ${SWIFT_REPLICAS_SEQ}; do
        for type in object container account; do
            local site_name=${type}-server-${node_number}
            disable_apache_site ${site_name}
            sudo rm -f $(apache_site_config_for ${site_name})
        done
    done
}

# _config_swift_apache_wsgi() - Set WSGI config files of Swift
function _config_swift_apache_wsgi {
    sudo mkdir -p ${SWIFT_APACHE_WSGI_DIR}
    local proxy_port=${SWIFT_DEFAULT_BIND_PORT}

    # copy proxy vhost and wsgi file
    sudo cp ${SWIFT_DIR}/examples/apache2/proxy-server.template $(apache_site_config_for proxy-server)
    sudo sed -e "
        /^#/d;/^$/d;
        s/%PORT%/$proxy_port/g;
        s/%SERVICENAME%/proxy-server/g;
        s/%APACHE_NAME%/${APACHE_NAME}/g;
        s/%USER%/${STACK_USER}/g;
    " -i $(apache_site_config_for proxy-server)
    enable_apache_site proxy-server

    sudo cp ${SWIFT_DIR}/examples/wsgi/proxy-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/proxy-server.wsgi
    sudo sed -e "
        /^#/d;/^$/d;
        s/%SERVICECONF%/proxy-server.conf/g;
    " -i ${SWIFT_APACHE_WSGI_DIR}/proxy-server.wsgi

    # copy apache vhost file and set name and port
    local node_number
    for node_number in ${SWIFT_REPLICAS_SEQ}; do
        local object_port
        object_port=$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))
        local container_port
        container_port=$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) ))
        local account_port
        account_port=$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) ))

        sudo cp ${SWIFT_DIR}/examples/apache2/object-server.template $(apache_site_config_for object-server-${node_number})
        sudo sed -e "
            s/%PORT%/$object_port/g;
            s/%SERVICENAME%/object-server-${node_number}/g;
            s/%APACHE_NAME%/${APACHE_NAME}/g;
            s/%USER%/${STACK_USER}/g;
        " -i $(apache_site_config_for object-server-${node_number})
        enable_apache_site object-server-${node_number}

        sudo cp ${SWIFT_DIR}/examples/wsgi/object-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/object-server-${node_number}.wsgi
        sudo sed -e "
            /^#/d;/^$/d;
            s/%SERVICECONF%/object-server\/${node_number}.conf/g;
        " -i ${SWIFT_APACHE_WSGI_DIR}/object-server-${node_number}.wsgi

        sudo cp ${SWIFT_DIR}/examples/apache2/container-server.template $(apache_site_config_for container-server-${node_number})
        sudo sed -e "
            /^#/d;/^$/d;
            s/%PORT%/$container_port/g;
            s/%SERVICENAME%/container-server-${node_number}/g;
            s/%APACHE_NAME%/${APACHE_NAME}/g;
            s/%USER%/${STACK_USER}/g;
        " -i $(apache_site_config_for container-server-${node_number})
        enable_apache_site container-server-${node_number}

        sudo cp ${SWIFT_DIR}/examples/wsgi/container-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/container-server-${node_number}.wsgi
        sudo sed -e "
            /^#/d;/^$/d;
            s/%SERVICECONF%/container-server\/${node_number}.conf/g;
        " -i ${SWIFT_APACHE_WSGI_DIR}/container-server-${node_number}.wsgi

        sudo cp ${SWIFT_DIR}/examples/apache2/account-server.template $(apache_site_config_for account-server-${node_number})
        sudo sed -e "
            /^#/d;/^$/d;
            s/%PORT%/$account_port/g;
            s/%SERVICENAME%/account-server-${node_number}/g;
            s/%APACHE_NAME%/${APACHE_NAME}/g;
            s/%USER%/${STACK_USER}/g;
        " -i $(apache_site_config_for account-server-${node_number})
        enable_apache_site account-server-${node_number}

        sudo cp ${SWIFT_DIR}/examples/wsgi/account-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/account-server-${node_number}.wsgi
        sudo sed -e "
            /^#/d;/^$/d;
            s/%SERVICECONF%/account-server\/${node_number}.conf/g;
        " -i ${SWIFT_APACHE_WSGI_DIR}/account-server-${node_number}.wsgi
    done
}

# This function generates an object/container/account server configuration
# for one emulated node; each node binds to a different port.
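# For example, with the defaults, node 2's object server ends up with
# bind_port = 6623, devices = ${SWIFT_DATA_DIR}/2 and log_facility LOG_LOCAL1.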
function generate_swift_config_services {
    local swift_node_config=$1
    local node_id=$2
    local bind_port=$3
    local server_type=$4

    local log_facility
    log_facility=$(( node_id - 1 ))
    local node_path=${SWIFT_DATA_DIR}/${node_id}

    iniuncomment ${swift_node_config} DEFAULT user
    iniset ${swift_node_config} DEFAULT user ${STACK_USER}

    iniuncomment ${swift_node_config} DEFAULT bind_port
    iniset ${swift_node_config} DEFAULT bind_port ${bind_port}

    iniuncomment ${swift_node_config} DEFAULT swift_dir
    iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONF_DIR}

    iniuncomment ${swift_node_config} DEFAULT devices
    iniset ${swift_node_config} DEFAULT devices ${node_path}

    iniuncomment ${swift_node_config} DEFAULT log_facility
    iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility}

    iniuncomment ${swift_node_config} DEFAULT workers
    iniset ${swift_node_config} DEFAULT workers ${API_WORKERS:-1}

    iniuncomment ${swift_node_config} DEFAULT disable_fallocate
    iniset ${swift_node_config} DEFAULT disable_fallocate true

    iniuncomment ${swift_node_config} DEFAULT mount_check
    iniset ${swift_node_config} DEFAULT mount_check false

    iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode
    iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes

    # Using sed and not iniset/iniuncomment because we want a global
    # modification that also works for new sections.
    sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config}
}

# configure_swift() - Set config files, create data dirs and loop image
function configure_swift {
    local swift_pipeline="${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH}"
    local node_number
    local swift_node_config
    local swift_log_dir

    # Make sure to kill all swift processes first
    $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true

    sudo install -d -o ${STACK_USER} ${SWIFT_CONF_DIR}
    sudo install -d -o ${STACK_USER} ${SWIFT_CONF_DIR}/{object,container,account}-server

    if [[ "$SWIFT_CONF_DIR" != "/etc/swift" ]]; then
        # Some swift tools are hard-coded to use ``/etc/swift`` and are
        # apparently not going to be fixed.
        # Create a symlink if the config dir is moved.
        sudo ln -sf ${SWIFT_CONF_DIR} /etc/swift
    fi

    # Swift uses rsync to synchronize data between all of the different
    # partitions (which makes more sense when you have a multi-node setup);
    # we configure it with our own rsyncd.conf.
    sed -e "
        s/%GROUP%/$(id -g -n ${STACK_USER})/;
        s/%USER%/${STACK_USER}/;
        s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,;
    " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf
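    # With the default STACK_USER of "stack", the substitution above typically
    # renders the template's %USER% and %GROUP% placeholders as "uid = stack"
    # and "gid = stack" (the group name from `id -g -n`); referencing an
    # undefined variable here previously produced an empty gid (bug #1940742).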
    # rsyncd.conf is only prepared for 4 nodes
    if is_ubuntu; then
        sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync
    elif [ -e /etc/xinetd.d/rsync ]; then
        sudo sed -i '/disable *= *yes/ { s/yes/no/ }' /etc/xinetd.d/rsync
    fi

    SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONF_DIR}/proxy-server.conf
    cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER}
    cp ${SWIFT_DIR}/etc/internal-client.conf-sample ${SWIFT_CONF_DIR}/internal-client.conf

    # To run the container sync feature introduced in Swift 1.12.0, a
    # container sync "realm" is added in container-sync-realms.conf
    local csyncfile=${SWIFT_CONF_DIR}/container-sync-realms.conf
    cp ${SWIFT_DIR}/etc/container-sync-realms.conf-sample ${csyncfile}
    iniset ${csyncfile} realm1 key realm1key
    iniset ${csyncfile} realm1 cluster_name1 "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/"

    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user
    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${STACK_USER}

    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir
    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir ${SWIFT_CONF_DIR}

    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers
    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers 1

    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level
    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level DEBUG

    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_ip
    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_ip ${SWIFT_SERVICE_LISTEN_ADDRESS}

    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port
    if is_service_enabled tls-proxy; then
        iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT_INT}
    else
        iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT}
    fi

    # DevStack is commonly run in a small, slow environment, so bump the timeouts up.
    # ``node_timeout`` is the node read operation response time to the proxy server.
    # ``conn_timeout`` is how long it takes a connect() system call to return.
    iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server node_timeout 120
    iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server conn_timeout 20

    # Versioned Writes
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:versioned_writes allow_versioned_writes true

    # Configure Ceilometer
    if is_service_enabled ceilometer; then
        iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer "set log_level" "WARN"
        iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer paste.filter_factory "ceilometermiddleware.swift:filter_factory"
        iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer control_exchange "swift"
        iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer url $(get_notification_url)
        iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer driver "messaging"
        iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer topic "notifications"
        SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer"
    fi

    # Restrict the length of auth tokens in the Swift ``proxy-server`` logs.
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:proxy-logging reveal_sensitive_prefix ${SWIFT_LOG_TOKEN_LENGTH}

    # By default Swift is installed with the Keystone and tempauth middleware,
    # and the s3api middleware is added if it is configured. The token for
    # tempauth is prefixed with the reseller_prefix setting ``TEMPAUTH_``; the
    # token for keystoneauth has the standard reseller_prefix ``AUTH_``.
    if is_service_enabled s3api; then
        swift_pipeline+=" s3api"
    fi
    if is_service_enabled keystone; then
        swift_pipeline+=" authtoken"
        if is_service_enabled s3api; then
            swift_pipeline+=" s3token"
            iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token auth_uri ${KEYSTONE_AUTH_URI_V3}
            iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token delay_auth_decision true
        fi
        swift_pipeline+=" keystoneauth"
    fi

    swift_pipeline+=" tempauth "

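    # The two sed calls below splice the assembled middleware list into the
    # sample pipeline. With keystone enabled and the default extras, "tempauth"
    # in the sample pipeline roughly becomes
    # "crossdomain authtoken keystoneauth tempauth formpost staticweb", and any
    # ``SWIFT_EXTRAS_MIDDLEWARE_LAST`` entries are inserted just before
    # proxy-server.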
sed -i "/^pipeline/ { s/tempauth/${swift_pipeline} ${SWIFT_EXTRAS_MIDDLEWARE}/ ;}" ${SWIFT_CONFIG_PROXY_SERVER}
|
|
sed -i "/^pipeline/ { s/proxy-server/${SWIFT_EXTRAS_MIDDLEWARE_LAST} proxy-server/ ; }" ${SWIFT_CONFIG_PROXY_SERVER}
|
|
|
|
iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true
|
|
iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server allow_account_management true
|
|
|
|
# Configure Crossdomain
|
|
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:crossdomain use "egg:swift#crossdomain"
|
|
|
|
# Configure authtoken middleware to use the same Python logging
|
|
# adapter provided by the Swift ``proxy-server``, so that request transaction
|
|
# IDs will included in all of its log messages.
|
|
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken log_name swift
|
|
|
|
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken paste.filter_factory keystonemiddleware.auth_token:filter_factory
|
|
configure_keystone_authtoken_middleware $SWIFT_CONFIG_PROXY_SERVER swift filter:authtoken
|
|
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken delay_auth_decision 1
|
|
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken cache swift.cache
|
|
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken include_service_catalog False
|
|
|
|
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use "egg:swift#keystoneauth"
|
|
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles "Member, admin"
|
|
|
|
# Configure Tempauth. In the sample config file Keystoneauth is commented
|
|
# out. Make sure we uncomment Tempauth after we uncomment Keystoneauth
|
|
# otherwise, this code also sets the reseller_prefix for Keystoneauth.
|
|
iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth account_autocreate
|
|
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth reseller_prefix "TEMPAUTH"
|
|
|
|
# Allow both reseller prefixes to be used with domain_remap
|
|
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:domain_remap reseller_prefixes "AUTH, TEMPAUTH"
|
|
|
|
cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONF_DIR}/swift.conf
|
|
iniset ${SWIFT_CONF_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH}
|
|
iniset ${SWIFT_CONF_DIR}/swift.conf swift-constraints max_header_size ${SWIFT_MAX_HEADER_SIZE}
|
|
iniset ${SWIFT_CONF_DIR}/swift.conf swift-constraints max_file_size ${SWIFT_MAX_FILE_SIZE}
|
|
|
|
local node_number
|
|
for node_number in ${SWIFT_REPLICAS_SEQ}; do
|
|
local swift_node_config=${SWIFT_CONF_DIR}/object-server/${node_number}.conf
|
|
cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config}
|
|
generate_swift_config_services ${swift_node_config} ${node_number} $(( OBJECT_PORT_BASE + 10 * (node_number - 1) )) object
|
|
iniuncomment ${swift_node_config} DEFAULT bind_ip
|
|
iniset ${swift_node_config} DEFAULT bind_ip ${SWIFT_SERVICE_LISTEN_ADDRESS}
|
|
iniset ${swift_node_config} filter:recon recon_cache_path ${SWIFT_DATA_DIR}/cache
|
|
|
|
swift_node_config=${SWIFT_CONF_DIR}/container-server/${node_number}.conf
|
|
cp ${SWIFT_DIR}/etc/container-server.conf-sample ${swift_node_config}
|
|
generate_swift_config_services ${swift_node_config} ${node_number} $(( CONTAINER_PORT_BASE + 10 * (node_number - 1) )) container
|
|
iniuncomment ${swift_node_config} DEFAULT bind_ip
|
|
iniset ${swift_node_config} DEFAULT bind_ip ${SWIFT_SERVICE_LISTEN_ADDRESS}
|
|
|
|
swift_node_config=${SWIFT_CONF_DIR}/account-server/${node_number}.conf
|
|
cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config}
|
|
generate_swift_config_services ${swift_node_config} ${node_number} $(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) )) account
|
|
iniuncomment ${swift_node_config} DEFAULT bind_ip
|
|
iniset ${swift_node_config} DEFAULT bind_ip ${SWIFT_SERVICE_LISTEN_ADDRESS}
|
|
done
|
|
|
|
# Set new accounts in tempauth to match keystone project/user (to make testing easier)
|
|
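    # Each tempauth entry has the form user_<account>_<user> = <key> [groups],
    # so e.g. swiftusertest1 authenticates against account swiftprojecttest1
    # with key "testing" and gets the .admin group.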
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth user_swiftprojecttest1_swiftusertest1 "testing .admin"
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth user_swiftprojecttest2_swiftusertest2 "testing2 .admin"
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth user_swiftprojecttest1_swiftusertest3 "testing3 .admin"

    testfile=${SWIFT_CONF_DIR}/test.conf
    cp ${SWIFT_DIR}/test/sample.conf ${testfile}

    # Set accounts for functional tests
    iniset ${testfile} func_test account swiftprojecttest1
    iniset ${testfile} func_test username swiftusertest1
    iniset ${testfile} func_test username3 swiftusertest3
    iniset ${testfile} func_test account2 swiftprojecttest2
    iniset ${testfile} func_test username2 swiftusertest2
    iniset ${testfile} func_test account4 swiftprojecttest4
    iniset ${testfile} func_test username4 swiftusertest4
    iniset ${testfile} func_test password4 testing4
    iniset ${testfile} func_test domain4 swift_test

    if is_service_enabled keystone; then
        iniuncomment ${testfile} func_test auth_version
        local auth_vers
        auth_vers=$(iniget ${testfile} func_test auth_version)
        iniset ${testfile} func_test auth_host ${KEYSTONE_SERVICE_HOST}
        if [[ "$KEYSTONE_AUTH_PROTOCOL" == "https" ]]; then
            iniset ${testfile} func_test auth_port 443
        else
            iniset ${testfile} func_test auth_port 80
        fi
        iniset ${testfile} func_test auth_uri ${KEYSTONE_SERVICE_URI}
        if [[ "$auth_vers" == "3" ]]; then
            iniset ${testfile} func_test auth_prefix /identity/v3/
        else
            iniset ${testfile} func_test auth_prefix /identity/v2.0/
        fi
        if is_service_enabled tls-proxy; then
            iniset ${testfile} func_test cafile ${SSL_BUNDLE_FILE}
            iniset ${testfile} func_test web_front_end apache2
        fi
    fi

    local user_group
    user_group=$(id -g ${STACK_USER})
    sudo install -d -o ${STACK_USER} -g ${user_group} ${SWIFT_DATA_DIR}

    local swift_log_dir=${SWIFT_DATA_DIR}/logs
    sudo rm -rf ${swift_log_dir}
    local swift_log_group=adm
    if is_suse; then
        swift_log_group=root
    fi
    sudo install -d -o ${STACK_USER} -g ${swift_log_group} ${swift_log_dir}/hourly

    if [[ $SYSLOG != "False" ]]; then
        sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \
            tee /etc/rsyslog.d/10-swift.conf
        echo "MaxMessageSize 6k" | sudo tee /etc/rsyslog.d/99-maxsize.conf
        # restart syslog to pick up the changes
        sudo killall -HUP rsyslogd
    fi

    if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then
        _config_swift_apache_wsgi
    fi
}

# create_swift_disk - Create Swift backing disk
function create_swift_disk {
    local node_number

    # First do a bit of setup by creating the directories and
    # changing the permissions so we can run it as our user.

    local user_group
    user_group=$(id -g ${STACK_USER})
    sudo install -d -o ${STACK_USER} -g ${user_group} ${SWIFT_DATA_DIR}/{drives,cache,run,logs}

    # Create a loopback disk and format it to XFS.
    if [[ -e ${SWIFT_DISK_IMAGE} ]]; then
        if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
            sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
            sudo rm -f ${SWIFT_DISK_IMAGE}
        fi
    fi

    mkdir -p ${SWIFT_DATA_DIR}/drives/images
    sudo touch ${SWIFT_DISK_IMAGE}
    sudo chown ${STACK_USER}: ${SWIFT_DISK_IMAGE}

    truncate -s ${SWIFT_LOOPBACK_DISK_SIZE} ${SWIFT_DISK_IMAGE}

    # Make a fresh XFS filesystem
    /sbin/mkfs.xfs -f -i size=1024 ${SWIFT_DISK_IMAGE}

    # Mount the disk with mount options to make it as efficient as possible
    mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1
    if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
        sudo mount -t xfs -o loop,noatime,nodiratime,logbufs=8 \
            ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1
    fi

    # Create a link to the above mount and
    # create all of the directories needed to emulate a few different servers
    local node_number
    for node_number in ${SWIFT_REPLICAS_SEQ}; do
        # node_devices must match the *.conf devices option
        local node_devices=${SWIFT_DATA_DIR}/${node_number}
        local real_devices=${SWIFT_DATA_DIR}/drives/sdb1/$node_number
        sudo ln -sf $real_devices $node_devices;
        local device=${real_devices}/sdb1
        [[ -d $device ]] && continue
        sudo install -o ${STACK_USER} -g $user_group -d $device
    done
}

# create_swift_accounts() - Set up the standard Swift accounts and an extra
# one for tests. We do this by joining all words in the account name, since
# we want to make it compatible with tempauth, which uses underscores as
# separators.

# Project              User             Roles          Domain
# -------------------------------------------------------------------
# service              swift            service        default
# swiftprojecttest1    swiftusertest1   admin          default
# swiftprojecttest1    swiftusertest3   anotherrole    default
# swiftprojecttest2    swiftusertest2   admin          default
# swiftprojecttest4    swiftusertest4   admin          swift_test

function create_swift_accounts {
    # Defines specific passwords used by ``tools/create_userrc.sh``
    # As these variables are used by ``create_userrc.sh``, they must be exported
    # The _password suffix is expected by ``create_userrc.sh``.
    export swiftusertest1_password=testing
    export swiftusertest2_password=testing2
    export swiftusertest3_password=testing3
    export swiftusertest4_password=testing4

    local another_role
    another_role=$(get_or_create_role "anotherrole")

    # NOTE(jroll): Swift doesn't need the admin role here, however Ironic uses
    # temp urls, which break when uploaded by a non-admin role
    create_service_user "swift" "admin"

    get_or_create_service "swift" "object-store" "Swift Service"
    get_or_create_endpoint \
        "object-store" \
        "$REGION_NAME" \
        "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(project_id)s" \
        "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT"
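    # With the defaults this publishes an endpoint of the form
    # $SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080/v1/AUTH_<project_id>.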

    local swift_project_test1
    swift_project_test1=$(get_or_create_project swiftprojecttest1 default)
    die_if_not_set $LINENO swift_project_test1 "Failure creating swift_project_test1"
    SWIFT_USER_TEST1=$(get_or_create_user swiftusertest1 $swiftusertest1_password \
        "default" "test@example.com")
    die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1"
    get_or_add_user_project_role admin $SWIFT_USER_TEST1 $swift_project_test1

    local swift_user_test3
    swift_user_test3=$(get_or_create_user swiftusertest3 $swiftusertest3_password \
        "default" "test3@example.com")
    die_if_not_set $LINENO swift_user_test3 "Failure creating swift_user_test3"
    get_or_add_user_project_role $another_role $swift_user_test3 $swift_project_test1

    local swift_project_test2
    swift_project_test2=$(get_or_create_project swiftprojecttest2 default)
    die_if_not_set $LINENO swift_project_test2 "Failure creating swift_project_test2"

    local swift_user_test2
    swift_user_test2=$(get_or_create_user swiftusertest2 $swiftusertest2_password \
        "default" "test2@example.com")
    die_if_not_set $LINENO swift_user_test2 "Failure creating swift_user_test2"
    get_or_add_user_project_role admin $swift_user_test2 $swift_project_test2

    local swift_domain
    swift_domain=$(get_or_create_domain swift_test 'Used for swift functional testing')
    die_if_not_set $LINENO swift_domain "Failure creating swift_test domain"

    local swift_project_test4
    swift_project_test4=$(get_or_create_project swiftprojecttest4 $swift_domain)
    die_if_not_set $LINENO swift_project_test4 "Failure creating swift_project_test4"

    local swift_user_test4
    swift_user_test4=$(get_or_create_user swiftusertest4 $swiftusertest4_password \
        $swift_domain "test4@example.com")
    die_if_not_set $LINENO swift_user_test4 "Failure creating swift_user_test4"
    get_or_add_user_project_role admin $swift_user_test4 $swift_project_test4
}

# init_swift() - Initialize rings
function init_swift {
    local node_number
    # Make sure to kill all swift processes first
    $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true

    # Forcibly re-create the backing filesystem
    create_swift_disk

    # This is where we create three different rings for swift with
    # different object servers binding on different ports.
    pushd ${SWIFT_CONF_DIR} >/dev/null && {

        rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz

        $SWIFT_BIN_DIR/swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
        $SWIFT_BIN_DIR/swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
        $SWIFT_BIN_DIR/swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
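
        # Each "add" below uses swift-ring-builder's device string format
        # z<zone>-<ip>:<port>/<device> followed by a weight, e.g.
        # z1-127.0.0.1:6613/sdb1 1 for the first object server on the default port.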
        # The ring will be created on each node, and because the order of
        # nodes is identical we can use a seed for rebalancing, making it
        # possible to get a ring on each node that uses the same partition
        # assignment.
        if [[ -n $SWIFT_STORAGE_IPS ]]; then
            local node_number
            node_number=1

            for node in ${SWIFT_STORAGE_IPS}; do
                $SWIFT_BIN_DIR/swift-ring-builder object.builder add z${node_number}-${node}:${OBJECT_PORT_BASE}/sdb1 1
                $SWIFT_BIN_DIR/swift-ring-builder container.builder add z${node_number}-${node}:${CONTAINER_PORT_BASE}/sdb1 1
                $SWIFT_BIN_DIR/swift-ring-builder account.builder add z${node_number}-${node}:${ACCOUNT_PORT_BASE}/sdb1 1
                let "node_number=node_number+1"
            done

        else

            for node_number in ${SWIFT_REPLICAS_SEQ}; do
                $SWIFT_BIN_DIR/swift-ring-builder object.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1
                $SWIFT_BIN_DIR/swift-ring-builder container.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1
                $SWIFT_BIN_DIR/swift-ring-builder account.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1
            done
        fi

        # We use a seed for rebalancing. Doing this allows us to create
        # identical rings on multiple nodes if SWIFT_STORAGE_IPS is the same.
        $SWIFT_BIN_DIR/swift-ring-builder object.builder rebalance 42
        $SWIFT_BIN_DIR/swift-ring-builder container.builder rebalance 42
        $SWIFT_BIN_DIR/swift-ring-builder account.builder rebalance 42
    } && popd >/dev/null
}

function install_swift {
    git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH
    # keystonemiddleware needs to be installed via keystone extras as defined
    # in setup.cfg; see bug #1909018 for more details.
    setup_develop $SWIFT_DIR keystone
    if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then
        install_apache_wsgi
    fi
}

function install_swiftclient {
    if use_library_from_git "python-swiftclient"; then
        git_clone_by_name "python-swiftclient"
        setup_dev_lib "python-swiftclient"
    fi
}

# install_ceilometermiddleware() - Collect source and prepare.
# Note that this doesn't really have anything to do with ceilometer;
# though ceilometermiddleware has ceilometer in its name as an
# artifact of history, it is not a ceilometer-specific tool. It
# simply generates pycadf-based notifications about requests and
# responses on the swift proxy.
function install_ceilometermiddleware {
    if use_library_from_git "ceilometermiddleware"; then
        git_clone_by_name "ceilometermiddleware"
        setup_dev_lib "ceilometermiddleware"
    else
        pip_install_gr ceilometermiddleware
    fi
}

# start_swift() - Start running processes
function start_swift {
    # (re)start memcached to make sure we have a clean memcache.
    restart_service memcached

    # Start rsync
    if is_ubuntu; then
        sudo /etc/init.d/rsync restart || :
    elif [ -e /etc/xinetd.d/rsync ]; then
        start_service xinetd
    else
        start_service rsyncd
    fi

    if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then
        # Apache should serve the "PACO" a.k.a "main" services
        restart_apache_server
        # The rest of the services should be started in the background
        $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start
        return 0
    fi


    # By default, with only one replica, we launch the proxy, container,
    # account and object servers in screen in the foreground. Then the rest
    # of the services are optionally started.
    #
    # If ``SWIFT_REPLICAS`` is set to something greater than one we first
    # spawn *all* the Swift services and then kill the proxy service so we
    # can run it in the foreground in screen.
    #
    # ``swift-init ... {stop|restart}`` exits with '1' if no servers are
    # running; ignore it just in case.
    if [[ ${SWIFT_REPLICAS} == 1 ]]; then
        local foreground_services type

        foreground_services="object container account"
        for type in ${foreground_services}; do
            run_process s-${type} "$SWIFT_BIN_DIR/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v"
        done

        if [[ "$SWIFT_START_ALL_SERVICES" == "True" ]]; then
            $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start
        else
            # The container-sync daemon is strictly needed to pass the
            # container sync Tempest tests.
            enable_service s-container-sync
            run_process s-container-sync "$SWIFT_BIN_DIR/swift-container-sync ${SWIFT_CONF_DIR}/container-server/1.conf"
        fi
    else
        $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true
        $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run proxy stop || true
    fi

    if is_service_enabled tls-proxy; then
        local proxy_port=${SWIFT_DEFAULT_BIND_PORT}
        start_tls_proxy swift '*' $proxy_port $SERVICE_HOST $SWIFT_DEFAULT_BIND_PORT_INT $SWIFT_MAX_HEADER_SIZE
    fi
    run_process s-proxy "$SWIFT_BIN_DIR/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v"

    # We also started the storage services, but the proxy started last and
    # will take the longest to start, so by the time it comes up, we're
    # probably fine.
    echo "Waiting for swift proxy to start..."
    if ! wait_for_service $SERVICE_TIMEOUT $SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/info; then
        die $LINENO "swift proxy did not start"
    fi

    if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]]; then
        swift_configure_tempurls
    fi
}

# stop_swift() - Stop running processes
function stop_swift {
    local type

    if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then
        $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run rest stop && return 0
    fi

    # screen normally killed by ``unstack.sh``
    if type -p $SWIFT_BIN_DIR/swift-init >/dev/null; then
        $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
    fi
    # Dump all of the servers
    # Maintain the iteration as stop_process() has some desirable side-effects
    for type in proxy object container account; do
        stop_process s-${type}
    done
    # Blast out any stragglers
    pkill -f swift- || true
}

function swift_configure_tempurls {
    # note we are using swift credentials!
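    # Once the key is set, clients can sign URLs themselves; for example, with
    # python-swiftclient something like
    #   swift tempurl GET 3600 /v1/AUTH_<project>/<container>/<object> $SWIFT_TEMPURL_KEY
    # would produce a time-limited URL (shown here only as an illustration).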
    OS_USERNAME=swift \
        OS_PASSWORD=$SERVICE_PASSWORD \
        OS_USER_DOMAIN_NAME=$SERVICE_DOMAIN_NAME \
        OS_PROJECT_NAME=$SERVICE_PROJECT_NAME \
        OS_PROJECT_DOMAIN_NAME=$SERVICE_DOMAIN_NAME \
        openstack object store account \
        set --property "Temp-URL-Key=$SWIFT_TEMPURL_KEY"
}

# Restore xtrace
$_XTRACE_LIB_SWIFT

# Tell emacs to use shell-script-mode
## Local variables:
## mode: shell-script
## End: