devstack/lib/cinder
Stephen Finucane 05f7d302cf lib/cinder: Migrate cinder to WSGI module path
Change-Id: I494dae51c65318299d4fe2ff5887c97ac2be3224
Signed-off-by: Stephen Finucane <sfinucan@redhat.com>
Depends-on: https://review.opendev.org/c/openstack/cinder/+/902876
2024-12-09 14:03:49 +00:00

745 lines
28 KiB
Bash

#!/bin/bash
#
# lib/cinder
# Install and start **Cinder** volume service
#
# Dependencies:
#
# - functions
# - DEST, DATA_DIR, STACK_USER must be defined
# - SERVICE_{TENANT_NAME|PASSWORD} must be defined
# - ``KEYSTONE_TOKEN_FORMAT`` must be defined
#
# Entry points called from stack.sh:
# ----------------------------------
# - install_cinder
# - configure_cinder
# - init_cinder
# - start_cinder
# - stop_cinder
# - cleanup_cinder

# Save trace setting so it can be restored at the end of this file
_XTRACE_CINDER=$(set +o | grep xtrace)
set +o xtrace

# Defaults
# --------

# Set up default driver; a readable file of the same name under
# lib/cinder_plugins provides driver-specific configure hooks.
CINDER_DRIVER=${CINDER_DRIVER:-default}
CINDER_PLUGINS=$TOP_DIR/lib/cinder_plugins
CINDER_BACKENDS=$TOP_DIR/lib/cinder_backends
CINDER_BACKUPS=$TOP_DIR/lib/cinder_backups

# Grab plugin config if specified via CINDER_DRIVER
if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then
    source $CINDER_PLUGINS/$CINDER_DRIVER
fi

# Set up default directories
GITDIR["python-cinderclient"]=$DEST/python-cinderclient
GITDIR["python-brick-cinderclient-ext"]=$DEST/python-brick-cinderclient-ext
CINDER_DIR=$DEST/cinder

# Address cinder advertises to other services, chosen by the
# deployment's IP version
if [[ $SERVICE_IP_VERSION == 6 ]]; then
    CINDER_MY_IP="$HOST_IPV6"
else
    CINDER_MY_IP="$HOST_IP"
fi

# Cinder virtual environment: use the per-project venv bin dir when
# USE_VENV is enabled, otherwise the global python exec prefix.
if [[ ${USE_VENV} = True ]]; then
    PROJECT_VENV["cinder"]=${CINDER_DIR}.venv
    CINDER_BIN_DIR=${PROJECT_VENV["cinder"]}/bin
else
    CINDER_BIN_DIR=$(get_python_exec_prefix)
fi
CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder}

CINDER_CONF_DIR=/etc/cinder
CINDER_CONF=$CINDER_CONF_DIR/cinder.conf
# WSGI module path served by uwsgi for the Cinder API
CINDER_UWSGI=cinder.wsgi.api:application
CINDER_UWSGI_CONF=$CINDER_CONF_DIR/cinder-api-uwsgi.ini
CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini

# Public facing bits
if is_service_enabled tls-proxy; then
    CINDER_SERVICE_PROTOCOL="https"
fi
CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST}
CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776}
# Internal port the API binds to when tls-proxy fronts the public port
CINDER_SERVICE_PORT_INT=${CINDER_SERVICE_PORT_INT:-18776}
CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)}

# We do not need to report service status every 10s for devstack-like
# deployments. In the gate this generates extra work for the services and the
# database which are already taxed.
CINDER_SERVICE_REPORT_INTERVAL=${CINDER_SERVICE_REPORT_INTERVAL:-120}

# What type of LVM device should Cinder use for LVM backend
# Defaults to auto, which will do thin provisioning if it's a fresh
# volume group, otherwise it will do thick. The other valid choices are
# default, which is thick, or thin, which as the name implies utilizes lvm
# thin provisioning.
CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-auto}

# ``CINDER_USE_SERVICE_TOKEN`` is a mode where service token is passed along with
# user token while communicating to external REST APIs like Glance.
CINDER_USE_SERVICE_TOKEN=$(trueorfalse True CINDER_USE_SERVICE_TOKEN)

# Default backends
# The backend format is type:name where type is one of the supported backend
# types (lvm, nfs, etc) and name is the identifier used in the Cinder
# configuration and for the volume type name. Multiple backends are
# comma-separated.
# The old ``CINDER_MULTI_LVM_BACKEND=True`` setting had a default of:
# CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-lvm:lvmdriver-1,lvm:lvmdriver-2}
CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-lvm:lvmdriver-1}

# How deleted volume data is scrubbed; normalized to lower case so user
# supplied values like "Zero" still match cinder's expected options.
CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}}
CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]')

# Name of the volume type used to mark multiattach-capable volumes
VOLUME_TYPE_MULTIATTACH=${VOLUME_TYPE_MULTIATTACH:-multiattach}
# Honor the deprecated CINDER_ISCSI_HELPER setting, but let the newer
# CINDER_TARGET_HELPER win when both are set.
if [[ -n "$CINDER_ISCSI_HELPER" ]]; then
    if [[ -z "$CINDER_TARGET_HELPER" ]]; then
        deprecated 'Using CINDER_ISCSI_HELPER is deprecated, use CINDER_TARGET_HELPER instead'
        CINDER_TARGET_HELPER="$CINDER_ISCSI_HELPER"
    else
        deprecated 'Deprecated CINDER_ISCSI_HELPER is set, but is being overwritten by CINDER_TARGET_HELPER'
    fi
fi
CINDER_TARGET_HELPER=${CINDER_TARGET_HELPER:-lioadm}

# Protocol/prefix/port defaults depend on whether targets are NVMe-oF
# (nvmet) or iSCSI (everything else).
if [[ $CINDER_TARGET_HELPER == 'nvmet' ]]; then
    CINDER_TARGET_PROTOCOL=${CINDER_TARGET_PROTOCOL:-'nvmet_rdma'}
    CINDER_TARGET_PREFIX=${CINDER_TARGET_PREFIX:-'nvme-subsystem-1'}
    CINDER_TARGET_PORT=${CINDER_TARGET_PORT:-4420}
else
    CINDER_TARGET_PROTOCOL=${CINDER_TARGET_PROTOCOL:-'iscsi'}
    CINDER_TARGET_PREFIX=${CINDER_TARGET_PREFIX:-'iqn.2010-10.org.openstack:'}
    CINDER_TARGET_PORT=${CINDER_TARGET_PORT:-3260}
fi

# EL should only use lioadm
if is_fedora; then
    if [[ ${CINDER_TARGET_HELPER} != "lioadm" && ${CINDER_TARGET_HELPER} != 'nvmet' ]]; then
        die "lioadm and nvmet are the only valid Cinder target_helper config on this platform"
    fi
fi

# When Cinder is used as a backend for Glance, it can be configured to clone
# the volume containing image data directly in the backend instead of
# transferring data from volume to volume. Value is a comma separated list of
# schemes (currently only 'file' and 'cinder' are supported). The default
# configuration in Cinder is empty (that is, do not use this feature). NOTE:
# to use this feature you must also enable GLANCE_SHOW_DIRECT_URL and/or
# GLANCE_SHOW_MULTIPLE_LOCATIONS for glance-api.conf.
CINDER_ALLOWED_DIRECT_URL_SCHEMES=${CINDER_ALLOWED_DIRECT_URL_SCHEMES:-}
if [[ -n "$CINDER_ALLOWED_DIRECT_URL_SCHEMES" ]]; then
    if [[ "${GLANCE_SHOW_DIRECT_URL:-False}" != "True" \
            && "${GLANCE_SHOW_MULTIPLE_LOCATIONS:-False}" != "True" ]]; then
        warn $LINENO "CINDER_ALLOWED_DIRECT_URL_SCHEMES is set, but neither \
GLANCE_SHOW_DIRECT_URL nor GLANCE_SHOW_MULTIPLE_LOCATIONS is True"
    fi
fi

# For backward compatibility
# Before CINDER_BACKUP_DRIVER was introduced, ceph backup driver was configured
# along with ceph backend driver.
if [[ -z "${CINDER_BACKUP_DRIVER}" && "$CINDER_ENABLED_BACKENDS" =~ "ceph" ]]; then
    CINDER_BACKUP_DRIVER=ceph
fi
# Supported backup drivers are in lib/cinder_backups
CINDER_BACKUP_DRIVER=${CINDER_BACKUP_DRIVER:-swift}
# Source the enabled backends so their configure/init/cleanup hooks
# (configure_cinder_backend_<type>, etc.) are defined for later use
if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
    for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
        be_type=${be%%:*}
        be_name=${be##*:}
        if [[ -r $CINDER_BACKENDS/${be_type} ]]; then
            source $CINDER_BACKENDS/${be_type}
        fi
    done
fi

# Source the backup driver; unlike backends, an unknown driver is fatal
if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then
    if [[ -r $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER ]]; then
        source $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER
    else
        die "cinder backup driver $CINDER_BACKUP_DRIVER is not supported"
    fi
fi

# Environment variables to configure the image-volume cache
CINDER_IMG_CACHE_ENABLED=${CINDER_IMG_CACHE_ENABLED:-True}

# Environment variables to configure the optimized volume upload
CINDER_UPLOAD_OPTIMIZED=${CINDER_UPLOAD_OPTIMIZED:-False}
# Environment variables to configure the internal tenant during optimized volume upload
CINDER_UPLOAD_INTERNAL_TENANT=${CINDER_UPLOAD_INTERNAL_TENANT:-False}

# For limits, if left unset, it will use cinder defaults of 0 for unlimited
CINDER_IMG_CACHE_SIZE_GB=${CINDER_IMG_CACHE_SIZE_GB:-}
CINDER_IMG_CACHE_SIZE_COUNT=${CINDER_IMG_CACHE_SIZE_COUNT:-}

# Configure which cinder backends will have the image-volume cache, this takes the same
# form as the CINDER_ENABLED_BACKENDS config option. By default it will
# enable the cache for all cinder backends.
CINDER_CACHE_ENABLED_FOR_BACKENDS=${CINDER_CACHE_ENABLED_FOR_BACKENDS:-$CINDER_ENABLED_BACKENDS}

# Configure which cinder backends will have optimized volume upload, this takes the same
# form as the CINDER_ENABLED_BACKENDS config option. By default it will
# enable optimized upload for all cinder backends.
CINDER_UPLOAD_OPTIMIZED_BACKENDS=${CINDER_UPLOAD_OPTIMIZED_BACKENDS:-$CINDER_ENABLED_BACKENDS}

# Flag to set the oslo_policy.enforce_scope. This is used to switch
# the Volume API policies to start checking the scope of token. by default,
# this flag is False.
# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope
CINDER_ENFORCE_SCOPE=$(trueorfalse False CINDER_ENFORCE_SCOPE)
# Functions
# ---------
# is_cinder_enabled() - Test if any Cinder services are enabled
# Returns 0 when at least one "c-*" service is enabled and "cinder" is not
# explicitly disabled, 1 otherwise.
function is_cinder_enabled {
    # An explicit "cinder" entry in DISABLED_SERVICES wins over everything
    if [[ ,${DISABLED_SERVICES} =~ ,"cinder" ]]; then
        return 1
    fi
    # Otherwise any enabled "c-*" service (c-api, c-vol, ...) counts
    if [[ ,${ENABLED_SERVICES} =~ ,"c-" ]]; then
        return 0
    fi
    return 1
}
# _cinder_cleanup_apache_wsgi() - Remove wsgi files, disable and remove
# the apache vhost file for the legacy osapi-volume site.
function _cinder_cleanup_apache_wsgi {
    local vhost_conf
    vhost_conf=$(apache_site_config_for osapi-volume)
    sudo rm -f "$vhost_conf"
}
# cleanup_cinder() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up.
# Tears down the configured target helper (tgtadm/lioadm/nvmet), then runs
# the optional per-backend and backup-driver cleanup hooks, and finally
# stops the API process and removes its uwsgi config.
function cleanup_cinder {
    # ensure the volume group is cleared up because fails might
    # leave dead volumes in the group
    if [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then
        local targets
        targets=$(sudo tgtadm --op show --mode target)
        if [ $? -ne 0 ]; then
            # If tgt driver isn't running this won't work obviously
            # So check the response and restart if need be
            echo "tgtd seems to be in a bad state, restarting..."
            if is_ubuntu; then
                restart_service tgt
            else
                restart_service tgtd
            fi
            targets=$(sudo tgtadm --op show --mode target)
        fi
        if [[ -n "$targets" ]]; then
            # Collect the iqn's from the persisted target definitions and
            # delete each target so its backing volume can be removed
            local iqn_list=( $(grep --no-filename -r iqn $SCSI_PERSIST_DIR | sed 's/<target //' | sed 's/>//') )
            for i in "${iqn_list[@]}"; do
                echo removing iSCSI target: $i
                sudo tgt-admin --delete $i
            done
        fi
        if is_ubuntu; then
            stop_service tgt
        else
            stop_service tgtd
        fi
    elif [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then
        # xargs -r makes this a no-op when there are no targets to delete
        sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete
    elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then
        # If we don't disconnect everything vgremove will block
        sudo nvme disconnect-all
        sudo nvmetcli clear
    else
        die $LINENO "Unknown value \"$CINDER_TARGET_HELPER\" for CINDER_TARGET_HELPER"
    fi

    # Give each enabled backend a chance to clean up after itself; the
    # cleanup_cinder_backend_<type> hooks are optional
    if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
        local be be_name be_type
        for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
            be_type=${be%%:*}
            be_name=${be##*:}
            if type cleanup_cinder_backend_${be_type} >/dev/null 2>&1; then
                cleanup_cinder_backend_${be_type} ${be_name}
            fi
        done
    fi

    # Same for the (optional) backup driver cleanup hook
    if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then
        if type cleanup_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then
            cleanup_cinder_backup_$CINDER_BACKUP_DRIVER
        fi
    fi

    stop_process "c-api"
    remove_uwsgi_config "$CINDER_UWSGI_CONF" "cinder-wsgi"
}
# configure_cinder() - Set config files, create data dirs, etc
# Builds cinder.conf from scratch (any previous copy is deleted), wires up
# keystone auth, database, backends, backup driver, TLS, policy scope and
# the uwsgi config for the API.
function configure_cinder {
    sudo install -d -o $STACK_USER -m 755 $CINDER_CONF_DIR

    # Start from a clean config every run
    rm -f $CINDER_CONF

    configure_rootwrap cinder

    if [[ -f "$CINDER_DIR/etc/cinder/resource_filters.json" ]]; then
        cp -p "$CINDER_DIR/etc/cinder/resource_filters.json" "$CINDER_CONF_DIR/resource_filters.json"
    fi

    cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI

    # Comment out the legacy authtoken settings shipped in the sample paste
    # config; keystonemiddleware is configured through cinder.conf instead
    inicomment $CINDER_API_PASTE_INI filter:authtoken auth_host
    inicomment $CINDER_API_PASTE_INI filter:authtoken auth_port
    inicomment $CINDER_API_PASTE_INI filter:authtoken auth_protocol
    inicomment $CINDER_API_PASTE_INI filter:authtoken cafile
    inicomment $CINDER_API_PASTE_INI filter:authtoken admin_tenant_name
    inicomment $CINDER_API_PASTE_INI filter:authtoken admin_user
    inicomment $CINDER_API_PASTE_INI filter:authtoken admin_password
    inicomment $CINDER_API_PASTE_INI filter:authtoken signing_dir

    configure_keystone_authtoken_middleware $CINDER_CONF cinder

    iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL

    iniset $CINDER_CONF DEFAULT target_helper "$CINDER_TARGET_HELPER"
    iniset $CINDER_CONF database connection `database_connection_url cinder`
    iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI
    iniset $CINDER_CONF DEFAULT rootwrap_config "$CINDER_CONF_DIR/rootwrap.conf"
    iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.contrib.standard_extensions
    iniset $CINDER_CONF DEFAULT osapi_volume_listen $CINDER_SERVICE_LISTEN_ADDRESS
    iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH
    iniset $CINDER_CONF oslo_concurrency lock_path $CINDER_STATE_PATH
    iniset $CINDER_CONF DEFAULT my_ip "$CINDER_MY_IP"
    # Simple conf-file key manager with a random fixed key; suitable for
    # devstack only, not for production
    iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager
    iniset $CINDER_CONF key_manager fixed_key $(openssl rand -hex 16)
    if [[ -n "$CINDER_ALLOWED_DIRECT_URL_SCHEMES" ]]; then
        iniset $CINDER_CONF DEFAULT allowed_direct_url_schemes $CINDER_ALLOWED_DIRECT_URL_SCHEMES
    fi

    # set default quotas
    iniset $CINDER_CONF DEFAULT quota_volumes ${CINDER_QUOTA_VOLUMES:-10}
    iniset $CINDER_CONF DEFAULT quota_backups ${CINDER_QUOTA_BACKUPS:-10}
    iniset $CINDER_CONF DEFAULT quota_snapshots ${CINDER_QUOTA_SNAPSHOTS:-10}

    # Avoid RPC timeouts in slow CI and test environments by doubling the
    # default response timeout set by RPC clients. See bug #1873234 for more
    # details and example failures.
    iniset $CINDER_CONF DEFAULT rpc_response_timeout 120

    iniset $CINDER_CONF DEFAULT report_interval $CINDER_SERVICE_REPORT_INTERVAL
    iniset $CINDER_CONF DEFAULT service_down_time $(($CINDER_SERVICE_REPORT_INTERVAL * 6))

    if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
        local enabled_backends=""
        local default_name=""
        local be be_name be_type
        for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
            be_type=${be%%:*}
            be_name=${be##*:}
            # Per-backend configure hook is optional
            if type configure_cinder_backend_${be_type} >/dev/null 2>&1; then
                configure_cinder_backend_${be_type} ${be_name}
            fi
            # First backend in the list becomes the default volume type
            if [[ -z "$default_name" ]]; then
                default_name=$be_name
            fi
            enabled_backends+=$be_name,
        done
        # Strip the trailing comma from the accumulated backend list
        iniset $CINDER_CONF DEFAULT enabled_backends ${enabled_backends%,*}
        if [[ -n "$default_name" ]]; then
            iniset $CINDER_CONF DEFAULT default_volume_type ${default_name}
        fi
        configure_cinder_image_volume_cache

        # The upload optimization uses Cinder's clone volume functionality to
        # clone the Image-Volume from source volume hence can only be
        # performed when glance is using cinder as it's backend.
        if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then
            # Configure optimized volume upload
            configure_cinder_volume_upload
        fi
    fi

    if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then
        if type configure_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then
            configure_cinder_backup_$CINDER_BACKUP_DRIVER
        else
            die "configure_cinder_backup_$CINDER_BACKUP_DRIVER doesn't exist in $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER"
        fi
    fi

    if is_service_enabled ceilometer; then
        iniset $CINDER_CONF oslo_messaging_notifications driver "messagingv2"
    fi

    if is_service_enabled tls-proxy; then
        if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
            # Set the service port for a proxy to take the original
            iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT
            iniset $CINDER_CONF oslo_middleware enable_proxy_headers_parsing True
        fi
    fi

    if [ "$SYSLOG" != "False" ]; then
        iniset $CINDER_CONF DEFAULT use_syslog True
    fi

    iniset_rpc_backend cinder $CINDER_CONF

    # Format logging
    setup_logging $CINDER_CONF

    if is_service_enabled c-api; then
        write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume" "" "cinder-api"
    fi

    # Driver-specific configuration hook supplied by lib/cinder_plugins
    if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then
        configure_cinder_driver
    fi

    iniset $CINDER_CONF DEFAULT osapi_volume_workers "$API_WORKERS"

    iniset $CINDER_CONF DEFAULT glance_api_servers "$GLANCE_URL"
    if is_service_enabled tls-proxy; then
        iniset $CINDER_CONF DEFAULT glance_protocol https
        iniset $CINDER_CONF DEFAULT glance_ca_certificates_file $SSL_BUNDLE_FILE
    fi

    # Set nova credentials (used for os-assisted-snapshots)
    configure_keystone_authtoken_middleware $CINDER_CONF nova nova
    iniset $CINDER_CONF nova region_name "$REGION_NAME"

    iniset $CINDER_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"

    # Coordination backend: explicit URL wins, otherwise fall back to etcd3
    # when that service is enabled
    if [[ ! -z "$CINDER_COORDINATION_URL" ]]; then
        iniset $CINDER_CONF coordination backend_url "$CINDER_COORDINATION_URL"
    elif is_service_enabled etcd3; then
        # NOTE(jan.gutter): api_version can revert to default once tooz is
        # updated with the etcd v3.4 defaults
        iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT?api_version=v3"
    fi

    if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
        iniset $CINDER_CONF oslo_policy enforce_scope true
        iniset $CINDER_CONF oslo_policy enforce_new_defaults true
    else
        iniset $CINDER_CONF oslo_policy enforce_scope false
        iniset $CINDER_CONF oslo_policy enforce_new_defaults false
    fi

    if [ "$CINDER_USE_SERVICE_TOKEN" == "True" ]; then
        init_cinder_service_user_conf
    fi
}
# create_cinder_accounts() - Set up common required cinder accounts
#
# Project              User    Roles
# ------------------------------------------------------------------
# SERVICE_PROJECT_NAME cinder  service
# SERVICE_PROJECT_NAME cinder  creator (if Barbican is enabled)
#
# Migrated from keystone_data.sh
function create_cinder_accounts {
    # Cinder
    if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
        local extra_role=""

        # cinder needs the "creator" role in order to interact with barbican
        if is_service_enabled barbican; then
            extra_role=$(get_or_create_role "creator")
        fi
        create_service_user "cinder" $extra_role

        local cinder_api_url
        cinder_api_url="$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume"

        # block-storage is the official service type
        get_or_create_service "cinder" "block-storage" "Cinder Volume Service"
        get_or_create_endpoint \
            "block-storage" \
            "$REGION_NAME" \
            "$cinder_api_url/v3"

        configure_cinder_internal_tenant
    fi
}
# init_cinder() - Initialize database and volume group
# Recreates and migrates the cinder database, runs the optional
# init_cinder_backend_<type>/init_cinder_backup_<driver> hooks and makes
# sure the volumes state directory exists.
function init_cinder {
    if is_service_enabled $DATABASE_BACKENDS; then
        # (Re)create cinder database
        recreate_database cinder

        time_start "dbsync"
        # Migrate cinder database
        $CINDER_BIN_DIR/cinder-manage --config-file $CINDER_CONF db sync
        time_stop "dbsync"
    fi

    if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
        local be be_name be_type
        for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
            be_type=${be%%:*}
            be_name=${be##*:}
            # Per-backend init hook is optional
            if type init_cinder_backend_${be_type} >/dev/null 2>&1; then
                init_cinder_backend_${be_type} ${be_name}
            fi
        done
    fi

    if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then
        if type init_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then
            init_cinder_backup_$CINDER_BACKUP_DRIVER
        fi
    fi

    mkdir -p $CINDER_STATE_PATH/volumes
}
# install_cinder() - Collect source and prepare
# Clones and installs cinder itself, then installs the packages and kernel
# modules needed by the configured target helper (tgtadm, lioadm or nvmet).
function install_cinder {
    git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH
    setup_develop $CINDER_DIR
    if [[ "$CINDER_TARGET_HELPER" == "tgtadm" ]]; then
        install_package tgt
    elif [[ "$CINDER_TARGET_HELPER" == "lioadm" ]]; then
        if is_ubuntu; then
            # TODO(frickler): Workaround for https://launchpad.net/bugs/1819819
            sudo mkdir -p /etc/target

            install_package targetcli-fb
        else
            install_package targetcli
        fi
    elif [[ "$CINDER_TARGET_HELPER" == "nvmet" ]]; then
        install_package nvme-cli
        # TODO: Remove manual installation of the dependency when the
        # requirement is added to nvmetcli:
        # http://lists.infradead.org/pipermail/linux-nvme/2022-July/033576.html
        if is_ubuntu; then
            install_package python3-configshell-fb
        else
            install_package python3-configshell
        fi
        # Install from source because Ubuntu doesn't have the package and some packaged versions didn't work on Python 3
        # NOTE(review): the unauthenticated git:// protocol is widely
        # disabled these days; if this fails, an https:// URL is the likely
        # fix -- confirm upstream hosting before changing.
        pip_install git+git://git.infradead.org/users/hch/nvmetcli.git

        sudo modprobe nvmet
        sudo modprobe nvme-fabrics

        if [[ $CINDER_TARGET_PROTOCOL == 'nvmet_rdma' ]]; then
            install_package rdma-core
            sudo modprobe nvme-rdma

            # Create the Soft-RoCE device over the networking interface
            local iface=${HOST_IP_IFACE:-`ip -br -$SERVICE_IP_VERSION a | grep $CINDER_MY_IP | awk '{print $1}'`}
            if [[ -z "$iface" ]]; then
                die $LINENO "Cannot find interface to bind Soft-RoCE"
            fi

            if ! sudo rdma link | grep $iface ; then
                sudo rdma link add rxe_$iface type rxe netdev $iface
            fi
        elif [[ $CINDER_TARGET_PROTOCOL == 'nvmet_tcp' ]]; then
            sudo modprobe nvme-tcp
        else # 'nvmet_fc'
            sudo modprobe nvme-fc
        fi
    fi
}
# install_cinderclient() - Collect source and prepare the cinder CLI and
# the brick client extension when they are configured to come from git.
function install_cinderclient {
    local lib

    lib="python-brick-cinderclient-ext"
    if use_library_from_git "$lib"; then
        git_clone_by_name "$lib"
        setup_dev_lib "$lib"
    fi

    lib="python-cinderclient"
    if use_library_from_git "$lib"; then
        git_clone_by_name "$lib"
        setup_dev_lib "$lib"
        # Install the bash completion script shipped in the client tree
        sudo install -D -m 0644 -o $STACK_USER \
            "${GITDIR[$lib]}/tools/cinder.bash_completion" \
            /etc/bash_completion.d/cinder.bash_completion
    fi
}
# _configure_tgt_for_config_d() - Apply the config.d approach for the
# cinder volumes directory: link $CINDER_STATE_PATH/volumes under
# /etc/tgt/stack.d and make sure the main targets.conf includes it.
function _configure_tgt_for_config_d {
    local include_line="include /etc/tgt/stack.d/*"
    if [[ ! -d /etc/tgt/stack.d/ ]]; then
        sudo ln -sf $CINDER_STATE_PATH/volumes /etc/tgt/stack.d
    fi
    if ! grep -q "$include_line" /etc/tgt/targets.conf; then
        echo "$include_line" | sudo tee -a /etc/tgt/targets.conf
    fi
}
# start_cinder() - Start running processes
# Prepares tgtd when the tgtadm helper is used, launches the API under
# uwsgi, waits for it, then starts scheduler, backup and volume services.
function start_cinder {
    # NOTE(review): service_port is assigned but unused below; only
    # service_protocol feeds into cinder_url
    local service_port=$CINDER_SERVICE_PORT
    local service_protocol=$CINDER_SERVICE_PROTOCOL
    local cinder_url
    if [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then
        if is_service_enabled c-vol; then
            # Delete any old stack.conf
            sudo rm -f /etc/tgt/conf.d/stack.conf
            _configure_tgt_for_config_d
            if is_ubuntu; then
                sudo service tgt restart
            else
                restart_service tgtd
            fi
            # NOTE(gfidente): ensure tgtd is running in debug mode
            sudo tgtadm --mode system --op update --name debug --value on
        fi
    fi

    if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
        run_process "c-api" "$(which uwsgi) --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF"
        cinder_url=$service_protocol://$SERVICE_HOST/volume/v3
    fi

    # NOTE(review): when c-api is not enabled cinder_url stays unset and
    # wait_for_service is still invoked -- confirm that is intentional
    echo "Waiting for Cinder API to start..."
    if ! wait_for_service $SERVICE_TIMEOUT $cinder_url; then
        die $LINENO "c-api did not start"
    fi

    run_process c-sch "$CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF"

    # Tune glibc for Python Services using single malloc arena for all threads
    # and disabling dynamic thresholds to reduce memory usage when using native
    # threads directly or via eventlet.tpool
    # https://www.gnu.org/software/libc/manual/html_node/Memory-Allocation-Tunables.html
    malloc_tuning="MALLOC_ARENA_MAX=1 MALLOC_MMAP_THRESHOLD_=131072 MALLOC_TRIM_THRESHOLD_=262144"
    run_process c-bak "$CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF" "" "" "$malloc_tuning"
    run_process c-vol "$CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF" "" "" "$malloc_tuning"

    # NOTE(jdg): For cinder, startup order matters. To ensure that report_capabilities is received
    # by the scheduler start the cinder-volume service last (or restart it) after the scheduler
    # has started. This is a quick fix for lp bug/1189595
}
# stop_cinder() - Stop running processes
# Stops every cinder service process started by start_cinder, in the same
# order as before (api, backup, scheduler, volume).
function stop_cinder {
    local serv
    for serv in c-api c-bak c-sch c-vol; do
        stop_process $serv
    done
}
# create_one_type() - Create a single volume type with one extra-spec
# property set on it.
# Arguments: $1 - volume type name
#            $2 - property key
#            $3 - property value
# Bug fix: the three parameters were previously assigned without "local"
# and leaked into the caller's/global scope, unlike every sibling function
# in this file which declares its variables local.
function create_one_type {
    local type_name=$1
    local property_key=$2
    local property_value=$3
    # NOTE (e0ne): openstack client doesn't work with cinder in noauth mode
    if is_service_enabled keystone; then
        openstack --os-region-name="$REGION_NAME" volume type create --property $property_key="$property_value" $type_name
    else
        # TODO (e0ne): use openstack client once it will support cinder in noauth mode:
        # https://bugs.launchpad.net/python-cinderclient/+bug/1755279
        local cinder_url
        cinder_url=$CINDER_SERVICE_PROTOCOL://$SERVICE_HOST:$CINDER_SERVICE_PORT/v3
        OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-create $type_name
        OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-key $type_name set $property_key="$property_value"
    fi
}
# create_volume_types() - Create Cinder's configured volume types: one per
# enabled backend (keyed by volume_backend_name) plus the multiattach type
# when ENABLE_VOLUME_MULTIATTACH is requested.
function create_volume_types {
    # Nothing to do without the API service and at least one backend
    if ! is_service_enabled c-api || [[ -z "$CINDER_ENABLED_BACKENDS" ]]; then
        return
    fi

    local backend backend_name
    for backend in ${CINDER_ENABLED_BACKENDS//,/ }; do
        backend_name=${backend##*:}
        create_one_type $backend_name "volume_backend_name" $backend_name
    done

    if [[ $ENABLE_VOLUME_MULTIATTACH == "True" ]]; then
        create_one_type $VOLUME_TYPE_MULTIATTACH $VOLUME_TYPE_MULTIATTACH "<is> True"
    fi

    # Increase quota for the service project if glance is using cinder,
    # since it's likely to occasionally go above the default 10 in parallel
    # test execution.
    if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then
        openstack --os-region-name="$REGION_NAME" \
            quota set --volumes 50 "$SERVICE_PROJECT_NAME"
    fi
}
# create_cinder_volume_group() - Compatibility shim for Grenade.
# During a transition period Grenade needs to have this function defined;
# it is effectively a no-op in the Grenade 'target' use case.
function create_cinder_volume_group {
    true
}
# configure_cinder_internal_tenant() - Point cinder's internal tenant
# settings at the existing Cinder service account/project, re-used here
# for simplicity.
function configure_cinder_internal_tenant {
    local project_id user_id
    project_id=$(get_or_create_project $SERVICE_PROJECT_NAME)
    user_id=$(get_or_create_user "cinder")
    iniset $CINDER_CONF DEFAULT cinder_internal_tenant_project_id "$project_id"
    iniset $CINDER_CONF DEFAULT cinder_internal_tenant_user_id "$user_id"
}
# configure_cinder_image_volume_cache() - Enable the image-volume cache on
# the configured backends.
#
# Expect CINDER_CACHE_ENABLED_FOR_BACKENDS to be a list of backends
# similar to CINDER_ENABLED_BACKENDS with TYPE:NAME entries where NAME
# is the backend specific configuration stanza in cinder.conf. The size
# limits are only written when the corresponding variables are non-empty;
# otherwise cinder's defaults (0 == unlimited) apply.
#
# Bug fixes: the loop variable "be" was not declared local and leaked
# into the caller's scope (inconsistent with configure_cinder_volume_upload
# which declares "local be be_name"), and the comment described the entry
# format as NAME:TYPE although ${be##*:} extracts the name *after* the
# colon, i.e. the format is TYPE:NAME.
function configure_cinder_image_volume_cache {
    local be be_name
    for be in ${CINDER_CACHE_ENABLED_FOR_BACKENDS//,/ }; do
        be_name=${be##*:}
        iniset $CINDER_CONF $be_name image_volume_cache_enabled $CINDER_IMG_CACHE_ENABLED
        if [[ -n $CINDER_IMG_CACHE_SIZE_GB ]]; then
            iniset $CINDER_CONF $be_name image_volume_cache_max_size_gb $CINDER_IMG_CACHE_SIZE_GB
        fi
        if [[ -n $CINDER_IMG_CACHE_SIZE_COUNT ]]; then
            iniset $CINDER_CONF $be_name image_volume_cache_max_count $CINDER_IMG_CACHE_SIZE_COUNT
        fi
    done
}
# configure_cinder_volume_upload() - Configure optimized volume upload on
# the selected backends.
#
# Expect CINDER_UPLOAD_OPTIMIZED_BACKENDS to be a list of backends similar
# to CINDER_ENABLED_BACKENDS with TYPE:NAME entries where NAME is the
# backend specific configuration stanza in cinder.conf.
function configure_cinder_volume_upload {
    local backend stanza
    for backend in ${CINDER_UPLOAD_OPTIMIZED_BACKENDS//,/ }; do
        stanza=${backend##*:}
        iniset $CINDER_CONF $stanza image_upload_use_cinder_backend $CINDER_UPLOAD_OPTIMIZED
        iniset $CINDER_CONF $stanza image_upload_use_internal_tenant $CINDER_UPLOAD_INTERNAL_TENANT
    done
}
# init_cinder_service_user_conf() - Fill in the [service_user] section so
# cinder sends a service token along with the user token when calling
# external REST APIs (see CINDER_USE_SERVICE_TOKEN).
function init_cinder_service_user_conf {
    local conf=$CINDER_CONF
    configure_keystone_authtoken_middleware $conf cinder service_user
    iniset $conf service_user send_service_user_token True
    iniset $conf service_user auth_strategy keystone
}
# Restore the xtrace setting that was saved and disabled at the top of
# this file, so sourcing this library does not change the caller's tracing
$_XTRACE_CINDER
# Tell emacs to use shell-script-mode
## Local variables:
## mode: shell-script
## End: