fix whitespace in the rest of lib/*

This brings the remaining lib/* files in line with the bash8 checker.

Change-Id: Ib34a2292dd5bc259069457461041ec9cd4fd2957
parent 3bdb922c40
commit 101b424842
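The bash8 style checker used by DevStack flags, among other things, indentation that is not a multiple of four spaces, so most hunks below simply re-indent continuation lines to a four/eight-space pattern. A minimal illustrative sketch of that convention follows; the function and variable names here (upload_example_image, EXAMPLE_NAME) are hypothetical and not part of this change:

    # Hypothetical helper, shown only to illustrate the indentation convention
    # this change enforces: function body indented four spaces, wrapped
    # arguments indented eight (a multiple of four, which bash8 accepts).
    function upload_example_image() {
        local image_id
        image_id=$(glance image-create \
            --name "$EXAMPLE_NAME" \
            --is-public True --disk-format=qcow2 \
            --container-format=bare \
            < "$FILES/$EXAMPLE_NAME.img" | grep ' id ' | get_field 2)
        echo "$image_id"
    }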
lib/baremetal (100 changed lines)
@@ -256,19 +256,19 @@ function upload_baremetal_deploy() {

    # load them into glance
    BM_DEPLOY_KERNEL_ID=$(glance \
        --os-auth-token $token \
        --os-image-url http://$GLANCE_HOSTPORT \
        image-create \
        --name $BM_DEPLOY_KERNEL \
        --is-public True --disk-format=aki \
        < $TOP_DIR/files/$BM_DEPLOY_KERNEL | grep ' id ' | get_field 2)
    BM_DEPLOY_RAMDISK_ID=$(glance \
        --os-auth-token $token \
        --os-image-url http://$GLANCE_HOSTPORT \
        image-create \
        --name $BM_DEPLOY_RAMDISK \
        --is-public True --disk-format=ari \
        < $TOP_DIR/files/$BM_DEPLOY_RAMDISK | grep ' id ' | get_field 2)
}

# create a basic baremetal flavor, associated with deploy kernel & ramdisk
@@ -278,11 +278,11 @@ function create_baremetal_flavor() {
    aki=$1
    ari=$2
    nova flavor-create $BM_FLAVOR_NAME $BM_FLAVOR_ID \
        $BM_FLAVOR_RAM $BM_FLAVOR_ROOT_DISK $BM_FLAVOR_CPU
    nova flavor-key $BM_FLAVOR_NAME set \
        "cpu_arch"="$BM_FLAVOR_ARCH" \
        "baremetal:deploy_kernel_id"="$aki" \
        "baremetal:deploy_ramdisk_id"="$ari"

}
@@ -311,19 +311,19 @@ function extract_and_upload_k_and_r_from_image() {

    # load them into glance
    KERNEL_ID=$(glance \
        --os-auth-token $token \
        --os-image-url http://$GLANCE_HOSTPORT \
        image-create \
        --name $image_name-kernel \
        --is-public True --disk-format=aki \
        < $TOP_DIR/files/$OUT_KERNEL | grep ' id ' | get_field 2)
    RAMDISK_ID=$(glance \
        --os-auth-token $token \
        --os-image-url http://$GLANCE_HOSTPORT \
        image-create \
        --name $image_name-initrd \
        --is-public True --disk-format=ari \
        < $TOP_DIR/files/$OUT_RAMDISK | grep ' id ' | get_field 2)
}
@@ -365,11 +365,11 @@ function upload_baremetal_image() {
        mkdir "$xdir"
        tar -zxf $FILES/$IMAGE_FNAME -C "$xdir"
        KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do
            [ -f "$f" ] && echo "$f" && break; done; true)
        RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do
            [ -f "$f" ] && echo "$f" && break; done; true)
        IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do
            [ -f "$f" ] && echo "$f" && break; done; true)
        if [[ -z "$IMAGE_NAME" ]]; then
            IMAGE_NAME=$(basename "$IMAGE" ".img")
        fi
@@ -403,19 +403,19 @@ function upload_baremetal_image() {
            --container-format ari \
            --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
    else
        # TODO(deva): add support for other image types
        return
    fi

    glance \
        --os-auth-token $token \
        --os-image-url http://$GLANCE_HOSTPORT \
        image-create \
        --name "${IMAGE_NAME%.img}" --is-public True \
        --container-format $CONTAINER_FORMAT \
        --disk-format $DISK_FORMAT \
        ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \
        ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"

    # override DEFAULT_IMAGE_NAME so that tempest can find the image
    # that we just uploaded in glance
@@ -439,15 +439,15 @@ function add_baremetal_node() {
    mac_2=${2:-$BM_SECOND_MAC}

    id=$(nova baremetal-node-create \
        --pm_address="$BM_PM_ADDR" \
        --pm_user="$BM_PM_USER" \
        --pm_password="$BM_PM_PASS" \
        "$BM_HOSTNAME" \
        "$BM_FLAVOR_CPU" \
        "$BM_FLAVOR_RAM" \
        "$BM_FLAVOR_ROOT_DISK" \
        "$mac_1" \
        | grep ' id ' | get_field 2 )
    [ $? -eq 0 ] || [ "$id" ] || die $LINENO "Error adding baremetal node"
    if [ -n "$mac_2" ]; then
        id2=$(nova baremetal-interface-add "$id" "$mac_2" )
lib/glance

@@ -194,7 +194,7 @@ function start_glance() {
    screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
    echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..."
    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then
        die $LINENO "g-api did not start"
    fi
}
lib/ironic

@@ -203,7 +203,7 @@ function start_ironic_api() {
    screen_it ir-api "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE"
    echo "Waiting for ir-api ($IRONIC_HOSTPORT) to start..."
    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$IRONIC_HOSTPORT; do sleep 1; done"; then
        die $LINENO "ir-api did not start"
    fi
}
lib/keystone

@@ -373,7 +373,7 @@ function start_keystone() {

    echo "Waiting for keystone to start..."
    if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s http://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then
        die $LINENO "keystone did not start"
    fi

    # Start proxies if enabled
lib/neutron_thirdparty/trema (4 changed lines, vendored)
@@ -66,8 +66,8 @@ function init_trema() {

    cp $TREMA_SS_DIR/sliceable_switch_null.conf $TREMA_SS_CONFIG
    sed -i -e "s|^\$apps_dir.*$|\$apps_dir = \"$TREMA_DIR/apps\"|" \
        -e "s|^\$db_dir.*$|\$db_dir = \"$TREMA_SS_DB_DIR\"|" \
        $TREMA_SS_CONFIG
}

function gem_install() {
lib/nova (41 changed lines)
@@ -465,27 +465,27 @@ function create_nova_conf() {
    fi

    if is_service_enabled n-novnc || is_service_enabled n-xvnc; then
        # Address on which instance vncservers will listen on compute hosts.
        # For multi-host, this should be the management ip of the compute host.
        VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1}
        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
        iniset $NOVA_CONF DEFAULT vnc_enabled true
        iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN"
        iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
    else
        iniset $NOVA_CONF DEFAULT vnc_enabled false
    fi

    if is_service_enabled n-spice; then
        # Address on which instance spiceservers will listen on compute hosts.
        # For multi-host, this should be the management ip of the compute host.
        SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
        SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1}
        iniset $NOVA_CONF spice enabled true
        iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN"
        iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS"
    else
        iniset $NOVA_CONF spice enabled false
    fi

    iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST"
@@ -602,7 +602,7 @@ function start_nova_api() {
    screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api"
    echo "Waiting for nova-api to start..."
    if ! wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:$service_port; then
        die $LINENO "nova-api did not start"
    fi

    # Start proxies if enabled
@@ -620,10 +620,9 @@ function start_nova_compute() {
        # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group.
        screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM'"
    elif [[ "$VIRT_DRIVER" = 'fake' ]]; then
-        for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`
-        do
-            screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM"
-        done
+        for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do
+            screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM"
+        done
    else
        if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
            start_nova_hypervisor
lib/nova_plugins/hypervisor-baremetal

@@ -61,8 +61,8 @@ function configure_nova_hypervisor() {

    # Define extra baremetal nova conf flags by defining the array ``EXTRA_BAREMETAL_OPTS``.
    for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do
        # Attempt to convert flags to options
        iniset $NOVA_CONF baremetal ${I/=/ }
    done
}
lib/nova_plugins/hypervisor-libvirt

@@ -82,10 +82,10 @@ EOF"
    sudo mkdir -p $rules_dir
    sudo bash -c "cat <<EOF > $rules_dir/50-libvirt-$STACK_USER.rules
polkit.addRule(function(action, subject) {
    if (action.id == 'org.libvirt.unix.manage' &&
        subject.user == '"$STACK_USER"') {
        return polkit.Result.YES;
    }
});
EOF"
    unset rules_dir
lib/rpc_backend

@@ -102,9 +102,9 @@ function install_rpc_backend() {
    if is_fedora; then
        install_package qpid-cpp-server
        if [[ $DISTRO =~ (rhel6) ]]; then
            # RHEL6 leaves "auth=yes" in /etc/qpidd.conf, it needs to
            # be no or you get GSS authentication errors as it
            # attempts to default to this.
            sudo sed -i.bak 's/^auth=yes$/auth=no/' /etc/qpidd.conf
        fi
    elif is_ubuntu; then
lib/swift (64 changed lines)
@@ -104,17 +104,17 @@ ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6012}

# cleanup_swift() - Remove residual data files
function cleanup_swift() {
    rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz}
    if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
        sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
    fi
    if [[ -e ${SWIFT_DISK_IMAGE} ]]; then
        rm ${SWIFT_DISK_IMAGE}
    fi
    rm -rf ${SWIFT_DATA_DIR}/run/
    if is_apache_enabled_service swift; then
        _cleanup_swift_apache_wsgi
    fi
}

# _cleanup_swift_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
@@ -192,7 +192,7 @@ function _config_swift_apache_wsgi() {

        sudo cp ${SWIFT_DIR}/examples/apache2/account-server.template ${apache_vhost_dir}/account-server-${node_number}
        sudo sed -e "
            /^#/d;/^$/d;
            s/%PORT%/$account_port/g;
            s/%SERVICENAME%/account-server-${node_number}/g;
            s/%APACHE_NAME%/${APACHE_NAME}/g;
@@ -202,7 +202,7 @@ function _config_swift_apache_wsgi() {

        sudo cp ${SWIFT_DIR}/examples/wsgi/account-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/account-server-${node_number}.wsgi
        sudo sed -e "
            /^#/d;/^$/d;
            s/%SERVICECONF%/account-server\/${node_number}.conf/g;
        " -i ${SWIFT_APACHE_WSGI_DIR}/account-server-${node_number}.wsgi
    done
@@ -577,26 +577,26 @@ function start_swift() {
        return 0
    fi

    # By default with only one replica we are launching the proxy,
    # container, account and object server in screen in foreground and
    # other services in background. If we have SWIFT_REPLICAS set to something
    # greater than one we first spawn all the swift services then kill the proxy
    # service so we can run it in foreground in screen. ``swift-init ...
    # {stop|restart}`` exits with '1' if no servers are running, ignore it just
    # in case
    swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true
    if [[ ${SWIFT_REPLICAS} == 1 ]]; then
        todo="object container account"
    fi
    for type in proxy ${todo}; do
        swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true
    done
    screen_it s-proxy "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v"
    if [[ ${SWIFT_REPLICAS} == 1 ]]; then
        for type in object container account; do
            screen_it s-${type} "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v"
        done
    fi
}

# stop_swift() - Stop running processes (non-screen)
lib/tempest (20 changed lines)
@@ -193,7 +193,7 @@ function configure_tempest() {
            # If namespaces are disabled, devstack will create a single
            # public router that tempest should be configured to use.
            public_router_id=$(neutron router-list | awk "/ $Q_ROUTER_NAME / \
                { print \$2 }")
        fi
    fi
@@ -328,15 +328,15 @@ function init_tempest() {
    local disk_image="$image_dir/${base_image_name}-blank.img"
    # if the cirros uec downloaded and the system is uec capable
    if [ -f "$kernel" -a -f "$ramdisk" -a -f "$disk_image" -a "$VIRT_DRIVER" != "openvz" \
        -a \( "$LIBVIRT_TYPE" != "lxc" -o "$VIRT_DRIVER" != "libvirt" \) ]; then
        echo "Prepare aki/ari/ami Images"
        ( #new namespace
            # tenant:demo ; user: demo
            source $TOP_DIR/accrc/demo/demo
            euca-bundle-image -i "$kernel" --kernel true -d "$BOTO_MATERIALS_PATH"
            euca-bundle-image -i "$ramdisk" --ramdisk true -d "$BOTO_MATERIALS_PATH"
            euca-bundle-image -i "$disk_image" -d "$BOTO_MATERIALS_PATH"
        ) 2>&1 </dev/null | cat
    else
        echo "Boto materials are not prepared"
    fi
lib/trove (15 changed lines)
@@ -45,14 +45,15 @@ create_trove_accounts() {
    SERVICE_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")

    if [[ "$ENABLED_SERVICES" =~ "trove" ]]; then
-        TROVE_USER=$(keystone user-create --name=trove \
-            --pass="$SERVICE_PASSWORD" \
-            --tenant_id $SERVICE_TENANT \
-            --email=trove@example.com \
-            | grep " id " | get_field 2)
+        TROVE_USER=$(keystone user-create \
+            --name=trove \
+            --pass="$SERVICE_PASSWORD" \
+            --tenant_id $SERVICE_TENANT \
+            --email=trove@example.com \
+            | grep " id " | get_field 2)
        keystone user-role-add --tenant-id $SERVICE_TENANT \
            --user-id $TROVE_USER \
            --role-id $SERVICE_ROLE
        if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
            TROVE_SERVICE=$(keystone service-create \
                --name=trove \