Adding Grenade support for Sahara

Grenade is an OpenStack test harness that exercises
the upgrade process between releases. This patch
adds upgrade testing support for Sahara.

Change-Id: Iacc1225d92aad3aee4e6cda6be4d9691be49d927
Author: Andrey Pavlov  2015-06-18 15:26:17 +03:00
parent f1c12d9098
commit 02a0f99d02
8 changed files with 381 additions and 0 deletions
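
At a high level, Grenade stacks the base release, creates resources, upgrades DevStack to the target release, and then checks that the resources survived. A rough sketch of the phase order these files plug into (illustrative; the exact driver sequence lives in Grenade itself):

    devstack/upgrade/resources.sh create    # build a fake-plugin cluster on the base cloud
    devstack/upgrade/resources.sh verify    # cluster must be Active
    devstack/upgrade/shutdown.sh            # stop the base Sahara services
    devstack/upgrade/upgrade.sh             # install target code, migrate the DB, restart
    devstack/upgrade/resources.sh verify    # cluster must still be Active
    devstack/upgrade/resources.sh destroy   # clean everything up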

devstack/upgrade/cluster-create.json Normal file
{
"name": "cluster-grenade",
"plugin_name": "fake",
"hadoop_version": "0.1",
"cluster_template_id" : "CLUSTER_TEMPLATE_ID",
"user_keypair_id": "KEYPAIR",
"default_image_id": "IMAGE_ID"
}

devstack/upgrade/cluster-template.json Normal file
{
"name": "cluster-template-grenade",
"plugin_name": "fake",
"hadoop_version": "0.1",
"node_groups": [
{
"name": "master",
"node_group_template_id": "NG_MASTER_ID",
"count": 1
},
{
"name": "worker",
"node_group_template_id": "NG_WORKER_ID",
"count": 1
}
]
}

devstack/upgrade/ng-master.json Normal file
{
"name": "ng-master-grenade",
"flavor_id": "FLAVOR_ID",
"plugin_name": "fake",
"hadoop_version": "0.1",
"node_processes": ["jobtracker", "namenode"],
"auto_security_group": false,
"security_groups": ["SEC_GROUP"],
"floating_ip_pool": "FLOATING_IP_POOL"
}

devstack/upgrade/ng-worker.json Normal file
{
"name": "ng-worker-grenade",
"flavor_id": "FLAVOR_ID",
"plugin_name": "fake",
"hadoop_version": "0.1",
"node_processes": ["tasktracker", "datanode"],
"auto_security_group": false,
"security_groups": ["SEC_GROUP"],
"floating_ip_pool": "FLOATING_IP_POOL"
}
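
The ALL-CAPS tokens in the four JSON files above (FLAVOR_ID, FLOATING_IP_POOL, SEC_GROUP, NG_MASTER_ID, NG_WORKER_ID, CLUSTER_TEMPLATE_ID, KEYPAIR, IMAGE_ID) are placeholders: resources.sh below copies each template and substitutes real IDs with sed before passing it to the sahara CLI. For example, with made-up values:

    sed -e "s/FLAVOR_ID/2/g" \
        -e "s/FLOATING_IP_POOL/public/g" \
        -e "s/SEC_GROUP/sahara_grenade/g" ng-worker.json > /tmp/ng-worker.json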

devstack/upgrade/resources.sh Executable file
#!/bin/bash
set -o errexit
source $GRENADE_DIR/grenaderc
source $GRENADE_DIR/functions
source $TOP_DIR/openrc admin admin
set -o xtrace
SAHARA_USER=sahara_grenade
SAHARA_PROJECT=sahara_grenade
SAHARA_PASS=pass
SAHARA_SERVER=sahara_server1
SAHARA_KEY=sahara_key
SAHARA_KEY_FILE=$SAVE_DIR/sahara_key.pem
JSON_PATH=$(dirname "$BASH_SOURCE")
DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.small}
PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-public}
PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-private}
# cirros image is not appropriate for cluster creation
SAHARA_IMAGE_NAME=${SAHARA_IMAGE_NAME:-fedora-20.x86_64}
SAHARA_IMAGE_USER=${SAHARA_IMAGE_USER:-fedora}
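# switch the OpenStack/sahara CLIs to the grenade tenant by overriding
# the OS_* credentials exported by openrc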
function _sahara_set_user {
OS_TENANT_NAME=$SAHARA_PROJECT
OS_USERNAME=$SAHARA_USER
OS_PASSWORD=$SAHARA_PASS
}
function register_image {
eval $(openstack image show -f shell -c id $SAHARA_IMAGE_NAME)
resource_save sahara image_id $id
sahara image-register --id $id --username $SAHARA_IMAGE_USER
sahara image-add-tag --id $id --tag fake
sahara image-add-tag --id $id --tag 0.1
echo $id
}
# args: <template> <floating_ip_pool> <security_group>
function create_node_group_template {
local tmp_file=$(mktemp)
local floating_pool=$2
if is_service_enabled neutron; then
eval $(openstack network show $2 -f shell -c id)
local floating_pool=$id
fi
eval $(openstack flavor show $DEFAULT_INSTANCE_TYPE -f shell -c id)
local flavor_id=$id
sed -e "s/FLAVOR_ID/$flavor_id/g" -e "s/FLOATING_IP_POOL/$floating_pool/g" -e "s/SEC_GROUP/$3/g" $1 > $tmp_file
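# pull the id out of the CLI's table-formatted output (rows look like "| id | <uuid> |")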
local template_id=$(sahara node-group-template-create --json "$tmp_file" | awk '$2 ~ /^id/ { print $4 }')
echo $template_id
}
# args: <template> <node_group_1_id> <node_group_2_id>
function create_cluster_template {
local tmp_file=$(mktemp)
sed -e "s/NG_MASTER_ID/$2/g" -e "s/NG_WORKER_ID/$3/g" $1 > $tmp_file
local cluster_template_id=$(sahara cluster-template-create --json $tmp_file | awk '$2 ~ /^id/ { print $4 }')
echo $cluster_template_id
}
# args: <template> <cluster_template_id> <keypair> <image_id>
function create_cluster {
local tmp_file=$(mktemp)
sed -e "s/CLUSTER_TEMPLATE_ID/$2/g" -e "s/KEYPAIR/$3/g" -e "s/IMAGE_ID/$4/g" $1 > $tmp_file
# adding neutron management network id if neutron is enabled
local net_id=$(resource_get network net_id)
if [[ -n "$net_id" ]]; then
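# '8i' inserts before line 8 of the copied cluster-create.json, i.e. right
# before its closing brace; this couples the script to that file's layout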
sed -i '8i ,"neutron_management_network": "NET_ID"' $tmp_file
sed -i -e "s/NET_ID/$net_id/g" $tmp_file
fi
local cluster_id=$(sahara cluster-create --json $tmp_file | awk '$2 ~ /^id/ { print $4 }')
echo $cluster_id
}
function create {
# create a tenant for the server
eval $(openstack project create -f shell -c id $SAHARA_PROJECT)
if [[ -z "$id" ]]; then
die $LINENO "Didn't create $SAHARA_PROJECT project"
fi
resource_save sahara project_id $id
# create the user, and set $id locally
eval $(openstack user create $SAHARA_USER \
--project $id \
--password $SAHARA_PASS \
-f shell -c id)
if [[ -z "$id" ]]; then
die $LINENO "Didn't create $SAHARA_USER user"
fi
resource_save sahara user_id $id
# register image
image_id=$(register_image)
# set ourselves to the created sahara user
_sahara_set_user
# create security group
nova secgroup-create $SAHARA_USER "Sahara security group"
nova secgroup-add-rule $SAHARA_USER tcp 22 22 0.0.0.0/0
# create key pair for access
openstack keypair create $SAHARA_KEY > $SAHARA_KEY_FILE
chmod 600 $SAHARA_KEY_FILE
# create node group templates
ng_worker_id=$(create_node_group_template $JSON_PATH/ng-worker.json $PUBLIC_NETWORK_NAME $SAHARA_USER)
ng_master_id=$(create_node_group_template $JSON_PATH/ng-master.json $PUBLIC_NETWORK_NAME $SAHARA_USER)
resource_save sahara ng_worker_id $ng_worker_id
resource_save sahara ng_master_id $ng_master_id
# create cluster template
cluster_template_id=$(create_cluster_template $JSON_PATH/cluster-template.json $ng_master_id $ng_worker_id)
resource_save sahara cluster_template_id $cluster_template_id
# create cluster
cluster_id=$(create_cluster $JSON_PATH/cluster-create.json $cluster_template_id $SAHARA_KEY $image_id)
resource_save sahara cluster_id $cluster_id
# wait until cluster moves to active state
local timeleft=300
while [[ $timeleft -gt 0 ]]; do
local cluster_state=$(sahara cluster-show --id $cluster_id | awk '$2 ~ /^status/ { print $4;exit }')
if [[ "$cluster_state" != "Active" ]]; then
echo "Cluster is still not in Active state"
sleep 10
timeleft=$((timeleft - 10))
if [[ $timeleft == 0 ]]; then
die $LINENO "Cluster hasn't reached Active state within 300 seconds"
fi
else
break
fi
done
}
function verify {
_sahara_set_user
# check that cluster is in Active state
local cluster_id=$(resource_get sahara cluster_id)
local cluster_state=$(sahara cluster-show --id $cluster_id | awk '$2 ~ /^status/ { print $4;exit }')
echo -n $cluster_state
if [[ "$cluster_state" != "Active" ]]; then
die $LINENO "Cluster is not in Active state anymore"
fi
echo "Sahara verification: SUCCESS"
}
function verify_noapi {
:
}
function destroy {
_sahara_set_user
set +o errexit
# delete cluster
local cluster_id=$(resource_get sahara cluster_id)
sahara cluster-delete --id $cluster_id > /dev/null
# wait for cluster deletion
local timeleft=100
while [[ $timeleft -gt 0 ]]; do
sahara cluster-show --id $cluster_id > /dev/null
local rc=$?
if [[ "$rc" != 1 ]]; then
echo "Cluster still exists"
sleep 5
timeleft=$((timeleft - 5))
if [[ $timeleft == 0 ]]; then
die $LINENO "Cluster hasn't been deleted within 100 seconds"
fi
else
break
fi
done
set -o errexit
# delete cluster template
local cluster_template_id=$(resource_get sahara cluster_template_id)
sahara cluster-template-delete --id $cluster_template_id
# delete node group templates
local ng_master_id=$(resource_get sahara ng_master_id)
local ng_worker_id=$(resource_get sahara ng_worker_id)
sahara node-group-template-delete --id $ng_master_id
sahara node-group-template-delete --id $ng_worker_id
nova secgroup-delete $SAHARA_USER
source_quiet $TOP_DIR/openrc admin admin
# unregister image
local image_id=$(resource_get sahara image_id)
sahara image-unregister --id $image_id
# delete user and project
local user_id=$(resource_get sahara user_id)
local project_id=$(resource_get sahara project_id)
openstack user delete $user_id
openstack project delete $project_id
}
# Dispatcher
case $1 in
"create")
create
;;
"verify_noapi")
verify_noapi
;;
"verify")
verify
;;
"destroy")
destroy
;;
"force_destroy")
set +o errexit
destroy
;;
esac

devstack/upgrade/settings Normal file
register_project_for_upgrade sahara
register_db_to_save sahara
devstack_localrc base disable_service n-net
devstack_localrc base enable_service h-api h-api-cfn h-api-cw h-eng heat sahara q-agt q-dhcp q-l3 q-meta q-svc neutron tempest ceilometer-alarm-evaluator ceilometer-alarm-notifier ceilometer-anotification
devstack_localrc base IMAGE_URLS="http://download.cirros-cloud.net/0.3.2/cirros-0.3.2-x86_64-uec.tar.gz,http://cloud.fedoraproject.org/fedora-20.x86_64.qcow2"
devstack_localrc base LIBS_FROM_GIT=python-saharaclient
devstack_localrc base DEFAULT_IMAGE_NAME="cirros-0.3.2-x86_64-uec"
devstack_localrc target disable_service n-net
devstack_localrc target enable_service h-api h-api-cfn h-api-cw h-eng heat q-agt q-dhcp q-l3 q-meta q-svc neutron tempest ceilometer-alarm-evaluator ceilometer-alarm-notifier ceilometer-anotification
devstack_localrc target IMAGE_URLS="http://download.cirros-cloud.net/0.3.2/cirros-0.3.2-x86_64-uec.tar.gz,http://cloud.fedoraproject.org/fedora-20.x86_64.qcow2"
devstack_localrc target enable_plugin sahara git://git.openstack.org/openstack/sahara
devstack_localrc target LIBS_FROM_GIT=python-saharaclient
devstack_localrc target DEFAULT_IMAGE_NAME="cirros-0.3.2-x86_64-uec"
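
For context: devstack_localrc is a Grenade helper that appends settings to the localrc of either the base or the target DevStack tree, so a directive such as the first target line above is roughly equivalent to (illustrative, not Grenade's actual implementation):

    echo "disable_service n-net" >> $TARGET_DEVSTACK_DIR/localrc

Note that the base side enables sahara as a built-in service, while the target side pulls it in via enable_plugin, presumably because Sahara moved to a DevStack plugin between the two releases.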

devstack/upgrade/shutdown.sh Executable file
#!/bin/bash
# ``shutdown-sahara``
set -o errexit
source $GRENADE_DIR/grenaderc
source $GRENADE_DIR/functions
# We need base DevStack functions for this
source $BASE_DEVSTACK_DIR/functions
source $BASE_DEVSTACK_DIR/stackrc # needed for status directory
source $BASE_DEVSTACK_DIR/lib/tls
source $BASE_DEVSTACK_DIR/lib/sahara
set -o xtrace
stop_sahara
# sanity check that service is actually down
ensure_services_stopped sahara
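
stop_sahara comes from the base DevStack lib/sahara sourced above, and ensure_services_stopped is a Grenade helper. For a manual sanity check one can approximate the latter with a plain process lookup (a hedged sketch; the actual process name depends on how DevStack launched Sahara):

    pgrep -f sahara- && echo "sahara still running" || echo "sahara stopped"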

devstack/upgrade/upgrade.sh Executable file
#!/usr/bin/env bash
# ``upgrade-sahara``
echo "*********************************************************************"
echo "Begin $0"
echo "*********************************************************************"
# Clean up any resources that may be in use
cleanup() {
set +o errexit
echo "*********************************************************************"
echo "ERROR: Abort $0"
echo "*********************************************************************"
# Kill ourselves to signal any calling process
trap 2; kill -2 $$
}
trap cleanup SIGHUP SIGINT SIGTERM
# Keep track of the grenade directory
RUN_DIR=$(cd $(dirname "$0") && pwd)
# Source params
source $GRENADE_DIR/grenaderc
# Import common functions
source $GRENADE_DIR/functions
# This script exits on an error so that errors don't compound and you see
# only the first error that occurred.
set -o errexit
# Upgrade Sahara
# ============
# Get functions from current DevStack
source $TARGET_DEVSTACK_DIR/stackrc
source $TARGET_DEVSTACK_DIR/lib/tls
source $(dirname $(dirname $BASH_SOURCE))/plugin.sh
source $(dirname $(dirname $BASH_SOURCE))/settings
# Print the commands being run so that we can see the command that triggers
# an error. It is also useful for following along as the install occurs.
set -o xtrace
# Save current config files for posterity
[[ -d $SAVE_DIR/etc.sahara ]] || cp -pr $SAHARA_CONF_DIR $SAVE_DIR/etc.sahara
# install the target Sahara; stack_install_service wraps install_sahara()
stack_install_service sahara
install_python_saharaclient
# run the release-specific upgrade-sahara script, if one exists
upgrade_project sahara $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH
# Migrate the database
$SAHARA_BIN_DIR/sahara-db-manage --config-file $SAHARA_CONF_FILE upgrade head || die $LINENO "DB sync error"
# Start Sahara
start_sahara
# Don't succeed unless the services come up
ensure_services_started sahara
set +o xtrace
echo "*********************************************************************"
echo "SUCCESS: End $0"
echo "*********************************************************************"
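
After the run, the migration state can be sanity-checked by hand; sahara-db-manage wraps alembic, so (assuming the alembic "current" subcommand is exposed, as it is in sahara's migration CLI):

    $SAHARA_BIN_DIR/sahara-db-manage --config-file $SAHARA_CONF_FILE current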