Rally Test Framework Integration

* Integration of the Rally framework into the Cue project's vagrant/devstack
  environment
* Added initial cluster create and delete scenario test
* Implemented initial scenario base class which inherits from Rally's
  base.Scenario and provides basic Cue CRUD interfacing
* To execute all Rally scenarios (currently one) in the vagrant VM
  (the commands are expanded below):
  1.  source admin user/tenant credentials for the devstack environment
    > source openrc admin admin
  2.  run the script run_rally_scenarios.sh (runs in verbose debug mode)
    > run_rally_scenarios.sh
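
  For reference, the two steps above expand to roughly the following inside
  the vagrant VM. The openrc path and the contents of run_rally_scenarios.sh
  are assumptions; the rally invocation itself matches the
  run_rally_cue_scenarios alias added by the Rally init script below.
    # load admin credentials from the default devstack checkout (assumed path)
    > source ~/devstack/openrc admin admin
    # run all Cue Rally scenarios in verbose/debug mode (same command the
    # run_rally_cue_scenarios alias expands to)
    > rally -v --debug task start --task ~/cue/tests/rally-scenarios/cue-scenarios.yaml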

Change-Id: I4e0015b69d9235d46fc785aa016c6d6f0e0caeb2
dagnello 2015-04-10 11:07:48 -07:00 committed by Min Pae
parent ab72de78ce
commit 45fb8e8f42
8 changed files with 317 additions and 0 deletions

View File

@@ -238,6 +238,8 @@ function init_cue {
    set_broker
    configure_scenario_rally_tests
    build_cue_rabbit_test_image
}
@@ -269,6 +271,21 @@ function install_cuedashboard {
    fi
}

# configure Cue Scenario Rally tests
function configure_scenario_rally_tests {
    if ! [ -d $HOME/.rally/plugins ]; then
        mkdir -p $HOME/.rally/plugins/cue_scenarios
        SCENARIOS=$(find $DEST/cue/tests/rally-scenarios/plugins -type f -name "*.py")
        for SCENARIO in $SCENARIOS
        do
            FILE_NAME=$(echo $SCENARIO | rev | cut -d/ -f1 | rev)
            ln -s $SCENARIO $HOME/.rally/plugins/cue_scenarios/$FILE_NAME
        done
    fi
}

# start_cue - Start running processes, including screen
function start_cue {
    run_process cue-api "$CUE_BIN_DIR/cue-api --config-file $CUE_CONF"

View File

@@ -9,6 +9,7 @@ CONFIG = File.join(File.dirname(__FILE__), "vagrant_config.rb")
UBUNTU_COMMON = File.join(File.dirname(__FILE__), "lib/ubuntu.rb")
FEDORA_COMMON = File.join(File.dirname(__FILE__), "lib/fedora.rb")
DEVSTACK_SCRIPT = File.join(File.dirname(__FILE__), "lib/devstack_script.rb")
RALLY_SCRIPT = File.join(File.dirname(__FILE__), "lib/rally_script.rb")

GITCONFIG = `cat $HOME/.gitconfig`
@@ -20,6 +21,7 @@ $forwarded_port = {}
$install_devstack = false
$install_build_deps = true
$install_tmate = false
$install_rally = true
$ubuntu_box = "sputnik13/trusty64"
$vm_memory = 4096
$vm_cpus = 2
@@ -31,6 +33,7 @@ end
require UBUNTU_COMMON
require FEDORA_COMMON
require DEVSTACK_SCRIPT
require RALLY_SCRIPT

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
    $forwarded_port.each do |guest_port, host_port|
@@ -79,6 +82,11 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
    config.vm.provision :shell, :privileged => false,
        :inline => $devstack_script

    if $install_rally
        config.vm.provision :shell, :privileged => false,
            :inline => $rally_script
    end

    if $install_devstack
        config.vm.provision :shell, :privileged => false,
            :inline => "pushd $HOME/devstack; ./stack.sh"

View File

@@ -0,0 +1,22 @@
# Rally init script
$rally_script = <<SCRIPT
#!/bin/bash
set -e

sudo DEBIAN_FRONTEND=noninteractive apt-get -qqy update || sudo yum update -qy
sudo DEBIAN_FRONTEND=noninteractive apt-get install -qqy git || sudo yum install -qy git

pushd ~
test -d devstack || git clone https://git.openstack.org/openstack-dev/devstack
test -d rally || git clone https://github.com/openstack/rally

cp rally/contrib/devstack/lib/rally devstack/lib/
cp rally/contrib/devstack/extras.d/70-rally.sh devstack/extras.d/

cd devstack
echo "enable_service rally" >> local.conf

cat << EOF >> /home/vagrant/.bash_aliases
alias run_rally_cue_scenarios="rally -v --debug task start --task ~/cue/tests/rally-scenarios/cue-scenarios.yaml"
EOF
SCRIPT

View File

@@ -34,3 +34,6 @@ $vm_memory = 4096
# Set the number of CPU cores configured for the VM
$vm_cpus = 2

# Install rally in the vm
$install_rally = true

View File

@@ -0,0 +1,17 @@
---
  CueClusters.create_and_delete_cluster:
    -
      args:
        size: 3
        timeout: 600
      runner:
        type: "constant"
        times: 6
        concurrency: 2
      context:
        users:
          tenants: 1
          users_per_tenant: 1
      sla:
        failure_rate:
          max: 0

View File

View File

@@ -0,0 +1,73 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import cue_utils

from rally.benchmark.scenarios import base
from rally.common import log as logging

LOG = logging.getLogger(__name__)


class CueClusters(cue_utils.CueScenario):
    """Benchmark Rally scenarios for Cue."""

    @base.scenario()
    def create_and_delete_cluster(self, name=None, flavor="8795",
                                  size=1, network_id=None, volume_size=0,
                                  timeout=300, check_interval=1, min_sleep=0,
                                  max_sleep=0):
        """Creates a new cue cluster.

        1. If network_id is not provided, the network id will be retrieved
           from the private network.
        2. Submit request to create cluster.
        3. Wait until cluster goes ACTIVE.
            3.1 If timeout, cluster is deleted.
        4. Submit request to delete cluster.
        5. Wait until cluster is deleted.

        :param name: str, cluster name
        :param flavor: int, flavor ID for VM instance(s)
        :param size: int, size of cluster in number of VMs
        :param network_id: UUID, user's network to connect VMs to
        :param volume_size: int, volume size for VM instance(s)
        :returns: new cue cluster details
        """
        # Retrieve appropriate network id if not provided
        if not network_id:
            networks = self.admin_clients("neutron").list_networks()
            networks = networks['networks']
            for network in networks:
                if network['name'] == "private":
                    network_id = network['id']
                    break

        cluster_dict = {'name': name,
                        'flavor': flavor,
                        'size': size,
                        'network_id': network_id,
                        'volume_size': volume_size}

        # Submit request to create cluster and wait for ACTIVE status
        cluster = self._create_cluster(**cluster_dict)
        cluster_active = self._wait_for_status_change(cluster['id'], 'ACTIVE',
                                                      timeout, check_interval)

        assert self._verify_cluster(cluster_dict, cluster_active), (
            "Invalid Created Cluster")

        self.sleep_between(min_sleep, max_sleep)

        # Submit request to delete cluster and wait for cluster delete
        self._delete_cluster(cluster['id'])
        self._wait_for_cluster_delete(cluster['id'], timeout, check_interval)

View File

@@ -0,0 +1,177 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import exceptions
import os
import time

from cueclient.v1 import client
from keystoneclient.auth.identity import v2 as ks_v2
import keystoneclient.openstack.common.apiclient.exceptions as ks_exceptions
from keystoneclient import session as ks_session
from rally.benchmark.scenarios import base
from rally.common import log as logging

LOG = logging.getLogger(__name__)


class CueScenario(base.Scenario):
    """Base class for Cue scenarios with basic atomic actions."""

    @base.atomic_action_timer("cue.clusters.list")
    def _list_clusters(self, cueclient=None):
        """Returns user clusters list."""
        cue_client = cueclient or self._get_cue_client()

        return cue_client.clusters.list()

    @base.atomic_action_timer("cue.clusters.create")
    def _create_cluster(self, name, flavor, size, network_id,
                        volume_size=0, cueclient=None):
        """Submit request to create cue cluster.

        Will return immediate response from Cue, does not wait until "ACTIVE"
        state.

        :param name: str, cluster name
        :param flavor: int, flavor ID for VM instance(s)
        :param size: int, size of cluster in number of VMs
        :param network_id: UUID, user's network to connect VMs to
        :param volume_size: int, volume size for VM instance(s)
        :returns: new cue cluster details
        """
        cluster_name = name or self._generate_random_name('rally_cue_cluster_')
        cue_client = cueclient or self._get_cue_client()

        return cue_client.clusters.create(name=cluster_name, nic=network_id,
                                          flavor=flavor, size=size,
                                          volume_size=volume_size)

    @base.atomic_action_timer("cue.clusters.get")
    def _get_cluster(self, id, cueclient=None):
        """Retrieves a cluster record by cluster id.

        :param id: int, cluster id
        :return: cluster details
        """
        cue_client = cueclient or self._get_cue_client()

        return cue_client.clusters.get(cluster_id=id)

    @base.atomic_action_timer("cue.clusters.delete")
    def _delete_cluster(self, id, cueclient=None):
        """Submits a request to delete a cluster.

        :param id: int, cluster id
        :return: response code
        """
        cue_client = cueclient or self._get_cue_client()

        return cue_client.clusters.delete(cluster_id=id)

    def _get_cue_client(self, auth_url=None, username=None, password=None,
                        tenant_name=None):
        """Retrieve an instance of Cue Client.

        Will use sourced environment variables if explicit values are not
        provided.

        :param auth_url: str, authentication url to keystone
        :param username: str, OpenStack username
        :param password: str, OpenStack password
        :param tenant_name: str, OpenStack tenant name
        :return: an authenticated Cue client
        """
        auth = ks_v2.Password(
            auth_url=auth_url or os.environ['OS_AUTH_URL'],
            username=username or os.environ['OS_USERNAME'],
            password=password or os.environ['OS_PASSWORD'],
            tenant_name=tenant_name or os.environ['OS_TENANT_NAME']
        )
        session = ks_session.Session(auth=auth)

        return client.Client(session=session)

    def _verify_cluster(self, ref_cluster, cmp_cluster):
        """Verifies basic values between two cluster dictionaries.

        :param ref_cluster: reference cluster
        :param cmp_cluster: comparison cluster
        :return: True if the clusters match, False otherwise
        """
        match = True
        if ref_cluster['flavor'] != cmp_cluster['flavor']:
            LOG.debug("Flavors do not match, ref: %s cmp: %s" %
                      (ref_cluster['flavor'], cmp_cluster['flavor']))
            match = False

        if ref_cluster['size'] != cmp_cluster['size']:
            LOG.debug("Sizes do not match, ref: %s cmp: %s" %
                      (ref_cluster['size'], cmp_cluster['size']))
            match = False

        if ref_cluster['network_id'] != cmp_cluster['network_id'][0]:
            LOG.debug("Network IDs do not match, ref: %s cmp: %s" %
                      (ref_cluster['network_id'], cmp_cluster['network_id']))
            match = False

        if ref_cluster['volume_size'] != cmp_cluster['volume_size']:
            LOG.debug("Volume sizes do not match, ref: %s cmp: %s" %
                      (ref_cluster['volume_size'], cmp_cluster['volume_size']))
            match = False

        return match

    @base.atomic_action_timer("wait.for.delete")
    def _wait_for_cluster_delete(self, cluster_id, timeout=300,
                                 check_interval=1):
        """Waits until the specified cluster has been deleted.

        A cluster is deleted when the cluster get operation fails to retrieve
        the cluster record.

        :param cluster_id: int, cluster id.
        """
        start_time = time.time()
        while True:
            try:
                self._get_cluster(cluster_id)
            except ks_exceptions.NotFound:
                break
            if time.time() - start_time > timeout:
                raise exceptions.Exception("Delete cluster timed out")
            time.sleep(check_interval)

    @base.atomic_action_timer("wait.for.status.changes")
    def _wait_for_status_change(self, cluster_id, final_status, timeout=300,
                                check_interval=1):
        """Waits for a specified change in cluster status.

        Will wait until cluster status changes to a specified status within
        timeout period.

        :param cluster_id: uuid, cluster id
        :param final_status: str, final cluster status
        :param timeout: int, max time to check for status change
        :param check_interval: int, interval to check status changes in
        """
        start_time = time.time()
        while True:
            cluster = self._get_cluster(cluster_id)
            current_status = cluster['status']
            if current_status == final_status:
                return cluster
            time.sleep(check_interval)
            if time.time() - start_time > timeout:
                self._delete_cluster(cluster_id)
                raise exceptions.Exception("Timeout while waiting for status "
                                           "change to %s." % final_status)