Initial commit for rally in docker

Not tested, needs further debugging.
patch-2: tested on local machine

Change-Id: I3c81c936e3a351a5e44d0118ec75c392f5026ea8
This commit is contained in:
Xicheng Chang 2015-04-27 21:35:26 -07:00
parent b9a569d506
commit e64450bf40
50 changed files with 1696 additions and 0 deletions

View File

@ -0,0 +1,4 @@
compass rally cookbook
======================
Cookbook that runs Rally health-check scenarios (in docker) for Compass.

View File

@ -0,0 +1,21 @@
# Default attributes for the compass-rally cookbook.

# Keystone credentials/endpoint the health check uses to reach OpenStack.
default['compass']['hc'] = {
  'user' => 'admin',
  'password' => 'admin',
  'url' => 'http://127.0.0.1:5000/v2.0',
  'tenant' => 'admin'
}
# NOTE(review): bare `platform` relies on Ohai's helper being available in
# attribute-file context — confirm; `node['platform']` is the usual form.
case platform
when 'centos'
  # On CentOS 6 the docker service is managed by the Red Hat init system.
  default['docker']['platform'] = {
    'service_provider' => Chef::Provider::Service::Redhat,
    'override_options' => ''
  }
end
# Docker image containing Rally plus the Compass scenario set.
default['compass']['rally_image'] = 'compassindocker/rally'
# for test
# NOTE(review): hard-coded test values below — the MySQL address and the
# admin password should come from a role/environment, not cookbook defaults.
default['mysql']['bind_address'] = '10.145.89.126'
default['mysql']['port'] = '3306'
default['openstack']['identity']['users']['admin']['password'] = 'admin'

View File

@ -0,0 +1,503 @@
# Health-check driver for Compass: runs Rally benchmark scenarios against an
# OpenStack deployment and reports results back to a Compass server.
# NOTE: Python 2 code (execfile, print statements, sys.maxint below).
from multiprocessing import Pool
import argparse
import logging
import multiprocessing
import os
import simplejson as json
import site
import subprocess
import sys
import re
# Log to a fixed file; each record carries timestamp, level and line number.
logging.basicConfig(filename='/var/log/check_health.log',
                    level=logging.INFO,
                    format='%(asctime)s;%(levelname)s;%(lineno)s;%(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S')
# Activate virtual environment for Rally
logging.info("Start to activate Rally virtual environment......")
virtual_env = '/opt/rally'
activate_this = '/opt/rally/bin/activate_this.py'
# Standard virtualenv activation trick (Python 2 execfile).
execfile(activate_this, dict(__file__=activate_this))
site.addsitedir(virtual_env)
if virtual_env not in sys.path:
    sys.path.append(virtual_env)
logging.info("Activated virtual environment.")
# These imports must come AFTER the virtualenv activation so that the Rally
# packages installed under /opt/rally are importable.
from oslo_config import cfg
from rally import db
from rally.common import version
import requests
CONF = cfg.CONF
# Pidfile guaranteeing that only one health check runs at a time.
PIDFILE = '/tmp/compass_health_check.pid'
REQUEST_HEADER = {'content-type': 'application/json'}
def round_float(number, d=2):
    """Format number as a string with d digits after the decimal point."""
    # '%.*f' consumes the precision from the argument tuple, equivalent to
    # building the format string by concatenation.
    return "%.*f" % (d, number)
def get_task_name(task_json_path):
    """Derive the task name from a scenario file path.

    The name is everything before the first '.' in the file's basename,
    e.g. '/x/boot_server.json' -> 'boot_server'.
    """
    base_name = os.path.basename(task_json_path)
    task_name, _, _ = base_name.partition('.')
    return task_name
def get_report_name(task_name):
    """Report names use dashes where task names use underscores."""
    return '-'.join(task_name.split('_'))
def is_process_running():
    """Return True if the PID recorded in PIDFILE belongs to a live process.

    A stale pidfile (its process no longer exists) is removed as a side
    effect, so the next run is not blocked.
    """
    if not os.path.isfile(PIDFILE):
        return False
    # Use a context manager so the handle is not leaked.
    with open(PIDFILE, 'r') as pid_file:
        # BUG FIX: strip the trailing newline so the /proc path is valid.
        pid = pid_file.readline().strip()
    # BUG FIX: the original checked /proc/<pid>/cmd, which never exists
    # (the real entry is 'cmdline'), so a running process was never
    # detected and the pidfile was always unlinked. Checking the
    # /proc/<pid> directory itself is sufficient.
    if os.path.exists('/proc/%s' % pid):
        return True
    os.unlink(PIDFILE)
    return False
def clean_pidfile():
    """Remove PIDFILE when its recorded process is still running.

    is_process_running() already unlinks a stale pidfile itself, so there
    is nothing to do when it returns False.
    """
    if is_process_running():
        os.unlink(PIDFILE)
class HealthException(Exception):
    """Raised when a health-check step fails.

    Carries the Compass report URL the failure should be posted to.
    """

    def __init__(self, err_msg, url=None):
        # Preserve standard Exception message behaviour.
        super(HealthException, self).__init__(err_msg)
        # Compass endpoint associated with this failure (may be None).
        self.url = url
def error_handler(func_name, err_msg, url):
    """Report a failed health-check step back to Compass.

    Logs the error, removes the pidfile, then PUTs an 'error' payload
    containing err_msg to the given Compass url.
    """
    logging.error("%s raise excption: %s" % (func_name, err_msg))
    # Drop the pidfile so a later run is not blocked by this failure.
    clean_pidfile()
    # Tell Compass the check ended in an error state.
    error_payload = {
        "report": {},
        "state": "error",
        "error_message": err_msg,
    }
    response = requests.put(
        url, data=json.dumps(error_payload), headers=REQUEST_HEADER
    )
    logging.info("[error_handler] status_code: %s" % response.status_code)
def error_handler_decorator(func):
    """Wrap a method so HealthException is reported instead of propagating.

    On HealthException the error is pushed to Compass via error_handler()
    and the wrapper returns None; any other exception propagates unchanged.
    """
    import functools

    # IMPROVEMENT: functools.wraps preserves the wrapped method's __name__
    # and docstring, which the original wrapper lost (every decorated
    # method previously reported itself as 'func_wrapper').
    @functools.wraps(func)
    def func_wrapper(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except HealthException as exc:
            error_handler(func.__name__, str(exc), exc.url)
            logging.error(exc)
    return func_wrapper
def run_task(args, **kwargs):
    # Module-level trampoline for multiprocessing.Pool: the pool in
    # HealthCheck.run() maps over (health_check_instance, task_path)
    # tuples, and this unpacks each tuple into an unbound call to
    # HealthCheck.start_task (presumably because bound methods cannot be
    # handed to the pool directly on Python 2 — see run()).
    return HealthCheck.start_task(*args, **kwargs)
class HealthCheck(object):
    """Drives Rally benchmark scenarios against an OpenStack cloud and
    reports the aggregated results back to a Compass server over HTTP.

    NOTE: Python 2 code (print statements, sys.maxint).
    """

    def __init__(self, compass_url, clustername):
        # Compass endpoint that receives the health-check reports.
        self.url = compass_url
        # One Rally deployment is maintained per cluster, keyed by name.
        self.deployment_name = clustername
        # NOTE(review): 'secnarios' is a typo, kept because renaming the
        # attribute would touch every reader.
        self.rally_secnarios_dir = '/opt/compass/rally/scenarios'
        self.rally_deployment_dir = '/opt/compass/rally/deployment'

    def print_dict(self, input_dict):
        # Debug helper: pretty-print a dict to stdout.
        print json.dumps(input_dict, indent=4)

    def init_rally_config(self):
        # Initialize Rally's oslo.config so the rally.db API can be used
        # in-process (instead of only via the CLI).
        CONF([], project='rally', version=version.version_string())

    @error_handler_decorator
    def exec_cli(self, command, max_reties=1):
        """Run a shell command, retrying up to max_reties times on failure.

        Returns (returncode, stdout, stderr) of the last attempt.
        NOTE(review): 'max_reties' (sic) is part of the keyword interface;
        if it is ever passed as < 1, the loop never runs and 'proc' is
        unbound, raising NameError on the return line — confirm callers.
        """
        max_reties = max_reties
        output = None
        err_msg = None
        while(max_reties > 0):
            # shell=True is acceptable here: commands are built internally,
            # not from untrusted input.
            proc = subprocess.Popen(
                command, shell=True,
                stdout=subprocess.PIPE, stderr=subprocess.PIPE
            )
            output, err_msg = proc.communicate()
            if proc.returncode == 0:
                break
            else:
                logging.error('[exec_cli]: %s' % err_msg)
                proc.terminate()
                max_reties -= 1
        return proc.returncode, output, err_msg

    @error_handler_decorator
    def create_deployment(self):
        """(Re)create the Rally deployment from its JSON config file.

        Returns the new deployment's uuid. Raises HealthException when the
        config file is missing or the rally CLI fails.
        """
        dpl_file_name = '.'.join((self.deployment_name, 'json'))
        dpl_path = os.path.join(self.rally_deployment_dir, dpl_file_name)
        logging.info('deployment config file path is %s' % dpl_path)
        if not os.path.isfile(dpl_path):
            err_msg = 'Cannot find deployment config file for rally.'
            raise HealthException(err_msg, self.url)
        deployments = db.deployment_list(name=self.deployment_name)
        if deployments:
            # Destroy the previous deployment
            uuid = deployments[0].uuid
            self.delete_deployment_and_tasks(uuid)
            logging.info("Destroy previous deployment!")
        # Create deployment
        command = 'rally deployment create --filename=%s --name=%s' \
            % (dpl_path, self.deployment_name)
        logging.info(command)
        returncode, output, err_msg = self.exec_cli(command)
        if returncode > 0:
            # Send error message to Compass. Rally failed.
            raise HealthException(err_msg, self.url)
        deployment = db.deployment_list(name=self.deployment_name)[0]
        return deployment.uuid

    @error_handler_decorator
    def delete_deployment_and_tasks(self, deployment_uuid=None):
        """Delete the deployment and all tasks recorded against it.

        When deployment_uuid is not given it is looked up by name; a
        missing deployment is a no-op.
        """
        if not deployment_uuid:
            deployments = db.deployment_list(name=self.deployment_name)
            if not deployments:
                return
            deployment_uuid = deployments[0].uuid
        self.cleanup_previous_tasks(deployment_uuid)
        command = 'rally deployment destroy --deployment %s'\
            % self.deployment_name
        returncode, output, err_msg = self.exec_cli(command)
        if returncode > 0:
            raise HealthException(err_msg, self.url)
        logging.info("Destroyed the deployment '%s'" % self.deployment_name)

    def get_all_tasks_config(self):
        # Collect every *.json scenario file under the scenarios directory
        # (recursively); each file is one Rally task.
        tasks = []
        for dirpath, dirs, files in os.walk(self.rally_secnarios_dir):
            for file in files:
                if file.endswith('.json'):
                    tasks.append(os.path.join(dirpath, file))
        logging.info("Get all tasks config are %s" % tasks)
        return tasks

    def get_tasks_uuid_from_db(self, deployment_id):
        # Return the uuids of every task stored for the given deployment.
        tasks = db.task_list(deployment=deployment_id)
        return [task.uuid for task in tasks]

    @error_handler_decorator
    def start_task(self, task_json_path):
        """Run a single Rally task and push its report to Compass."""
        task_name = get_task_name(task_json_path)
        print "Start task [%s]...." % task_name
        command = 'rally -v task start %s' % task_json_path
        logging.info(command)
        returncode, output, err = self.exec_cli(command)
        logging.info("task [%s] output is %s" % (task_name, output))
        print "Done task [%s]" % task_name
        print "Start to collect report......"
        self.collect_and_send_report(task_name, output)
        print "Collecting report for task [%s] is done!" % task_name

    def collect_and_send_report(self, task_name, task_output):
        """Parse a finished task's results and send a report to Compass.

        The report sent has the shape:
        {
            "results": {
                "actions": {
                    "$action": {
                        "duration": {
                            "summary": {
                                "min (sec)": xx,
                                "max (sec)": xx,
                                "avg (sec)": xx,
                                "success": xx,
                                "errors": xx,
                                "total": xx
                            },
                            "data": [xx,xx,xx]
                        }
                    }
                },
                'total_errors': x
            },
            "category": "xxx",
            "raw_output": {...}
        }
        """
        report_name = get_report_name(task_name)
        report_url = '/'.join((self.url, report_name))
        # The task uuid is scraped from the CLI output's "rally task
        # results <uuid>" hint line.
        match = re.search('\s?rally task results\s+([\da-f\-]+)\s?', task_output)
        if not match:
            raise HealthException('Unknown rally internel error!', report_url)
        task_uuid = match.group(1)
        task_obj = db.task_get(task_uuid)
        if task_obj['status'] == 'failed':
            raise HealthException(task_obj['verification_log'], report_url)
        command = "rally task results %s" % task_uuid
        logging.info("[collect_and_send_report] command is %s" % command)
        print "Start to collect report for task [%s]" % task_name
        return_code, task_result, err = self.exec_cli(command)
        if return_code > 0:
            raise HealthException(err, report_url)
        output = json.loads(task_result)[0]
        report = {'actions': {}}
        actions = []
        # Get the name of actions
        actions = []
        if output['result']:
            actions = output['result'][0]['atomic_actions'].keys()
        # Prefer the action names of the first error-free iteration.
        for result in output['result']:
            if result['error']:
                continue
            actions = result['atomic_actions'].keys()
            break
        if not actions:
            # No iteration exposed atomic actions; report under the
            # scenario's own name.
            actions.append(report_name)
        # Get and set report for each action
        for action in actions:
            report['actions'].setdefault(action, {'duration': {}})
            report['actions'][action]['duration'] \
                = self._get_action_dur_report(action, output)
        # Get and set errors if any
        errors = self._get_total_errors(output)
        report['total_errors'] = errors
        logging.info("task [%s] report is: %s" % (task_name, report))
        final_report = {"results": report, "raw_output": output}
        self.send_report(final_report, report_url)

    def _get_total_errors(self, output):
        # Count iterations that ended with an error; an empty result list
        # is treated as one error.
        results = output['result']
        if not results:
            return 1
        total_errors = 0
        for result in results:
            if result['error']:
                total_errors += 1
        return total_errors

    def _get_action_dur_report(self, action, output):
        """Build the duration summary (min/avg/max/success/errors/total)
        and the per-iteration data series for one atomic action.
        """
        summary = {
            'min (sec)': 0,
            'avg (sec)': 0,
            'max (sec)': 0,
            'success': '0.0%',
            'errors': {},
            'total': 0
        }
        data = []
        errors = {
            'count': 0,
            'details': []
        }
        min_dur = sys.maxint
        max_dur = 0
        total_dur = 0
        # Number of iterations in which this action did not run at all.
        no_action = 0
        results = output['result']
        for result in results:
            atomic_actions = result['atomic_actions']
            if atomic_actions and action not in atomic_actions:
                # Action missing from this iteration; record a zero sample.
                no_action += 1
                data.append(0)
                continue
            elif (atomic_actions and not atomic_actions[action]
                    or not atomic_actions and result['error']):
                # Action present without a duration, or the whole iteration
                # failed before any atomic action ran.
                errors['count'] = errors['count'] + 1
                errors['details'].append(result['error'])
                data.append(0)
                continue
            duration = result['duration']
            if action in atomic_actions:
                duration = atomic_actions[action]
            total_dur += duration
            # [a, b][cond] is an old-style ternary: picks b when cond holds.
            min_dur = [min_dur, duration][duration < min_dur]
            max_dur = [max_dur, duration][duration > max_dur]
            data.append(duration)
        error_count = errors['count']
        # Expected number of iterations, from the scenario's runner config.
        total_exec = output['key']['kw']['runner']['times']
        if not results:
            # No iterations ran at all: report everything as failed.
            errors['count'] = total_exec
            errors['details'] = ['Unknown error!']
            summary['errors'] = errors
            return {
                'summary': summary,
                'data': data
            }
        if total_exec == error_count:
            # All actions in this scenario are failed.
            summary['min (sec)'] = 0
            summary['avg (sec)'] = 0
        else:
            summary['min (sec)'] = round_float(min_dur)
            summary['avg (sec)'] = round_float(
                total_dur / (total_exec - error_count - no_action)
            )
        summary['max (sec)'] = round_float(max_dur)
        summary['errors'] = errors
        # Success rate over the iterations actually recorded.
        summary['success'] = round_float(
            float(
                total_exec - error_count - no_action
            ) * 100 / float(len(results)),
            1
        ) + '%'
        summary['total'] = total_exec
        return {
            'summary': summary,
            'data': data
        }

    def create_reports(self, tasks):
        """Pre-register one (empty) report per task with Compass so pending
        entries are visible before any results arrive.
        """
        reports_list = []
        for task in tasks:
            temp = {}
            temp['name'] = get_report_name(get_task_name(task))
            # Category is the scenario file's parent directory name.
            temp['category'] = os.path.basename(os.path.dirname(task))
            reports_list.append(temp)
        logging.info("tasks are %s" % reports_list)
        payload = {"report_list": reports_list}
        resp = requests.post(
            self.url, data=json.dumps(payload), headers=REQUEST_HEADER
        )
        logging.info("[create reports] response code is %s" % resp.status_code)

    def send_report(self, report, report_url=None):
        """PUT a finished task report to Compass, deriving the final state
        from the error counts.
        """
        if not report_url:
            logging.error("report_url is None!")
            report_url = self.url
        payload = {
            "report": report,
            "state": "success"
        }
        total_errors = report['results']['total_errors']
        exec_num = report['raw_output']['key']['kw']['runner']['times']
        # NOTE(review): with Python precedence this reads as
        # (total_errors >= exec_num) or (total_errors == 0 and exec_num > 0),
        # which marks a run with ZERO errors as 'error' — this looks
        # inverted; confirm the intended condition.
        if total_errors >= exec_num or total_errors == 0 and exec_num > 0:
            payload['state'] = 'error'
            payload['error_message'] = "Actions in this scenario are failed."
        elif total_errors:
            payload['state'] = 'finished'
        resp = requests.put(
            report_url, data=json.dumps(payload), headers=REQUEST_HEADER
        )
        logging.info("Update report reponse is %s" % resp.text)

    def cleanup_previous_tasks(self, deployment_id):
        # Remove all task rows for this deployment from Rally's database.
        tasks = self.get_tasks_uuid_from_db(deployment_id)
        for task_id in tasks:
            db.task_delete(task_id)
        logging.info("Delete all tasks of deployment[ID: %s]" % deployment_id)

    def run(self):
        """Top-level flow: register reports, recreate the deployment, then
        run every scenario task in a small process pool.
        """
        tasks = self.get_all_tasks_config()
        self.create_reports(tasks)
        self.init_rally_config()
        self.create_deployment()
        logging.info("Start to run tasks...")
        process_num = 2
        try:
            cpu_num = multiprocessing.cpu_count()
            # Old-style ternary: use the larger of 2 and the core count.
            process_num = [process_num, cpu_num][process_num < cpu_num]
        except Exception:
            logging.info("cpu_count() has not been implemented!")
        logging.info("The number of processes will be %s." % process_num)
        try:
            # Dispatch through the module-level run_task() trampoline; the
            # pool maps over (self, task_path) tuples.
            pool = Pool(processes=process_num)
            pool.map_async(run_task, zip([self]*len(tasks), tasks))
            pool.close()
            pool.join()
        except Exception as ex:
            logging.info("processing pool get exception: '%s'" % ex)
        finally:
            # Always drop the pidfile so the next run is not blocked.
            clean_pidfile()
def main(compass_url, deployment_name):
    """Entry point: run the health check once, guarded by a pidfile."""
    logging.info('compass_url is %s' % compass_url)
    if is_process_running():
        # Another health check is still running; do not start a second one.
        logging.info("[%s] already exists, exit!" % PIDFILE)
        sys.exit()
    else:
        # Record our pid so concurrent invocations can detect us.
        pid = str(os.getpid())
        # BUG FIX: use open() in a context manager instead of the
        # Python-2-only file() builtin, which also leaked the handle.
        with open(PIDFILE, 'w') as pid_file:
            pid_file.write(pid)
    checker = HealthCheck(compass_url, deployment_name)
    checker.run()
    logging.info("Health check is finished!")
if __name__ == '__main__':
    # Command-line entry point: forward the Compass report URL and the
    # cluster name straight into main().
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--url", type=str,
                            help="The URL to send reports back")
    arg_parser.add_argument("--clustername", type=str,
                            help="The Cluster name")
    parsed_args = arg_parser.parse_args()
    main(parsed_args.url, parsed_args.clustername)

View File

@ -0,0 +1,17 @@
{
"Authenticate.keystone": [
{
"runner": {
"type": "constant",
"times": 3,
"concurrency": 2
},
"context": {
"users": {
"tenants": 3,
"users_per_tenant": 3
}
}
}
]
}

View File

@ -0,0 +1,20 @@
{
"Authenticate.validate_cinder": [
{
"args": {
"repetitions": 2
},
"runner": {
"type": "constant",
"times": 2,
"concurrency": 2
},
"context": {
"users": {
"tenants": 3,
"users_per_tenant": 2
}
}
}
]
}

View File

@ -0,0 +1,20 @@
{
"Authenticate.validate_glance": [
{
"args": {
"repetitions": 2
},
"runner": {
"type": "constant",
"times": 2,
"concurrency": 2
},
"context": {
"users": {
"tenants": 3,
"users_per_tenant": 2
}
}
}
]
}

View File

@ -0,0 +1,20 @@
{
"Authenticate.validate_neutron": [
{
"args": {
"repetitions": 2
},
"runner": {
"type": "constant",
"times": 2,
"concurrency": 2
},
"context": {
"users": {
"tenants": 3,
"users_per_tenant": 2
}
}
}
]
}

View File

@ -0,0 +1,20 @@
{
"Authenticate.validate_nova": [
{
"args": {
"repetitions": 2
},
"runner": {
"type": "constant",
"times": 2,
"concurrency": 2
},
"context": {
"users": {
"tenants": 3,
"users_per_tenant": 2
}
}
}
]
}

View File

@ -0,0 +1,26 @@
{
"CinderVolumes.create_and_attach_volume": [
{
"args": {
"volume_size": 1,
"image": {
"name": "^cirros.*"
},
"flavor": {
"name": "m1.tiny"
}
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 1
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 2
}
}
}
]
}

View File

@ -0,0 +1,23 @@
{
"CinderVolumes.create_and_delete_snapshot": [
{
"args": {
"force": false
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 1
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
},
"volumes": {
"size": 1
}
}
}
]
}

View File

@ -0,0 +1,20 @@
{
"CinderVolumes.create_and_delete_volume": [
{
"args": {
"size": 1
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 1
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
}
}
}
]
}

View File

@ -0,0 +1,21 @@
{
"CinderVolumes.create_and_list_volume": [
{
"args": {
"size": 1,
"detailed": true
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 1
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 2
}
}
}
]
}

View File

@ -0,0 +1,59 @@
{
"CinderVolumes.create_snapshot_and_attach_volume": [
{
"args": {
"volume_type": false,
"min_volume_size": 1,
"max_volume_size": 5
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 1
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
},
"servers": {
"image": {
"name": "^cirros.*"
},
"flavor": {
"name": "m1.tiny"
},
"servers_per_tenant": 1
}
}
},
{
"args": {
"volume_type": false,
"min_volume_size": 1,
"max_volume_size": 5
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 1
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
},
"servers": {
"image": {
"name": "^cirros.*"
},
"flavor": {
"name": "m1.tiny"
},
"servers_per_tenant": 1
}
}
}
]
}

View File

@ -0,0 +1,22 @@
{
"GlanceImages.create_and_delete_image": [
{
"args": {
"image_location": "http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img",
"container_format": "bare",
"disk_format": "qcow2"
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 1
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
}
}
}
]
}

View File

@ -0,0 +1,22 @@
{
"GlanceImages.create_and_list_image": [
{
"args": {
"image_location": "http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img",
"container_format": "bare",
"disk_format": "qcow2"
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 2
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
}
}
}
]
}

View File

@ -0,0 +1,26 @@
{
"GlanceImages.create_image_and_boot_instances": [
{
"args": {
"image_location": "http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img",
"container_format": "bare",
"disk_format": "qcow2",
"flavor": {
"name": "m1.tiny"
},
"number_instances": 1
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 1
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
}
}
}
]
}

View File

@ -0,0 +1,14 @@
{
"KeystoneBasic.create_delete_user": [
{
"args": {
"name_length": 10
},
"runner": {
"type": "constant",
"times": 2,
"concurrency": 5
}
}
]
}

View File

@ -0,0 +1,14 @@
{
"KeystoneBasic.create_and_list_tenants": [
{
"args": {
"name_length": 10
},
"runner": {
"type": "constant",
"times": 5,
"concurrency": 1
}
}
]
}

View File

@ -0,0 +1,14 @@
{
"KeystoneBasic.create_and_list_users": [
{
"args": {
"name_length": 10
},
"runner": {
"type": "constant",
"times": 5,
"concurrency": 5
}
}
]
}

View File

@ -0,0 +1,15 @@
{
"KeystoneBasic.create_tenant_with_users": [
{
"args": {
"name_length": 10,
"users_per_tenant": 5
},
"runner": {
"type": "constant",
"times": 5,
"concurrency": 5
}
}
]
}

View File

@ -0,0 +1,25 @@
{
"NeutronNetworks.create_and_delete_networks": [
{
"args": {
"network_create_args": {}
},
"runner": {
"type": "constant",
"times": 2,
"concurrency": 2
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
},
"quotas": {
"neutron": {
"network": -1
}
}
}
}
]
}

View File

@ -0,0 +1,28 @@
{
"NeutronNetworks.create_and_delete_ports": [
{
"args": {
"network_create_args": {},
"port_create_args": {},
"ports_per_network": 5
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 1
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
},
"quotas": {
"neutron": {
"network": -1,
"port": -1
}
}
}
}
]
}

View File

@ -0,0 +1,29 @@
{
"NeutronNetworks.create_and_delete_subnets": [
{
"args": {
"network_create_args": {},
"subnet_create_args": {},
"subnet_cidr_start": "1.1.0.0/30",
"subnets_per_network": 1
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 1
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
},
"quotas": {
"neutron": {
"network": -1,
"subnet": -1
}
}
}
}
]
}

View File

@ -0,0 +1,25 @@
{
"NeutronNetworks.create_and_list_networks": [
{
"args": {
"network_create_args": {}
},
"runner": {
"type": "constant",
"times": 2,
"concurrency": 2
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
},
"quotas": {
"neutron": {
"network": -1
}
}
}
}
]
}

View File

@ -0,0 +1,28 @@
{
"NeutronNetworks.create_and_list_ports": [
{
"args": {
"network_create_args": {},
"port_create_args": {},
"ports_per_network": 2
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 2
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
},
"quotas": {
"neutron": {
"network": -1,
"port": -1
}
}
}
}
]
}

View File

@ -0,0 +1,31 @@
{
"NeutronNetworks.create_and_list_routers": [
{
"args": {
"network_create_args": {},
"subnet_create_args": {},
"subnet_cidr_start": "1.1.0.0/30",
"subnets_per_network": 2,
"router_create_args": {}
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 2
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
},
"quotas": {
"neutron": {
"network": -1,
"subnet": -1,
"router": -1
}
}
}
}
]
}

View File

@ -0,0 +1,29 @@
{
"NeutronNetworks.create_and_list_subnets": [
{
"args": {
"network_create_args": {},
"subnet_create_args": {},
"subnet_cidr_start": "1.1.0.0/30",
"subnets_per_network": 2
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 2
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
},
"quotas": {
"neutron": {
"network": -1,
"subnet": -1
}
}
}
}
]
}

View File

@ -0,0 +1,29 @@
{
"NeutronNetworks.create_and_update_networks": [
{
"args": {
"network_update_args": {
"admin_state_up": false,
"name": "_updated"
},
"network_create_args": {}
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 2
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
},
"quotas": {
"neutron": {
"network": -1
}
}
}
}
]
}

View File

@ -0,0 +1,34 @@
{
"NeutronNetworks.create_and_update_ports": [
{
"args": {
"network_create_args": {},
"port_create_args": {},
"port_update_args": {
"admin_state_up": false,
"device_id": "dummy_id",
"device_owner": "dummy_owner",
"name": "_port_updated"
},
"ports_per_network": 1
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 2
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
},
"quotas": {
"neutron": {
"network": -1,
"port": -1
}
}
}
}
]
}

View File

@ -0,0 +1,35 @@
{
"NeutronNetworks.create_and_update_routers": [
{
"args": {
"network_create_args": {},
"subnet_create_args": {},
"subnet_cidr_start": "1.1.0.0/30",
"subnets_per_network": 2,
"router_create_args": {},
"router_update_args": {
"admin_state_up": false,
"name": "_router_updated"
}
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 2
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
},
"quotas": {
"neutron": {
"network": -1,
"subnet": -1,
"router": -1
}
}
}
}
]
}

View File

@ -0,0 +1,33 @@
{
"NeutronNetworks.create_and_update_subnets": [
{
"args": {
"subnet_update_args": {
"enable_dhcp": false,
"name": "_subnet_updated"
},
"network_create_args": {},
"subnet_create_args": {},
"subnet_cidr_start": "1.4.0.0/16",
"subnets_per_network": 2
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 2
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
},
"quotas": {
"neutron": {
"network": -1,
"subnet": -1
}
}
}
}
]
}

View File

@ -0,0 +1,26 @@
{
"NovaServers.boot_and_delete_server": [
{
"args": {
"flavor": {
"name": "m1.tiny"
},
"image": {
"name": "^cirros.*"
},
"force_delete": false
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 2
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
}
}
}
]
}

View File

@ -0,0 +1,30 @@
{
"NovaSecGroup.boot_and_delete_server_with_secgroups": [
{
"args": {
"flavor": {
"name": "m1.tiny"
},
"image": {
"name": "^cirros.*"
},
"security_group_count": 2,
"rules_per_security_group": 2
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 1
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
},
"network": {
"start_cidr": "100.1.0.0/26"
}
}
}
]
}

View File

@ -0,0 +1,32 @@
{
"NovaServers.boot_and_bounce_server": [
{
"args": {
"flavor": {
"name": "m1.tiny"
},
"image": {
"name": "^cirros.*"
},
"force_delete": false,
"actions": [
{"hard_reboot": 1},
{"soft_reboot": 1},
{"stop_start": 1},
{"rescue_unrescue": 1}
]
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 1
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
}
}
}
]
}

View File

@ -0,0 +1,27 @@
{
"NovaServers.boot_server_from_volume_and_delete": [
{
"args": {
"flavor": {
"name": "m1.tiny"
},
"image": {
"name": "^cirros.*"
},
"volume_size": 1,
"force_delete": false
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 1
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
}
}
}
]
}

View File

@ -0,0 +1,26 @@
{
"NovaServers.boot_server_from_volume": [
{
"args": {
"flavor": {
"name": "m1.tiny"
},
"image": {
"name": "^cirros.*"
},
"volume_size": 1
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 1
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
}
}
}
]
}

View File

@ -0,0 +1,26 @@
{
"NovaServers.snapshot_server": [
{
"args": {
"flavor": {
"name": "m1.tiny"
},
"image": {
"name": "^cirros.*"
},
"force_delete": false
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 1
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
}
}
}
]
}

View File

@ -0,0 +1,21 @@
{
"NovaSecGroup.create_and_delete_secgroups": [
{
"args": {
"security_group_count": 2,
"rules_per_security_group": 2
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 2
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 2
}
}
}
]
}

View File

@ -0,0 +1,21 @@
{
"NovaSecGroup.create_and_list_secgroups": [
{
"args": {
"security_group_count": 5,
"rules_per_security_group": 5
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 1
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 2
}
}
}
]
}

View File

@ -0,0 +1,31 @@
{
"NovaServers.resize_server": [
{
"args": {
"flavor": {
"name": "m1.tiny"
},
"image": {
"name": "^cirros.*"
},
"to_flavor": {
"name": "m1.small"
},
"confirm": true,
"force_delete": false
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 1
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
}
}
}
]
}

View File

@ -0,0 +1,20 @@
{
"Quotas.cinder_update_and_delete": [
{
"args": {
"max_quota": 1024
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 1
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
}
}
}
]
}

View File

@ -0,0 +1,20 @@
{
"Quotas.cinder_update": [
{
"args": {
"max_quota": 1024
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 1
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 2
}
}
}
]
}

View File

@ -0,0 +1,20 @@
{
"Quotas.nova_update_and_delete": [
{
"args": {
"max_quota": 1024
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 1
},
"context": {
"users": {
"tenants": 3,
"users_per_tenant": 2
}
}
}
]
}

View File

@ -0,0 +1,20 @@
{
"Quotas.nova_update": [
{
"args": {
"max_quota": 1024
},
"runner": {
"type": "constant",
"times": 1,
"concurrency": 1
},
"context": {
"users": {
"tenants": 3,
"users_per_tenant": 2
}
}
}
]
}

View File

@ -0,0 +1,10 @@
# Cookbook metadata for compass-rally, which builds and runs a Rally
# health-check docker image against a Compass-deployed OpenStack cloud.
name 'compass-rally'
maintainer 'xicheng chang'
maintainer_email 'xicheng.chang@huawei.com'
license 'All rights reserved'
description 'Installs/Configures stackforge-rally'
# README.md is read relative to this metadata.rb file.
long_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))
version '0.1.0'
# EPEL provides the docker-io package on RHEL/CentOS 6 (see docker recipe).
depends 'yum-epel'
# depends 'python'
# depends 'openstack-common', '~> 9.0'

View File

@ -0,0 +1,66 @@
# encoding: UTF-8
#
# Cookbook Name:: compass-really
# Recipe:: default
#
# Copyright 2013, Opscode, Inc.
# Copyright 2013, AT&T Services, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pull latest rally image
docker_image = node['compass']['rally_image']
execute "pull latest rally image" do
  command "docker pull #{docker_image}"
end

# Ship the Rally scenario JSON files onto the host; the generated
# Dockerfile ADDs them into the image.
remote_directory "/var/lib/rally-docker/scenarios" do
  source "scenarios"
  recursive true
  mode "0755"
  action :create_if_missing
end

# The health-check driver script that runs inside the container.
cookbook_file "check_health.py" do
  mode "0755"
  path "/var/lib/rally-docker/check_health.py"
end

# load variables
# "host:port" of the MySQL server backing Rally's database.
rally_db = node['mysql']['bind_address'] + ":#{node['mysql']['port']}"
# Deployment name is the last dot-separated label of the node name.
deployment_name = node.name.split('.')[-1]
endpoint = node['compass']['hc']['url']
# NOTE(review): falls back to 'admin' when admin_user is unset; assumes
# node['openstack']['identity']['users'][admin]['password'] exists.
admin = node['openstack']['identity']['admin_user'] || 'admin'
pass = node['openstack']['identity']['users'][admin]['password']

template "/var/lib/rally-docker/Dockerfile" do
  source 'Dockerfile.erb'
  variables(
    RALLY_DB: rally_db)
  # NOTE(review): create_if_missing means a changed RALLY_DB will never be
  # re-rendered on later runs — confirm this is intended.
  action :create_if_missing
end

# Rally 'ExistingCloud' deployment definition consumed by check_health.py.
template "/var/lib/rally-docker/deployment.json" do
  source 'deployment.json.erb'
  variables(
    user: admin,
    password: pass,
    url: endpoint,
    tenant: 'admin')
  action :create_if_missing
end

# Build the per-deployment image from the rendered Dockerfile.
execute "build running image" do
  command "docker build -t #{deployment_name} /var/lib/rally-docker"
end

View File

@ -0,0 +1,39 @@
#
# Cookbook Name:: compass-rally
# Recipe:: docker
#
# Copyright 2013, Troy Howard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Platform-specific service options (init provider) set in the cookbook
# attributes.
platform_options = node['docker']['platform']
major_version = node['platform_version'].split('.').first.to_i
# RHEL/CentOS 6 ships docker as 'docker-io' from EPEL; newer releases and
# other platforms package it as 'docker'.
if platform_family?('rhel') && major_version < 7
  include_recipe 'yum-epel'
  docker_packages = ['docker-io']
else
  docker_packages = ['docker']
end
docker_packages.each do |pkg|
  yum_package pkg do
    action :install
  end
end
# NOTE(review): the attributes only populate node['docker']['platform'] for
# centos; on other platforms platform_options is nil and the provider
# lookup below will fail — confirm the target platform set.
service "docker" do
  provider platform_options['service_provider']
  supports :status => true, :restart => true, :reload => true
  action [ :start ]
end

View File

@ -0,0 +1,9 @@
# Rendered by Chef from Dockerfile.erb; @RALLY_DB is the "host:port" of the
# MySQL server that backs the Rally database (see recipes/default.rb).
FROM compassindocker/rally
ADD scenarios /opt/compass/rally/scenarios
ADD check_health.py /opt/compass/rally/check_health.py
ADD deployment.json /opt/compass/rally/deployment.json
# BUG FIX: sed without -i only printed to stdout and never modified
# rally.conf, and '#{RALLY_DB}' is Ruby string-interpolation syntax that
# ERB leaves as a literal — replaced with -i and an ERB output tag.
# NOTE(review): 'sleep 200' looks like a debugging leftover — confirm
# before removing it.
RUN sed -i 's|#connection=<None>|connection=mysql://rally:rally@<%= @RALLY_DB %>/rally|' /etc/rally/rally.conf && \
    rally-manage db recreate && \
    chmod -R go+w /opt/rally/database && \
    sleep 200

View File

@ -0,0 +1,9 @@
{
"type": "ExistingCloud",
"auth_url": "<%= @url %>",
"admin": {
"username": "<%= @user %>",
"password": "<%= @password %>",
"tenant_name": "<%= @tenant %>"
}
}

View File

@ -0,0 +1,16 @@
{
"name": "compass-rally",
"description": "Rally check",
"json_class": "Chef::Role",
"default_attributes": {
},
"override_attributes": {
},
"chef_type": "role",
"run_list": [
"recipe[compass-rally::docker]",
"recipe[compass-rally]"
],
"env_run_lists": {
}
}