Add support and bugfixes for dual-cloud scenario

1. Support to run on dual-cloud scenario;
2. Add retry mechanism when deleting floating IP;
3. Enhance the logic for consolidating data;
4. New v5 image created with wrk2 support;

Change-Id: I4f71f3fd3cf7f6e0cc01828039ad22df8f6b8502
This commit is contained in:
Yichen Wang 2015-05-01 16:53:48 -07:00
parent 21d2822a44
commit bb8f74d48f
6 changed files with 48 additions and 32 deletions

View File

@ -158,13 +158,11 @@ class SecGroup(object):
Add a retry mechanism
"""
LOG.info("Deleting secgroup %s" % self.secgroup)
for retry_count in range(1, 10):
for _ in range(10):
try:
self.novaclient.security_groups.delete(self.secgroup)
break
except Exception:
LOG.warn("Security group %s in use. Retry #%d" % (
self.secgroup_name, retry_count))
time.sleep(2)

View File

@ -26,7 +26,7 @@ LOG = logging.getLogger(__name__)
# Global CIDR shared by all objects of this class
# Enables each network to get a unique CIDR
START_CIDR = "1.0.0.0/16"
START_CIDR = "10.0.0.0/16"
cidr = START_CIDR
def create_floating_ip(neutron_client, ext_net):
@ -145,7 +145,17 @@ class BaseNetwork(object):
for instance in self.instance_list:
instance.delete_server()
if instance.fip:
delete_floating_ip(self.neutron_client, instance.fip['floatingip']['id'])
"""
Delete the Floating IP
Sometimes this will fail if instance is just deleted
Add a retry mechanism
"""
for _ in range(10):
try:
delete_floating_ip(self.neutron_client, instance.fip['floatingip']['id'])
break
except Exception:
time.sleep(2)
# Delete all security groups
for secgroup_instance in self.secgroup_list:

View File

@ -1,7 +1,7 @@
# KloudBuster Default configuration file
server:
# Number of tenants to be created on the cloud
number_tenants: 2
number_tenants: 1
# Number of Users to be created inside the tenant
users_per_tenant: 1
@ -15,7 +15,7 @@ server:
networks_per_router: 1
# Number of VM instances to be created within the context of each Network
vms_per_network: 1
vms_per_network: 2
# Number of security groups per network
secgroups_per_network: 1
@ -24,7 +24,7 @@ server:
keypairs_per_network: 1
# Assign floating IP for every VM
use_floatingip: False
use_floatingip: True
# SSH configuration
ssh_vm_username: 'ubuntu'
@ -35,7 +35,7 @@ server:
keystone_admin_role: "admin"
cleanup_resources: True
public_key_file: '../ssh/id_rsa.pub'
image_name: 'Scale Image v4'
image_name: 'Scale Image v5'
flavor_type: 'm1.small'
client:
@ -63,10 +63,7 @@ client:
keypairs_per_network: 1
# Assign floating IP for every VM
use_floatingip: False
# Specify whether the testing cloud is running in same cloud
run_on_same_cloud: True
use_floatingip: True
# SSH configuration
ssh_vm_username: 'ubuntu'
@ -90,22 +87,24 @@ client:
# HTTP Tool Specific Configs
http_tool_configs:
# Threads to run tests
threads: 2
threads: 1
# Connections to be kept concurrently
connections: 5000
connections: 1000
# Timeout for HTTP requests
timeout: 5
# Connection Type: "Keep-alive", "New"
connection_type: 'Keep-alive'
# Interval between 2 HTTP requests in msec for each connection
interval_per_connection: 1000
# Duration of testing tools (seconds)
duration: 30
# Prompt before running benchmarking tools
prompt_before_run: True
prompt_before_run: False
# Configs that remain constant
keystone_admin_role: "admin"
cleanup_resources: True
public_key_file: '../ssh/id_rsa.pub'
image_name: 'Scale Image v4'
image_name: 'Scale Image v5'
flavor_type: 'm1.small'

View File

@ -36,11 +36,12 @@ class KBScheduler(object):
Control the testing VMs on the testing cloud
"""
def __init__(self, client_list, config):
def __init__(self, client_list, config, single_cloud=True):
self.client_dict = dict(zip([x.vm_name.lower() for x in client_list], client_list))
self.config = config
self.single_cloud = single_cloud
self.result = {}
self.tool_result = None
self.tool_result = {}
# Redis
self.connection_pool = None
@ -100,7 +101,6 @@ class KBScheduler(object):
if instance.up_flag:
continue
else:
self.send_cmd('ACK', None, None)
clist[vm_name].up_flag = True
clist.pop(vm_name)
cnt_succ = cnt_succ + 1
@ -109,6 +109,7 @@ class KBScheduler(object):
clist.pop(vm_name)
if self.result[vm_name]['status']:
# Command returned with non-zero status, command failed
LOG.error("[%s] %s", vm_name, self.result[vm_name]['stderr'])
cnt_failed = cnt_failed + 1
else:
# Command returned with zero, command succeed
@ -120,10 +121,11 @@ class KBScheduler(object):
return (cnt_succ, cnt_failed, len(clist))
def wait_for_vm_up(self, timeout=120):
def wait_for_vm_up(self, timeout=300):
cnt_succ = self.polling_vms(timeout)[0]
if cnt_succ != len(self.client_dict):
raise KBVMUpException()
self.send_cmd('ACK', None, None)
def setup_static_route(self, timeout=10):
func = {'cmd': 'setup_static_route'}
@ -132,7 +134,7 @@ class KBScheduler(object):
if cnt_succ != len(self.client_dict):
raise KBSetStaticRouteException()
def check_http_service(self, timeout=60):
def check_http_service(self, timeout=10):
func = {'cmd': 'check_http_service'}
self.send_cmd('EXEC', 'http', func)
cnt_succ = self.polling_vms(timeout)[0]
@ -143,10 +145,11 @@ class KBScheduler(object):
func = {'cmd': 'run_http_test'}
self.send_cmd('EXEC', 'http', func)
# Give additional 30 seconds for everybody to report results
timeout = self.config.http_tool_configs.duration + 3000
cnt_succ = self.polling_vms(timeout)[0]
if cnt_succ != len(self.client_dict):
raise KBHTTPBenchException()
timeout = self.config.http_tool_configs.duration + 30
cnt_pending = self.polling_vms(timeout)[2]
if cnt_pending != 0:
LOG.warn("Testing VMs are not returning results within grace period, "
"summary shown below may not be accurate!")
# Parse the results from HTTP Tools
for key, instance in self.client_dict.items():
@ -166,8 +169,9 @@ class KBScheduler(object):
LOG.info("Waiting for agents on VMs to come up...")
self.wait_for_vm_up()
LOG.info("Setting up static route to reach tested cloud...")
self.setup_static_route()
if self.single_cloud:
LOG.info("Setting up static route to reach tested cloud...")
self.setup_static_route()
LOG.info("Waiting for HTTP service to come up...")
self.check_http_service()

View File

@ -109,6 +109,8 @@ class Kloud(object):
def create_vm(self, instance):
LOG.info("Creating Instance: " + instance.vm_name)
instance.create_server(**instance.boot_info)
if not instance:
return
instance.fixed_ip = instance.instance.networks.values()[0][0]
if instance.config['use_floatingip']:
@ -120,7 +122,7 @@ class Kloud(object):
instance.ssh_ip = instance.fixed_ip
def create_vms(self):
tpool = ThreadPool(processes=10)
tpool = ThreadPool(processes=5)
tpool.map(self.create_vm, self.get_all_instances())
@ -215,7 +217,9 @@ class KloudBuster(object):
client_list = self.testing_kloud.get_all_instances()
server_list = self.kloud.get_all_instances()
kbscheduler = kb_scheduler.KBScheduler(client_list, config_scale.client)
kbscheduler = kb_scheduler.KBScheduler(client_list,
config_scale.client,
self.single_cloud)
kbscheduler.run()
self.final_result = kbscheduler.tool_result
self.final_result['total_server_vms'] = len(server_list)

View File

@ -40,7 +40,7 @@ class WrkTool(PerfTool):
def cmd_parser_run_client(self, status, stdout, stderr):
if status:
return [self.parse_error(stderr)]
return self.parse_error(stderr)
# Sample Output:
# Running 10s test @ http://192.168.1.1/index.html
@ -106,6 +106,7 @@ class WrkTool(PerfTool):
for key in ['http_rps', 'http_total_req', 'http_sock_err', 'http_rates_kbytes']:
all_res[key] = 0
for item in results:
all_res[key] += item['results'][key]
if (key in item['results']):
all_res[key] += item['results'][key]
all_res[key] = int(all_res[key])
return all_res