Simplify cfg and ssh + key pairs, switch to wrk2

Change-Id: I66f1c6e035bbf894ae4ce505eee1eb44020dec9d
ahothan 2015-05-05 17:35:03 -07:00
parent c1a288c831
commit 325393a0a9
7 changed files with 156 additions and 104 deletions

View File

@@ -90,7 +90,6 @@ class BaseNetwork(object):
self.network = None
self.instance_list = []
self.secgroup_list = []
self.keypair_list = []
def create_compute_resources(self, network_prefix, config_scale):
"""
@@ -106,13 +105,6 @@ class BaseNetwork(object):
secgroup_name = network_prefix + "-SG" + str(secgroup_count)
secgroup_instance.create_secgroup_with_rules(secgroup_name)
# Create the keypair list
for keypair_count in range(config_scale['keypairs_per_network']):
keypair_instance = base_compute.KeyPair(self.nova_client)
self.keypair_list.append(keypair_instance)
keypair_name = network_prefix + "-K" + str(keypair_count)
keypair_instance.add_public_key(keypair_name, config_scale['public_key_file'])
LOG.info("Scheduled to create virtual machines...")
if config_scale['use_floatingip']:
external_network = find_external_network(self.neutron_client)
@@ -132,7 +124,7 @@ class BaseNetwork(object):
# Create the VMs on specified network, first keypair, first secgroup
perf_instance.boot_info['image_name'] = config_scale['image_name']
perf_instance.boot_info['flavor_type'] = config_scale['flavor_type']
perf_instance.boot_info['keyname'] = self.keypair_list[0].keypair_name
perf_instance.boot_info['keyname'] = self.router.user.key_name
perf_instance.boot_info['nic'] = [{'net-id': self.network['id']}]
perf_instance.boot_info['sec_group'] = self.secgroup_list[0].secgroup
@@ -161,10 +153,6 @@ class BaseNetwork(object):
for secgroup_instance in self.secgroup_list:
secgroup_instance.delete_secgroup()
# Delete all keypairs
for keypair_instance in self.keypair_list:
keypair_instance.remove_public_key()
def create_network_and_subnet(self, network_name):
"""

View File

@@ -1,4 +1,34 @@
# KloudBuster Default configuration file
# Name of the image to use for all test VMs (client, server and proxy)
# The image name must exist in OpenStack and must be built with the appropriate
# packages
image_name: 'Scale Image v6a'
# Flavor to use for the test images - the flavor name must exist in OpenStack
flavor_type: 'm1.small'
# Config options common to client and server side
keystone_admin_role: "admin"
# Cleanup all kloudbuster resources upon exit
cleanup_resources: True
#
# SSH access to the test VMs launched by kloudbuster is not required,
# but can be handy if the user wants to ssh manually to any of them
# (for example, to debug).
# Public key to use to access all test VMs.
# If empty, defaults to the user's public key (~/.ssh/id_rsa.pub) if it
# exists; otherwise no public key will be provisioned.
# If configured or available, a key pair will be added for each
# configured user.
#
public_key_file:
# SERVER SIDE CONFIG OPTIONS
server:
# Number of tenants to be created on the cloud
number_tenants: 1
@@ -15,62 +45,21 @@ server:
networks_per_router: 1
# Number of VM instances to be created within the context of each Network
vms_per_network: 2
vms_per_network: 1
# Number of security groups per network
secgroups_per_network: 1
# Number of keypairs per network
keypairs_per_network: 1
# Assign floating IP for every VM
use_floatingip: True
# SSH configuration
ssh_vm_username: 'ubuntu'
ssh_retry_count: 50
private_key_file: './ssh/id_rsa'
# Configs that remain constant
keystone_admin_role: "admin"
cleanup_resources: True
public_key_file: '../ssh/id_rsa.pub'
image_name: 'Scale Image v5'
flavor_type: 'm1.small'
use_floatingip: False
# CLIENT SIDE CONFIG OPTIONS
client:
# Number of tenants to be created on the cloud
number_tenants: 1
# Number of Users to be created inside the tenant
users_per_tenant: 1
# Number of routers to be created within the context of each User
# For now support only 1 router per user
routers_per_user: 1
# Number of networks to be created within the context of each Router
# Assumes 1 subnet per network
networks_per_router: 1
# Number of VM instances to be created within the context of each Network
vms_per_network: 2
# Number of security groups per network
secgroups_per_network: 1
# Number of keypairs per network
keypairs_per_network: 1
# Assign floating IP for every VM
use_floatingip: True
use_floatingip: False
# SSH configuration
ssh_vm_username: 'ubuntu'
ssh_retry_count: 50
private_key_file: './ssh/id_rsa'
# Redis server configuration
# (TEMP) Redis server configuration
redis_server: '172.29.172.180'
redis_server_port: 6379
redis_retry_count: 50
@@ -82,29 +71,26 @@ client:
dest_path: '/var/tmp/nuttcp-7.3.2'
http_tool:
name: 'wrk'
dest_path: '/var/tmp/wrk-4.0.1'
dest_path: '/var/tmp/wrk2-3.1.1'
# HTTP Tool Specific Configs
http_tool_configs:
# Threads to run tests
threads: 1
# Connections to be kept concurrently
# Connections to be kept concurrently per VM
connections: 1000
# Timeout for HTTP requests
timeout: 5
# Connection Type: "Keep-alive", "New"
connection_type: 'Keep-alive'
# Interval between 2 HTTP requests in msec for each connection
interval_per_connection: 1000
# Requested rps per VM (for all connections)
rate_limit: 1000
# Duration of testing tools (seconds)
duration: 30
# Prompt before running benchmarking tools
prompt_before_run: False
# Configs that remain constant
keystone_admin_role: "admin"
cleanup_resources: True
public_key_file: '../ssh/id_rsa.pub'
image_name: 'Scale Image v5'
flavor_type: 'm1.small'
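The switch from wrk to wrk2 replaces the fixed per-connection interval with a target aggregate request rate, which is what rate_limit expresses (wrk2's -R flag). A minimal sketch, assuming wrk2's standard CLI flags and that the wrk binary sits under dest_path; the helper itself is hypothetical:

```python
# Render http_tool_configs into a wrk2 command line (sketch).
def build_wrk2_cmd(dest_path, target_url, threads, connections,
                   rate_limit, duration, timeout, connection_type):
    cmd = '%s/wrk -t%d -c%d -R%d -d%ds --timeout %ds --latency %s' % (
        dest_path, threads, connections, rate_limit,
        duration, timeout, target_url)
    if connection_type != 'Keep-alive':
        cmd += ' -H "Connection: close"'  # one request per connection
    return cmd

# e.g. /var/tmp/wrk2-3.1.1/wrk -t1 -c1000 -R1000 -d30s --timeout 5s \
#      --latency http://<target>/
```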

View File

@@ -108,7 +108,8 @@ class KBScheduler(object):
payload = eval(msg['data'])
vm_name = payload['sender-id']
instance = self.client_dict[vm_name]
if payload['cmd'] == 'READY':
cmd = payload['cmd']
if cmd == 'READY':
# If a READY packet is received, the corresponding VM is up and
# running. We mark the flag for that VM, and skip all READY
# messages received afterwards.
@@ -118,7 +119,7 @@ class KBScheduler(object):
clist[vm_name].up_flag = True
clist.pop(vm_name)
cnt_succ = cnt_succ + 1
elif payload['cmd'] == 'DONE':
elif cmd == 'DONE':
self.result[vm_name] = payload['data']
clist.pop(vm_name)
if self.result[vm_name]['status']:
@@ -128,6 +129,10 @@ class KBScheduler(object):
else:
# Command returned zero, i.e. the command succeeded
cnt_succ = cnt_succ + 1
elif cmd == 'DEBUG':
LOG.info('[%s] %s' % (vm_name, payload['data']))
else:
LOG.error('[%s] received invalid command: %s' % (vm_name, cmd))
LOG.info("%d Succeed, %d Failed, %d Pending... Retry #%d" %
(cnt_succ, cnt_failed, len(clist), retry))
@@ -157,6 +162,7 @@ class KBScheduler(object):
def run_http_test(self):
func = {'cmd': 'run_http_test'}
LOG.info(func)
self.send_cmd('EXEC', 'http', func)
# Give additional 30 seconds for everybody to report results
timeout = self.config.http_tool_configs.duration + 30
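For reference, the messages the scheduler parses above are Python-literal payloads published on the kloudbuster_report channel (hence the eval). A minimal sketch of an agent-side DONE report with redis-py; the VM name and result values are illustrative:

```python
import redis

r = redis.StrictRedis(host='172.29.172.180', port=6379)  # from the config
payload = {
    'sender-id': 'kb-vm-1',                    # hypothetical VM name
    'cmd': 'DONE',
    'data': {'status': 0, 'stdout': '<wrk2 output>', 'stderr': ''},
}
r.publish('kloudbuster_report', repr(payload))  # scheduler eval()s this
```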

View File

@@ -97,6 +97,7 @@ class KB_VM_Agent(object):
self.vm_name = socket.gethostname().lower()
self.orches_chan_name = "kloudbuster_orches"
self.report_chan_name = "kloudbuster_report"
self.last_cmd = None
def setup_channels(self):
# Check for connections to redis server
@@ -139,8 +140,16 @@ class KB_VM_Agent(object):
# Unfortunately, there is no thread.stop() in Python 2.x
self.stop_hello.set()
elif msg['cmd'] == 'EXEC':
cmd_res_tuple = eval('self.exec_' + msg['data']['cmd'] + '()')
cmd_res_dict = dict(zip(("status", "stdout", "stderr"), cmd_res_tuple))
self.last_cmd = ""
try:
cmd_res_tuple = eval('self.exec_' + msg['data']['cmd'] + '()')
cmd_res_dict = dict(zip(("status", "stdout", "stderr"), cmd_res_tuple))
except Exception as exc:
cmd_res_dict = {
"status": 1,
"stdout": self.last_cmd,
"stderr": str(exc)
}
self.report('DONE', msg['client-type'], cmd_res_dict)
elif msg['cmd'] == 'ABORT':
# TODO(Add support to abort a session)
@@ -155,24 +164,26 @@ class KB_VM_Agent(object):
self.process_cmd(msg)
def exec_setup_static_route(self):
cmd = KB_Instance.get_static_route(self.user_data['target_subnet_ip'])
result = self.exec_command(cmd)
self.last_cmd = KB_Instance.get_static_route(self.user_data['target_subnet_ip'])
result = self.exec_command(self.last_cmd)
if (self.user_data['target_subnet_ip'] not in result[1]):
cmd = KB_Instance.add_static_route(self.user_data['target_subnet_ip'],
self.user_data['target_shared_interface_ip'])
return self.exec_command(cmd)
self.last_cmd = \
KB_Instance.add_static_route(self.user_data['target_subnet_ip'],
self.user_data['target_shared_interface_ip'])
return self.exec_command(self.last_cmd)
else:
return (0, '', '')
def exec_check_http_service(self):
cmd = KB_Instance.check_http_service(self.user_data['target_url'])
return self.exec_command(cmd)
self.last_cmd = KB_Instance.check_http_service(self.user_data['target_url'])
return self.exec_command(self.last_cmd)
def exec_run_http_test(self):
cmd = KB_Instance.run_http_test(dest_path=self.user_data['http_tool']['dest_path'],
target_url=self.user_data['target_url'],
**self.user_data['http_tool_configs'])
return self.exec_command(cmd)
self.last_cmd = \
KB_Instance.run_http_test(dest_path=self.user_data['http_tool']['dest_path'],
target_url=self.user_data['target_url'],
**self.user_data['http_tool_configs'])
return self.exec_command(self.last_cmd)
if __name__ == "__main__":

View File

@@ -135,19 +135,21 @@ class KloudBuster(object):
4. Networks per router
5. Instances per network
"""
def __init__(self, cred, testing_cred):
def __init__(self, cred, testing_cred, server_cfg, client_cfg):
# List of tenant objects to keep track of all tenants
self.tenant_list = []
self.tenant = None
self.tenant_list_testing = []
self.tenant_testing = None
self.server_cfg = server_cfg
self.client_cfg = client_cfg
# TODO(check on same auth_url instead)
if cred == testing_cred:
self.single_cloud = True
else:
self.single_cloud = False
self.kloud = Kloud(config_scale.server, cred)
self.testing_kloud = Kloud(config_scale.client, testing_cred, testing_side=True)
self.kloud = Kloud(server_cfg, cred)
self.testing_kloud = Kloud(client_cfg, testing_cred, testing_side=True)
self.final_result = None
def print_provision_info(self):
@@ -218,14 +220,14 @@ class KloudBuster(object):
client_list = self.testing_kloud.get_all_instances()
server_list = self.kloud.get_all_instances()
kbscheduler = kb_scheduler.KBScheduler(client_list,
config_scale.client,
self.client_cfg,
self.single_cloud)
kbscheduler.run()
self.final_result = kbscheduler.tool_result
self.final_result['total_server_vms'] = len(server_list)
self.final_result['total_client_vms'] = len(client_list)
self.final_result['total_connetcions'] =\
len(client_list) * config_scale.client.http_tool_configs.connections
self.final_result['total_connections'] =\
len(client_list) * self.client_cfg.http_tool_configs.connections
LOG.info(self.final_result)
except KeyboardInterrupt:
traceback.format_exc()
@@ -234,12 +236,12 @@ class KloudBuster(object):
# Cleanup: start with tested side first
# then testing side last (order is important because of the shared network)
if config_scale.server['cleanup_resources']:
if self.server_cfg['cleanup_resources']:
try:
self.kloud.delete_resources()
except Exception:
traceback.print_exc()
if config_scale.client['cleanup_resources']:
if self.client_cfg['cleanup_resources']:
try:
self.testing_kloud.delete_resources()
except Exception:
@@ -247,6 +249,33 @@ class KloudBuster(object):
if kbscheduler:
kbscheduler.dispose()
def get_total_vm_count(config):
return (config['number_tenants'] * config['users_per_tenant'] *
config['routers_per_user'] * config['networks_per_router'] *
config['vms_per_network'])
# Some hardcoded client side options we do not want users to change
hardcoded_client_cfg = {
# Number of tenants to be created on the cloud
'number_tenants': 1,
# Number of Users to be created inside the tenant
'users_per_tenant': 1,
# Number of routers to be created within the context of each User
# For now support only 1 router per user
'routers_per_user': 1,
# Number of networks to be created within the context of each Router
# Assumes 1 subnet per network
'networks_per_router': 1,
# Number of VM instances to be created within the context of each Network
'vms_per_network': 1,
# Number of security groups per network
'secgroups_per_network': 1
}
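A quick worked example of the sizing rule implemented by get_total_vm_count (the numbers are illustrative):

```python
server_cfg = {'number_tenants': 2, 'users_per_tenant': 2,
              'routers_per_user': 1, 'networks_per_router': 2,
              'vms_per_network': 2}
# 2 tenants * 2 users * 1 router * 2 networks * 2 VMs = 16 server VMs,
# so the single client network is later sized to 16 VMs (1:1).
assert get_total_vm_count(server_cfg) == 16
```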
if __name__ == '__main__':
# The default configuration file for KloudBuster
@@ -288,6 +317,33 @@ if __name__ == '__main__':
alt_config = configure.Configuration.from_file(CONF.config).configure()
config_scale = config_scale.merge(alt_config)
# Initialize the key pair name
if config_scale['public_key_file']:
# verify the public key file exists
if not os.path.exists(config_scale['public_key_file']):
LOG.error('Error: Invalid public key file: ' + config_scale['public_key_file'])
sys.exit(1)
else:
# pick the user's public key if there is one
pub_key = os.path.expanduser('~/.ssh/id_rsa.pub')
if os.path.isfile(pub_key):
config_scale['public_key_file'] = pub_key
LOG.info('Using %s as public key for all test VMs' % (pub_key))
# A bit of config dict surgery, extract out the client and server side
# and transplant the remaining (common part) into the client and server dict
server_side_cfg = config_scale.pop('server')
client_side_cfg = config_scale.pop('client')
server_side_cfg.update(config_scale)
client_side_cfg.update(config_scale)
# Hardcode a few client side options
client_side_cfg.update(hardcoded_client_cfg)
# Adjust the VMs per network on the client side to match the total
# VMs on the server side (1:1)
client_side_cfg['vms_per_network'] = get_total_vm_count(server_side_cfg)
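Note the merge order: each section first absorbs the common options (which therefore override any same-named key inside the section), then the hardcoded client values and the 1:1 rule are applied on top. A toy illustration:

```python
common = {'image_name': 'Scale Image v6a', 'flavor_type': 'm1.small'}
client_side = {'vms_per_network': 2}       # from the 'client' section
client_side.update(common)                 # transplant common options
client_side.update(hardcoded_client_cfg)   # vms_per_network -> 1
client_side['vms_per_network'] = 16        # then matched to server total
```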
# Retrieve the credentials
cred = credentials.Credentials(CONF.tested_rc, CONF.passwd_tested, CONF.no_env)
if CONF.testing_rc and CONF.testing_rc != CONF.tested_rc:
@@ -303,7 +359,7 @@ if __name__ == '__main__':
# The KloudBuster class is just a wrapper class that
# leverages the tenant and user classes for resource creation
# and deletion
kloudbuster = KloudBuster(cred, cred_testing)
kloudbuster = KloudBuster(cred, cred_testing, server_side_cfg, client_side_cfg)
kloudbuster.run()
if CONF.json:

View File

@@ -13,8 +13,6 @@
# under the License.
#
import sshutils
from base_compute import BaseCompute
import log as logging
from wrk_tool import WrkTool
@@ -99,15 +97,6 @@ class PerfInstance(BaseCompute):
res['results'] = http_tool_res
return res
# Setup the ssh connectivity
# Returns True if success
def setup_ssh(self, host_access):
# used for displaying the source IP in json results
self.ssh_access = host_access
self.ssh = sshutils.SSH(self.ssh_access,
connect_retry_count=self.config.ssh_retry_count)
return True
# Send a command on the ssh session
def exec_command(self, cmd, timeout=30):
(status, cmd_output, err) = self.ssh.execute(cmd, timeout=timeout)

View File

@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import base_compute
import base_network
import keystoneclient.openstack.common.apiclient.exceptions as keystone_exception
import log as logging
@@ -43,6 +44,9 @@ class User(object):
self.neutron_client = None
self.nova_client = None
self.admin_user = self._get_user()
# Each user is associated with at most 1 key pair
self.key_pair = None
self.key_name = None
# Create the user within the given tenant and associate the
# admin role with the user. We need the admin role for the user
@@ -98,6 +102,10 @@ class User(object):
def delete_resources(self):
LOG.info("Deleting all user resources for user %s" % self.user_name)
# Delete key pair
if self.key_pair:
self.key_pair.remove_public_key()
# Delete all user routers
for router in self.router_list:
router.delete_router()
@@ -131,6 +139,13 @@ class User(object):
self.nova_client = Client(**creden_nova)
config_scale = self.tenant.kloud.scale_cfg
# Create the user's keypair if configured
if config_scale.public_key_file:
self.key_pair = base_compute.KeyPair(self.nova_client)
self.key_name = self.user_name + '-K'
self.key_pair.add_public_key(self.key_name, config_scale.public_key_file)
# Find the external network that routers need to attach to
# if redis_server is configured, we need to attach the router to the
# external network in order to reach the redis_server
@@ -138,6 +153,7 @@ class User(object):
external_network = base_network.find_external_network(self.neutron_client)
else:
external_network = None
# Create the required number of routers and append them to router list
LOG.info("Creating routers and networks for user %s" % self.user_name)
for router_count in range(config_scale['routers_per_user']):
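For context, a minimal sketch of what the per-user key pair setup above amounts to with python-novaclient; base_compute.KeyPair is assumed to wrap calls like these:

```python
import os

def add_user_keypair(nova_client, user_name, public_key_file):
    key_name = user_name + '-K'
    with open(os.path.expanduser(public_key_file)) as pub:
        nova_client.keypairs.create(key_name, public_key=pub.read())
    return key_name
```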