Browse Source

Initial commit to openstack/kloudbuster

Change-Id: Id7e009e3a9ed61e86c45c8e4839208ecfa30bd77
changes/81/208681/1
Yichen Wang 7 years ago
parent
commit
90fbf3012b
  1. 4
      .coveragerc
  2. 8
      .dockerignore
  3. 5
      .gitignore
  4. 2
      .mailmap
  5. 17
      CONTRIBUTING.rst
  6. 23
      Dockerfile
  7. 2
      HACKING.rst
  8. 133
      README.rst
  9. 185
      cfg.default.yaml
  10. 10
      cfg.existing.yaml
  11. 463
      compute.py
  12. 110
      credentials.py
  13. 8
      doc/Makefile
  14. 409
      doc/source/_static/example.json
  15. 23
      doc/source/conf.py
  16. 113
      doc/source/contributing.rst
  17. BIN
      doc/source/images/flows.png
  18. BIN
      doc/source/images/genchart-sample.png
  19. 19
      doc/source/implementation.rst
  20. 21
      doc/source/index.rst
  21. 117
      doc/source/installation.rst
  22. 9
      doc/source/issue.rst
  23. 33
      doc/source/setup.rst
  24. 322
      doc/source/usage.rst
  25. 273
      genchart.py
  26. 320
      instance.py
  27. 206
      iperf_tool.py
  28. 2
      kloudbuster/__init__.py
  29. 0
      kloudbuster/base_compute.py
  30. 0
      kloudbuster/base_network.py
  31. 0
      kloudbuster/cfg.scale.yaml
  32. 0
      kloudbuster/cfg.tenants.yaml
  33. 0
      kloudbuster/cfg.topo.yaml
  34. 0
      kloudbuster/credentials.py
  35. 0
      kloudbuster/dib/README.rst
  36. 0
      kloudbuster/dib/Vagrantfile
  37. 0
      kloudbuster/dib/build-image.sh
  38. 0
      kloudbuster/dib/elements/kloudbuster/README.rst
  39. 0
      kloudbuster/dib/elements/kloudbuster/element-deps
  40. 0
      kloudbuster/dib/elements/kloudbuster/package-installs.yaml
  41. 0
      kloudbuster/dib/elements/kloudbuster/post-install.d/01-kb-script
  42. 0
      kloudbuster/dib/elements/kloudbuster/post-install.d/99-cloudcfg-edit
  43. 0
      kloudbuster/dib/elements/kloudbuster/static/etc/nginx/nginx.conf
  44. 0
      kloudbuster/dib/elements/kloudbuster/static/kb_test/kb_vm_agent.py
  45. 0
      kloudbuster/force_cleanup
  46. 0
      kloudbuster/kb_config.py
  47. 0
      kloudbuster/kb_gen_chart.py
  48. 0
      kloudbuster/kb_runner.py
  49. 0
      kloudbuster/kb_scheduler.py
  50. 0
      kloudbuster/kb_server/MANIFEST.in
  51. 0
      kloudbuster/kb_server/README
  52. 0
      kloudbuster/kb_server/config.py
  53. 0
      kloudbuster/kb_server/kb_server/__init__.py
  54. 0
      kloudbuster/kb_server/kb_server/app.py
  55. 0
      kloudbuster/kb_server/kb_server/controllers/__init__.py
  56. 0
      kloudbuster/kb_server/kb_server/controllers/config.py
  57. 0
      kloudbuster/kb_server/kb_server/controllers/root.py
  58. 0
      kloudbuster/kb_server/kb_server/model/__init__.py
  59. 0
      kloudbuster/kb_server/setup.cfg
  60. 0
      kloudbuster/kb_server/setup.py
  61. 0
      kloudbuster/kb_tpl.jinja
  62. 0
      kloudbuster/kb_vm_agent.py
  63. 6
      kloudbuster/kloudbuster.py
  64. 0
      kloudbuster/log.py
  65. 0
      kloudbuster/perf_instance.py
  66. 0
      kloudbuster/perf_tool.py
  67. 0
      kloudbuster/tenant.py
  68. 0
      kloudbuster/tests/__init__.py
  69. 0
      kloudbuster/tests/base.py
  70. 8
      kloudbuster/tests/test_kloudbuster.py
  71. 0
      kloudbuster/users.py
  72. 0
      kloudbuster/wrk_tool.py
  73. 443
      monitor.py
  74. 390
      network.py
  75. 203
      nuttcp_tool.py
  76. 2
      openstack-common.conf
  77. 110
      perf_instance.py
  78. 293
      perf_tool.py
  79. 142
      pns_mongo.py
  80. 328
      pnsdb_summary.py
  81. 17
      pylintrc
  82. 8
      requirements.txt
  83. 2
      run_tests.sh
  84. 8
      scale/README
  85. 668
      scale/sshutils.py
  86. 23
      setup.cfg
  87. 9
      setup.py
  88. 668
      sshutils.py
  89. 7
      test-requirements.txt
  90. BIN
      tools/iperf
  91. BIN
      tools/nuttcp-7.3.2
  92. 28
      tox.ini
  93. 897
      vmtp.py

4
.coveragerc

@ -1,7 +1,7 @@
[run]
branch = True
source = vmtp
omit = vmtp/tests/*,vmtp/openstack/*
source = kloudbuster
omit = kloudbuster/openstack/*
[report]
ignore-errors = True

8
.dockerignore

@ -1,8 +0,0 @@
ansible
installer
requirements-dev.txt
cloud_init*
.git
.gitignore
.gitreview
.pylintrc

5
.gitignore vendored

@ -54,11 +54,8 @@ ChangeLog
*cscope*
.ropeproject/
# vmtp
*.local*
*.json
# KloudBuster
*.json
*.html
*.qcow2
scale/dib/kloudbuster.d/

2
.mailmap

@ -2,4 +2,4 @@
# <preferred e-mail> <other e-mail 1>
# <preferred e-mail> <other e-mail 2>
vmtp-core@cisco.com
kloudbuster-core@lists.launchpad.net

17
CONTRIBUTING.rst

@ -0,0 +1,17 @@
If you would like to contribute to the development of OpenStack, you must
follow the steps in this page:
http://docs.openstack.org/infra/manual/developers.html
If you already have a good understanding of how the system works and your
OpenStack accounts are set up, you can skip to the development workflow
section of this documentation to learn how changes to OpenStack should be
submitted for review via the Gerrit tool:
http://docs.openstack.org/infra/manual/developers.html#development-workflow
Pull requests submitted through GitHub will be ignored.
Bugs should be filed on Launchpad, not GitHub:
https://bugs.launchpad.net/kloudbuster

23
Dockerfile

@ -1,23 +0,0 @@
# docker file for creating a container that has vmtp installed and ready to use
FROM ubuntu:14.04
MAINTAINER vmtp-core <vmtp-core@lists.launchpad.net>
# Install VMTP script and dependencies
RUN apt-get update && apt-get install -y \
lib32z1-dev \
libffi-dev \
libssl-dev \
libxml2-dev \
libxslt1-dev \
libyaml-dev \
openssh-client \
python \
python-dev \
python-lxml \
python-pip \
&& rm -rf /var/lib/apt/lists/*
COPY . /vmtp/
RUN pip install -r /vmtp/requirements.txt

2
HACKING.rst

@ -1,4 +1,4 @@
vmtp Style Commandments
kloudbuster Style Commandments
===============================================
Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/

133
README.rst

@ -1,126 +1,19 @@
========
Overview
========
===============================
kloudbuster
===============================
VMTP is a data path performance measurement tool for OpenStack clouds.
KloudBuster is a open source tool that allows anybody to load any Neutron OpenStack cloud at massive data plane scale swiftly and effortlessly.
Please feel here a long description which must be at least 3 lines wrapped on
80 cols, so that distribution package maintainers can use it in their packages.
Note that this is a hard requirement.
* Free software: Apache license
* Documentation: http://docs.openstack.org/developer/kloudbuster
* Source: http://git.openstack.org/cgit/openstack/kloudbuster
* Bugs: http://bugs.launchpad.net/kloudbuster
Features
--------
Have you ever had the need for a quick, simple and automatable way to get VM-level or host-level single-flow throughput and latency numbers from any OpenStack cloud, and take into account various Neutron topologies? Or check whether some OpenStack configuration option, Neutron plug-in performs to expectation or if there is any data path impact for upgrading to a different OpenStack release?
VMTP is a small python application that will automatically perform ping connectivity, round trip time measurement (latency) and TCP/UDP throughput measurement for the following East/West flows on any OpenStack deployment:
* VM to VM same network (private fixed IP, flow #1)
* VM to VM different network using fixed IP (same as intra-tenant L3 fixed IP, flow #2)
* VM to VM different network using floating IP and NAT (same as floating IP inter-tenant L3, flow #3)
Optionally, when an external Linux host is available for testing North/South flows:
* External host/VM download and upload throughput/latency (L3/floating IP, flow #4 and #5)
.. image:: images/flows.png
Optionally, when SSH login to any Linux host (native or virtual) is available:
* Host to host process-level throughput/latency (intra-node and inter-node)
Optionally, VMTP can extract automatically CPU usage from all native hosts in the cloud during the throughput tests, provided the Ganglia monitoring service (gmond) is installed and enabled on those hosts.
For VM-related flows, VMTP will automatically create the necessary OpenStack resources (router, networks, subnets, key pairs, security groups, test VMs) using the public OpenStack API, install the test tools then orchestrate them to gather the throughput measurements then cleanup all related resources before exiting.
See the usage page for the description of all the command line arguments supported by VMTP.
Pre-requisite
-------------
VMTP runs on any Python 2.X envirnment (validated on Linux and MacOSX).
For VM related performance measurements
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* Access to the cloud Horizon Dashboard (to retrieve the openrc file)
* 1 working external network pre-configured on the cloud (VMTP will pick the first one found)
* At least 2 floating IP if an external router is configured or 3 floating IP if there is no external router configured
* 1 Linux image available in OpenStack (any distribution)
* A configuration file that is properly set for the cloud to test (see "Configuration File" section below)
For native/external host throughputs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* A public key must be installed on the target hosts (see ssh password-less access below)
For pre-existing native host throughputs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* Firewalls must be configured to allow TCP/UDP ports 5001 and TCP port 5002
For running VMTP Docker Image
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* Docker is installed. See `here <https://docs.docker.com/installation/#installation/>`_ for instructions.
Sample Results Output
---------------------
VMTP will display the results to stdout with the following data:
.. code::
- Session general information (date, auth_url, OpenStack encaps, VMTP version, OpenStack release, Agent type, CPU...)
- List of results per flow, for each flow:
| flow name
| to and from IP addresses
| to and from availability zones (if VM)
| - results:
| | -TCP
| | | packet size
| | | throughput value
| | | number of retransmissions
| | | round trip time in ms
| | | - CPU usage (if enabled), for each host in the openstack cluster
| | | | baseline (before test starts)
| | | | 1 or more readings during test
| | -UDP
| | | - for each packet size
| | | | throughput value
| | | | loss rate
| | | | CPU usage (if enabled)
| | - ICMP
| | | average, min, max and stddev round trip time in ms
Detailed results can also be stored in a file in JSON format using the *--json* command line argument and/or stored directly into a MongoDB server. See :download:`here <_static/example.json>` for an example JSON file that is generated by VMTP.
The packaged python tool genchart.py can be used to generate from the JSON result files column charts in HTML format visible from any browser.
Example of column chart generated by genchart.py:
.. image:: images/genchart-sample.png
Limitations and Caveats
-----------------------
VMTP only measures performance for single-flows at the socket/TCP/UDP level (in a VM or natively). Measured numbers therefore reflect what most applications will see.
It is not designed to measure driver level data path performance from inside a VM (such as bypassing the kernel TCP stack and write directly to virtio), there are better tools that can address this type of mesurement.
Licensing
---------
VMTP is licensed under Apache License 2.0 and comes packaged with the following tools for convenience:
* iperf: BSD License (https://iperf.fr/license.html, source code: https://iperf.fr)
* nuttcp: GPL v2 License (http://nuttcp.net/nuttcp/beta/LICENSE, source code: http://nuttcp.net/nuttcp/beta/nuttcp-7.3.2.c)
Redistribution of nuttcp and iperf is governed by their respective licenses. Please make sure you read and understand each one before further redistributing VMTP downstream.
Links
-----
* Documentation: http://vmtp.readthedocs.org/en/latest
* Source: http://git.openstack.org/cgit/stackforge/vmtp
* Supports/Bugs: https://launchpad.net/vmtp
* Mailing List: vmtp-core@lists.launchpad.net
* TODO

185
cfg.default.yaml

@ -1,185 +0,0 @@
#
# VMTP default configuration file
#
# This configuration file is ALWAYS loaded by VMTP and should never be modified by users.
# To specify your own property values, always define them in a separate config file
# and pass that file to the script using -c or --config <file>
# Property values in that config file will override the default values in the current file
#
---
# Name of the image to use for launching the test VMs. This name must be
# the exact same name used in OpenStack (as shown from 'nova image-list')
# Any image running Linux should work (Fedora, Ubuntu, CentOS...)
image_name: 'Ubuntu Server 14.04'
#image_name: 'Fedora 21'
# User name to use to ssh to the test VMs
# This is specific to the image being used
ssh_vm_username: 'ubuntu'
#ssh_vm_username: fedora
# Name of the flavor to use for the test VMs
# This name must be an exact match to a flavor name known by the target
# OpenStack deployment (as shown from 'nova flavor-list')
flavor_type: 'm1.small'
# Name of the availability zone to use for the test VMs
# Must be one of the zones listed by 'nova availability-zone-list'
# If the zone selected contains more than 1 compute node, the script
# will determine inter-node and intra-node throughput. If it contains only
# 1 compute node, only intra-node troughput will be measured.
# If empty (default), VMTP will automatically pick the first 2 hosts
# that are compute nodes regardless of the availability zone
#availability_zone: 'nova'
availability_zone:
# DNS server IP addresses to use for the VM (list of 1 or more DNS servers)
# This default DNS server is available on the Internet,
# Change this to use a different DNS server if necessary,
dns_nameservers: [ '8.8.8.8' ]
# VMTP can automatically download a VM image if the image named by
# image_name is missing, for that you need to specify a URL where
# the image can be retrieved
#
# A link to a Ubuntu Server 14.04 qcow2 image can be used here:
# https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
vm_image_url: ''
# -----------------------------------------------------------------------------
# These variables are not likely to be changed
# Set this variable to a network name if you want the script to reuse
# a specific existing external network. If empty, the script will reuse the
# first external network it can find (the cloud must have at least 1
# external network defined and available for use)
# When set, ignore floating ip creation and reuse existing management network for tests
reuse_network_name :
# Use of the script for special deployments
floating_ip: True
# Set this to an existing VM name if the script should not create new VM
# and reuse existing VM
reuse_existing_vm :
# Set config drive to true to bypass metadata service and use config drive
# An option of config_drive to True is provided to nova boot to enable this
config_drive:
# ipv6 mode. Set this to one of the following 3 modes
# slaac : VM obtains IPV6 address from Openstack radvd using SLAAC
# dhcpv6-stateful : VM obtains ipv6 address from dnsmasq using DHCPv6 stateful
# dhcpv6-stateless : VM obtains ipv6 address from Openstack radvd using SLAAC and options from dnsmasq
# If left blank use ipv4
ipv6_mode:
# Default name for the router to use to connect the internal mgmt network
# with the external network. If a router exists with this name it will be
# reused, otherwise a new router will be created
router_name: 'pns-router'
# Defaul names for the internal networks used by the
# script. If an existing network with this name exists it will be reused.
# Otherwise a new internal network will be created with that name.
# 2 networks are needed to test the case of network to network communication
internal_network_name: ['pns-internal-net', 'pns-internal-net2']
# Name of the subnets associated to the internal mgmt network
internal_subnet_name: ['pns-internal-subnet', 'pns-internal-subnet2']
# Name of the subnets for ipv6
internal_subnet_name_ipv6: ['pns-internal-v6-subnet','pns-internal-v6-subnet2']
# Default CIDRs to use for the internal mgmt subnet
internal_cidr: ['192.168.1.0/24' , '192.168.2.0/24']
# Default CIDRs to use for data network for ipv6
internal_cidr_v6: ['2001:45::/64','2001:46::/64']
# The public and private keys to use to ssh to all targets (VMs, containers, hosts)
# By default the SSH library will try several methods to authenticate:
# - password if provided on the command line
# - user's own key pair (under the home directory $HOME) if already setup
# - the below key pair if not empty
# If you want to use a specific key pair, specify the key pair files here.
# This can be a pathname that is absolute or relative to the current directory
public_key_file:
private_key_file:
# Name of the P&S public key in OpenStack to create for all test VMs
public_key_name: 'pns_public_key'
# name of the server VM
vm_name_server: 'TestServer'
# name of the client VM
vm_name_client: 'TestClient'
# name of the security group to create and use
security_group_name: 'pns-security'
# Location to the performance test tools.
# If relative, is relative to the vmtp directory
perf_tool_path: './tools'
# ping variables
ping_count: 2
ping_pass_threshold: 80
# Max retry count for ssh to a VM (5 seconds between retries)
ssh_retry_count: 50
# General retry count
generic_retry_count: 50
# Times to run when measuring TCP Throughput
tcp_tp_loop_count: 3
# TCP throughput list of packet sizes to measure
# Can be overridden at the command line using --tcpbuf
tcp_pkt_sizes: [65536]
# UDP throughput list of packet sizes to measure
# By default we measure for small, medium and large packets
# Can be overridden at the command line using --udpbuf
udp_pkt_sizes: [128, 1024, 8192]
# UDP packet loss rate threshold in percentage beyond which bandwidth
# iterations stop and below which iteration with a higher
# bandwidth continues
# The first number is the minimal loss rate (inclusive)
# The second number is the maximum loss rate (inclusive)
# Iteration to find the "optimal" bandwidth will stop as soon as the loss rate
# falls within that range: min <= loss_rate <= max
# The final throughput measurement may return a loss rate out of this range
# as that measurement is taken on a longer time than when iterating to find
# the optimal throughput
#
udp_loss_rate_range: [2, 5]
# The default bandwidth limit (in Kbps) for TCP/UDP flow measurement
# 0 means unlimited, which can be overridden at the command line using --bandwidth
vm_bandwidth: 0
#######################################
# VMTP MongoDB Connection information
#######################################
########################################
# Default MongoDB port is 27017, to override
#vmtp_mongod_port: <port no>
########################################
# MongoDB pns database.
# use "official_db" for offical runs only.
########################################
vmtp_db: "client_db"
########################################
# MongoDB collection name.
########################################
vmtp_collection: "pns_web_entry"

10
cfg.existing.yaml

@ -1,10 +0,0 @@
#
# Example of configuration where we froce the use of a specific external network and
# use provider network (no floating IP)
reuse_network_name : 'prov1'
# Floating ip false is a provider network where we simply attach to it
floating_ip : False
# Floating ip is true by default:
# attach to existing network, create a floating ip and attach instance to it

463
compute.py

@ -1,463 +0,0 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''Module for Openstack compute operations'''
import os
import subprocess
import time
import novaclient
import novaclient.exceptions as exceptions
class Compute(object):
def __init__(self, nova_client, config):
self.novaclient = nova_client
self.config = config
def find_image(self, image_name):
try:
image = self.novaclient.images.find(name=image_name)
return image
except novaclient.exceptions.NotFound:
return None
def upload_image_via_url(self, glance_client, final_image_name, image_url, retry_count=60):
'''
Directly uploads image to Nova via URL if image is not present
'''
# Here is the deal:
# Idealy, we should better to use the python library glanceclient to perform the
# image uploades. However, due to a limitation of the v2.0 API right now, it is
# impossible to tell Glance to download the image from a URL directly.
#
# There are two steps to create the image:
# (1) Store the binary image data into Glance;
# (2) Store the metadata about the image into Glance;
# PS: The order does not matter.
#
# The REST API allows to do two steps in one if a Location header is provided with
# the POST request. (REF: http://developer.openstack.org/api-ref-image-v2.html)
#
# However the python API doesn't support a customized header in POST request.
# So we have to do two steps in two calls.
#
# The good thing is: the API does support (2) perfectly, but for (1) it is only
# accepting the data from local, not remote URL. So... Ur... Let's keep the CLI
# version as the workaround for now.
# # upload in glance
# image = glance_client.images.create(
# name=str(final_image_name), disk_format="qcow2", container_format="bare",
# Location=image_url)
# glance_client.images.add_location(image.id, image_url, image)
# sys.exit(0)
# for retry_attempt in range(retry_count):
# if image.status == "active":
# print 'Image: %s successfully uploaded to Nova' % (final_image_name)
# return 1
# # Sleep between retries
# if self.config.debug:
# print "Image is not yet active, retrying %s of %s... [%s]" \
# % ((retry_attempt + 1), retry_count, image.status)
# time.sleep(5)
# upload in glance
glance_cmd = "glance image-create --name=\"" + str(final_image_name) + \
"\" --disk-format=qcow2" + " --container-format=bare " + \
" --is-public True --copy-from " + image_url
if self.config.debug:
print "Will update image to glance via CLI: %s" % (glance_cmd)
subprocess.check_output(glance_cmd, shell=True)
# check for the image in glance
glance_check_cmd = "glance image-list --name \"" + str(final_image_name) + "\""
for retry_attempt in range(retry_count):
result = subprocess.check_output(glance_check_cmd, shell=True)
if "active" in result:
print 'Image: %s successfully uploaded to Nova' % (final_image_name)
return 1
# Sleep between retries
if self.config.debug:
print "Image not yet active, retrying %s of %s..." \
% ((retry_attempt + 1), retry_count)
time.sleep(2)
print 'ERROR: Cannot upload image %s from URL: %s' % (final_image_name, image_url)
return 0
# Remove keypair name from openstack if exists
def remove_public_key(self, name):
keypair_list = self.novaclient.keypairs.list()
for key in keypair_list:
if key.name == name:
self.novaclient.keypairs.delete(name)
print 'Removed public key %s' % (name)
break
# Test if keypair file is present if not create it
def create_keypair(self, name, private_key_pair_file):
self.remove_public_key(name)
keypair = self.novaclient.keypairs.create(name)
# Now write the keypair to the file if requested
if private_key_pair_file:
kpf = os.open(private_key_pair_file,
os.O_WRONLY | os.O_CREAT, 0o600)
with os.fdopen(kpf, 'w') as kpf:
kpf.write(keypair.private_key)
return keypair
# Add an existing public key to openstack
def add_public_key(self, name, public_key_file):
self.remove_public_key(name)
# extract the public key from the file
public_key = None
try:
with open(os.path.expanduser(public_key_file)) as pkf:
public_key = pkf.read()
except IOError as exc:
print 'ERROR: Cannot open public key file %s: %s' % \
(public_key_file, exc)
return None
keypair = self.novaclient.keypairs.create(name, public_key)
return keypair
def init_key_pair(self, kp_name, ssh_access):
'''Initialize the key pair for all test VMs
if a key pair is specified in access, use that key pair else
create a temporary key pair
'''
if ssh_access.public_key_file:
return self.add_public_key(kp_name, ssh_access.public_key_file)
else:
keypair = self.create_keypair(kp_name, None)
ssh_access.private_key = keypair.private_key
return keypair
def find_network(self, label):
net = self.novaclient.networks.find(label=label)
return net
# Create a server instance with name vmname
# and check that it gets into the ACTIVE state
def create_server(self, vmname, image, flavor, key_name,
nic, sec_group, avail_zone=None, user_data=None,
config_drive=None,
retry_count=10):
if sec_group:
security_groups = [sec_group.id]
else:
security_groups = None
# Also attach the created security group for the test
instance = self.novaclient.servers.create(name=vmname,
image=image,
flavor=flavor,
key_name=key_name,
nics=nic,
availability_zone=avail_zone,
userdata=user_data,
config_drive=config_drive,
security_groups=security_groups)
if not instance:
return None
# Verify that the instance gets into the ACTIVE state
for retry_attempt in range(retry_count):
instance = self.novaclient.servers.get(instance.id)
if instance.status == 'ACTIVE':
return instance
if instance.status == 'ERROR':
print 'Instance creation error:' + instance.fault['message']
break
if self.config.debug:
print "[%s] VM status=%s, retrying %s of %s..." \
% (vmname, instance.status, (retry_attempt + 1), retry_count)
time.sleep(2)
# instance not in ACTIVE state
print('Instance failed status=' + instance.status)
self.delete_server(instance)
return None
def get_server_list(self):
servers_list = self.novaclient.servers.list()
return servers_list
def find_floating_ips(self):
floating_ip = self.novaclient.floating_ips.list()
return floating_ip
# Return the server network for a server
def find_server_network(self, vmname):
servers_list = self.get_server_list()
for server in servers_list:
if server.name == vmname and server.status == "ACTIVE":
return server.networks
return None
# Returns True if server is present false if not.
# Retry for a few seconds since after VM creation sometimes
# it takes a while to show up
def find_server(self, vmname, retry_count):
for retry_attempt in range(retry_count):
servers_list = self.get_server_list()
for server in servers_list:
if server.name == vmname and server.status == "ACTIVE":
return True
# Sleep between retries
if self.config.debug:
print "[%s] VM not yet found, retrying %s of %s..." \
% (vmname, (retry_attempt + 1), retry_count)
time.sleep(2)
print "[%s] VM not found, after %s attempts" % (vmname, retry_count)
return False
# Returns True if server is found and deleted/False if not,
# retry the delete if there is a delay
def delete_server_by_name(self, vmname):
servers_list = self.get_server_list()
for server in servers_list:
if server.name == vmname:
print 'deleting server %s' % (server)
self.novaclient.servers.delete(server)
return True
return False
def delete_server(self, server):
self.novaclient.servers.delete(server)
def find_flavor(self, flavor_type):
flavor = self.novaclient.flavors.find(name=flavor_type)
return flavor
def normalize_az_host(self, az, host):
if not az:
az = self.config.availability_zone
return az + ':' + host
def auto_fill_az(self, host_list, host):
'''
no az provided, if there is a host list we can auto-fill the az
else we use the configured az if available
else we return an error
'''
if host_list:
for hyp in host_list:
if hyp.host_name == host:
return self.normalize_az_host(hyp.zone, host)
# no match on host
print('Error: passed host name does not exist: ' + host)
return None
if self.config.availability_zone:
return self.normalize_az_host(None, host)
print('Error: --hypervisor passed without an az and no az configured')
return None
def sanitize_az_host(self, host_list, az_host):
'''
host_list: list of hosts as retrieved from openstack (can be empty)
az_host: either a host or a az:host string
if a host, will check host is in the list, find the corresponding az and
return az:host
if az:host is passed will check the host is in the list and az matches
if host_list is empty, will return the configured az if there is no
az passed
'''
if ':' in az_host:
# no host_list, return as is (no check)
if not host_list:
return az_host
# if there is a host_list, extract and verify the az and host
az_host_list = az_host.split(':')
zone = az_host_list[0]
host = az_host_list[1]
for hyp in host_list:
if hyp.host_name == host:
if hyp.zone == zone:
# matches
return az_host
# else continue - another zone with same host name?
# no match
print('Error: no match for availability zone and host ' + az_host)
return None
else:
return self.auto_fill_az(host_list, az_host)
#
# Return a list of 0, 1 or 2 az:host
#
# The list is computed as follows:
# The list of all hosts is retrieved first from openstack
# if this fails, checks and az auto-fill are disabled
#
# If the user provides a list of hypervisors (--hypervisor)
# that list is checked and returned
#
# If the user provides a configured az name (config.availability_zone)
# up to the first 2 hosts from the list that match the az are returned
#
# If the user did not configure an az name
# up to the first 2 hosts from the list are returned
# Possible return values:
# [ az ]
# [ az:hyp ]
# [ az1:hyp1, az2:hyp2 ]
# [] if an error occurred (error message printed to console)
#
def get_az_host_list(self):
avail_list = []
host_list = []
try:
host_list = self.novaclient.services.list()
except novaclient.exceptions.Forbidden:
print ('Warning: Operation Forbidden: could not retrieve list of hosts'
' (likely no permission)')
# the user has specified a list of 1 or 2 hypervisors to use
if self.config.hypervisors:
for hyp in self.config.hypervisors:
hyp = self.sanitize_az_host(host_list, hyp)
if hyp:
avail_list.append(hyp)
else:
return []
# if the user did not specify an az, insert the configured az
if ':' not in hyp:
if self.config.availability_zone:
hyp = self.normalize_az_host(None, hyp)
else:
return []
# pick first 2 matches at most
if len(avail_list) == 2:
break
print 'Using hypervisors:' + ', '.join(avail_list)
else:
for host in host_list:
# this host must be a compute node
if host.binary != 'nova-compute' or host.state != 'up':
continue
candidate = None
if self.config.availability_zone:
if host.zone == self.config.availability_zone:
candidate = self.normalize_az_host(None, host.host)
else:
candidate = self.normalize_az_host(host.zone, host.host)
if candidate:
avail_list.append(candidate)
# pick first 2 matches at most
if len(avail_list) == 2:
break
# if empty we insert the configured az
if not avail_list:
if not self.config.availability_zone:
print('Error: availability_zone must be configured')
elif host_list:
print('Error: no host matching the selection for availability zone: '
+ self.config.availability_zone)
avail_list = []
else:
avail_list = [self.config.availability_zone]
return avail_list
# Given 2 VMs test if they are running on same Host or not
def check_vm_placement(self, vm_instance1, vm_instance2):
try:
server_instance_1 = self.novaclient.servers.get(vm_instance1)
server_instance_2 = self.novaclient.servers.get(vm_instance2)
if server_instance_1.hostId == server_instance_2.hostId:
return True
else:
return False
except novaclient.exceptions:
print "Exception in retrieving the hostId of servers"
# Create a new security group with appropriate rules
def security_group_create(self):
    """Return the configured security group, creating it when missing.

    Returns None if the newly created group never becomes visible within
    config.generic_retry_count attempts.
    """
    sg_name = self.config.security_group_name
    # check first the security group exists
    # May throw exceptions.NoUniqueMatch or NotFound
    try:
        return self.novaclient.security_groups.find(name=sg_name)
    except exceptions.NotFound:
        group = self.novaclient.security_groups.create(name=sg_name,
                                                       description="PNS Security group")
        # Once created, poll until the group is retrievable, then add rules
        # (this check may no longer be necessary)
        for _ in range(self.config.generic_retry_count):
            group = self.novaclient.security_groups.get(group)
            if not group:
                time.sleep(1)
                continue
            self.security_group_add_rules(group)
            return group
        return None
    # except exceptions.NoUniqueMatch as exc:
    #     raise exc
# Delete a security group
def security_group_delete(self, group):
    """Delete the given security group (no-op when group is falsy)."""
    if not group:
        return
    print("Deleting security group")
    self.novaclient.security_groups.delete(group)
# Add rules to the security group
def security_group_add_rules(self, group):
    """Populate the given security group with the rules the tests need:
    ICMP (ping), SSH (22/tcp) and the perf-tool ports 5001-5002.
    """
    create_rule = self.novaclient.security_group_rules.create
    ipv6_cidr = "::/0"
    ipv6 = self.config.ipv6_mode

    # Allow ping traffic
    create_rule(group.id, ip_protocol="icmp", from_port=-1, to_port=-1)
    if ipv6:
        create_rule(group.id, ip_protocol="icmp", from_port=-1, to_port=-1,
                    cidr=ipv6_cidr)

    # Allow SSH traffic
    create_rule(group.id, ip_protocol="tcp", from_port=22, to_port=22)

    # Allow TCP/UDP traffic for perf tools like iperf/nuttcp
    # 5001: Data traffic (standard iperf data port)
    # 5002: Control traffic (non standard)
    # note that 5000/tcp is already picked by openstack keystone
    if ipv6:
        # IPV6 rules addition
        create_rule(group.id, ip_protocol="tcp", from_port=5001, to_port=5002,
                    cidr=ipv6_cidr)
        create_rule(group.id, ip_protocol="udp", from_port=5001, to_port=5001,
                    cidr=ipv6_cidr)
    else:
        create_rule(group.id, ip_protocol="tcp", from_port=5001, to_port=5002)
        create_rule(group.id, ip_protocol="udp", from_port=5001, to_port=5001)

110
credentials.py

@ -1,110 +0,0 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Module for credentials in Openstack
import getpass
import os
import re
class Credentials(object):
    """Holds OpenStack credentials gathered from an openrc file, the process
    environment (OS_* variables) or command-line arguments.
    """

    def get_credentials(self):
        # Credentials dict with keystone-style keys
        return {
            'username': self.rc_username,
            'password': self.rc_password,
            'auth_url': self.rc_auth_url,
            'tenant_name': self.rc_tenant_name
        }

    def get_nova_credentials(self):
        # Credentials dict with novaclient-style keys (api_key/project_id)
        return {
            'username': self.rc_username,
            'api_key': self.rc_password,
            'auth_url': self.rc_auth_url,
            'project_id': self.rc_tenant_name
        }

    def get_nova_credentials_v2(self):
        # Same as get_nova_credentials with the nova API version pinned to 2
        dct = self.get_nova_credentials()
        dct['version'] = 2
        return dct

    #
    # Read an openrc file and take care of the password
    # The 2 args are passed from the command line and can be None
    #
    def __init__(self, openrc_file, pwd, no_env):
        """
        openrc_file: path to an openrc file, or None to use the environment
        pwd: password from the command line (always wins when provided)
        no_env: when True, never read the OS_* environment variables
        """
        self.rc_password = None
        self.rc_username = None
        self.rc_tenant_name = None
        self.rc_auth_url = None
        success = True

        if openrc_file:
            if os.path.exists(openrc_file):
                export_re = re.compile('export OS_([A-Z_]*)="?(.*)')
                # bug fix: use a context manager so the rc file handle is
                # closed (the original leaked the open file object)
                with open(openrc_file) as rcfile:
                    for line in rcfile:
                        line = line.strip()
                        mstr = export_re.match(line)
                        if not mstr:
                            continue
                        # get rid of possible trailing double quote
                        # (the first one was removed by the re)
                        name = mstr.group(1)
                        value = mstr.group(2)
                        if value.endswith('"'):
                            value = value[:-1]
                        # skip shell variable references, e.g. the usual
                        # password prompt pattern:
                        # echo "Please enter your OpenStack Password: "
                        # read -sr OS_PASSWORD_INPUT
                        # export OS_PASSWORD=$OS_PASSWORD_INPUT
                        if value.startswith('$'):
                            continue
                        # now match against wanted variable names
                        if name == 'USERNAME':
                            self.rc_username = value
                        elif name == 'AUTH_URL':
                            self.rc_auth_url = value
                        elif name == 'TENANT_NAME':
                            self.rc_tenant_name = value
            else:
                print('Error: rc file does not exist %s' % (openrc_file))
                success = False
        elif not no_env:
            # no openrc file passed - we assume the variables have been
            # sourced by the calling shell
            # just check that they are present
            for varname in ['OS_USERNAME', 'OS_AUTH_URL', 'OS_TENANT_NAME']:
                if varname not in os.environ:
                    success = False
            if success:
                self.rc_username = os.environ['OS_USERNAME']
                self.rc_auth_url = os.environ['OS_AUTH_URL']
                self.rc_tenant_name = os.environ['OS_TENANT_NAME']

        # always override with CLI argument if provided
        if pwd:
            self.rc_password = pwd
        # if password not known, check the env variable
        elif self.rc_auth_url and not self.rc_password and success:
            if 'OS_PASSWORD' in os.environ and not no_env:
                self.rc_password = os.environ['OS_PASSWORD']
            else:
                # interactively ask for password
                self.rc_password = getpass.getpass(
                    'Please enter your OpenStack Password: ')
        if not self.rc_password:
            self.rc_password = ""

8
doc/Makefile

@ -85,17 +85,17 @@ qthelp:
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/vmtp.qhcp"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/kloudbuster.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/vmtp.qhc"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/kloudbuster.qhc"
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/vmtp"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/vmtp"
@echo "# mkdir -p $$HOME/.local/share/devhelp/kloudbuster"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/kloudbuster"
@echo "# devhelp"
epub:

409
doc/source/_static/example.json

@ -1,409 +0,0 @@
{
"args": "vmtp.py -c cfg.default.yaml -r ../admin-openrc.sh -p <MASKED> --json juno_ovs_vxlan_2.json --mongod_server 172.29.87.29 --controller-node <MASKED> -d --test_description Yichen's testbed",
"auth_url": "http://172.29.87.180:5000/v2.0",
"cpu_info": "40 * Intel(R) Xeon(R) CPU E5-2660 v2 @ 2.20GHz",
"date": "2015-03-04 22:33:40",
"distro": "CentOS Linux 7",
"encapsulation": "vxlan",
"flows": [
{
"az_from": "nova:hh23-6",
"az_to": "nova:hh23-6",
"desc": "VM to VM same network fixed IP (intra-node)",
"distro_id": "Ubuntu",
"distro_version": "14.04",
"ip_from": "192.168.1.4",
"ip_to": "192.168.1.2",
"results": [
{
"pkt_size": 65536,
"protocol": "TCP",
"rtt_ms": 0.28,
"throughput_kbps": 14318464,
"tool": "nuttcp-7.3.2"
},
{
"pkt_size": 65536,
"protocol": "TCP",
"rtt_ms": 0.12,
"throughput_kbps": 14426352,
"tool": "nuttcp-7.3.2"
},
{
"pkt_size": 65536,
"protocol": "TCP",
"rtt_ms": 0.13,
"throughput_kbps": 14247563,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 0.11,
"pkt_size": 128,
"protocol": "UDP",
"throughput_kbps": 127744,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 0.12,
"pkt_size": 1024,
"protocol": "UDP",
"throughput_kbps": 1021703,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 0.17,
"pkt_size": 8192,
"protocol": "UDP",
"throughput_kbps": 2496542,
"tool": "nuttcp-7.3.2"
},
{
"protocol": "ICMP",
"rtt_avg_ms": "0.321",
"rtt_max_ms": "0.741",
"rtt_min_ms": "0.187",
"rtt_stddev": "0.212",
"rx_packets": "5",
"tool": "ping",
"tx_packets": "5"
}
]
},
{
"az_from": "nova:hh23-6",
"az_to": "nova:hh23-6",
"desc": "VM to VM different network fixed IP (intra-node)",
"distro_id": "Ubuntu",
"distro_version": "14.04",
"ip_from": "192.168.2.2",
"ip_to": "192.168.1.2",
"results": [
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 116,
"rtt_ms": 0.67,
"throughput_kbps": 1292957,
"tool": "nuttcp-7.3.2"
},
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 218,
"rtt_ms": 0.58,
"throughput_kbps": 1602299,
"tool": "nuttcp-7.3.2"
},
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 606,
"rtt_ms": 0.59,
"throughput_kbps": 1583186,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 0.94,
"pkt_size": 128,
"protocol": "UDP",
"throughput_kbps": 152745,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 0.39,
"pkt_size": 1024,
"protocol": "UDP",
"throughput_kbps": 1222784,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 2.52,
"pkt_size": 8192,
"protocol": "UDP",
"throughput_kbps": 1342442,
"tool": "nuttcp-7.3.2"
},
{
"protocol": "ICMP",
"rtt_avg_ms": "0.771",
"rtt_max_ms": "1.126",
"rtt_min_ms": "0.677",
"rtt_stddev": "0.180",
"rx_packets": "5",
"tool": "ping",
"tx_packets": "5"
}
]
},
{
"az_from": "nova:hh23-6",
"az_to": "nova:hh23-6",
"desc": "VM to VM different network floating IP (intra-node)",
"distro_id": "Ubuntu",
"distro_version": "14.04",
"ip_from": "192.168.2.2",
"ip_to": "172.29.87.183",
"results": [
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 560,
"rtt_ms": 0.69,
"throughput_kbps": 1407148,
"tool": "nuttcp-7.3.2"
},
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 184,
"rtt_ms": 0.62,
"throughput_kbps": 1475068,
"tool": "nuttcp-7.3.2"
},
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 310,
"rtt_ms": 0.59,
"throughput_kbps": 1529674,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 3.62,
"pkt_size": 128,
"protocol": "UDP",
"throughput_kbps": 153493,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 4.14,
"pkt_size": 1024,
"protocol": "UDP",
"throughput_kbps": 1241424,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 4.37,
"pkt_size": 8192,
"protocol": "UDP",
"throughput_kbps": 1311624,
"tool": "nuttcp-7.3.2"
},
{
"protocol": "ICMP",
"rtt_avg_ms": "0.646",
"rtt_max_ms": "0.693",
"rtt_min_ms": "0.613",
"rtt_stddev": "0.043",
"rx_packets": "5",
"tool": "ping",
"tx_packets": "5"
}
]
},
{
"az_from": "nova:hh23-5",
"az_to": "nova:hh23-6",
"desc": "VM to VM same network fixed IP (inter-node)",
"distro_id": "Ubuntu",
"distro_version": "14.04",
"ip_from": "192.168.1.5",
"ip_to": "192.168.1.2",
"results": [
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 99,
"rtt_ms": 0.34,
"throughput_kbps": 2340466,
"tool": "nuttcp-7.3.2"
},
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 67,
"rtt_ms": 0.43,
"throughput_kbps": 2313315,
"tool": "nuttcp-7.3.2"
},
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 63,
"rtt_ms": 0.32,
"throughput_kbps": 2020005,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 50.66,
"pkt_size": 128,
"protocol": "UDP",
"throughput_kbps": 76095,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 24.04,
"pkt_size": 1024,
"protocol": "UDP",
"throughput_kbps": 920877,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 28.84,
"pkt_size": 8192,
"protocol": "UDP",
"throughput_kbps": 1901142,
"tool": "nuttcp-7.3.2"
},
{
"protocol": "ICMP",
"rtt_avg_ms": "0.657",
"rtt_max_ms": "1.555",
"rtt_min_ms": "0.331",
"rtt_stddev": "0.453",
"rx_packets": "5",
"tool": "ping",
"tx_packets": "5"
}
]
},
{
"az_from": "nova:hh23-5",
"az_to": "nova:hh23-6",
"desc": "VM to VM different network fixed IP (inter-node)",
"distro_id": "Ubuntu",
"distro_version": "14.04",
"ip_from": "192.168.2.4",
"ip_to": "192.168.1.2",
"results": [
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 121,
"rtt_ms": 0.68,
"throughput_kbps": 1344370,
"tool": "nuttcp-7.3.2"
},
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 224,
"rtt_ms": 0.61,
"throughput_kbps": 1448398,
"tool": "nuttcp-7.3.2"
},
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 75,
"rtt_ms": 0.5,
"throughput_kbps": 1301634,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 1.04,
"pkt_size": 128,
"protocol": "UDP",
"throughput_kbps": 161581,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 0.98,
"pkt_size": 1024,
"protocol": "UDP",
"throughput_kbps": 1207335,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 3.82,
"pkt_size": 8192,
"protocol": "UDP",
"throughput_kbps": 1330237,
"tool": "nuttcp-7.3.2"
},
{
"protocol": "ICMP",
"rtt_avg_ms": "0.648",
"rtt_max_ms": "0.984",
"rtt_min_ms": "0.489",
"rtt_stddev": "0.175",
"rx_packets": "5",
"tool": "ping",
"tx_packets": "5"
}
]
},
{
"az_from": "nova:hh23-5",
"az_to": "nova:hh23-6",
"desc": "VM to VM different network floating IP (inter-node)",
"distro_id": "Ubuntu",
"distro_version": "14.04",
"ip_from": "192.168.2.4",
"ip_to": "172.29.87.183",
"results": [
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 201,
"rtt_ms": 0.65,
"throughput_kbps": 1371518,
"tool": "nuttcp-7.3.2"
},
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 133,
"rtt_ms": 0.57,
"throughput_kbps": 1388169,
"tool": "nuttcp-7.3.2"
},
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 68,
"rtt_ms": 0.56,
"throughput_kbps": 1250003,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 2.66,
"pkt_size": 128,
"protocol": "UDP",
"throughput_kbps": 148525,
"tool": "nuttcp-7.3.2"
},
{