Migrate code to python3/Ubuntu 20.04

Change-Id: I18a21e04d009afdee3afc2723afdbade24bfdf71
ahothan 2020-07-24 01:44:41 -07:00
parent de38fad996
commit 1316bd443d
50 changed files with 817 additions and 475 deletions

.gitignore

@ -68,3 +68,5 @@ scale/dib/kloudbuster.d/
# kb_web # kb_web
!kb_server/public/ui/components/*/*.css !kb_server/public/ui/components/*/*.css
!kb_server/public/ui/components/*/*.js !kb_server/public/ui/components/*/*.js
.pytest_cache/


@ -1,30 +1,33 @@
# docker file for creating a container that has kloudbuster installed and ready to use # docker file for creating a container that has kloudbuster installed and ready to use
# this will build from upstream's master latest # this will build from upstream's master latest
FROM ubuntu:16.04 FROM ubuntu:20.04
MAINTAINER kloudbuster-core <kloudbuster-core@lists.launchpad.net>
# Simpler would be to clone direct from upstream (latest) # Simpler would be to clone direct from upstream (latest)
# but the content might differ from the current repo # but the content might differ from the current repo
# So we'd rather copy the current kloudbuster directory # So we'd rather copy the current kloudbuster directory
# along with the pre-built qcow2 image # along with the pre-built qcow2 image
COPY ./ /kloudbuster/
# The name of the kloudbuster wheel package
# must be placed under ./dist directory before calling docker build
# example: ./dist/kloudbuster-8.0.0-py3-none-any.whl
ARG WHEEL_PKG
# The name of the kloudbuster VM qcow2 image
# must be placed in the current directory
# example: ./kloudbuster-8.0.0.qcow2
ARG VM_IMAGE
# copy the wheel package so it can be installed inside the container
COPY ./dist/$WHEEL_PKG /
# copy the VM image under /
COPY $VM_IMAGE /
# copy the VM Image
# Install KloudBuster script and dependencies # Install KloudBuster script and dependencies
# Note the dot_git directory must be renamed to .git RUN apt-get update \
# in order for pip install -e . to work properly && apt-get install -y python3 python3-pip python-is-python3 \
RUN apt-get update && apt-get install -y \ && pip3 install /$WHEEL_PKG \
git \ && rm -f /$WHEEL_PKG
libyaml-dev \
python \
python-dev \
python-pip \
&& pip install -U -q pip \
&& hash -r pip \
&& pip install -U -q setuptools \
&& cd /kloudbuster \
&& pip install -q -e . \
&& rm -rf .git \
&& rm -rf /var/lib/apt/lists/* \
&& apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/*
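
For reference, a minimal sketch of how this Dockerfile is meant to be driven (the wheel and qcow2 names are the examples from the ARG comments above, and the flags mirror the repository build script):

    # build the wheel into ./dist and stage the VM image at the root of the build context
    python setup.py build bdist_wheel
    cp /path/to/kloudbuster-8.0.0.qcow2 .
    # pass both file names as build args so the COPY lines can pick them up
    sudo docker build \
        --build-arg WHEEL_PKG=kloudbuster-8.0.0-py3-none-any.whl \
        --build-arg VM_IMAGE=kloudbuster-8.0.0.qcow2 \
        --tag=berrypatch/kloudbuster:8.0.0 .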


@ -1,5 +1,5 @@
===================== =====================
KloudBuster version 7 KloudBuster version 8
===================== =====================
How good is your OpenStack **data plane** or **storage service** under real How good is your OpenStack **data plane** or **storage service** under real


@ -1,5 +1,5 @@
===================== =====================
KloudBuster version 7 KloudBuster version 8
===================== =====================
How good is your OpenStack **data plane** or **storage service** under real How good is your OpenStack **data plane** or **storage service** under real
@ -89,8 +89,6 @@ Feature List
* Aggregated results provide an easy to understand way to assess the scale of * Aggregated results provide an easy to understand way to assess the scale of
the cloud under test the cloud under test
* KloudBuster VM image pre-built and available from the OpenStack Community App
Catalog (https://apps.openstack.org/)
**Diagrams** describing how the scale test resources are staged and how the **Diagrams** describing how the scale test resources are staged and how the
traffic flows are available in :ref:`arch`. traffic flows are available in :ref:`arch`.
@ -100,6 +98,15 @@ graphical charts generated straight off the tool.
**Examples of results** are available in :ref:`gallery`. **Examples of results** are available in :ref:`gallery`.
New in Release 8
----------------
* KloudBuster is now fully Python 3 compatible; Python 2.7 is no longer supported (see the quick check below).
* Validated against the OpenStack Train release
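
A quick way to verify the new requirement (sketch of an assumed install-from-source session; the editable install and the version check are the same ones used by the build script):

    python3 --version        # must report a 3.x interpreter
    pip3 install -e .        # run from the root of a kloudbuster clone
    kloudbuster --version    # prints the release tag (on stderr)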
New in Release 7 New in Release 7
---------------- ----------------


@ -10,12 +10,6 @@ export DIB_DEV_USER_PWDLESS_SUDO=Y
# Set the data sources to have ConfigDrive only # Set the data sources to have ConfigDrive only
export DIB_CLOUD_INIT_DATASOURCES="ConfigDrive" export DIB_CLOUD_INIT_DATASOURCES="ConfigDrive"
# Check we are in a virtual environment
function check_in_venv {
IN_VENV=$(python -c 'import sys; print hasattr(sys, "real_prefix")')
echo $IN_VENV
}
function cleanup_qcow2 { function cleanup_qcow2 {
echo echo
echo "Error: found unrelated qcow2 files that would make the container image too large." echo "Error: found unrelated qcow2 files that would make the container image too large."
@ -34,17 +28,12 @@ function build_vm {
fi fi
echo "Building $kb_image_name.qcow2..." echo "Building $kb_image_name.qcow2..."
pip install "diskimage-builder>=2.15" pip3 install "diskimage-builder>=2.15"
cd ./kb_dib cd ./kb_dib
# Add the kloudbuster elements directory to the DIB elements path # Add the kloudbuster elements directory to the DIB elements path
export ELEMENTS_PATH=./elements export ELEMENTS_PATH=./elements
# canned user/password for direct login
export DIB_DEV_USER_USERNAME=kloudbuster
export DIB_DEV_USER_PASSWORD=kloudbuster
export DIB_DEV_USER_PWDLESS_SUDO=Y
# Install Ubuntu 18.04 # Install Ubuntu 18.04
export DIB_RELEASE=bionic export DIB_RELEASE=bionic
@ -64,10 +53,21 @@ function build_vm {
# Build container # Build container
function build_container { function build_container {
echo "docker build --tag=berrypatch/kloudbuster:$KB_TAG ." # Create a wheel package
sudo docker build --tag=berrypatch/kloudbuster:$KB_TAG . # ./dist/kloudbuster-$KB_TAG-py3-none-any.whl
echo "sudo docker build --tag=berrypatch/kloudbuster:latest ." python setup.py build bdist_wheel || { echo "Error building package"; exit 5; }
sudo docker build --tag=berrypatch/kloudbuster:latest . wheel_pkg="kloudbuster-$KB_TAG-py3-none-any.whl"
if [ -f ./dist/$wheel_pkg ]; then
echo "Created package: ./dist/$wheel_pkg"
else
echo "Error: Cannot find created package: ./dist/$wheel_pkg"
exit 4
fi
build_args="--build-arg WHEEL_PKG=$wheel_pkg --build-arg VM_IMAGE=$kb_image_name.qcow2"
echo "docker build $build_args --tag=berrypatch/kloudbuster:$KB_TAG ."
sudo docker build $build_args --tag=berrypatch/kloudbuster:$KB_TAG .
echo "sudo docker build $build_args --tag=berrypatch/kloudbuster:latest ."
sudo docker build $build_args --tag=berrypatch/kloudbuster:latest .
} }
function help { function help {
@ -78,7 +78,7 @@ function help {
echo "Builds the KloudBuster VM and Docker container images" echo "Builds the KloudBuster VM and Docker container images"
echo "The Docker container image will include the VM image for easier upload" echo "The Docker container image will include the VM image for easier upload"
echo echo
echo "Must run in a virtual environment and must be called from the root of the repository" echo "Kloudbuster must be installed for this script to run (typically would run from a virtual environment)"
exit 1 exit 1
} }
@ -96,27 +96,50 @@ while [[ $# -gt 0 ]]; do
# Shift after checking all the cases to get the next option # Shift after checking all the cases to get the next option
shift shift
done done
in_venv=$(check_in_venv)
if [ $in_venv != "True" ]; then # check that we have python3/pip3 enabled
echo "Error: Must be in a virtual environment to run!" python -c 'print 0' >/dev/null 2>/dev/null
exit 2 if [ $? -eq 0 ]; then
echo "Error: python 3 is required as default python version"
exit 3
fi fi
# check that we are in a virtual environment
INVENV=$(python -c 'import sys;print(hasattr(sys, "real_prefix") or (hasattr(sys, "base_prefix") and sys.base_prefix != sys.prefix))')
if [ $INVENV != "True" ]; then
echo "Error: must run inside a venv as many packages will be installed"
exit 4
fi
# check that kloudbuster binary is installed
# Get the kloudbuster version (must be retrieved from stderr)
KB_TAG=$(kloudbuster --version 2>&1)
if [ $? != 0 ]; then
echo "Installing kloudbuster..."
# Install kloudbuster in the virtual env in editable mode
pip3 install -q -e .
KB_TAG=$(kloudbuster --version 2>&1)
if [ $? != 0 ]; then
echo "Error: cannot retrieve version from kloudbuster..."
echo
kloudbuster --version
exit 2
fi
fi
# check that docker is installed
if [ $build_vm_only = 0 ]; then
docker --version >/dev/null 2>/dev/null
if [ $? -ne 0 ]; then
echo "Error: docker is not installed"
exit 4
fi
fi
# check we're at the root of the kloudbuster repo # check we're at the root of the kloudbuster repo
if [ ! -d kloudbuster -o ! -f Dockerfile ]; then if [ ! -d kloudbuster -o ! -f Dockerfile ]; then
echo "Error: Must be called from the root of the kloudbuster repository to run!" echo "Error: Must be called from the root of the kloudbuster repository to run!"
exit 2 exit 2
fi fi
# Install kloudbuster in the virtual env
pip install -q -U setuptools
pip install -q -e .
# Get the kloudbuster version (must be retrieved from stderr)
KB_TAG=$(kloudbuster --version 2>&1)
if [ $? != 0 ]; then
echo "Error retrieving kloudbuster version:"
echo
kloudbuster --version
exit 2
fi
echo echo
echo "Building KloudBuster with tag $KB_TAG" echo "Building KloudBuster with tag $KB_TAG"


@ -10,7 +10,7 @@ The same image can run using one of the following roles (Assigned from the user-
- Client VM for a given traffic type (e.g. http client or tcp/udp client) - Client VM for a given traffic type (e.g. http client or tcp/udp client)
- Redis server (only 1 instance in the client cloud) - Redis server (only 1 instance in the client cloud)
The default login on the VM is VMs are launched using cloud-config and can be accessed with ssh:
- username: kb - username: cloud-user
- password: kb - no password, use the key pair provided at VM creation
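
For example (a sketch; the key file name and address are placeholders), a VM created with a keypair can be reached as:

    ssh -i ~/.ssh/kb_key.pem cloud-user@<instance-ip>   # no password login on the image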


@ -6,8 +6,8 @@ libssl-dev:
libyaml-dev: libyaml-dev:
nginx: nginx:
ntpdate: ntpdate:
python-pip: python3-pip:
python-dev: python3-dev:
redis-server: redis-server:
xfsprogs: xfsprogs:
zlib1g-dev: zlib1g-dev:


@ -1,5 +1,3 @@
#!/bin/sh #!/bin/sh
pip install --upgrade pip pip3 install setuptools wheel
hash -r pip
pip install setuptools wheel


@ -56,7 +56,7 @@ echo 'mkdir -p /mnt/config' >> /etc/rc.local
echo 'mount /dev/disk/by-label/config-2 /mnt/config' >> /etc/rc.local echo 'mount /dev/disk/by-label/config-2 /mnt/config' >> /etc/rc.local
echo 'cp /mnt/config/openstack/latest/user_data /kb_test/' >> /etc/rc.local echo 'cp /mnt/config/openstack/latest/user_data /kb_test/' >> /etc/rc.local
echo 'cd /kb_test' >> /etc/rc.local echo 'cd /kb_test' >> /etc/rc.local
echo 'python kb_vm_agent.py &' >> /etc/rc.local echo 'python3 kb_vm_agent.py &' >> /etc/rc.local
chmod +x /etc/rc.local chmod +x /etc/rc.local
# ================= # =================
@ -65,24 +65,24 @@ chmod +x /etc/rc.local
cd /kb_test cd /kb_test
git clone https://opendev.org/x/kloudbuster.git git clone https://opendev.org/x/kloudbuster.git
cd kloudbuster cd kloudbuster
pip install -r requirements.txt pip3 install -r requirements.txt
# ====== # ======
# Client # Client
# ====== # ======
# python redis client, HdrHistogram_py # python redis client, HdrHistogram_py
pip install redis hdrhistogram pip3 install redis hdrhistogram
# Install HdrHistogram_c # Install HdrHistogram_c
cd /tmp cd /tmp
git clone git://github.com/HdrHistogram/HdrHistogram_c.git git clone https://github.com/HdrHistogram/HdrHistogram_c.git
cd HdrHistogram_c cd HdrHistogram_c
cmake . cmake .
make install make install
# Install the http traffic generator # Install the http traffic generator
cd /tmp cd /tmp
git clone git://github.com/yicwang/wrk2.git git clone https://github.com/yicwang/wrk2.git
cd wrk2 cd wrk2
make make
mv wrk /usr/local/bin/wrk2 mv wrk /usr/local/bin/wrk2
@ -113,7 +113,7 @@ rm -rf /tmp/wrk2
rm -rf /tmp/fio rm -rf /tmp/fio
# Uninstall unneeded packages # Uninstall unneeded packages
apt-get -y --purge remove libyaml-dev libssl-dev zlib1g-dev libaio-dev python-pip python-dev build-essential cmake apt-get -y --purge remove libyaml-dev libssl-dev zlib1g-dev libaio-dev python3-pip python3-dev build-essential cmake
apt-get -y --purge autoremove apt-get -y --purge autoremove
apt-get -y install python ## apt-get -y install python
apt-get -y autoclean apt-get -y autoclean


@ -1,4 +1,4 @@
#!/usr/bin/env python #!/usr/bin/env python3
import yaml import yaml
cloudcfg = "/etc/cloud/cloud.cfg" cloudcfg = "/etc/cloud/cloud.cfg"
@ -7,11 +7,12 @@ user = "cloud-user"
with open(cloudcfg) as f: with open(cloudcfg) as f:
cfg = yaml.safe_load(f) cfg = yaml.safe_load(f)
synver = "1"
try: try:
if cfg['system_info']['default_user']['name']: if cfg['system_info']['default_user']['name']:
synver = "2" synver = "2"
except KeyError: except KeyError:
synver = "1" pass
if synver == "1": if synver == "1":
if cfg['user'] == user: if cfg['user'] == user:
@ -27,7 +28,7 @@ elif synver == "2":
# Change the user to cloud-user # Change the user to cloud-user
cfg['system_info']['default_user']['name'] = user cfg['system_info']['default_user']['name'] = user
cfg['system_info']['default_user']['gecos'] = "Cloud User" cfg['system_info']['default_user']['gecos'] = "Cloud User"
print cfg['system_info']['default_user']['name'] print(cfg['system_info']['default_user']['name'])
with open(cloudcfg, "w") as f: with open(cloudcfg, "w") as f:
yaml.dump(cfg, f, default_flow_style=False) yaml.dump(cfg, f, default_flow_style=False)


@ -13,10 +13,8 @@
# under the License. # under the License.
# #
from hdrh.histogram import HdrHistogram
import json import json
import multiprocessing import multiprocessing
import redis
import socket import socket
import struct import struct
import subprocess import subprocess
@ -27,6 +25,9 @@ import threading
import time import time
import traceback import traceback
from hdrh.histogram import HdrHistogram
import redis
# Define the version of the KloudBuster agent and VM image # Define the version of the KloudBuster agent and VM image
# #
# When VM is up running, the agent will send the READY message to the # When VM is up running, the agent will send the READY message to the
@ -36,11 +37,11 @@ import traceback
# and can be left constant moving forward. # and can be left constant moving forward.
__version__ = '7' __version__ = '7'
# TODO(Logging on Agent) # TODO: add logging on the agent later
def exec_command(cmd, cwd=None): def exec_command(cmd, cwd=None):
p = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) p = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate() (_, stderr) = p.communicate()
if p.returncode: if p.returncode:
syslog.syslog("Command failed: " + ' '.join(cmd)) syslog.syslog("Command failed: " + ' '.join(cmd))
if stderr: if stderr:
@ -54,7 +55,7 @@ def refresh_clock(clocks, force_sync=False):
command = "sudo ntpdate" + step + clocks command = "sudo ntpdate" + step + clocks
exec_command(command.split(" ")) exec_command(command.split(" "))
class KB_Instance(object): class KB_Instance():
# Check whether the HTTP Service is up running # Check whether the HTTP Service is up running
@staticmethod @staticmethod
@ -73,7 +74,7 @@ class KB_Instance(object):
if if_name: if if_name:
debug_msg += " and %s" % if_name debug_msg += " and %s" % if_name
cmd += " dev %s" % if_name cmd += " dev %s" % if_name
print debug_msg print(debug_msg)
return cmd return cmd
@staticmethod @staticmethod
@ -105,7 +106,7 @@ class KB_Instance(object):
else: else:
debug_msg = "with next hop %s" % if_name debug_msg = "with next hop %s" % if_name
cmd += " dev %s" % if_name cmd += " dev %s" % if_name
print debug_msg print(debug_msg)
return cmd return cmd
# Run the HTTP benchmarking tool # Run the HTTP benchmarking tool
@ -167,7 +168,7 @@ class KB_Instance(object):
cmd = '%s %s %s %s' % (dest_path, fixed_opt, required_opt, optional_opt) cmd = '%s %s %s %s' % (dest_path, fixed_opt, required_opt, optional_opt)
return cmd return cmd
class KBA_Client(object): class KBA_Client():
def __init__(self, user_data): def __init__(self, user_data):
host = user_data['redis_server'] host = user_data['redis_server']
@ -185,10 +186,10 @@ class KBA_Client(object):
def setup_channels(self): def setup_channels(self):
# Check for connections to redis server # Check for connections to redis server
while (True): while True:
try: try:
self.redis_obj.get("test") self.redis_obj.get("test")
except (redis.exceptions.ConnectionError): except redis.exceptions.ConnectionError:
time.sleep(1) time.sleep(1)
continue continue
break break
@ -230,6 +231,8 @@ class KBA_Client(object):
self.last_process = p self.last_process = p
lines_iterator = iter(p.stdout.readline, b"") lines_iterator = iter(p.stdout.readline, b"")
for line in lines_iterator: for line in lines_iterator:
# line is bytes, so need to make it a str
line = line.decode('utf-8')
# One exception, if this is the very last report, we will send it # One exception, if this is the very last report, we will send it
# through "DONE" command, not "REPORT". So what's happening here # through "DONE" command, not "REPORT". So what's happening here
# is to determine whether this is the last report. # is to determine whether this is the last report.
@ -267,23 +270,25 @@ class KBA_Client(object):
# When 'ACK' is received, means the master node # When 'ACK' is received, means the master node
# acknowledged the current VM. So stopped sending more # acknowledged the current VM. So stopped sending more
# "hello" packet to the master node. # "hello" packet to the master node.
# Unfortunately, there is no thread.stop() in Python 2.x
self.stop_hello.set() self.stop_hello.set()
elif message['cmd'] == 'EXEC': elif message['cmd'] == 'EXEC':
self.last_cmd = "" self.last_cmd = ""
arange = message['data']['active_range'] arange = message['data']['active_range']
my_id = int(self.vm_name[self.vm_name.rindex('I') + 1:]) my_id = int(self.vm_name[self.vm_name.rindex('I') + 1:])
if (not arange) or (my_id >= arange[0] and my_id <= arange[1]): if (not arange) or (arange[0] <= my_id <= arange[1]):
try: try:
par = message['data'].get('parameter', '') par = message['data'].get('parameter', '')
str_par = 'par' if par else '' str_par = 'par' if par else ''
cmd_res_tuple = eval('self.exec_%s(%s)' % (message['data']['cmd'], str_par)) cmd = message['data']['cmd']
if isinstance(cmd, bytes):
cmd = cmd.decode('utf-8')
cmd_res_tuple = eval('self.exec_%s(%s)' % (cmd, str_par))
cmd_res_dict = dict(zip(("status", "stdout", "stderr"), cmd_res_tuple)) cmd_res_dict = dict(zip(("status", "stdout", "stderr"), cmd_res_tuple))
except Exception as exc: except Exception:
cmd_res_dict = { cmd_res_dict = {
"status": 1, "status": 1,
"stdout": self.last_cmd, "stdout": self.last_cmd,
"stderr": str(exc) "stderr": traceback.format_exc() + '\nmessage: ' + str(message['data'])
} }
if self.__class__.__name__ == "KBA_Multicast_Client": if self.__class__.__name__ == "KBA_Multicast_Client":
self.report('DONE_MC', message['client-type'], cmd_res_dict) self.report('DONE_MC', message['client-type'], cmd_res_dict)
@ -291,14 +296,14 @@ class KBA_Client(object):
self.report('DONE', message['client-type'], cmd_res_dict) self.report('DONE', message['client-type'], cmd_res_dict)
else: else:
# Unexpected # Unexpected
print 'ERROR: Unexpected command received!' print('ERROR: Unexpected command received!')
class KBA_HTTP_Client(KBA_Client): class KBA_HTTP_Client(KBA_Client):
def exec_setup_static_route(self): def exec_setup_static_route(self):
self.last_cmd = KB_Instance.get_static_route(self.user_data['target_subnet_ip']) self.last_cmd = KB_Instance.get_static_route(self.user_data['target_subnet_ip'])
result = self.exec_command(self.last_cmd) result = self.exec_command(self.last_cmd)
if (self.user_data['target_subnet_ip'] not in result[1]): if self.user_data['target_subnet_ip'] not in result[1]:
self.last_cmd = KB_Instance.add_static_route( self.last_cmd = KB_Instance.add_static_route(
self.user_data['target_subnet_ip'], self.user_data['target_subnet_ip'],
self.user_data['target_shared_interface_ip']) self.user_data['target_shared_interface_ip'])
@ -323,7 +328,7 @@ class KBA_Multicast_Client(KBA_Client):
self.last_cmd = KB_Instance.get_static_route(self.user_data['target_subnet_ip']) self.last_cmd = KB_Instance.get_static_route(self.user_data['target_subnet_ip'])
result = self.exec_command(self.last_cmd) result = self.exec_command(self.last_cmd)
if (self.user_data['target_subnet_ip'] not in result[1]): if self.user_data['target_subnet_ip'] not in result[1]:
self.last_cmd = KB_Instance.add_static_route( self.last_cmd = KB_Instance.add_static_route(
self.user_data['target_subnet_ip'], self.user_data['target_subnet_ip'],
self.user_data['target_shared_interface_ip']) self.user_data['target_shared_interface_ip'])
@ -340,10 +345,10 @@ class KBA_Multicast_Client(KBA_Client):
'megabytes': 'megabytes', 'rate_Mbps': 'mbps', 'msmaxjitter': 'jitter', 'megabytes': 'megabytes', 'rate_Mbps': 'mbps', 'msmaxjitter': 'jitter',
'msavgOWD': 'latency'} # Format/Include Keys 'msavgOWD': 'latency'} # Format/Include Keys
try: try:
return {kmap[k]: abs(float(v)) return {
for (k, v) in [c.split("=") kmap[k]: abs(float(v)) for (k, v) in [c.split("=")
for c in p_out.split(" ")] for c in p_out.split(" ")] if k in kmap
if k in kmap} }
except Exception: except Exception:
return {'error': '0'} return {'error': '0'}
@ -365,12 +370,12 @@ class KBA_Multicast_Client(KBA_Client):
queue.put([cmds[cmd][0], out]) queue.put([cmds[cmd][0], out])
# End Function # # End Function #
for cmd in cmds: for _ in cmds:
multiprocessing.Process(target=spawn, args=(cmd_index, queue)).start() multiprocessing.Process(target=spawn, args=(cmd_index, queue)).start()
cmd_index += 1 cmd_index += 1
p_err = "" p_err = ""
try: try:
while(j < len(cmds)): while j < len(cmds):
out = queue.get(True, timeout) out = queue.get(True, timeout)
key = out[0] key = out[0]
j += 1 j += 1
@ -500,7 +505,7 @@ class KBA_Storage_Client(KBA_Client):
grp_msb_bits = clat['FIO_IO_U_PLAT_BITS'] grp_msb_bits = clat['FIO_IO_U_PLAT_BITS']
buckets_per_grp = clat['FIO_IO_U_PLAT_VAL'] buckets_per_grp = clat['FIO_IO_U_PLAT_VAL']
for bucket in xrange(total_buckets): for bucket in range(total_buckets):
if clat[str(bucket)]: if clat[str(bucket)]:
grp = bucket / buckets_per_grp grp = bucket / buckets_per_grp
subbucket = bucket % buckets_per_grp subbucket = bucket % buckets_per_grp
@ -511,7 +516,8 @@ class KBA_Storage_Client(KBA_Client):
val = int(base + (base / buckets_per_grp) * (subbucket - 0.5)) val = int(base + (base / buckets_per_grp) * (subbucket - 0.5))
histogram.record_value(val, clat[str(bucket)]) histogram.record_value(val, clat[str(bucket)])
p_output['jobs'][0][test]['clat']['hist'] = histogram.encode() # histogram.encode() returns a base64 compressed histogram as bytes
p_output['jobs'][0][test]['clat']['hist'] = histogram.encode().decode('utf-8')
p_output['jobs'][0][test]['clat'].pop('bins') p_output['jobs'][0][test]['clat'].pop('bins')
p_output['jobs'][0][test]['clat'].pop('percentile') p_output['jobs'][0][test]['clat'].pop('percentile')
@ -534,7 +540,7 @@ class KBA_Storage_Client(KBA_Client):
return self.encode_bins(p_out) return self.encode_bins(p_out)
class KBA_Server(object): class KBA_Server():
def __init__(self, user_data): def __init__(self, user_data):
self.user_data = user_data self.user_data = user_data
@ -544,14 +550,14 @@ class KBA_Server(object):
html_size = self.user_data['http_server_configs']['html_size'] html_size = self.user_data['http_server_configs']['html_size']
cmd_str = 'dd if=/dev/zero of=/data/www/index.html bs=%s count=1' % html_size cmd_str = 'dd if=/dev/zero of=/data/www/index.html bs=%s count=1' % html_size
cmd = cmd_str.split() cmd = cmd_str.split()
return False if exec_command(cmd) else True return not bool(exec_command(cmd))
def start_nginx_server(self): def start_nginx_server(self):
cmd = ['sudo', 'service', 'nginx', 'start'] cmd = ['sudo', 'service', 'nginx', 'start']
return exec_command(cmd) return exec_command(cmd)
def start_nuttcp_server(self): def start_nuttcp_server(self):
cmd = ['/usr/local/bin/nuttcp', '-S' '-P5000'] cmd = ['/usr/local/bin/nuttcp', '-S', '-P5000']
return exec_command(cmd) return exec_command(cmd)
def start_multicast_listener(self, mc_addrs, multicast_ports, start_address="231.0.0.128"): def start_multicast_listener(self, mc_addrs, multicast_ports, start_address="231.0.0.128"):
@ -574,7 +580,7 @@ class KBA_Server(object):
s.bind((m_addr, port)) s.bind((m_addr, port))
s.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq) s.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
while True: while True:
d, e = s.recvfrom(10240) s.recvfrom(10240)
# End Function # # End Function #
@ -587,7 +593,7 @@ class KBA_Server(object):
while True: while True:
continue continue
class KBA_Proxy(object): class KBA_Proxy():
def start_redis_server(self): def start_redis_server(self):
cmd = ['sudo', 'service', 'redis-server', 'start'] cmd = ['sudo', 'service', 'redis-server', 'start']
return exec_command(cmd) return exec_command(cmd)
@ -600,18 +606,19 @@ if __name__ == "__main__":
except Exception as e: except Exception as e:
# KloudBuster starts without user-data # KloudBuster starts without user-data
cwd = 'kloudbuster/kb_server' cwd = 'kloudbuster/kb_server'
cmd = ['python', 'setup.py', 'develop'] cmd = ['python3', 'setup.py', 'develop']
rc = exec_command(cmd, cwd=cwd) rc = exec_command(cmd, cwd=cwd)
if not rc: if not rc:
syslog.syslog("Starting kloudbuster HTTP server") syslog.syslog("Starting kloudbuster HTTP server")
cmd = ['/usr/local/bin/pecan', 'serve', 'config.py'] cmd = ['/usr/local/bin/pecan', 'serve', 'config.py']
sys.exit(exec_command(cmd, cwd=cwd)) sys.exit(exec_command(cmd, cwd=cwd))
if user_data.get('role') == 'KB-PROXY': role = user_data.get('role')
if role == 'KB-PROXY':
agent = KBA_Proxy() agent = KBA_Proxy()
syslog.syslog("Starting kloudbuster proxy server") syslog.syslog("Starting kloudbuster proxy server")
sys.exit(agent.start_redis_server()) sys.exit(agent.start_redis_server())
if user_data.get('role').endswith('Server'): if role.endswith('Server'):
agent = KBA_Server(user_data) agent = KBA_Server(user_data)
if user_data['role'].startswith('Multicast'): if user_data['role'].startswith('Multicast'):
KB_Instance.add_multicast_route() KB_Instance.add_multicast_route()
@ -631,11 +638,11 @@ if __name__ == "__main__":
sys.exit(agent.start_nginx_server()) sys.exit(agent.start_nginx_server())
else: else:
sys.exit(1) sys.exit(1)
elif user_data.get('role').endswith('Client'): elif role.endswith('Client'):
if user_data['role'].startswith('HTTP'): if role.startswith('HTTP'):
syslog.syslog("Starting kloudbuster HTTP client") syslog.syslog("Starting kloudbuster HTTP client")
agent = KBA_HTTP_Client(user_data) agent = KBA_HTTP_Client(user_data)
elif user_data['role'].startswith('Multicast'): elif role.startswith('Multicast'):
KB_Instance.add_multicast_route() KB_Instance.add_multicast_route()
refresh_clock(user_data.get('ntp_clocks'), force_sync=True) refresh_clock(user_data.get('ntp_clocks'), force_sync=True)
agent = KBA_Multicast_Client(user_data) agent = KBA_Multicast_Client(user_data)


@ -29,7 +29,7 @@ from pecan import response
LOG = logging.getLogger("kloudbuster") LOG = logging.getLogger("kloudbuster")
class ConfigController(object): class ConfigController():
# Decorator to check for missing or invalid session ID # Decorator to check for missing or invalid session ID
def check_session_id(func): def check_session_id(func):
@ -198,7 +198,7 @@ class ConfigController(object):
allowed_status = ['READY'] allowed_status = ['READY']
except Exception as e: except Exception as e:
response.status = 400 response.status = 400
response.text = u"Invalid JSON: \n%s" % (e.message) response.text = u"Invalid JSON: \n%s" % str(e)
return response.text return response.text
# http_tool_configs and storage_tool_config for client VMs is allowed to be # http_tool_configs and storage_tool_config for client VMs is allowed to be


@ -26,7 +26,7 @@ from pecan import response
LOG = logging.getLogger("kloudbuster") LOG = logging.getLogger("kloudbuster")
class KBController(object): class KBController():
def __init__(self): def __init__(self):
self.kb_thread = None self.kb_thread = None


@ -17,7 +17,7 @@ import threading
KB_SESSIONS = {} KB_SESSIONS = {}
KB_SESSIONS_LOCK = threading.Lock() KB_SESSIONS_LOCK = threading.Lock()
class KBSessionManager(object): class KBSessionManager():
@staticmethod @staticmethod
def has(session_id): def has(session_id):
@ -46,7 +46,7 @@ class KBSessionManager(object):
KB_SESSIONS_LOCK.release() KB_SESSIONS_LOCK.release()
class KBSession(object): class KBSession():
def __init__(self): def __init__(self):
self.kb_status = 'READY' self.kb_status = 'READY'
self.first_run = True self.first_run = True


@ -19,7 +19,7 @@ from pecan import expose
from pecan import response from pecan import response
class APIController(object): class APIController():
@expose() @expose()
def _lookup(self, primary_key, *remainder): def _lookup(self, primary_key, *remainder):
if primary_key == "config": if primary_key == "config":
@ -30,7 +30,7 @@ class APIController(object):
abort(404) abort(404)
class RootController(object): class RootController():
@expose() @expose()
def index(self): def index(self):
response.status = 301 response.status = 301


@ -15,7 +15,7 @@
import os import os
import time import time
import log as logging import kloudbuster.log as logging
from novaclient.exceptions import BadRequest from novaclient.exceptions import BadRequest
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -24,7 +24,7 @@ class KBVolAttachException(Exception):
pass pass
class BaseCompute(object): class BaseCompute():
""" """
The Base class for nova compute resources The Base class for nova compute resources
1. Creates virtual machines with specific configs 1. Creates virtual machines with specific configs
@ -46,13 +46,12 @@ class BaseCompute(object):
self.shared_interface_ip = None self.shared_interface_ip = None
self.vol = None self.vol = None
# Create a server instance with associated
# security group, keypair with a provided public key
def create_server(self, image_name, flavor_type, keyname, def create_server(self, image_name, flavor_type, keyname,
nic, sec_group, avail_zone=None, user_data=None, nic, sec_group, avail_zone=None, user_data=None,
config_drive=True, retry_count=100): config_drive=True, retry_count=100):
""" """
Create a server instance with associated security group, keypair with a provided public key.
Create a VM instance given following parameters Create a VM instance given following parameters
1. VM Name 1. VM Name
2. Image Name 2. Image Name
@ -93,6 +92,7 @@ class BaseCompute(object):
LOG.error('Instance creation error:' + instance.fault['message']) LOG.error('Instance creation error:' + instance.fault['message'])
return None return None
time.sleep(2) time.sleep(2)
return None
def attach_vol(self): def attach_vol(self):
if self.vol.status != 'available': if self.vol.status != 'available':
@ -117,7 +117,7 @@ class BaseCompute(object):
def detach_vol(self): def detach_vol(self):
if self.instance and self.vol: if self.instance and self.vol:
attached_vols = self.novaclient.volumes.get_server_volumes(self.instance.id) attached_vols = self.novaclient.volumes.get_server_volumes(self.instance.id)
if len(attached_vols): if attached_vols:
try: try:
self.novaclient.volumes.delete_server_volume(self.instance.id, self.vol.id) self.novaclient.volumes.delete_server_volume(self.instance.id, self.vol.id)
except BadRequest: except BadRequest:
@ -133,7 +133,7 @@ class BaseCompute(object):
return flavor return flavor
class SecGroup(object): class SecGroup():
def __init__(self, novaclient, neutronclient): def __init__(self, novaclient, neutronclient):
self.secgroup = None self.secgroup = None
@ -238,7 +238,7 @@ class SecGroup(object):
LOG.error('Failed while deleting security group %s.' % self.secgroup['id']) LOG.error('Failed while deleting security group %s.' % self.secgroup['id'])
return False return False
class KeyPair(object): class KeyPair():
def __init__(self, novaclient): def __init__(self, novaclient):
self.keypair = None self.keypair = None
@ -268,7 +268,7 @@ class KeyPair(object):
if self.keypair: if self.keypair:
self.novaclient.keypairs.delete(self.keypair) self.novaclient.keypairs.delete(self.keypair)
class Flavor(object): class Flavor():
def __init__(self, novaclient): def __init__(self, novaclient):
self.novaclient = novaclient self.novaclient = novaclient
@ -304,7 +304,7 @@ class Flavor(object):
except Exception: except Exception:
pass pass
class NovaQuota(object): class NovaQuota():
def __init__(self, novaclient, tenant_id): def __init__(self, novaclient, tenant_id):
self.novaclient = novaclient self.novaclient = novaclient


@ -14,11 +14,11 @@
import time import time
from perf_instance import PerfInstance from kloudbuster.perf_instance import PerfInstance
import base_compute import kloudbuster.base_compute as base_compute
import base_storage import kloudbuster.base_storage as base_storage
import log as logging import kloudbuster.log as logging
import netaddr import netaddr
from neutronclient.common.exceptions import NetworkInUseClient from neutronclient.common.exceptions import NetworkInUseClient
@ -101,7 +101,7 @@ def find_provider_network(neutron_client, name):
networks = neutron_client.list_networks()['networks'] networks = neutron_client.list_networks()['networks']
for network in networks: for network in networks:
if network['provider:physical_network']: if network['provider:physical_network']:
if name == "" or name == network['name']: if name in ("", network['name']):
return network return network
if name != "": if name != "":
LOG.error("The provider network: " + name + " was not found.") LOG.error("The provider network: " + name + " was not found.")
@ -116,11 +116,11 @@ def find_first_network(neutron_client):
If no external network is found return None If no external network is found return None
""" """
networks = neutron_client.list_networks()['networks'] networks = neutron_client.list_networks()['networks']
if (len(networks) > 0): if networks:
return networks[0] return networks[0]
return None return None
class BaseNetwork(object): class BaseNetwork():
""" """
The Base class for neutron network operations The Base class for neutron network operations
1. Creates networks with 1 subnet inside each network 1. Creates networks with 1 subnet inside each network
@ -177,7 +177,7 @@ class BaseNetwork(object):
vol_size = 0 vol_size = 0
# Schedule to create the required number of VMs # Schedule to create the required number of VMs
for instance_count in xrange(vm_total): for instance_count in range(vm_total):
vm_name = network_prefix + "-I" + str(instance_count) vm_name = network_prefix + "-I" + str(instance_count)
perf_instance = PerfInstance(vm_name, self, config_scale) perf_instance = PerfInstance(vm_name, self, config_scale)
self.instance_list.append(perf_instance) self.instance_list.append(perf_instance)
@ -197,7 +197,8 @@ class BaseNetwork(object):
if config_scale['use_floatingip']: if config_scale['use_floatingip']:
# Create the floating ip for the instance # Create the floating ip for the instance
# store it and the ip address in perf_instance object # store it and the ip address in perf_instance object
perf_instance.fip = create_floating_ip(self.neutron_client, external_network) port_id = perf_instance.instance.interface_list()[0].id
perf_instance.fip = create_floating_ip(self.neutron_client, external_network, port_id)
perf_instance.fip_ip = perf_instance.fip['floatingip']['floating_ip_address'] perf_instance.fip_ip = perf_instance.fip['floatingip']['floating_ip_address']
self.res_logger.log('floating_ips', self.res_logger.log('floating_ips',
perf_instance.fip['floatingip']['floating_ip_address'], perf_instance.fip['floatingip']['floating_ip_address'],
@ -270,7 +271,7 @@ class BaseNetwork(object):
if len(self.network['subnets']) > 0: if len(self.network['subnets']) > 0:
subnet = self.neutron_client.show_subnet(self.network['subnets'][0])['subnet'] subnet = self.neutron_client.show_subnet(self.network['subnets'][0])['subnet']
self.network['subnet_ip'] = subnet['cidr'] self.network['subnet_ip'] = subnet['cidr']
self.network['is_ipv6'] = True if subnet['ipv6_address_mode'] else False self.network['is_ipv6'] = bool(subnet['ipv6_address_mode'])
def get_cidr_from_subnet_id(self, subnetID): def get_cidr_from_subnet_id(self, subnetID):
sub = self.neutron_client.show_subnet(subnetID) sub = self.neutron_client.show_subnet(subnetID)
@ -281,6 +282,7 @@ class BaseNetwork(object):
"""Generate next CIDR for network or subnet, without IP overlapping. """Generate next CIDR for network or subnet, without IP overlapping.
""" """
global cidr global cidr
# pylint: disable=not-callable
cidr = str(netaddr.IPNetwork(cidr).next()) cidr = str(netaddr.IPNetwork(cidr).next())
return cidr return cidr
@ -304,7 +306,7 @@ class BaseNetwork(object):
def get_all_instances(self): def get_all_instances(self):
return self.instance_list return self.instance_list
class Router(object): class Router():
""" """
Router class to create new routers Router class to create new routers
Supports addition and deletion Supports addition and deletion
@ -496,7 +498,7 @@ class Router(object):
class NeutronQuota(object): class NeutronQuota():
def __init__(self, neutronclient, tenant_id): def __init__(self, neutronclient, tenant_id):
self.neutronclient = neutronclient self.neutronclient = neutronclient


@ -14,14 +14,14 @@
import time import time
import log as logging import kloudbuster.log as logging
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class KBVolCreationException(Exception): class KBVolCreationException(Exception):
pass pass
class BaseStorage(object): class BaseStorage():
""" """
The Base class for cinder storage resources The Base class for cinder storage resources
""" """
@ -69,7 +69,7 @@ class BaseStorage(object):
# self.cinderclient.volumes.detach(volume) # self.cinderclient.volumes.detach(volume)
class CinderQuota(object): class CinderQuota():
def __init__(self, cinderclient, tenant_id): def __init__(self, cinderclient, tenant_id):
self.cinderclient = cinderclient self.cinderclient = cinderclient


@ -32,8 +32,9 @@ image_name:
# vm_image_file: /kloudbuster/kloudbuster-7.0.0.qcow2 # vm_image_file: /kloudbuster/kloudbuster-7.0.0.qcow2
# If empty, KloudBuster will attempt to locate that file (with the default name) # If empty, KloudBuster will attempt to locate that file (with the default name)
# under the following directories: # under the following directories:
# - root of the kloudbuster package
# - current directory # - current directory
# - home directory
# - top directory ("/")
vm_image_file: vm_image_file:
# Keystone admin role name (default should work in most deployments) # Keystone admin role name (default should work in most deployments)
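
As an illustration (assumed file name following the default naming convention), vm_image_file can be left empty as long as the image sits in one of the searched directories:

    # any searched location works, e.g. the current directory
    cp /path/to/kloudbuster-8.0.0.qcow2 .
    # or point to it explicitly in this configuration file:
    #   vm_image_file: /path/to/kloudbuster-8.0.0.qcow2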


@ -21,11 +21,11 @@ from keystoneauth1 import session
import os import os
import re import re
import log as logging import kloudbuster.log as logging
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class Credentials(object): class Credentials():
def get_session(self): def get_session(self):
dct = { dct = {


@ -15,7 +15,7 @@
import json import json
from perf_tool import PerfTool from kloudbuster.perf_tool import PerfTool
from hdrh.histogram import HdrHistogram from hdrh.histogram import HdrHistogram
@ -99,7 +99,7 @@ class FioTool(PerfTool):
histogram.decode_and_add(item['results'][clat]) histogram.decode_and_add(item['results'][clat])
latency_dict = histogram.get_percentile_to_value_dict(perc_list) latency_dict = histogram.get_percentile_to_value_dict(perc_list)
for key, value in latency_dict.iteritems(): for key, value in latency_dict.items():
all_res[clat].append([key, value]) all_res[clat].append([key, value])
all_res[clat].sort() all_res[clat].sort()
@ -108,10 +108,10 @@ class FioTool(PerfTool):
@staticmethod @staticmethod
def consolidate_samples(results, vm_count): def consolidate_samples(results, vm_count):
all_res = FioTool.consolidate_results(results) all_res = FioTool.consolidate_results(results)
total_count = float(len(results)) / vm_count total_count = len(results) // vm_count
if not total_count: if not total_count:
return all_res return all_res
all_res['read_iops'] = int(all_res['read_iops'] / total_count) all_res['read_iops'] = all_res['read_iops'] // total_count
all_res['write_iops'] = int(all_res['write_iops'] / total_count) all_res['write_iops'] = all_res['write_iops'] // total_count
return all_res return all_res


@ -1,4 +1,4 @@
#!/usr/bin/env python #!/usr/bin/env python3
# Copyright 2016 Cisco Systems, Inc. All rights reserved. # Copyright 2016 Cisco Systems, Inc. All rights reserved.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may # Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -26,7 +26,7 @@
# # # #
# It is safe to use the script with the resource list generated by # # It is safe to use the script with the resource list generated by #
# KloudBuster, usage: # # KloudBuster, usage: #
# $ python force_cleanup.py --file kb_20150807_183001_svr.log # # $ python3 force_cleanup.py --file kb_20150807_183001_svr.log #
# # # #
# Note: If running under single-tenant or tenant/user reusing mode, you have # # Note: If running under single-tenant or tenant/user reusing mode, you have #
# to cleanup the server resources first, then client resources. # # to cleanup the server resources first, then client resources. #
@ -57,20 +57,25 @@ import traceback
# openstack python clients # openstack python clients
import cinderclient import cinderclient
from keystoneclient import client as keystoneclient from cinderclient.client import Client as CinderClient
import keystoneclient
from keystoneclient.client import Client as KeystoneClient
import neutronclient import neutronclient
from neutronclient.neutron.client import Client as NeutronClient
from novaclient.client import Client as NovaClient
from novaclient.exceptions import NotFound from novaclient.exceptions import NotFound
from tabulate import tabulate from tabulate import tabulate
# kloudbuster base code # kloudbuster base code
import credentials import kloudbuster.credentials as credentials
resource_name_re = None resource_name_re = None
def prompt_to_run(): def prompt_to_run():
print "Warning: You didn't specify a resource list file as the input. "\ print("Warning: You didn't specify a resource list file as the input. "
"The script will delete all resources shown above." "The script will delete all resources shown above.")
answer = raw_input("Are you sure? (y/n) ") answer = input("Are you sure? (y/n) ")
if answer.lower() != 'y': if answer.lower() != 'y':
sys.exit(0) sys.exit(0)
@ -83,7 +88,7 @@ def fetch_resources(fetcher, options=None):
except Exception as e: except Exception as e:
res_list = [] res_list = []
traceback.print_exc() traceback.print_exc()
print "Warning exception while listing resources:" + str(e) print('Warning exception while listing resources:', str(e))
resources = {} resources = {}
for res in res_list: for res in res_list:
# some objects provide direct access some # some objects provide direct access some
@ -98,16 +103,15 @@ def fetch_resources(fetcher, options=None):
resources[resid] = resname resources[resid] = resname
return resources return resources
class AbstractCleaner(object): class AbstractCleaner(metaclass=ABCMeta):
__metaclass__ = ABCMeta
def __init__(self, res_category, res_desc, resources, dryrun): def __init__(self, res_category, res_desc, resources, dryrun):
self.dryrun = dryrun self.dryrun = dryrun
self.category = res_category self.category = res_category
self.resources = {} self.resources = {}
if not resources: if not resources:
print 'Discovering %s resources...' % (res_category) print('Discovering %s resources...' % (res_category))
for rtype, fetch_args in res_desc.iteritems(): for rtype, fetch_args in res_desc.items():
if resources: if resources:
if rtype in resources: if rtype in resources:
self.resources[rtype] = resources[rtype] self.resources[rtype] = resources[rtype]
@ -116,20 +120,20 @@ class AbstractCleaner(object):
def report_deletion(self, rtype, name): def report_deletion(self, rtype, name):
if self.dryrun: if self.dryrun:
print ' + ' + rtype + ' ' + name + ' should be deleted (but is not deleted: dry run)' print(' + ' + rtype + ' ' + name + ' should be deleted (but is not deleted: dry run)')
else: else:
print ' + ' + rtype + ' ' + name + ' is successfully deleted' print(' + ' + rtype + ' ' + name + ' is successfully deleted')
def report_not_found(self, rtype, name): def report_not_found(self, rtype, name):
print ' ? ' + rtype + ' ' + name + ' not found (already deleted?)' print(' ? ' + rtype + ' ' + name + ' not found (already deleted?)')
def report_error(self, rtype, name, reason): def report_error(self, rtype, name, reason):
print ' - ' + rtype + ' ' + name + ' ERROR:' + reason print(' - ' + rtype + ' ' + name + ' ERROR:' + reason)
def get_resource_list(self): def get_resource_list(self):
result = [] result = []
for rtype, rdict in self.resources.iteritems(): for rtype, rdict in self.resources.items():
for resid, resname in rdict.iteritems(): for resid, resname in rdict.items():
result.append([rtype, resname, resid]) result.append([rtype, resname, resid])
return result return result
@ -139,21 +143,20 @@ class AbstractCleaner(object):
class StorageCleaner(AbstractCleaner): class StorageCleaner(AbstractCleaner):
def __init__(self, sess, resources, dryrun): def __init__(self, sess, resources, dryrun):
from cinderclient import client as cclient
from novaclient import client as nclient
self.nova = nclient.Client('2', endpoint_type='publicURL', session=sess)
self.cinder = cclient.Client('2', endpoint_type='publicURL', session=sess) self.nova = NovaClient('2', endpoint_type='publicURL', session=sess)
self.cinder = CinderClient('2', endpoint_type='publicURL', session=sess)
res_desc = {'volumes': [self.cinder.volumes.list, {"all_tenants": 1}]} res_desc = {'volumes': [self.cinder.volumes.list, {"all_tenants": 1}]}
super(StorageCleaner, self).__init__('Storage', res_desc, resources, dryrun) super(StorageCleaner, self).__init__('Storage', res_desc, resources, dryrun)
def clean(self): def clean(self):
print '*** STORAGE cleanup' print('*** STORAGE cleanup')
try: try:
kb_volumes = [] kb_volumes = []
kb_detaching_volumes = [] kb_detaching_volumes = []
for id, name in self.resources['volumes'].iteritems(): for id, name in self.resources['volumes'].items():
try: try:
vol = self.cinder.volumes.get(id) vol = self.cinder.volumes.get(id)
if vol.attachments: if vol.attachments:
@ -162,15 +165,15 @@ class StorageCleaner(AbstractCleaner):
if not self.dryrun: if not self.dryrun:
ins_id = vol.attachments[0]['server_id'] ins_id = vol.attachments[0]['server_id']
self.nova.volumes.delete_server_volume(ins_id, id) self.nova.volumes.delete_server_volume(ins_id, id)
print ' . VOLUME ' + vol.name + ' detaching...' print(' . VOLUME ' + vol.name + ' detaching...')
else: else:
print ' . VOLUME ' + vol.name + ' to be detached...' print(' . VOLUME ' + vol.name + ' to be detached...')
kb_detaching_volumes.append(vol) kb_detaching_volumes.append(vol)
except NotFound: except NotFound:
print 'WARNING: Volume %s attached to an instance that no longer '\ print('WARNING: Volume %s attached to an instance that no longer '
'exists (will require manual cleanup of the database)' % (id) 'exists (will require manual cleanup of the database)' % id)
except Exception as e: except Exception as e:
print str(e) print(str(e))
else: else:
# no attachments # no attachments
kb_volumes.append(vol) kb_volumes.append(vol)
@ -180,8 +183,8 @@ class StorageCleaner(AbstractCleaner):
# check that the volumes are no longer attached # check that the volumes are no longer attached
if kb_detaching_volumes: if kb_detaching_volumes:
if not self.dryrun: if not self.dryrun:
print ' . Waiting for %d volumes to be fully detached...' % \ print(' . Waiting for %d volumes to be fully detached...' %
(len(kb_detaching_volumes)) (len(kb_detaching_volumes)))
retry_count = 5 + len(kb_detaching_volumes) retry_count = 5 + len(kb_detaching_volumes)
while True: while True:
retry_count -= 1 retry_count -= 1
@ -190,19 +193,19 @@ class StorageCleaner(AbstractCleaner):
latest_vol = self.cinder.volumes.get(kb_detaching_volumes[0].id) latest_vol = self.cinder.volumes.get(kb_detaching_volumes[0].id)
if self.dryrun or not latest_vol.attachments: if self.dryrun or not latest_vol.attachments:
if not self.dryrun: if not self.dryrun:
print ' + VOLUME ' + vol.name + ' detach complete' print(' + VOLUME ' + vol.name + ' detach complete')
kb_detaching_volumes.remove(vol) kb_detaching_volumes.remove(vol)
kb_volumes.append(vol) kb_volumes.append(vol)
if kb_detaching_volumes and not self.dryrun: if kb_detaching_volumes and not self.dryrun:
if retry_count: if retry_count:
print ' . VOLUME %d left to be detached, retries left=%d...' % \ print(' . VOLUME %d left to be detached, retries left=%d...' %
(len(kb_detaching_volumes), retry_count) (len(kb_detaching_volumes), retry_count))
time.sleep(2) time.sleep(2)
else: else:
print ' - VOLUME detach timeout, %d volumes left:' % \ print(' - VOLUME detach timeout, %d volumes left:' %
(len(kb_detaching_volumes)) len(kb_detaching_volumes))
for vol in kb_detaching_volumes: for vol in kb_detaching_volumes:
print ' ', vol.name, vol.status, vol.id, vol.attachments print(' ', vol.name, vol.status, vol.id, vol.attachments)
break break
else: else:
break break
@ -213,17 +216,15 @@ class StorageCleaner(AbstractCleaner):
try: try:
vol.force_delete() vol.force_delete()
except cinderclient.exceptions.BadRequest as exc: except cinderclient.exceptions.BadRequest as exc:
print str(exc) print(str(exc))
self.report_deletion('VOLUME', vol.name) self.report_deletion('VOLUME', vol.name)
except KeyError: except KeyError:
pass pass
class ComputeCleaner(AbstractCleaner): class ComputeCleaner(AbstractCleaner):
def __init__(self, sess, resources, dryrun): def __init__(self, sess, resources, dryrun):
from neutronclient.neutron import client as nclient self.neutron_client = NeutronClient('2.0', endpoint_type='publicURL', session=sess)
from novaclient import client as novaclient self.nova_client = NovaClient('2', endpoint_type='publicURL', session=sess)
self.neutron_client = nclient.Client('2.0', endpoint_type='publicURL', session=sess)
self.nova_client = novaclient.Client('2', endpoint_type='publicURL', session=sess)
res_desc = { res_desc = {
'instances': [self.nova_client.servers.list, {"all_tenants": 1}], 'instances': [self.nova_client.servers.list, {"all_tenants": 1}],
'flavors': [self.nova_client.flavors.list], 'flavors': [self.nova_client.flavors.list],
@ -232,15 +233,16 @@ class ComputeCleaner(AbstractCleaner):
super(ComputeCleaner, self).__init__('Compute', res_desc, resources, dryrun) super(ComputeCleaner, self).__init__('Compute', res_desc, resources, dryrun)
def clean(self): def clean(self):
print '*** COMPUTE cleanup' print('*** COMPUTE cleanup')
try: try:
# Get a list of floating IPs # Get a list of floating IPs
fip_lst = self.neutron_client.list_floatingips()['floatingips'] fip_lst = self.neutron_client.list_floatingips()['floatingips']
deleting_instances = self.resources['instances'] deleting_instances = self.resources['instances']
for id, name in self.resources['instances'].iteritems(): for id, name in self.resources['instances'].items():
try: try:
if self.nova_client.servers.get(id).addresses.values(): addrs = list(self.nova_client.servers.get(id).addresses.values())
ins_addr = self.nova_client.servers.get(id).addresses.values()[0] if addrs:
ins_addr = addrs[0]
fips = [x['addr'] for x in ins_addr if x['OS-EXT-IPS:type'] == 'floating'] fips = [x['addr'] for x in ins_addr if x['OS-EXT-IPS:type'] == 'floating']
else: else:
fips = [] fips = []
@ -259,33 +261,36 @@ class ComputeCleaner(AbstractCleaner):
deleting_instances.remove(id) deleting_instances.remove(id)
self.report_not_found('INSTANCE', name) self.report_not_found('INSTANCE', name)
if not self.dryrun and len(deleting_instances): if not self.dryrun and deleting_instances:
print ' . Waiting for %d instances to be fully deleted...' % \ print(' . Waiting for %d instances to be fully deleted...' %
(len(deleting_instances)) len(deleting_instances))
retry_count = 5 + len(deleting_instances) retry_count = 5 + len(deleting_instances)
while True: while True:
retry_count -= 1 retry_count -= 1
for ins_id in deleting_instances.keys(): # get a copy of the initial list content
instances_list = list(deleting_instances)
for ins_id in instances_list:
try: try:
self.nova_client.servers.get(ins_id) self.nova_client.servers.get(ins_id)
except NotFound: except NotFound:
self.report_deletion('INSTANCE', deleting_instances[ins_id]) self.report_deletion('INSTANCE', deleting_instances[ins_id])
deleting_instances.pop(ins_id) deleting_instances.pop(ins_id)
if not len(deleting_instances): if not deleting_instances:
# all deleted
break break
if retry_count: if retry_count:
print ' . INSTANCE %d left to be deleted, retries left=%d...' % \ print(' . INSTANCE %d left to be deleted, retries left=%d...' %
(len(deleting_instances), retry_count) (len(deleting_instances), retry_count))
time.sleep(2) time.sleep(2)
else: else:
print ' - INSTANCE deletion timeout, %d instances left:' % \ print(' - INSTANCE deletion timeout, %d instances left:' %
(len(deleting_instances)) len(deleting_instances))
for ins_id in deleting_instances.keys(): for ins_id in deleting_instances.keys():
try: try:
ins = self.nova_client.servers.get(ins_id) ins = self.nova_client.servers.get(ins_id)
print ' ', ins.name, ins.status, ins.id print(' ', ins.name, ins.status, ins.id)
except NotFound: except NotFound:
print(' ', deleting_instances[ins_id], print(' ', deleting_instances[ins_id],
'(just deleted)', ins_id) '(just deleted)', ins_id)
@ -294,7 +299,7 @@ class ComputeCleaner(AbstractCleaner):
pass pass
try: try:
for id, name in self.resources['flavors'].iteritems(): for id, name in self.resources['flavors'].items():
try: try:
flavor = self.nova_client.flavors.find(name=name) flavor = self.nova_client.flavors.find(name=name)
if not self.dryrun: if not self.dryrun:
@ -306,7 +311,7 @@ class ComputeCleaner(AbstractCleaner):
pass pass
try: try:
for id, name in self.resources['keypairs'].iteritems(): for id, name in self.resources['keypairs'].items():
try: try:
if self.dryrun: if self.dryrun:
self.nova_client.keypairs.get(name) self.nova_client.keypairs.get(name)
@ -321,8 +326,7 @@ class ComputeCleaner(AbstractCleaner):
class NetworkCleaner(AbstractCleaner): class NetworkCleaner(AbstractCleaner):
def __init__(self, sess, resources, dryrun): def __init__(self, sess, resources, dryrun):
from neutronclient.neutron import client as nclient self.neutron = NeutronClient('2.0', endpoint_type='publicURL', session=sess)
self.neutron = nclient.Client('2.0', endpoint_type='publicURL', session=sess)
# because the response has an extra level of indirection # because the response has an extra level of indirection
# we need to extract it to present the list of network or router objects # we need to extract it to present the list of network or router objects
@ -357,10 +361,10 @@ class NetworkCleaner(AbstractCleaner):
pass pass
def clean(self): def clean(self):
print '*** NETWORK cleanup' print('*** NETWORK cleanup')
try: try:
for id, name in self.resources['sec_groups'].iteritems(): for id, name in self.resources['sec_groups'].items():
try: try:
if self.dryrun: if self.dryrun:
self.neutron.show_security_group(id) self.neutron.show_security_group(id)
@ -373,7 +377,7 @@ class NetworkCleaner(AbstractCleaner):
pass pass
try: try:
for id, name in self.resources['floating_ips'].iteritems(): for id, name in self.resources['floating_ips'].items():
try: try:
if self.dryrun: if self.dryrun:
self.neutron.show_floatingip(id) self.neutron.show_floatingip(id)
@ -386,7 +390,7 @@ class NetworkCleaner(AbstractCleaner):
pass pass
try: try:
for id, name in self.resources['routers'].iteritems(): for id, name in self.resources['routers'].items():
try: try:
if self.dryrun: if self.dryrun:
self.neutron.show_router(id) self.neutron.show_router(id)
@ -412,7 +416,7 @@ class NetworkCleaner(AbstractCleaner):
except KeyError: except KeyError:
pass pass
try: try:
for id, name in self.resources['networks'].iteritems(): for id, name in self.resources['networks'].items():
try: try:
if self.dryrun: if self.dryrun:
self.neutron.show_network(id) self.neutron.show_network(id)
@ -429,7 +433,7 @@ class NetworkCleaner(AbstractCleaner):
class KeystoneCleaner(AbstractCleaner): class KeystoneCleaner(AbstractCleaner):
def __init__(self, sess, resources, dryrun): def __init__(self, sess, resources, dryrun):
self.keystone = keystoneclient.Client(endpoint_type='publicURL', session=sess) self.keystone = KeystoneClient(endpoint_type='publicURL', session=sess)
self.tenant_api = self.keystone.tenants \ self.tenant_api = self.keystone.tenants \
if self.keystone.version == 'v2.0' else self.keystone.projects if self.keystone.version == 'v2.0' else self.keystone.projects
res_desc = { res_desc = {
@ -439,9 +443,9 @@ class KeystoneCleaner(AbstractCleaner):
super(KeystoneCleaner, self).__init__('Keystone', res_desc, resources, dryrun) super(KeystoneCleaner, self).__init__('Keystone', res_desc, resources, dryrun)
def clean(self): def clean(self):
print '*** KEYSTONE cleanup' print('*** KEYSTONE cleanup')
try: try:
for id, name in self.resources['users'].iteritems(): for id, name in self.resources['users'].items():
try: try:
if self.dryrun: if self.dryrun:
self.keystone.users.get(id) self.keystone.users.get(id)
@ -454,7 +458,7 @@ class KeystoneCleaner(AbstractCleaner):
pass pass
try: try:
for id, name in self.resources['tenants'].iteritems(): for id, name in self.resources['tenants'].items():
try: try:
if self.dryrun: if self.dryrun:
self.tenant_api.get(id) self.tenant_api.get(id)
@ -466,7 +470,7 @@ class KeystoneCleaner(AbstractCleaner):
except KeyError: except KeyError:
pass pass
class KbCleaners(object): class KbCleaners():
def __init__(self, creds_obj, resources, dryrun): def __init__(self, creds_obj, resources, dryrun):
self.cleaners = [] self.cleaners = []
@ -479,13 +483,13 @@ class KbCleaners(object):
for cleaner in self.cleaners: for cleaner in self.cleaners:
table.extend(cleaner.get_resource_list()) table.extend(cleaner.get_resource_list())
count = len(table) - 1 count = len(table) - 1
print print()
if count: if count:
print 'SELECTED RESOURCES:' print('SELECTED RESOURCES:')
print tabulate(table, headers="firstrow", tablefmt="psql") print(tabulate(table, headers="firstrow", tablefmt="psql"))
else: else:
print 'There are no resources to delete.' print('There are no resources to delete.')
print print()
return count return count
def clean(self): def clean(self):
@ -511,7 +515,7 @@ def get_resources_from_cleanup_log(logfile):
if not resid: if not resid:
# normally only the keypairs have no ID # normally only the keypairs have no ID
if restype != "keypairs": if restype != "keypairs":
print 'Error: resource type %s has no ID - ignored!!!' % (restype) print('Error: resource type %s has no ID - ignored!!!' % (restype))
else: else:
resid = '0' resid = '0'
if restype not in resources: if restype not in resources:
@ -556,9 +560,9 @@ def main():
try: try:
resource_name_re = re.compile(opts.filter) resource_name_re = re.compile(opts.filter)
except Exception as exc: except Exception as exc:
print 'Provided filter is not a valid python regular expression: ' + opts.filter print('Provided filter is not a valid python regular expression: ' + opts.filter)
print str(exc) print(str(exc))
sys.exit(1) return 1
else: else:
resource_name_re = re.compile('KB') resource_name_re = re.compile('KB')
@ -566,19 +570,21 @@ def main():
cleaners = KbCleaners(cred, resources, opts.dryrun) cleaners = KbCleaners(cred, resources, opts.dryrun)
if opts.dryrun: if opts.dryrun:
print print()
print('!!! DRY RUN - RESOURCES WILL BE CHECKED BUT WILL NOT BE DELETED !!!') print('!!! DRY RUN - RESOURCES WILL BE CHECKED BUT WILL NOT BE DELETED !!!')
print print()
# Display resources to be deleted # Display resources to be deleted
count = cleaners.show_resources() count = cleaners.show_resources()
if not count: if not count:
sys.exit(0) return 0
if not opts.file and not opts.dryrun: if not opts.file and not opts.dryrun:
prompt_to_run() prompt_to_run()
cleaners.clean() cleaners.clean()
return 0
if __name__ == '__main__': if __name__ == '__main__':
main() sys.exit(main())

View File

@ -15,14 +15,15 @@
import os import os
import sys import sys
import yaml import yaml
from pathlib import Path
from __init__ import __version__
from attrdict import AttrDict from attrdict import AttrDict
import log as logging
from oslo_config import cfg from oslo_config import cfg
from pkg_resources import resource_string from pkg_resources import resource_string
import credentials import kloudbuster.credentials as credentials
from kloudbuster.__init__ import __version__
import kloudbuster.log as logging
CONF = cfg.CONF CONF = cfg.CONF
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -68,7 +69,7 @@ def get_absolute_path_for_file(file_name):
return abs_file_path return abs_file_path
class KBConfig(object): class KBConfig():
def __init__(self): def __init__(self):
# The default configuration file for KloudBuster # The default configuration file for KloudBuster
default_cfg = resource_string(__name__, "cfg.scale.yaml") default_cfg = resource_string(__name__, "cfg.scale.yaml")
@ -127,17 +128,13 @@ class KBConfig(object):
# Check if the default image is located at the default locations # Check if the default image is located at the default locations
# if vm_image_file is empty # if vm_image_file is empty
if not self.config_scale['vm_image_file']: if not self.config_scale['vm_image_file']:
# check current directory img_path_list = [os.getcwd(), str(Path.home()), '/']
default_image_file = default_image_name + '.qcow2' for img_path in img_path_list:
if os.path.isfile(default_image_file): default_image_file = os.path.join(img_path, default_image_name + '.qcow2')
self.config_scale['vm_image_file'] = default_image_file
else:
# check at the root of the package
# root is up one level where this module resides
pkg_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
default_image_file = pkg_root + '/' + default_image_file
if os.path.isfile(default_image_file): if os.path.isfile(default_image_file):
self.config_scale['vm_image_file'] = default_image_file self.config_scale['vm_image_file'] = default_image_file
LOG.info('Found VM image: %s', default_image_file)
break
# A bit of config dict surgery, extract out the client and server side # A bit of config dict surgery, extract out the client and server side
# and transplant the remaining (common part) into the client and server dict # and transplant the remaining (common part) into the client and server dict
@ -243,4 +240,4 @@ class KBConfig(object):
self.config_scale['number_tenants'] = 1 self.config_scale['number_tenants'] = 1
except Exception as e: except Exception as e:
LOG.error('Cannot parse the count of tenant/user from the config file.') LOG.error('Cannot parse the count of tenant/user from the config file.')
raise KBConfigParseException(e.message) raise KBConfigParseException(str(e))

View File

@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import log as logging import kloudbuster.log as logging
from time import gmtime from time import gmtime
from time import strftime from time import strftime
@ -21,7 +21,7 @@ LOG = logging.getLogger(__name__)
class KBResTypeInvalid(Exception): class KBResTypeInvalid(Exception):
pass pass
class KBResLogger(object): class KBResLogger():
def __init__(self): def __init__(self):
self.resource_list = {} self.resource_list = {}

View File

@ -12,21 +12,31 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
from __future__ import division
import abc import abc
from collections import deque from collections import deque
import json import json
import log as logging
import redis import redis
import sys
import threading import threading
import time import time
import kloudbuster.log as logging
# A set of warned VM version mismatches # A set of warned VM version mismatches
vm_version_mismatches = set() vm_version_mismatches = set()
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
def cmp(x, y):
"""
Replacement for built-in function cmp that was removed in Python 3
Compare the two objects x and y and return an integer according to
the outcome. The return value is negative if x < y, zero if x == y
and strictly positive if x > y.
"""
return (x > y) - (x < y)
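Because list.sort() and sorted() no longer accept a cmp argument in Python 3, a comparator built on the helper above has to be adapted with functools.cmp_to_key. A minimal sketch (the VM names are made up for illustration):

from functools import cmp_to_key

names = ['KB-I10', 'KB-I2', 'KB-I1']
by_index = sorted(names, key=cmp_to_key(
    lambda x, y: cmp(int(x[x.rfind('I') + 1:]), int(y[y.rfind('I') + 1:]))))
# by_index == ['KB-I1', 'KB-I2', 'KB-I10']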
class KBException(Exception): class KBException(Exception):
pass pass
@ -36,7 +46,7 @@ class KBVMUpException(KBException):
class KBProxyConnectionException(KBException): class KBProxyConnectionException(KBException):
pass pass
class KBRunner(object): class KBRunner():
""" """
Control the testing VMs on the testing cloud Control the testing VMs on the testing cloud
""" """
@ -51,6 +61,7 @@ class KBRunner(object):
self.tool_result = {} self.tool_result = {}
self.agent_version = None self.agent_version = None
self.report = None self.report = None
self.msg_thread = None
# Redis # Redis
self.redis_obj = None self.redis_obj = None
@ -61,13 +72,13 @@ class KBRunner(object):
def msg_handler(self): def msg_handler(self):
for message in self.pubsub.listen(): for message in self.pubsub.listen():
if message['data'] == "STOP": if message['data'] == b"STOP":
break break
LOG.kbdebug(message) LOG.kbdebug(message)
self.message_queue.append(message) self.message_queue.append(message)
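The b"STOP" comparison above is needed because redis-py delivers pub/sub payloads as bytes under Python 3 unless the client is created with decode_responses=True. A short sketch under that assumption (host and channel names are illustrative, not taken from this code):

import redis

r = redis.Redis(host='127.0.0.1', port=6379, db=0)   # payloads arrive as bytes
pubsub = r.pubsub(ignore_subscribe_messages=True)
pubsub.subscribe('kloudbuster')
for message in pubsub.listen():
    if message['data'] == b'STOP':                    # compare against bytes
        break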
def setup_redis(self, redis_server, redis_server_port=6379, timeout=120): def setup_redis(self, redis_server, redis_server_port=6379, timeout=120):
LOG.info("Setting up the redis connections...") LOG.info("Connecting to redis server in proxy VM %s:%d...", redis_server, redis_server_port)
connection_pool = redis.ConnectionPool( connection_pool = redis.ConnectionPool(
host=redis_server, port=redis_server_port, db=0) host=redis_server, port=redis_server_port, db=0)
@ -77,14 +88,11 @@ class KBRunner(object):
success = False success = False
retry_count = max(timeout // self.config.polling_interval, 1) retry_count = max(timeout // self.config.polling_interval, 1)
# Check for connections to redis server # Check for connections to redis server
for retry in xrange(retry_count): for retry in range(retry_count):
try: try:
self.redis_obj.get("test") self.redis_obj.get("test")
success = True success = True
except (redis.exceptions.ConnectionError): except redis.exceptions.ConnectionError:
# clear active exception to avoid the exception summary
# appended to LOG.info by oslo log
sys.exc_clear()
LOG.info("Connecting to redis server... Retry #%d/%d", retry, retry_count) LOG.info("Connecting to redis server... Retry #%d/%d", retry, retry_count)
time.sleep(self.config.polling_interval) time.sleep(self.config.polling_interval)
continue continue
@ -125,9 +133,9 @@ class KBRunner(object):
retry = cnt_succ = cnt_failed = 0 retry = cnt_succ = cnt_failed = 0
clist = self.client_dict.copy() clist = self.client_dict.copy()
samples = [] samples = []
perf_tool = self.client_dict.values()[0].perf_tool perf_tool = list(self.client_dict.values())[0].perf_tool
while (retry < retry_count and len(clist)): while (retry < retry_count and clist):
time.sleep(polling_interval) time.sleep(polling_interval)
sample_count = 0 sample_count = 0
while True: while True:
@ -135,10 +143,9 @@ class KBRunner(object):
msg = self.message_queue.popleft() msg = self.message_queue.popleft()
except IndexError: except IndexError:
# No new message, commands are in executing # No new message, commands are in executing
# clear active exc to prevent LOG pollution
sys.exc_clear()
break break
# pylint: disable=eval-used
payload = eval(msg['data']) payload = eval(msg['data'])
vm_name = payload['sender-id'] vm_name = payload['sender-id']
cmd = payload['cmd'] cmd = payload['cmd']
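The eval() call above parses the payload published by the agents. If the agents only ever publish plain Python-literal dicts, ast.literal_eval would be a stricter alternative; the following is a sketch under that assumption (the payload values are made up), not what the commit does:

import ast

raw = b"{'sender-id': 'KB-PROXY', 'cmd': 'READY', 'data': '8.0.0'}"
payload = ast.literal_eval(raw.decode('utf-8'))
assert payload['cmd'] == 'READY'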
@ -149,11 +156,10 @@ class KBRunner(object):
instance = self.full_client_dict[vm_name] instance = self.full_client_dict[vm_name]
if instance.up_flag: if instance.up_flag:
continue continue
else: clist[vm_name].up_flag = True
clist[vm_name].up_flag = True clist.pop(vm_name)
clist.pop(vm_name) cnt_succ = cnt_succ + 1
cnt_succ = cnt_succ + 1 self.agent_version = payload['data']
self.agent_version = payload['data']
elif cmd == 'REPORT': elif cmd == 'REPORT':
sample_count = sample_count + 1 sample_count = sample_count + 1
# Parse the results from HTTP Tools # Parse the results from HTTP Tools
@ -183,8 +189,8 @@ class KBRunner(object):
else: else:
LOG.error('[%s] received invalid command: %s' + (vm_name, cmd)) LOG.error('[%s] received invalid command: %s' + (vm_name, cmd))
log_msg = "VMs: %d Ready, %d Failed, %d Pending... Retry #%d" %\ log_msg = "VMs: %d Ready, %d Failed, %d Pending... Retry #%d/%d" %\
(cnt_succ, cnt_failed, len(clist), retry) (cnt_succ, cnt_failed, len(clist), retry, retry_count)
if sample_count != 0: if sample_count != 0:
log_msg += " (%d sample(s) received)" % sample_count log_msg += " (%d sample(s) received)" % sample_count
LOG.info(log_msg) LOG.info(log_msg)
@ -202,6 +208,7 @@ class KBRunner(object):
LOG.info("Waiting for agents on VMs to come up...") LOG.info("Waiting for agents on VMs to come up...")
cnt_succ = self.polling_vms(timeout)[0] cnt_succ = self.polling_vms(timeout)[0]
if cnt_succ != len(self.client_dict): if cnt_succ != len(self.client_dict):
print('Exception %d != %d' % (cnt_succ, len(self.client_dict)))
raise KBVMUpException("Some VMs failed to start.") raise KBVMUpException("Some VMs failed to start.")
self.send_cmd('ACK', None, None) self.send_cmd('ACK', None, None)
@ -213,7 +220,7 @@ class KBRunner(object):
self.host_stats[phy_host] = [] self.host_stats[phy_host] = []
self.host_stats[phy_host].append(self.result[vm]) self.host_stats[phy_host].append(self.result[vm])
perf_tool = self.client_dict.values()[0].perf_tool perf_tool = list(self.client_dict.values())[0].perf_tool
for phy_host in self.host_stats: for phy_host in self.host_stats:
self.host_stats[phy_host] = perf_tool.consolidate_results(self.host_stats[phy_host]) self.host_stats[phy_host] = perf_tool.consolidate_results(self.host_stats[phy_host])
@ -224,3 +231,8 @@ class KBRunner(object):
def stop(self): def stop(self):
self.send_cmd('ABORT', None, None) self.send_cmd('ABORT', None, None)
def get_sorted_vm_list(self):
vm_list = list(self.full_client_dict.keys())
vm_list.sort(key=lambda x: int(x[x.rfind('I') + 1:]))
return vm_list

View File

@ -12,11 +12,9 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
from __future__ import division from kloudbuster.kb_runner_base import KBException
from kloudbuster.kb_runner_base import KBRunner
from kb_runner_base import KBException import kloudbuster.log as logging
from kb_runner_base import KBRunner
import log as logging
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -84,8 +82,7 @@ class KBRunner_HTTP(KBRunner):
self.check_http_service(active_range) self.check_http_service(active_range)
if self.config.prompt_before_run: if self.config.prompt_before_run:
print "Press enter to start running benchmarking tools..." _ = input("Press enter to start running benchmarking tools...")
raw_input()
LOG.info("Running HTTP Benchmarking...") LOG.info("Running HTTP Benchmarking...")
self.report = {'seq': 0, 'report': None} self.report = {'seq': 0, 'report': None}
@ -93,9 +90,10 @@ class KBRunner_HTTP(KBRunner):
self.run_http_test(active_range) self.run_http_test(active_range)
# Call the method in corresponding tools to consolidate results # Call the method in corresponding tools to consolidate results
perf_tool = self.client_dict.values()[0].perf_tool perf_tool = list(self.client_dict.values())[0].perf_tool
LOG.kbdebug(self.result.values()) results = list(self.result.values())
self.tool_result = perf_tool.consolidate_results(self.result.values()) LOG.kbdebug(results)
self.tool_result = perf_tool.consolidate_results(results)
self.tool_result['http_rate_limit'] =\ self.tool_result['http_rate_limit'] =\
len(self.client_dict) * self.config.http_tool_configs.rate_limit len(self.client_dict) * self.config.http_tool_configs.rate_limit
self.tool_result['total_connections'] =\ self.tool_result['total_connections'] =\
@ -120,8 +118,7 @@ class KBRunner_HTTP(KBRunner):
multiple = self.config.progression.vm_multiple multiple = self.config.progression.vm_multiple
limit = self.config.progression.http_stop_limit limit = self.config.progression.http_stop_limit
timeout = self.config.http_tool_configs.timeout timeout = self.config.http_tool_configs.timeout
vm_list = self.full_client_dict.keys() vm_list = self.get_sorted_vm_list()
vm_list.sort(cmp=lambda x, y: cmp(int(x[x.rfind('I') + 1:]), int(y[y.rfind('I') + 1:])))
self.client_dict = {} self.client_dict = {}
cur_stage = 1 cur_stage = 1
@ -137,7 +134,7 @@ class KBRunner_HTTP(KBRunner):
if self.tool_result and 'latency_stats' in self.tool_result: if self.tool_result and 'latency_stats' in self.tool_result:
err = self.tool_result['http_sock_err'] + self.tool_result['http_sock_timeout'] err = self.tool_result['http_sock_err'] + self.tool_result['http_sock_timeout']
pert_dict = dict(self.tool_result['latency_stats']) pert_dict = dict(self.tool_result['latency_stats'])
if limit[1] in pert_dict.keys(): if limit[1] in pert_dict:
timeout_at_percentile = pert_dict[limit[1]] // 1000000 timeout_at_percentile = pert_dict[limit[1]] // 1000000
elif limit[1] != 0: elif limit[1] != 0:
LOG.warning('Percentile %s%% is not a standard statistic point.' % limit[1]) LOG.warning('Percentile %s%% is not a standard statistic point.' % limit[1])
@ -146,7 +143,7 @@ class KBRunner_HTTP(KBRunner):
'reaches the stop limit.') 'reaches the stop limit.')
break break
for idx in xrange(cur_vm_count, target_vm_count): for idx in range(cur_vm_count, target_vm_count):
self.client_dict[vm_list[idx]] = self.full_client_dict[vm_list[idx]] self.client_dict[vm_list[idx]] = self.full_client_dict[vm_list[idx]]
description = "-- %s --" % self.header_formatter(cur_stage, len(self.client_dict)) description = "-- %s --" % self.header_formatter(cur_stage, len(self.client_dict))
LOG.info(description) LOG.info(description)

View File

@ -12,11 +12,9 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
from __future__ import division from kloudbuster.kb_runner_base import KBException
from kloudbuster.kb_runner_base import KBRunner
from kb_runner_base import KBException import kloudbuster.log as logging
from kb_runner_base import KBRunner
import log as logging
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class KBMulticastServerUpException(KBException): class KBMulticastServerUpException(KBException):
@ -41,12 +39,12 @@ class KBRunner_Multicast(KBRunner):
def setup_static_route(self, active_range, timeout=30): def setup_static_route(self, active_range, timeout=30):
func = {'cmd': 'setup_static_route', 'active_range': active_range} func = {'cmd': 'setup_static_route', 'active_range': active_range}
self.send_cmd('EXEC', 'multicast', func) self.send_cmd('EXEC', 'multicast', func)
self.polling_vms(timeout)[0] _ = self.polling_vms(timeout)[0]
def check_multicast_service(self, active_range, timeout=30): def check_multicast_service(self, active_range, timeout=30):
func = {'cmd': 'check_multicast_service', 'active_range': active_range} func = {'cmd': 'check_multicast_service', 'active_range': active_range}
self.send_cmd('EXEC', 'multicast', func) self.send_cmd('EXEC', 'multicast', func)
self.polling_vms(timeout)[0] _ = self.polling_vms(timeout)[0]
def run_multicast_test(self, active_range, opts, timeout): def run_multicast_test(self, active_range, opts, timeout):
func = {'cmd': 'run_multicast_test', 'active_range': active_range, func = {'cmd': 'run_multicast_test', 'active_range': active_range,
@ -61,10 +59,10 @@ class KBRunner_Multicast(KBRunner):
@staticmethod @staticmethod
def json_to_csv(jsn): def json_to_csv(jsn):
csv = "Test,receivers,addresses,ports,bitrate,pkt_size," csv = "Test,receivers,addresses,ports,bitrate,pkt_size,"
firstKey = [x for x in jsn.keys()][0] firstKey = list(jsn)[0]
keys = jsn[firstKey].keys() keys = jsn[firstKey].keys()
csv += ",".join(keys) + "\r\n" csv += ",".join(keys) + "\r\n"
for obj_k in jsn.keys(): for obj_k in jsn:
obj = jsn[obj_k] obj = jsn[obj_k]
obj_vals = map(str, obj.values()) obj_vals = map(str, obj.values())
csv += '"' + obj_k + '"' + "," + obj_k + "," + ",".join(obj_vals) + "\r\n" csv += '"' + obj_k + '"' + "," + obj_k + "," + ",".join(obj_vals) + "\r\n"
@ -81,8 +79,7 @@ class KBRunner_Multicast(KBRunner):
self.check_multicast_service(active_range) self.check_multicast_service(active_range)
if self.config.prompt_before_run: if self.config.prompt_before_run:
print "Press enter to start running benchmarking tools..." _ = input("Press enter to start running benchmarking tools...")
raw_input()
LOG.info("Running Multicast Benchmarking...") LOG.info("Running Multicast Benchmarking...")
self.report = {'seq': 0, 'report': None} self.report = {'seq': 0, 'report': None}
@ -101,8 +98,7 @@ class KBRunner_Multicast(KBRunner):
def run(self, test_only=False, run_label=None): def run(self, test_only=False, run_label=None):
self.tool_result = {} self.tool_result = {}
vm_list = self.full_client_dict.keys() vm_list = self.get_sorted_vm_list()
vm_list.sort(cmp=lambda x, y: cmp(int(x[x.rfind('I') + 1:]), int(y[y.rfind('I') + 1:])))
self.client_dict = {} self.client_dict = {}
cur_stage = 1 cur_stage = 1
@ -125,7 +121,7 @@ class KBRunner_Multicast(KBRunner):
server_port = 5000 server_port = 5000
for nReceiver in receivers: for nReceiver in receivers:
for idx in range(0, nReceiver): for _ in range(0, nReceiver):
self.client_dict[vm_list[0]] = self.full_client_dict[vm_list[0]] self.client_dict[vm_list[0]] = self.full_client_dict[vm_list[0]]
if nReceiver > 1: if nReceiver > 1:

View File

@ -12,11 +12,9 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
from __future__ import division from kloudbuster.kb_runner_base import KBException
from kloudbuster.kb_runner_base import KBRunner
from kb_runner_base import KBException import kloudbuster.log as logging
from kb_runner_base import KBRunner
import log as logging
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -74,8 +72,7 @@ class KBRunner_Storage(KBRunner):
# timeout is calculated as 30s/GB/client VM # timeout is calculated as 30s/GB/client VM
timeout = 60 * self.config.storage_stage_configs.io_file_size * len(self.client_dict) timeout = 60 * self.config.storage_stage_configs.io_file_size * len(self.client_dict)
parameter = {'size': str(self.config.storage_stage_configs.io_file_size) + 'GiB'} parameter = {'size': str(self.config.storage_stage_configs.io_file_size) + 'GiB'}
parameter['mkfs'] = True \ parameter['mkfs'] = bool(self.config.storage_stage_configs.target == 'volume')
if self.config.storage_stage_configs.target == 'volume' else False
func = {'cmd': 'init_volume', 'active_range': active_range, func = {'cmd': 'init_volume', 'active_range': active_range,
'parameter': parameter} 'parameter': parameter}
@ -114,11 +111,10 @@ class KBRunner_Storage(KBRunner):
self.init_volume(active_range) self.init_volume(active_range)
if self.config.prompt_before_run: if self.config.prompt_before_run:
print "Press enter to start running benchmarking tools..." _ = input("Press enter to start running benchmarking tools...")
raw_input()
test_count = len(self.config.storage_tool_configs) test_count = len(self.config.storage_tool_configs)
perf_tool = self.client_dict.values()[0].perf_tool perf_tool = list(self.client_dict.values())[0].perf_tool
self.tool_result = [] self.tool_result = []
vm_count = active_range[1] - active_range[0] + 1\ vm_count = active_range[1] - active_range[0] + 1\
if active_range else len(self.full_client_dict) if active_range else len(self.full_client_dict)
@ -130,9 +126,10 @@ class KBRunner_Storage(KBRunner):
timeout_vms = self.run_storage_test(active_range, dict(cur_config)) timeout_vms = self.run_storage_test(active_range, dict(cur_config))
# Call the method in corresponding tools to consolidate results # Call the method in corresponding tools to consolidate results
LOG.kbdebug(self.result.values()) results = list(self.result.values())
LOG.kbdebug(results)
tc_result = perf_tool.consolidate_results(self.result.values()) tc_result = perf_tool.consolidate_results(results)
tc_result['description'] = cur_config['description'] tc_result['description'] = cur_config['description']
tc_result['mode'] = cur_config['mode'] tc_result['mode'] = cur_config['mode']
tc_result['block_size'] = cur_config['block_size'] tc_result['block_size'] = cur_config['block_size']
@ -168,8 +165,8 @@ class KBRunner_Storage(KBRunner):
start = self.config.progression.vm_start start = self.config.progression.vm_start
multiple = self.config.progression.vm_multiple multiple = self.config.progression.vm_multiple
limit = self.config.progression.storage_stop_limit limit = self.config.progression.storage_stop_limit
vm_list = self.full_client_dict.keys() vm_list = self.get_sorted_vm_list()
vm_list.sort(cmp=lambda x, y: cmp(int(x[x.rfind('I') + 1:]), int(y[y.rfind('I') + 1:])))
self.client_dict = {} self.client_dict = {}
cur_stage = 1 cur_stage = 1
@ -183,7 +180,7 @@ class KBRunner_Storage(KBRunner):
if target_vm_count > len(self.full_client_dict): if target_vm_count > len(self.full_client_dict):
break break
for idx in xrange(cur_vm_count, target_vm_count): for idx in range(cur_vm_count, target_vm_count):
self.client_dict[vm_list[idx]] = self.full_client_dict[vm_list[idx]] self.client_dict[vm_list[idx]] = self.full_client_dict[vm_list[idx]]
description = "-- %s --" % self.header_formatter(cur_stage, len(self.client_dict)) description = "-- %s --" % self.header_formatter(cur_stage, len(self.client_dict))
@ -210,9 +207,8 @@ class KBRunner_Storage(KBRunner):
if req_iops or req_rate: if req_iops or req_rate:
degrade_iops = (req_iops - cur_iops) * 100 / req_iops if req_iops else 0 degrade_iops = (req_iops - cur_iops) * 100 / req_iops if req_iops else 0
degrade_rate = (req_rate - cur_rate) * 100 / req_rate if req_rate else 0 degrade_rate = (req_rate - cur_rate) * 100 / req_rate if req_rate else 0
if ((cur_tc['mode'] in ['randread', 'randwrite'] and if (cur_tc['mode'] in ['randread', 'randwrite'] and degrade_iops > limit) or \
degrade_iops > limit) (cur_tc['mode'] in ['read', 'write'] and degrade_rate > limit):
or (cur_tc['mode'] in ['read', 'write'] and degrade_rate > limit)):
LOG.warning('KloudBuster is stopping the iteration ' LOG.warning('KloudBuster is stopping the iteration '
'because the result reaches the stop limit.') 'because the result reaches the stop limit.')
tc_flag = False tc_flag = False

View File

@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import log as logging import kloudbuster.log as logging
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -22,7 +22,7 @@ class KBVMMappingAlgoNotSup(Exception):
class KBVMPlacementAlgoNotSup(Exception): class KBVMPlacementAlgoNotSup(Exception):
pass pass
class KBScheduler(object): class KBScheduler():
""" """
1. VM Placements 1. VM Placements
2. Mapping client VMs to target servers 2. Mapping client VMs to target servers

View File

@ -1 +0,0 @@
../kb_dib/elements/kloudbuster/static/kb_test/kb_vm_agent.py

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python #!/usr/bin/env python3
# Copyright 2016 Cisco Systems, Inc. All rights reserved. # Copyright 2016 Cisco Systems, Inc. All rights reserved.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may # Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
from __init__ import __version__ from kloudbuster.__init__ import __version__
from concurrent.futures import ThreadPoolExecutor from concurrent.futures import ThreadPoolExecutor
import datetime import datetime
@ -26,30 +26,32 @@ import time
import traceback import traceback
import webbrowser import webbrowser
import base_compute from cinderclient.client import Client as CinderClient
import base_network
from cinderclient import client as cinderclient
from glanceclient import exc as glance_exception from glanceclient import exc as glance_exception
from glanceclient.v2 import client as glanceclient from glanceclient.v2.client import Client as GlanceClient
from kb_config import KBConfig
from kb_res_logger import KBResLogger
from kb_runner_base import KBException
from kb_runner_http import KBRunner_HTTP
from kb_runner_multicast import KBRunner_Multicast
from kb_runner_storage import KBRunner_Storage
from kb_scheduler import KBScheduler
import keystoneauth1 import keystoneauth1
from keystoneclient import client as keystoneclient from keystoneclient import client as keystoneclient
import log as logging from neutronclient.neutron.client import Client as NeutronClient
from neutronclient.neutron import client as neutronclient from novaclient.client import Client as NovaClient
from novaclient import client as novaclient
from oslo_config import cfg from oslo_config import cfg
from pkg_resources import resource_filename from pkg_resources import resource_filename
from pkg_resources import resource_string from pkg_resources import resource_string
from tabulate import tabulate from tabulate import tabulate
import tenant
import kloudbuster.base_compute as base_compute
import kloudbuster.base_network as base_network
from kloudbuster.kb_config import KBConfig
from kloudbuster.kb_res_logger import KBResLogger
from kloudbuster.kb_runner_base import KBException
from kloudbuster.kb_runner_http import KBRunner_HTTP
from kloudbuster.kb_runner_multicast import KBRunner_Multicast
from kloudbuster.kb_runner_storage import KBRunner_Storage
from kloudbuster.kb_scheduler import KBScheduler
import kloudbuster.log as logging
import kloudbuster.tenant as tenant
CONF = cfg.CONF CONF = cfg.CONF
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -69,7 +71,7 @@ FLAVOR_KB_CLIENT = 'KB.client'
FLAVOR_KB_SERVER = 'KB.server' FLAVOR_KB_SERVER = 'KB.server'
class Kloud(object): class Kloud():
def __init__(self, scale_cfg, cred, reusing_tenants, vm_img, def __init__(self, scale_cfg, cred, reusing_tenants, vm_img,
testing_side=False, storage_mode=False, multicast_mode=False): testing_side=False, storage_mode=False, multicast_mode=False):
self.tenant_list = [] self.tenant_list = []
@ -96,12 +98,12 @@ class Kloud(object):
# these client handles use the kloudbuster credentials (usually admin) # these client handles use the kloudbuster credentials (usually admin)
# to do tenant creation, tenant nova+cinder quota allocation and the like # to do tenant creation, tenant nova+cinder quota allocation and the like
self.keystone = keystoneclient.Client(session=self.osclient_session) self.keystone = keystoneclient.Client(session=self.osclient_session)
self.neutron_client = neutronclient.Client('2.0', endpoint_type='publicURL', self.neutron_client = NeutronClient('2.0', endpoint_type='publicURL',
session=self.osclient_session) session=self.osclient_session)
self.nova_client = novaclient.Client('2', endpoint_type='publicURL', self.nova_client = NovaClient('2', endpoint_type='publicURL',
session=self.osclient_session) session=self.osclient_session)
self.cinder_client = cinderclient.Client('2', endpoint_type='publicURL', self.cinder_client = CinderClient('2', endpoint_type='publicURL',
session=self.osclient_session) session=self.osclient_session)
LOG.info("Creating kloud: " + self.prefix) LOG.info("Creating kloud: " + self.prefix)
if self.placement_az: if self.placement_az:
LOG.info('%s Availability Zone: %s' % (self.name, self.placement_az)) LOG.info('%s Availability Zone: %s' % (self.name, self.placement_az))
@ -113,6 +115,7 @@ class Kloud(object):
flavor_manager = base_compute.Flavor(self.nova_client) flavor_manager = base_compute.Flavor(self.nova_client)
fcand = {'vcpus': sys.maxint, 'ram': sys.maxint, 'disk': sys.maxint} fcand = {'vcpus': sys.maxsize, 'ram': sys.maxsize, 'disk': sys.maxsize}
# find the smallest flavor that is at least 1vcpu, 1024MB ram and 10MB disk # find the smallest flavor that is at least 1vcpu, 1024MB ram and 10MB disk
find_flag = False
for flavor in flavor_manager.list(): for flavor in flavor_manager.list():
flavor = vars(flavor) flavor = vars(flavor)
if flavor['vcpus'] < 1 or flavor['ram'] < 1024 or flavor['disk'] < 10: if flavor['vcpus'] < 1 or flavor['ram'] < 1024 or flavor['disk'] < 10:
@ -150,7 +153,7 @@ class Kloud(object):
reusing_users=user_list) reusing_users=user_list)
self.tenant_list.append(tenant_instance) self.tenant_list.append(tenant_instance)
else: else:
for tenant_count in xrange(self.scale_cfg['number_tenants']): for tenant_count in range(self.scale_cfg['number_tenants']):
tenant_name = self.prefix + "-T" + str(tenant_count) tenant_name = self.prefix + "-T" + str(tenant_count)
tenant_instance = tenant.Tenant(tenant_name, self, tenant_quota) tenant_instance = tenant.Tenant(tenant_name, self, tenant_quota)
self.res_logger.log('tenants', tenant_instance.tenant_name, self.res_logger.log('tenants', tenant_instance.tenant_name,
@ -192,7 +195,7 @@ class Kloud(object):
def delete_resources(self): def delete_resources(self):
if not self.reusing_tenants: if not self.reusing_tenants:
for fn, flavor in self.flavors.iteritems(): for fn, flavor in self.flavors.items():
LOG.info('Deleting flavor %s', fn) LOG.info('Deleting flavor %s', fn)
try: try:
flavor.delete() flavor.delete()
@ -249,7 +252,10 @@ class Kloud(object):
if instance.vol: if instance.vol:
instance.attach_vol() instance.attach_vol()
instance.fixed_ip = instance.instance.networks.values()[0][0] # example:
# instance.instance.networks = OrderedDict([('KBc-T0-U-R0-N0', ['10.1.0.194'])])
# there should be only 1 item in the ordered dict
instance.fixed_ip = list(instance.instance.networks.values())[0][0]
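The extraction that the comment describes can be checked in isolation; this tiny sketch simply restates it with the comment's own example value:

from collections import OrderedDict

networks = OrderedDict([('KBc-T0-U-R0-N0', ['10.1.0.194'])])
fixed_ip = list(networks.values())[0][0]
assert fixed_ip == '10.1.0.194'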
u_fip = instance.config['use_floatingip'] u_fip = instance.config['use_floatingip']
if self.scale_cfg['provider_network']: if self.scale_cfg['provider_network']:
instance.fip = None instance.fip = None
@ -273,13 +279,13 @@ class Kloud(object):
def create_vms(self, vm_creation_concurrency): def create_vms(self, vm_creation_concurrency):
try: try:
with ThreadPoolExecutor(max_workers=vm_creation_concurrency) as executor: with ThreadPoolExecutor(max_workers=vm_creation_concurrency) as executor:
for feature in executor.map(self.create_vm, self.get_all_instances()): for _ in executor.map(self.create_vm, self.get_all_instances()):
self.vm_up_count += 1 self.vm_up_count += 1
except Exception: except Exception as exc:
self.exc_info = sys.exc_info() self.exc_info = exc
class KloudBuster(object): class KloudBuster():
""" """
Creates resources on the cloud for loading up the cloud Creates resources on the cloud for loading up the cloud
1. Tenants 1. Tenants
@ -317,8 +323,8 @@ class KloudBuster(object):
LOG.warning("REUSING MODE: The flavor configs will be ignored.") LOG.warning("REUSING MODE: The flavor configs will be ignored.")
else: else:
self.tenants_list = {'server': None, 'client': None} self.tenants_list = {'server': None, 'client': None}
# TODO(check on same auth_url instead) # !TODO(check on same auth_url instead)
self.single_cloud = False if client_cred else True self.single_cloud = bool(not client_cred)
if not client_cred: if not client_cred:
self.client_cred = server_cred self.client_cred = server_cred
# Automatically enable the floating IP for server cloud under dual-cloud mode # Automatically enable the floating IP for server cloud under dual-cloud mode
@ -340,7 +346,7 @@ class KloudBuster(object):
def get_hypervisor_list(self, cred): def get_hypervisor_list(self, cred):
ret_list = [] ret_list = []
sess = cred.get_session() sess = cred.get_session()
nova_client = novaclient('2', endpoint_type='publicURL', nova_client = NovaClient('2', endpoint_type='publicURL',
http_log_debug=True, session=sess) http_log_debug=True, session=sess)
for hypervisor in nova_client.hypervisors.list(): for hypervisor in nova_client.hypervisors.list():
if vars(hypervisor)['status'] == 'enabled': if vars(hypervisor)['status'] == 'enabled':
@ -351,7 +357,7 @@ class KloudBuster(object):
def get_az_list(self, cred): def get_az_list(self, cred):
ret_list = [] ret_list = []
sess = cred.get_session() sess = cred.get_session()
nova_client = novaclient('2', endpoint_type='publicURL', nova_client = NovaClient('2', endpoint_type='publicURL',
http_log_debug=True, session=sess) http_log_debug=True, session=sess)
for az in nova_client.availability_zones.list(): for az in nova_client.availability_zones.list():
zoneName = vars(az)['zoneName'] zoneName = vars(az)['zoneName']
@ -364,14 +370,11 @@ class KloudBuster(object):
def check_and_upload_image(self, kloud_name, image_name, image_url, sess, retry_count): def check_and_upload_image(self, kloud_name, image_name, image_url, sess, retry_count):
'''Check a VM image and upload it if not found '''Check a VM image and upload it if not found
''' '''
glance_client = glanceclient.Client('2', session=sess) glance_client = GlanceClient('2', session=sess)
try: # Search for the image
# Search for the image images = list(glance_client.images.list(filters={'name': image_name}))
img = glance_client.images.list(filters={'name': image_name}).next() if images:
# image found return images[0]
return img
except StopIteration:
sys.exc_clear()
# Trying to upload image # Trying to upload image
LOG.info("KloudBuster VM Image is not found in %s, trying to upload it..." % kloud_name) LOG.info("KloudBuster VM Image is not found in %s, trying to upload it..." % kloud_name)
@ -381,7 +384,7 @@ class KloudBuster(object):
retry = 0 retry = 0
try: try:
LOG.info("Uploading VM Image from %s..." % image_url) LOG.info("Uploading VM Image from %s..." % image_url)
with open(image_url) as f_image: with open(image_url, "rb") as f_image:
img = glance_client.images.create(name=image_name, img = glance_client.images.create(name=image_name,
disk_format="qcow2", disk_format="qcow2",
container_format="bare", container_format="bare",
@ -411,7 +414,7 @@ class KloudBuster(object):
return None return None
except Exception: except Exception:
LOG.error(traceback.format_exc()) LOG.error(traceback.format_exc())
LOG.error("Failed while uploading the image: %s", str(exc)) LOG.exception("Failed while uploading the image")
return None return None
return img return img
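For reference, the glance v2 create-then-upload flow used in check_and_upload_image looks roughly like the sketch below; the session variable, image name and file path are placeholders, and anything beyond the create() arguments visible in the diff is an assumption:

from glanceclient.v2.client import Client as GlanceClient

glance_client = GlanceClient('2', session=sess)        # sess: an authenticated keystone session
img = glance_client.images.create(name='kloudbuster_vm_image',
                                  disk_format='qcow2',
                                  container_format='bare')
with open('kloudbuster.qcow2', 'rb') as f_image:        # binary mode, as in the fix above
    glance_client.images.upload(img.id, f_image)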
@ -448,7 +451,7 @@ class KloudBuster(object):
row = [instance.vm_name, instance.host, instance.fixed_ip, row = [instance.vm_name, instance.host, instance.fixed_ip,
instance.fip_ip, instance.subnet_ip, instance.shared_interface_ip] instance.fip_ip, instance.subnet_ip, instance.shared_interface_ip]
table.append(row) table.append(row)
LOG.info('Provision Details (Tested Kloud)\n' + LOG.info('Provision Details (Tested Kloud)\n%s',
tabulate(table, headers="firstrow", tablefmt="psql")) tabulate(table, headers="firstrow", tablefmt="psql"))
table = [["VM Name", "Host", "Internal IP", "Floating IP", "Subnet"]] table = [["VM Name", "Host", "Internal IP", "Floating IP", "Subnet"]]
@ -457,7 +460,7 @@ class KloudBuster(object):
row = [instance.vm_name, instance.host, instance.fixed_ip, row = [instance.vm_name, instance.host, instance.fixed_ip,
instance.fip_ip, instance.subnet_ip] instance.fip_ip, instance.subnet_ip]
table.append(row) table.append(row)
LOG.info('Provision Details (Testing Kloud)\n' + LOG.info('Provision Details (Testing Kloud)\n%s',
tabulate(table, headers="firstrow", tablefmt="psql")) tabulate(table, headers="firstrow", tablefmt="psql"))
def gen_server_user_data(self, test_mode): def gen_server_user_data(self, test_mode):
@ -544,7 +547,7 @@ class KloudBuster(object):
self.stage() self.stage()
self.run_test() self.run_test()
except KBException as e: except KBException as e:
LOG.error(e.message) LOG.error(str(e))
except base_network.KBGetProvNetException: except base_network.KBGetProvNetException:
pass pass
except Exception: except Exception:
@ -619,7 +622,7 @@ class KloudBuster(object):
cur_vm_count = 1 if start else multiple cur_vm_count = 1 if start else multiple
# Minus 1 for KB-Proxy # Minus 1 for KB-Proxy
total_vm = self.get_tenant_vm_count(self.client_cfg) - 1 total_vm = self.get_tenant_vm_count(self.client_cfg) - 1
while (cur_vm_count <= total_vm): while cur_vm_count <= total_vm:
log_info += "\n" + self.kb_runner.header_formatter(stage, cur_vm_count) log_info += "\n" + self.kb_runner.header_formatter(stage, cur_vm_count)
cur_vm_count = (stage + 1 - start) * multiple cur_vm_count = (stage + 1 - start) * multiple
stage += 1 stage += 1
@ -653,10 +656,10 @@ class KloudBuster(object):
self.client_vm_create_thread.join() self.client_vm_create_thread.join()
if self.testing_kloud and self.testing_kloud.exc_info: if self.testing_kloud and self.testing_kloud.exc_info:
raise self.testing_kloud.exc_info[1], None, self.testing_kloud.exc_info[2] raise self.testing_kloud.exc_info
if self.kloud and self.kloud.exc_info: if self.kloud and self.kloud.exc_info:
raise self.kloud.exc_info[1], None, self.kloud.exc_info[2] raise self.kloud.exc_info
# Function that print all the provisioning info # Function that print all the provisioning info
self.print_provision_info() self.print_provision_info()
@ -673,11 +676,11 @@ class KloudBuster(object):
while 1: while 1:
if self.interactive: if self.interactive:
print() print()
runlabel = raw_input('>> KB ready, enter label for next run or "q" to quit: ') runlabel = input('>> KB ready, enter label for next run or "q" to quit: ')
if runlabel.lower() == "q": if runlabel.lower() == "q":
break break
for run_result in self.kb_runner.run(test_only, runlabel): for _ in self.kb_runner.run(test_only, runlabel):
if not self.multicast_mode or len(self.final_result['kb_result']) == 0: if not self.multicast_mode or len(self.final_result['kb_result']) == 0:
self.final_result['kb_result'].append(self.kb_runner.tool_result) self.final_result['kb_result'].append(self.kb_runner.tool_result)
if self.tsdb_connector: if self.tsdb_connector:
@ -740,8 +743,7 @@ class KloudBuster(object):
def get_tenant_vm_count(self, config): def get_tenant_vm_count(self, config):
# this does not apply for storage mode! # this does not apply for storage mode!
return (config['routers_per_tenant'] * config['networks_per_router'] * return config['routers_per_tenant'] * config['networks_per_router'] * config['vms_per_network']
config['vms_per_network'])
def calc_neutron_quota(self): def calc_neutron_quota(self):
total_vm = self.get_tenant_vm_count(self.server_cfg) total_vm = self.get_tenant_vm_count(self.server_cfg)
@ -751,7 +753,7 @@ class KloudBuster(object):
self.server_cfg['networks_per_router'] self.server_cfg['networks_per_router']
server_quota['subnet'] = server_quota['network'] server_quota['subnet'] = server_quota['network']
server_quota['router'] = self.server_cfg['routers_per_tenant'] server_quota['router'] = self.server_cfg['routers_per_tenant']
if (self.server_cfg['use_floatingip']): if self.server_cfg['use_floatingip']:
# (1) Each VM has one floating IP # (1) Each VM has one floating IP
# (2) Each Router has one external IP # (2) Each Router has one external IP
server_quota['floatingip'] = total_vm + server_quota['router'] server_quota['floatingip'] = total_vm + server_quota['router']
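A worked example of the floating IP quota formula above (the tenant sizing numbers are purely illustrative):

routers_per_tenant = 2
networks_per_router = 2
vms_per_network = 5

total_vm = routers_per_tenant * networks_per_router * vms_per_network   # 20 server VMs
router_quota = routers_per_tenant                                       # 2 external IPs
floatingip_quota = total_vm + router_quota                              # 22 floating IPs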
@ -775,7 +777,7 @@ class KloudBuster(object):
client_quota['network'] = 1 client_quota['network'] = 1
client_quota['subnet'] = 1 client_quota['subnet'] = 1
client_quota['router'] = 1 client_quota['router'] = 1
if (self.client_cfg['use_floatingip']): if self.client_cfg['use_floatingip']:
# (1) Each VM has one floating IP # (1) Each VM has one floating IP
# (2) Each Router has one external IP, total of 1 router # (2) Each Router has one external IP, total of 1 router
# (3) KB-Proxy node has one floating IP # (3) KB-Proxy node has one floating IP
@ -882,20 +884,23 @@ def generate_charts(json_results, html_file_name, is_config):
'''Save results in HTML format file.''' '''Save results in HTML format file.'''
LOG.info('Saving results to HTML file: ' + html_file_name + '...') LOG.info('Saving results to HTML file: ' + html_file_name + '...')
try: try:
if json_results['test_mode'] == "storage": test_mode = json_results['test_mode']
if test_mode == "storage":
template_path = resource_filename(__name__, 'template_storage.html') template_path = resource_filename(__name__, 'template_storage.html')
elif json_results['test_mode'] == "http": elif test_mode == "http":
template_path = resource_filename(__name__, 'template_http.html') template_path = resource_filename(__name__, 'template_http.html')
else: else:
raise LOG.error('Invalid test mode: %s', test_mode)
return 1
except Exception: except Exception:
LOG.error('Invalid json file.') LOG.error('Invalid json file.')
sys.exit(1) return 1
with open(html_file_name, 'w') as hfp, open(template_path, 'r') as template: with open(html_file_name, 'w') as hfp, open(template_path, 'r') as template:
create_html(hfp, create_html(hfp,
template, template,
json.dumps(json_results, sort_keys=True), json.dumps(json_results, sort_keys=True),
is_config) is_config)
return 0
def main(): def main():
@ -994,26 +999,26 @@ def main():
if CONF.charts_from_json: if CONF.charts_from_json:
if not CONF.html: if not CONF.html:
LOG.error('Destination html filename must be specified using --html.') LOG.error('Destination html filename must be specified using --html.')
sys.exit(1) return 1
with open(CONF.charts_from_json, 'r') as jfp: with open(CONF.charts_from_json, 'r') as jfp:
json_results = json.load(jfp) json_results = json.load(jfp)
generate_charts(json_results, CONF.html, None) generate_charts(json_results, CONF.html, None)
sys.exit(0) return 0
if CONF.show_config: if CONF.show_config:
print resource_string(__name__, "cfg.scale.yaml") print(resource_string(__name__, "cfg.scale.yaml").decode('utf-8'))
sys.exit(0) return 0
if CONF.multicast and CONF.storage: if CONF.multicast and CONF.storage:
LOG.error('--multicast and --storage can not both be chosen.') LOG.error('--multicast and --storage can not both be chosen.')
sys.exit(1) return 1
try: try:
kb_config = KBConfig() kb_config = KBConfig()
kb_config.init_with_cli() kb_config.init_with_cli()
except TypeError: except TypeError:
LOG.exception('Error parsing the configuration file') LOG.exception('Error parsing the configuration file')
sys.exit(1) return 1
# The KloudBuster class is just a wrapper class # The KloudBuster class is just a wrapper class
# leverages tenant and user classes for resource creation and deletion # leverages tenant and user classes for resource creation and deletion
@ -1030,13 +1035,13 @@ def main():
kloudbuster.run() kloudbuster.run()
if CONF.json: if CONF.json:
'''Save results in JSON format file.''' # Save results in JSON format file
LOG.info('Saving results in json file: ' + CONF.json + "...") LOG.info('Saving results in json file: ' + CONF.json + "...")
with open(CONF.json, 'w') as jfp: with open(CONF.json, 'w') as jfp:
json.dump(kloudbuster.final_result, jfp, indent=4, sort_keys=True) json.dump(kloudbuster.final_result, jfp, indent=4, sort_keys=True)
if CONF.multicast and CONF.csv and 'kb_result' in kloudbuster.final_result: if CONF.multicast and CONF.csv and 'kb_result' in kloudbuster.final_result:
'''Save results in JSON format file.''' # Save results in CSV format file
if len(kloudbuster.final_result['kb_result']) > 0: if len(kloudbuster.final_result['kb_result']) > 0:
LOG.info('Saving results in csv file: ' + CONF.csv + "...") LOG.info('Saving results in csv file: ' + CONF.csv + "...")
with open(CONF.csv, 'w') as jfp: with open(CONF.csv, 'w') as jfp:
@ -1044,7 +1049,8 @@ def main():
if CONF.html: if CONF.html:
generate_charts(kloudbuster.final_result, CONF.html, kb_config.config_scale) generate_charts(kloudbuster.final_result, CONF.html, kb_config.config_scale)
return 0
if __name__ == '__main__': if __name__ == '__main__':
main() sys.exit(main())

View File

@ -43,6 +43,8 @@ WARN = logging.WARN
WARNING = logging.WARNING WARNING = logging.WARNING
def setup(product_name, logfile=None): def setup(product_name, logfile=None):
# pylint: disable=protected-access
dbg_color = handlers.ColorHandler.LEVEL_COLORS[logging.DEBUG] dbg_color = handlers.ColorHandler.LEVEL_COLORS[logging.DEBUG]
handlers.ColorHandler.LEVEL_COLORS[logging.KBDEBUG] = dbg_color handlers.ColorHandler.LEVEL_COLORS[logging.KBDEBUG] = dbg_color
CONF.logging_default_format_string = '%(asctime)s %(levelname)s %(message)s' CONF.logging_default_format_string = '%(asctime)s %(levelname)s %(message)s'
@ -62,6 +64,7 @@ def setup(product_name, logfile=None):
project=product_name).logger.setLevel(logging.KBDEBUG) project=product_name).logger.setLevel(logging.KBDEBUG)
def getLogger(name="unknown", version="unknown"): def getLogger(name="unknown", version="unknown"):
# pylint: disable=protected-access
if name not in oslogging._loggers: if name not in oslogging._loggers:
oslogging._loggers[name] = KloudBusterContextAdapter( oslogging._loggers[name] = KloudBusterContextAdapter(
logging.getLogger(name), {"project": "kloudbuster", logging.getLogger(name), {"project": "kloudbuster",

View File

@ -15,7 +15,7 @@
import json import json
from perf_tool import PerfTool from kloudbuster.perf_tool import PerfTool
class NuttcpTool(PerfTool): class NuttcpTool(PerfTool):

View File

@ -13,10 +13,10 @@
# under the License. # under the License.
# #
from base_compute import BaseCompute from kloudbuster.base_compute import BaseCompute
from fio_tool import FioTool from kloudbuster.fio_tool import FioTool
from nuttcp_tool import NuttcpTool from kloudbuster.nuttcp_tool import NuttcpTool
from wrk_tool import WrkTool from kloudbuster.wrk_tool import WrkTool
# An openstack instance (can be a VM or a LXC) # An openstack instance (can be a VM or a LXC)

View File

@ -15,14 +15,13 @@
import abc import abc
import log as logging import kloudbuster.log as logging
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
# A base class for all tools that can be associated to an instance # A base class for all tools that can be associated to an instance
class PerfTool(object): class PerfTool(metaclass=abc.ABCMeta):
__metaclass__ = abc.ABCMeta
def __init__(self, instance, tool_name): def __init__(self, instance, tool_name):
self.instance = instance self.instance = instance
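The PerfTool change above uses the Python 3 way of declaring an abstract base class (the Python 2 __metaclass__ attribute is silently ignored in Python 3). A minimal sketch; the abstract method name is only illustrative:

import abc

class ToolSketch(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def consolidate_results(self, results):
        """Concrete tools must implement result consolidation."""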

View File

@ -17,7 +17,7 @@ import requests
import time import time
class Prometheus(object): class Prometheus():
def __init__(self, config): def __init__(self, config):
self.server_address = "http://{}:{}/api/v1/".format(config['server_ip'], self.server_address = "http://{}:{}/api/v1/".format(config['server_ip'],
config['server_port']) config['server_port'])
@ -38,5 +38,5 @@ class Prometheus(object):
self.step_size)).json() self.step_size)).json()
except requests.exceptions.RequestException as e: except requests.exceptions.RequestException as e:
print e print(e)
return None return None
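For context, the Prometheus class above wraps the standard Prometheus HTTP API; a typical query_range request looks like the sketch below (endpoint, metric name and timestamps are illustrative and not taken from this code):

import requests

resp = requests.get('http://10.0.0.1:9090/api/v1/query_range',
                    params={'query': 'node_cpu_seconds_total',
                            'start': 1595000000,
                            'end': 1595000600,
                            'step': 30})
data = resp.json()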

View File

@ -6,7 +6,6 @@ pytz>=2016.4
pbr>=3.0.1 pbr>=3.0.1
Babel>=2.3.4 Babel>=2.3.4
futures>=3.1.1
python-cinderclient>=2.0.1 python-cinderclient>=2.0.1
python-glanceclient>=2.6.0 python-glanceclient>=2.6.0
python-openstackclient>=3.11.0 python-openstackclient>=3.11.0
@ -27,5 +26,3 @@ tabulate>=0.7.7
pyyaml>=3.12 pyyaml>=3.12
requests requests
# Workaround for pip install failed on RHEL/CentOS
functools32>=3.2.3

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python #!/usr/bin/env python3
# Copyright 2016 Cisco Systems, Inc. All rights reserved. # Copyright 2016 Cisco Systems, Inc. All rights reserved.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may # Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -22,7 +22,7 @@ def exec_command(cmd, cwd=None, show_console=False):
p = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) p = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if show_console: if show_console:
for line in iter(p.stdout.readline, b""): for line in iter(p.stdout.readline, b""):
print line, print(line.decode(), end='')
p.communicate() p.communicate()
return p.returncode return p.returncode
@ -40,10 +40,10 @@ def launch_kb(cwd):
except OSError: except OSError:
continue continue
if os.uname()[0] == "Darwin": if os.uname()[0] == "Darwin":
print print()
print "To run the KloudBuster web server you need to install the coreutils package:" print("To run the KloudBuster web server you need to install the coreutils package:")
print " brew install coreutils" print(" brew install coreutils")
print print()
raise OSError('Cannot find stdbuf or gstdbuf command') raise OSError('Cannot find stdbuf or gstdbuf command')
def main(): def main():
@ -52,8 +52,9 @@ def main():
try: try:
return launch_kb(cwd) return launch_kb(cwd)
except KeyboardInterrupt: except KeyboardInterrupt:
print 'Terminating server...' print('Terminating server...')
return 1 return 1
if __name__ == '__main__': if __name__ == '__main__':
sys.exit(main()) sys.exit(main())

View File

@ -12,20 +12,19 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import base_compute
import base_network
import base_storage
from keystoneclient import exceptions as keystone_exception from keystoneclient import exceptions as keystone_exception
import log as logging import kloudbuster.base_compute as base_compute
import users import kloudbuster.base_network as base_network
import kloudbuster.base_storage as base_storage
import kloudbuster.log as logging
import kloudbuster.users as users
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class KBQuotaCheckException(Exception): class KBQuotaCheckException(Exception):
pass pass
class Tenant(object): class Tenant():
""" """
Holds the tenant resources Holds the tenant resources
1. Provides ability to create users in a tenant 1. Provides ability to create users in a tenant
@ -109,7 +108,7 @@ class Tenant(object):
meet_quota = True meet_quota = True
quota = quota_manager.get() quota = quota_manager.get()
for key, value in self.tenant_quota[quota_type].iteritems(): for key, value in self.tenant_quota[quota_type].items():
if quota[key] < value: if quota[key] < value:
meet_quota = False meet_quota = False
break break

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python #!/usr/bin/env python3
# Copyright 2018 Cisco Systems, Inc. All rights reserved. # Copyright 2018 Cisco Systems, Inc. All rights reserved.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may # Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -16,7 +16,7 @@
import time import time
class TSDB(object): class TSDB():
def __init__(self, config): def __init__(self, config):
pass pass

View File

@ -12,17 +12,18 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import base_compute import kloudbuster.base_compute as base_compute
import base_network import kloudbuster.base_network as base_network
import kloudbuster.log as logging
from cinderclient import client as cinderclient from cinderclient import client as cinderclient
from keystoneclient import exceptions as keystone_exception from keystoneclient import exceptions as keystone_exception
import log as logging
from neutronclient.neutron import client as neutronclient from neutronclient.neutron import client as neutronclient
from novaclient import client as novaclient from novaclient import client as novaclient
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class User(object): class User():
""" """
User class that stores router list User class that stores router list
Creates and deletes N routers based on num of routers Creates and deletes N routers based on num of routers
@ -143,8 +144,8 @@ class User(object):
self.key_pair.add_public_key(self.key_name, config_scale.public_key_file) self.key_pair.add_public_key(self.key_name, config_scale.public_key_file)
# Find the external network that routers need to attach to # Find the external network that routers need to attach to
if self.tenant.kloud.multicast_mode or ( if self.tenant.kloud.multicast_mode or (self.tenant.kloud.storage_mode and
self.tenant.kloud.storage_mode and config_scale.provider_network): config_scale.provider_network):
router_instance = base_network.Router( router_instance = base_network.Router(
self, provider_network=config_scale.provider_network) self, provider_network=config_scale.provider_network)
self.router_list.append(router_instance) self.router_list.append(router_instance)
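Python 3 dropped implicit relative imports, so the bare "import base_compute" style used under Python 2 no longer resolves once the code runs as an installed package; hence the switch to package-qualified imports throughout. A short sketch of the convention (module names taken from the diff):

# Python 2 relied on implicit relative imports:
#   import base_compute
#   import base_network

# Python 3: import through the installed package, keeping the short local alias:
import kloudbuster.base_compute as base_compute
import kloudbuster.base_network as base_network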

View File

@ -15,10 +15,9 @@
import json import json
from perf_tool import PerfTool
from hdrh.histogram import HdrHistogram from hdrh.histogram import HdrHistogram
import log as logging from kloudbuster.perf_tool import PerfTool
import kloudbuster.log as logging
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -119,7 +118,7 @@ class WrkTool(PerfTool):
err_flag = True err_flag = True
perc_list = [50, 75, 90, 99, 99.9, 99.99, 99.999] perc_list = [50, 75, 90, 99, 99.9, 99.99, 99.999]
latency_dict = histogram.get_percentile_to_value_dict(perc_list) latency_dict = histogram.get_percentile_to_value_dict(perc_list)
for key, value in latency_dict.iteritems(): for key, value in latency_dict.items():
all_res['latency_stats'].append([key, value]) all_res['latency_stats'].append([key, value])
all_res['latency_stats'].sort() all_res['latency_stats'].sort()
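The hdrhistogram floor is raised to 0.8.0 in requirements.txt below, presumably for Python 3 support; get_percentile_to_value_dict() returns a plain dict, so items() and sorted() work directly. A minimal sketch of the percentile extraction pattern, with illustrative latency samples:

from hdrh.histogram import HdrHistogram

# Illustrative latency samples in milliseconds (not real benchmark data).
histogram = HdrHistogram(1, 60000, 2)        # track 1 ms .. 60 s, 2 significant digits
for latency_ms in (3, 5, 8, 13, 21, 500):
    histogram.record_value(latency_ms)

perc_list = [50, 75, 90, 99, 99.9, 99.99, 99.999]
latency_stats = sorted(histogram.get_percentile_to_value_dict(perc_list).items())
print(latency_stats)    # sorted (percentile, value) pairs, as stored in all_res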

287
pylintrc Normal file
View File

@ -0,0 +1,287 @@
[MASTER]
extension-pkg-whitelist=netifaces,lxml
ignore=CVS
ignore-patterns=
jobs=1
limit-inference-results=100
load-plugins=
persistent=yes
suggestion-mode=yes
unsafe-load-any-extension=no
init-hook=import sys; sys.path.append('installer/')
[MESSAGES CONTROL]
confidence=
disable=missing-docstring,
invalid-name,
global-statement,
broad-except,
useless-object-inheritance,
useless-else-on-loop,
no-member,
arguments-differ,
redundant-keyword-arg,
cell-var-from-loop,
no-self-use,
consider-using-set-comprehension,
wrong-import-position,
wrong-import-order,
redefined-outer-name,
no-else-return,
assignment-from-no-return,
dangerous-default-value,
no-name-in-module,
function-redefined,
redefined-builtin,
unused-argument,
too-many-instance-attributes,
too-many-locals,
too-many-function-args,
too-many-branches,
too-many-arguments
enable=c-extension-no-member
[REPORTS]
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
output-format=text
reports=no
score=yes
[REFACTORING]
max-nested-blocks=10
never-returning-functions=sys.exit
[LOGGING]
logging-format-style=old
logging-modules=logging
[SPELLING]
max-spelling-suggestions=4
spelling-dict=
spelling-ignore-words=
spelling-private-dict-file=
spelling-store-unknown-words=no
[MISCELLANEOUS]
notes=XXX,
TODO
[TYPECHECK]
contextmanager-decorators=contextlib.contextmanager
generated-members=
ignore-mixin-members=yes
ignore-none=yes
ignore-on-opaque-inference=yes
missing-member-hint=yes
missing-member-hint-distance=1
missing-member-max-choices=1
[VARIABLES]
additional-builtins=mibBuilder,OPENSTACK_NEUTRON_NETWORK
allow-global-unused-variables=yes
callbacks=cb_,
_cb
dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
ignored-argument-names=_.*|^ignored_|^unused_
init-import=no
redefining-builtins-modules=builtins,io
[FORMAT]
expected-line-ending-format=
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
indent-after-paren=4
indent-string=' '
max-line-length=150
max-module-lines=2500
no-space-check=trailing-comma,
dict-separator
single-line-class-stmt=no
single-line-if-stmt=no
[SIMILARITIES]
ignore-comments=yes
ignore-docstrings=yes
ignore-imports=no
min-similarity-lines=10
[BASIC]
argument-naming-style=snake_case
attr-naming-style=snake_case
bad-names=foo,
bar,
baz,
toto,
tutu,
tata
class-attribute-naming-style=any
class-naming-style=PascalCase
const-naming-style=UPPER_CASE
docstring-min-length=-1
function-naming-style=snake_case
good-names=i,
j,
k,
ex,
Run,
_
include-naming-hint=yes
inlinevar-naming-style=any
method-naming-style=snake_case
module-naming-style=snake_case
name-group=
no-docstring-rgx=^_
property-classes=abc.abstractproperty
variable-naming-style=snake_case
[STRING]
check-str-concat-over-line-jumps=no
[IMPORTS]
allow-wildcard-with-all=no
analyse-fallback-blocks=no
deprecated-modules=optparse,tkinter.tix
ext-import-graph=
import-graph=
int-import-graph=
known-standard-library=
known-third-party=enchant
[CLASSES]
defining-attr-methods=__init__,
__new__,
setUp
exclude-protected=_asdict,
_fields,
_replace,
_source,
_make
valid-classmethod-first-arg=cls
valid-metaclass-classmethod-first-arg=cls
[DESIGN]
max-args=15
max-attributes=32
max-bool-expr=10
max-branches=80
max-locals=40
max-parents=12
max-public-methods=100
max-returns=50
max-statements=300
min-public-methods=0
[EXCEPTIONS]
overgeneral-exceptions=BaseException,
Exception

View File

@ -6,7 +6,6 @@ pytz>=2016.4
pbr>=3.0.1 pbr>=3.0.1
Babel>=2.3.4 Babel>=2.3.4
futures>=3.1.1
python-cinderclient>=2.0.1 python-cinderclient>=2.0.1
python-glanceclient>=2.6.0 python-glanceclient>=2.6.0
python-openstackclient>=3.11.0 python-openstackclient>=3.11.0
@ -14,7 +13,7 @@ python-neutronclient>=6.2.0
python-novaclient>=9.0.0 python-novaclient>=9.0.0
python-keystoneclient>=3.10.0 python-keystoneclient>=3.10.0
attrdict>=2.0.0 attrdict>=2.0.0
hdrhistogram>=0.5.2 hdrhistogram>=0.8.0
# ipaddress is required to get TLS working # ipaddress is required to get TLS working
# otherwise certificates with numeric IP addresses in the ServerAltName field will fail # otherwise certificates with numeric IP addresses in the ServerAltName field will fail
ipaddress>= 1.0.16 ipaddress>= 1.0.16
@ -24,6 +23,3 @@ pecan>=1.2.1
redis>=2.10.5 redis>=2.10.5
tabulate>=0.7.7 tabulate>=0.7.7
pyyaml>=3.12 pyyaml>=3.12
# Workaround for pip install failed on RHEL/CentOS
functools32>=3.2.3
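The futures and functools32 backports are removed because both live in the Python 3 standard library (concurrent.futures and functools.lru_cache respectively). A minimal sketch of the stdlib replacements, with an illustrative worker function and no extra pip dependency:

from concurrent.futures import ThreadPoolExecutor
from functools import lru_cache

@lru_cache(maxsize=128)
def square(x):                 # illustrative cached worker
    return x * x

with ThreadPoolExecutor(max_workers=4) as pool:
    print(list(pool.map(square, range(8))))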

View File

@ -16,8 +16,8 @@ classifier =
Operating System :: POSIX :: Linux Operating System :: POSIX :: Linux
Operating System :: MacOS Operating System :: MacOS
Programming Language :: Python Programming Language :: Python
Programming Language :: Python :: 2 Programming Language :: Python :: 3
Programming Language :: Python :: 2.7 Programming Language :: Python :: 3.6
[files] [files]
packages = packages =

View File

@ -25,6 +25,7 @@ except ImportError:
pass pass
setuptools.setup( setuptools.setup(
setup_requires=['pbr'], setup_requires=['pbr', 'wheel'],
scripts=['kloudbuster/kb_extract_img_from_docker.sh'], scripts=['kloudbuster/kb_extract_img_from_docker.sh'],
pbr=True) pbr=True,
python_requires='>=3.6')
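With wheel added to setup_requires and python_requires pinned to >=3.6, the distributable wheel lands under ./dist/ when built. A hedged sketch of driving that build step from Python (the invocation is assumed, equivalent to running python3 setup.py bdist_wheel):

import subprocess
import sys

# Build the kloudbuster wheel into ./dist/ with the current interpreter.
subprocess.check_call([sys.executable, "setup.py", "bdist_wheel"])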

View File

@ -2,15 +2,6 @@
# of appearance. Changing the order has an impact on the overall integration # of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later. # process, which may cause wedges in the gate later.
hacking<0.11,>=0.10.0
coverage>=3.6
discover
python-subunit>=0.0.18
sphinx>=1.4.0,<2.0 sphinx>=1.4.0,<2.0
sphinx_rtd_theme>=0.1.9 sphinx_rtd_theme>=0.1.9
oslosphinx>=2.5.0 # Apache-2.0 oslosphinx>=2.5.0
oslotest>=1.10.0 # Apache-2.0
testrepository>=0.0.18
testscenarios>=0.4
testtools>=1.4.0

View File

@ -18,3 +18,8 @@ test_kloudbuster
Tests for `kloudbuster` module. Tests for `kloudbuster` module.
""" """
from kloudbuster.kb_config import KBConfig
def test_config():
cfg = KBConfig()
cfg.update_configs()
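The placeholder test module gains a real smoke test that instantiates KBConfig and resolves the configuration. A hedged extension using a pytest fixture; the asserted attribute name is hypothetical and only illustrates where further checks could be attached:

import pytest
from kloudbuster.kb_config import KBConfig

@pytest.fixture
def kb_config():
    cfg = KBConfig()
    cfg.update_configs()
    return cfg

def test_config_is_populated(kb_config):
    # 'config_scale' is an assumed attribute name, used purely for illustration.
    assert getattr(kb_config, "config_scale", None) is not None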

58
tox.ini
View File

@ -1,41 +1,51 @@
[tox] [tox]
minversion = 1.6 minversion = 1.6
envlist = py27,pep8 envlist = py3,pylint,pep8
skipsdist = True skipsdist = True
basepython = python3
[testenv] [testenv:py3]
usedevelop = True deps =
install_command = pip install -U {opts} {packages} pytest>=5.4
setenv = pytest-cov>=2.8
VIRTUAL_ENV={envdir} mock>=4.0
deps = -r{toxinidir}/requirements.txt -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt commands =
# commands = python setup.py test --slowest --testr-args='{posargs}' {posargs:pytest --cov=kloudbuster --cov-report=term-missing -vv tests}
[testenv:pep8] [testenv:pep8]
commands = flake8 deps =
pep8>=1.5.7
flake8>=3.8.3
-r{toxinidir}/requirements.txt
whitelist_externals = flake8
commands = flake8 kloudbuster
[testenv:venv] [testenv:pylint]
commands = {posargs} deps =
pylint>=2.4
[testenv:cover] pytest>=5.4
commands = python setup.py test --coverage --testr-args='{posargs}' pytest-cov>=2.8
mock>=4.0
-r{toxinidir}/requirements.txt
commands = pylint --rcfile=pylintrc kloudbuster
[testenv:docs] [testenv:docs]
commands = python setup.py build_sphinx deps =
sphinx>=1.4.0
[testenv:debug] sphinx_rtd_theme>=0.1.9
commands = oslo_debug_helper {posargs} oslosphinx>=2.5.0
commands = python3 setup.py build_sphinx
[flake8] [flake8]
max-line-length = 100 max-line-length = 150
show-source = True show-source = True
# H233: Python 3.x incompatible use of print operator # E302: expected 2 blank lines
# H236: Python 3.x incompatible __metaclass__, use six.add_metaclass()
# E302: expected 2 blank linee
# E303: too many blank lines (2) # E303: too many blank lines (2)
# H306: imports not in alphabetical order
# H404: multi line docstring should start without a leading new line # H404: multi line docstring should start without a leading new line
# H405: multi line docstring summary not separated with an empty line # H405: multi line docstring summary not separated with an empty line
ignore = H233,H236,E302,E303,H404,H405 # W504 line break after binary operator
ignore = E302,E303,H306,H404,H405,W504
builtins = _ builtins = _
exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build
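W504 joins the ignore list because the code base wraps long boolean conditions by breaking after the operator (see the users.py hunk above), which W504 would otherwise flag. A minimal illustration of the two wrapping styles, with illustrative variable names:

multicast_mode, storage_mode, provider_network = False, True, True

# Break after the binary operator: flagged by W504, now ignored.
keep_router = multicast_mode or (storage_mode and
                                 provider_network)

# Breaking before the operator instead would trigger W503.
keep_router = multicast_mode or (storage_mode
                                 and provider_network)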