Browse Source

Migrate code to python3/Ubuntu 20.04

Change-Id: I18a21e04d009afdee3afc2723afdbade24bfdf71
changes/26/742826/5 8.0.0
ahothan 2 years ago
parent
commit
1316bd443d
  1. 2
      .gitignore
  2. 41
      Dockerfile
  3. 2
      README.rst
  4. 13
      doc/source/readme.rst
  5. 87
      kb_build.sh
  6. 6
      kb_dib/elements/kloudbuster/README.rst
  7. 4
      kb_dib/elements/kloudbuster/package-installs.yaml
  8. 4
      kb_dib/elements/kloudbuster/post-install.d/01-pip-package
  9. 14
      kb_dib/elements/kloudbuster/post-install.d/02-kb-script
  10. 7
      kb_dib/elements/kloudbuster/post-install.d/99-cloudcfg-edit
  11. 81
      kb_dib/elements/kloudbuster/static/kb_test/kb_vm_agent.py
  12. 4
      kb_server/kb_server/controllers/api_cfg.py
  13. 2
      kb_server/kb_server/controllers/api_kb.py
  14. 4
      kb_server/kb_server/controllers/kb_session.py
  15. 4
      kb_server/kb_server/controllers/root.py
  16. 20
      kloudbuster/base_compute.py
  17. 26
      kloudbuster/base_network.py
  18. 6
      kloudbuster/base_storage.py
  19. 3
      kloudbuster/cfg.scale.yaml
  20. 4
      kloudbuster/credentials.py
  21. 10
      kloudbuster/fio_tool.py
  22. 172
      kloudbuster/force_cleanup.py
  23. 25
      kloudbuster/kb_config.py
  24. 4
      kloudbuster/kb_res_logger.py
  25. 58
      kloudbuster/kb_runner_base.py
  26. 25
      kloudbuster/kb_runner_http.py
  27. 24
      kloudbuster/kb_runner_multicast.py
  28. 32
      kloudbuster/kb_runner_storage.py
  29. 4
      kloudbuster/kb_scheduler.py
  30. 1
      kloudbuster/kb_vm_agent.py
  31. 148
      kloudbuster/kloudbuster.py
  32. 3
      kloudbuster/log.py
  33. 2
      kloudbuster/nuttcp_tool.py
  34. 8
      kloudbuster/perf_instance.py
  35. 5
      kloudbuster/perf_tool.py
  36. 4
      kloudbuster/prometheus.py
  37. 3
      kloudbuster/requirements.txt
  38. 15
      kloudbuster/start_server.py
  39. 15
      kloudbuster/tenant.py
  40. 4
      kloudbuster/tsdb.py
  41. 13
      kloudbuster/users.py
  42. 7
      kloudbuster/wrk_tool.py
  43. 287
      pylintrc
  44. 6
      requirements.txt
  45. 4
      setup.cfg
  46. 5
      setup.py
  47. 11
      test-requirements.txt
  48. 0
      tests/__init__.py
  49. 5
      tests/test_kloudbuster.py
  50. 58
      tox.ini

2
.gitignore vendored

@ -68,3 +68,5 @@ scale/dib/kloudbuster.d/
# kb_web
!kb_server/public/ui/components/*/*.css
!kb_server/public/ui/components/*/*.js
.pytest_cache/

41
Dockerfile

@ -1,30 +1,33 @@
# docker file for creating a container that has kloudbuster installed and ready to use
# this will build from the upstream master (latest)
FROM ubuntu:16.04
MAINTAINER kloudbuster-core <kloudbuster-core@lists.launchpad.net>
FROM ubuntu:20.04
# Simpler would be to clone direct from upstream (latest)
# but the content might differ from the current repo
# So we'd rather copy the current kloudbuster directory
# along with the pre-built qcow2 image
COPY ./ /kloudbuster/
# The name of the kloudbuster wheel package
# must be placed under ./dist directory before calling docker build
# example: ./dist/kloudbuster-8.0.0-py3-none-any.whl
ARG WHEEL_PKG
# The name of the kloudbuster VM qcow2 image
# must be placed in the current directory
# example: ./kloudbuster-8.0.0.qcow2
ARG VM_IMAGE
# copy the wheel package so it can be installed inside the container
COPY ./dist/$WHEEL_PKG /
# copy the VM image under /
COPY $VM_IMAGE /
# copy the VM Image
# Install KloudBuster script and dependencies
# Note the dot_git directory must be renamed to .git
# in order for pip install -e . to work properly
RUN apt-get update && apt-get install -y \
git \
libyaml-dev \
python \
python-dev \
python-pip \
&& pip install -U -q pip \
&& hash -r pip \
&& pip install -U -q setuptools \
&& cd /kloudbuster \
&& pip install -q -e . \
&& rm -rf .git \
&& rm -rf /var/lib/apt/lists/* \
&& apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/*
RUN apt-get update \
&& apt-get install -y python3 python3-pip python-is-python3 \
&& pip3 install /$WHEEL_PKG \
&& rm -f /$WHEEL_PKG

2
README.rst

@ -1,5 +1,5 @@
=====================
KloudBuster version 7
KloudBuster version 8
=====================
How good is your OpenStack **data plane** or **storage service** under real

13
doc/source/readme.rst

@ -1,5 +1,5 @@
=====================
KloudBuster version 7
KloudBuster version 8
=====================
How good is your OpenStack **data plane** or **storage service** under real
@ -89,8 +89,6 @@ Feature List
* Aggregated results provide an easy to understand way to assess the scale of
the cloud under test
* KloudBuster VM image pre-built and available from the OpenStack Community App
Catalog (https://apps.openstack.org/)
**Diagrams** describing how the scale test resources are staged and how the
traffic flows are available in :ref:`arch`.
@ -100,6 +98,15 @@ graphical charts generated straight off the tool.
**Examples of results** are available in :ref:`gallery`.
New in Release 8
----------------
* Kloudbuster is now fully python 3 compatible, python 2.7 is no longer supported.
* Validated against the OpenStack Train release
New in Release 7
----------------

87
kb_build.sh

@ -10,12 +10,6 @@ export DIB_DEV_USER_PWDLESS_SUDO=Y
# Set the data sources to have ConfigDrive only
export DIB_CLOUD_INIT_DATASOURCES="ConfigDrive"
# Check we are in a virtual environment
function check_in_venv {
IN_VENV=$(python -c 'import sys; print hasattr(sys, "real_prefix")')
echo $IN_VENV
}
function cleanup_qcow2 {
echo
echo "Error: found unrelated qcow2 files that would make the container image too large."
@ -34,17 +28,12 @@ function build_vm {
fi
echo "Building $kb_image_name.qcow2..."
pip install "diskimage-builder>=2.15"
pip3 install "diskimage-builder>=2.15"
cd ./kb_dib
# Add the kloudbuster elements directory to the DIB elements path
export ELEMENTS_PATH=./elements
# canned user/password for direct login
export DIB_DEV_USER_USERNAME=kloudbuster
export DIB_DEV_USER_PASSWORD=kloudbuster
export DIB_DEV_USER_PWDLESS_SUDO=Y
# Install Ubuntu 18.04
export DIB_RELEASE=bionic
@ -64,10 +53,21 @@ function build_vm {
# Build container
function build_container {
echo "docker build --tag=berrypatch/kloudbuster:$KB_TAG ."
sudo docker build --tag=berrypatch/kloudbuster:$KB_TAG .
echo "sudo docker build --tag=berrypatch/kloudbuster:latest ."
sudo docker build --tag=berrypatch/kloudbuster:latest .
# Create a wheel package
# ./dist/kloudbuster-$KB_TAG-py3-none-any.whl
python setup.py build bdist_wheel || { echo "Error building package"; exit 5; }
wheel_pkg="kloudbuster-$KB_TAG-py3-none-any.whl"
if [ -f ./dist/$wheel_pkg ]; then
echo "Created package: ./dist/$wheel_pkg"
else
echo "Error: Cannot find created package: ./dist/$wheel_pkg"
exit 4
fi
build_args="--build-arg WHEEL_PKG=$wheel_pkg --build-arg VM_IMAGE=$kb_image_name.qcow2"
echo "docker build $build_args --tag=berrypatch/kloudbuster:$KB_TAG ."
sudo docker build $build_args --tag=berrypatch/kloudbuster:$KB_TAG .
echo "sudo docker build $build_args --tag=berrypatch/kloudbuster:latest ."
sudo docker build $build_args --tag=berrypatch/kloudbuster:latest .
}
function help {
@ -78,7 +78,7 @@ function help {
echo "Builds the KloudBuster VM and Docker container images"
echo "The Docker container image will include the VM image for easier upload"
echo
echo "Must run in a virtual environment and must be called from the root of the repository"
echo "Kloudbuster must be installed for this script to run (typically would run from a virtual environment)"
exit 1
}
@ -96,26 +96,49 @@ while [[ $# -gt 0 ]]; do
# Shift after checking all the cases to get the next option
shift
done
in_venv=$(check_in_venv)
if [ $in_venv != "True" ]; then
echo "Error: Must be in a virtual environment to run!"
exit 2
# check that we have python3/pip3 enabled
python -c 'print 0' >/dev/null 2>/dev/null
if [ $? -eq 0 ]; then
echo "Error: python 3 is required as default python version"
exit 3
fi
# check we're at the root of the kloudbuster repo
if [ ! -d kloudbuster -o ! -f Dockerfile ]; then
echo "Error: Must be called from the root of the kloudbuster repository to run!"
exit 2
# check that we are in a virtual environment
INVENV=$(python -c 'import sys;print(hasattr(sys, "real_prefix") or (hasattr(sys, "base_prefix") and sys.base_prefix != sys.prefix))')
if [ $INVENV != "True" ]; then
echo "Error: must run inside a venv as many packages will be installed"
exit 4
fi
# Install kloudbuster in the virtual env
pip install -q -U setuptools
pip install -q -e .
# check that kloudbuster binary is installed
# Get the kloudbuster version (must be retrieved from stderr)
KB_TAG=$(kloudbuster --version 2>&1)
if [ $? != 0 ]; then
echo "Error retrieving kloudbuster version:"
echo
kloudbuster --version
exit 2
echo "Installing kloudbuster..."
# Install kloudbuster in the virtual env in editable mode
pip3 install -q -e .
KB_TAG=$(kloudbuster --version 2>&1)
if [ $? != 0 ]; then
echo "Error: cannot retrieve version from kloudbuster..."
echo
kloudbuster --version
exit 2
fi
fi
# check that docker is installed
if [ $build_vm_only = 0 ]; then
docker --version >/dev/null 2>/dev/null
if [ $? -ne 0 ]; then
echo "Error: docker is not installed"
exit 4
fi
fi
# check we're at the root of the kloudbuster repo
if [ ! -d kloudbuster -o ! -f Dockerfile ]; then
echo "Error: Must be called from the root of the kloudbuster repository to run!"
exit 2
fi
echo

6
kb_dib/elements/kloudbuster/README.rst

@ -10,7 +10,7 @@ The same image can run using one of the following roles (Assigned from the user-
- Client VM for a given traffic type (e.g. http client or tcp/udp client)
- Redis server (only 1 instance in the client cloud)
The default login on the VM is
VMs are launched using cloud config and can be accessed with SSH:
- username: kb
- password: kb
- username: cloud-user
- no password, use key pairs to create the VM

4
kb_dib/elements/kloudbuster/package-installs.yaml

@ -6,8 +6,8 @@ libssl-dev:
libyaml-dev:
nginx:
ntpdate:
python-pip:
python-dev:
python3-pip:
python3-dev:
redis-server:
xfsprogs:
zlib1g-dev:

4
kb_dib/elements/kloudbuster/post-install.d/01-pip-package

@ -1,5 +1,3 @@
#!/bin/sh
pip install --upgrade pip
hash -r pip
pip install setuptools wheel
pip3 install setuptools wheel

14
kb_dib/elements/kloudbuster/post-install.d/02-kb-script

@ -56,7 +56,7 @@ echo 'mkdir -p /mnt/config' >> /etc/rc.local
echo 'mount /dev/disk/by-label/config-2 /mnt/config' >> /etc/rc.local
echo 'cp /mnt/config/openstack/latest/user_data /kb_test/' >> /etc/rc.local
echo 'cd /kb_test' >> /etc/rc.local
echo 'python kb_vm_agent.py &' >> /etc/rc.local
echo 'python3 kb_vm_agent.py &' >> /etc/rc.local
chmod +x /etc/rc.local
# =================
@ -65,24 +65,24 @@ chmod +x /etc/rc.local
cd /kb_test
git clone https://opendev.org/x/kloudbuster.git
cd kloudbuster
pip install -r requirements.txt
pip3 install -r requirements.txt
# ======
# Client
# ======
# python redis client, HdrHistogram_py
pip install redis hdrhistogram
pip3 install redis hdrhistogram
# Install HdrHistogram_c
cd /tmp
git clone git://github.com/HdrHistogram/HdrHistogram_c.git
git clone https://github.com/HdrHistogram/HdrHistogram_c.git
cd HdrHistogram_c
cmake .
make install
# Install the http traffic generator
cd /tmp
git clone git://github.com/yicwang/wrk2.git
git clone https://github.com/yicwang/wrk2.git
cd wrk2
make
mv wrk /usr/local/bin/wrk2
@ -113,7 +113,7 @@ rm -rf /tmp/wrk2
rm -rf /tmp/fio
# Uninstall unneeded packages
apt-get -y --purge remove libyaml-dev libssl-dev zlib1g-dev libaio-dev python-pip python-dev build-essential cmake
apt-get -y --purge remove libyaml-dev libssl-dev zlib1g-dev libaio-dev python3-pip python3-dev build-essential cmake
apt-get -y --purge autoremove
apt-get -y install python
## apt-get -y install python
apt-get -y autoclean

7
kb_dib/elements/kloudbuster/post-install.d/99-cloudcfg-edit

@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
import yaml
cloudcfg = "/etc/cloud/cloud.cfg"
@ -7,11 +7,12 @@ user = "cloud-user"
with open(cloudcfg) as f:
cfg = yaml.safe_load(f)
synver = "1"
try:
if cfg['system_info']['default_user']['name']:
synver = "2"
except KeyError:
synver = "1"
pass
if synver == "1":
if cfg['user'] == user:
@ -27,7 +28,7 @@ elif synver == "2":
# Change the user to cloud-user
cfg['system_info']['default_user']['name'] = user
cfg['system_info']['default_user']['gecos'] = "Cloud User"
print cfg['system_info']['default_user']['name']
print(cfg['system_info']['default_user']['name'])
with open(cloudcfg, "w") as f:
yaml.dump(cfg, f, default_flow_style=False)

81
kb_dib/elements/kloudbuster/static/kb_test/kb_vm_agent.py

@ -13,10 +13,8 @@
# under the License.
#
from hdrh.histogram import HdrHistogram
import json
import multiprocessing
import redis
import socket
import struct
import subprocess
@ -27,6 +25,9 @@ import threading
import time
import traceback
from hdrh.histogram import HdrHistogram
import redis
# Define the version of the KloudBuster agent and VM image
#
# When VM is up running, the agent will send the READY message to the
@ -36,11 +37,11 @@ import traceback
# and can be left constant moving forward.
__version__ = '7'
# TODO(Logging on Agent)
# to add later logging on Agent
def exec_command(cmd, cwd=None):
p = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
(_, stderr) = p.communicate()
if p.returncode:
syslog.syslog("Command failed: " + ' '.join(cmd))
if stderr:
@ -54,7 +55,7 @@ def refresh_clock(clocks, force_sync=False):
command = "sudo ntpdate" + step + clocks
exec_command(command.split(" "))
class KB_Instance(object):
class KB_Instance():
# Check whether the HTTP Service is up running
@staticmethod
@ -73,7 +74,7 @@ class KB_Instance(object):
if if_name:
debug_msg += " and %s" % if_name
cmd += " dev %s" % if_name
print debug_msg
print(debug_msg)
return cmd
@staticmethod
@ -105,7 +106,7 @@ class KB_Instance(object):
else:
debug_msg = "with next hop %s" % if_name
cmd += " dev %s" % if_name
print debug_msg
print(debug_msg)
return cmd
# Run the HTTP benchmarking tool
@ -167,7 +168,7 @@ class KB_Instance(object):
cmd = '%s %s %s %s' % (dest_path, fixed_opt, required_opt, optional_opt)
return cmd
class KBA_Client(object):
class KBA_Client():
def __init__(self, user_data):
host = user_data['redis_server']
@ -185,10 +186,10 @@ class KBA_Client(object):
def setup_channels(self):
# Check for connections to redis server
while (True):
while True:
try:
self.redis_obj.get("test")
except (redis.exceptions.ConnectionError):
except redis.exceptions.ConnectionError:
time.sleep(1)
continue
break
@ -230,6 +231,8 @@ class KBA_Client(object):
self.last_process = p
lines_iterator = iter(p.stdout.readline, b"")
for line in lines_iterator:
# line is bytes, so need to make it a str
line = line.decode('utf-8')
# One exception, if this is the very last report, we will send it
# through "DONE" command, not "REPORT". So what's happening here
# is to determine whether this is the last report.
@ -267,23 +270,25 @@ class KBA_Client(object):
# When 'ACK' is received, means the master node
# acknowledged the current VM. So stopped sending more
# "hello" packet to the master node.
# Unfortunately, there is no thread.stop() in Python 2.x
self.stop_hello.set()
elif message['cmd'] == 'EXEC':
self.last_cmd = ""
arange = message['data']['active_range']
my_id = int(self.vm_name[self.vm_name.rindex('I') + 1:])
if (not arange) or (my_id >= arange[0] and my_id <= arange[1]):
if (not arange) or (arange[0] <= my_id <= arange[1]):
try:
par = message['data'].get('parameter', '')
str_par = 'par' if par else ''
cmd_res_tuple = eval('self.exec_%s(%s)' % (message['data']['cmd'], str_par))
cmd = message['data']['cmd']
if isinstance(cmd, bytes):
cmd = cmd.decode('utf-8')
cmd_res_tuple = eval('self.exec_%s(%s)' % (cmd, str_par))
cmd_res_dict = dict(zip(("status", "stdout", "stderr"), cmd_res_tuple))
except Exception as exc:
except Exception:
cmd_res_dict = {
"status": 1,
"stdout": self.last_cmd,
"stderr": str(exc)
"stderr": traceback.format_exc() + '\nmessage: ' + str(message['data'])
}
if self.__class__.__name__ == "KBA_Multicast_Client":
self.report('DONE_MC', message['client-type'], cmd_res_dict)
@ -291,14 +296,14 @@ class KBA_Client(object):
self.report('DONE', message['client-type'], cmd_res_dict)
else:
# Unexpected
print 'ERROR: Unexpected command received!'
print('ERROR: Unexpected command received!')
class KBA_HTTP_Client(KBA_Client):
def exec_setup_static_route(self):
self.last_cmd = KB_Instance.get_static_route(self.user_data['target_subnet_ip'])
result = self.exec_command(self.last_cmd)
if (self.user_data['target_subnet_ip'] not in result[1]):
if self.user_data['target_subnet_ip'] not in result[1]:
self.last_cmd = KB_Instance.add_static_route(
self.user_data['target_subnet_ip'],
self.user_data['target_shared_interface_ip'])
@ -323,7 +328,7 @@ class KBA_Multicast_Client(KBA_Client):
self.last_cmd = KB_Instance.get_static_route(self.user_data['target_subnet_ip'])
result = self.exec_command(self.last_cmd)
if (self.user_data['target_subnet_ip'] not in result[1]):
if self.user_data['target_subnet_ip'] not in result[1]:
self.last_cmd = KB_Instance.add_static_route(
self.user_data['target_subnet_ip'],
self.user_data['target_shared_interface_ip'])
@ -340,10 +345,10 @@ class KBA_Multicast_Client(KBA_Client):
'megabytes': 'megabytes', 'rate_Mbps': 'mbps', 'msmaxjitter': 'jitter',
'msavgOWD': 'latency'} # Format/Include Keys
try:
return {kmap[k]: abs(float(v))
for (k, v) in [c.split("=")
for c in p_out.split(" ")]
if k in kmap}
return {
kmap[k]: abs(float(v)) for (k, v) in [c.split("=")
for c in p_out.split(" ")] if k in kmap
}
except Exception:
return {'error': '0'}
@ -365,12 +370,12 @@ class KBA_Multicast_Client(KBA_Client):
queue.put([cmds[cmd][0], out])
# End Function #
for cmd in cmds:
for _ in cmds:
multiprocessing.Process(target=spawn, args=(cmd_index, queue)).start()
cmd_index += 1
p_err = ""
try:
while(j < len(cmds)):
while j < len(cmds):
out = queue.get(True, timeout)
key = out[0]
j += 1
@ -500,7 +505,7 @@ class KBA_Storage_Client(KBA_Client):
grp_msb_bits = clat['FIO_IO_U_PLAT_BITS']
buckets_per_grp = clat['FIO_IO_U_PLAT_VAL']
for bucket in xrange(total_buckets):
for bucket in range(total_buckets):
if clat[str(bucket)]:
grp = bucket / buckets_per_grp
subbucket = bucket % buckets_per_grp
@ -511,7 +516,8 @@ class KBA_Storage_Client(KBA_Client):
val = int(base + (base / buckets_per_grp) * (subbucket - 0.5))
histogram.record_value(val, clat[str(bucket)])
p_output['jobs'][0][test]['clat']['hist'] = histogram.encode()
# histogram.encode() returns a base64 compressed histogram as bytes
p_output['jobs'][0][test]['clat']['hist'] = histogram.encode().decode('utf-8')
p_output['jobs'][0][test]['clat'].pop('bins')
p_output['jobs'][0][test]['clat'].pop('percentile')
@ -534,7 +540,7 @@ class KBA_Storage_Client(KBA_Client):
return self.encode_bins(p_out)
class KBA_Server(object):
class KBA_Server():
def __init__(self, user_data):
self.user_data = user_data
@ -544,14 +550,14 @@ class KBA_Server(object):
html_size = self.user_data['http_server_configs']['html_size']
cmd_str = 'dd if=/dev/zero of=/data/www/index.html bs=%s count=1' % html_size
cmd = cmd_str.split()
return False if exec_command(cmd) else True
return not bool(exec_command(cmd))
def start_nginx_server(self):
cmd = ['sudo', 'service', 'nginx', 'start']
return exec_command(cmd)
def start_nuttcp_server(self):
cmd = ['/usr/local/bin/nuttcp', '-S' '-P5000']
cmd = ['/usr/local/bin/nuttcp', '-S', '-P5000']
return exec_command(cmd)
def start_multicast_listener(self, mc_addrs, multicast_ports, start_address="231.0.0.128"):
@ -574,7 +580,7 @@ class KBA_Server(object):
s.bind((m_addr, port))
s.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
while True:
d, e = s.recvfrom(10240)
s.recvfrom(10240)
# End Function #
@ -587,7 +593,7 @@ class KBA_Server(object):
while True:
continue
class KBA_Proxy(object):
class KBA_Proxy():
def start_redis_server(self):
cmd = ['sudo', 'service', 'redis-server', 'start']
return exec_command(cmd)
@ -600,18 +606,19 @@ if __name__ == "__main__":
except Exception as e:
# KloudBuster starts without user-data
cwd = 'kloudbuster/kb_server'
cmd = ['python', 'setup.py', 'develop']
cmd = ['python3', 'setup.py', 'develop']
rc = exec_command(cmd, cwd=cwd)
if not rc:
syslog.syslog("Starting kloudbuster HTTP server")
cmd = ['/usr/local/bin/pecan', 'serve', 'config.py']
sys.exit(exec_command(cmd, cwd=cwd))
if user_data.get('role') == 'KB-PROXY':
role = user_data.get('role')
if role == 'KB-PROXY':
agent = KBA_Proxy()
syslog.syslog("Starting kloudbuster proxy server")
sys.exit(agent.start_redis_server())
if user_data.get('role').endswith('Server'):
if role.endswith('Server'):
agent = KBA_Server(user_data)
if user_data['role'].startswith('Multicast'):
KB_Instance.add_multicast_route()
@ -631,11 +638,11 @@ if __name__ == "__main__":
sys.exit(agent.start_nginx_server())
else:
sys.exit(1)
elif user_data.get('role').endswith('Client'):
if user_data['role'].startswith('HTTP'):
elif role.endswith('Client'):
if role.startswith('HTTP'):
syslog.syslog("Starting kloudbuster HTTP client")
agent = KBA_HTTP_Client(user_data)
elif user_data['role'].startswith('Multicast'):
elif role.startswith('Multicast'):
KB_Instance.add_multicast_route()
refresh_clock(user_data.get('ntp_clocks'), force_sync=True)
agent = KBA_Multicast_Client(user_data)

4
kb_server/kb_server/controllers/api_cfg.py

@ -29,7 +29,7 @@ from pecan import response
LOG = logging.getLogger("kloudbuster")
class ConfigController(object):
class ConfigController():
# Decorator to check for missing or invalid session ID
def check_session_id(func):
@ -198,7 +198,7 @@ class ConfigController(object):
allowed_status = ['READY']
except Exception as e:
response.status = 400
response.text = u"Invalid JSON: \n%s" % (e.message)
response.text = u"Invalid JSON: \n%s" % str(e)
return response.text
# http_tool_configs and storage_tool_config for client VMs is allowed to be

2
kb_server/kb_server/controllers/api_kb.py

@ -26,7 +26,7 @@ from pecan import response
LOG = logging.getLogger("kloudbuster")
class KBController(object):
class KBController():
def __init__(self):
self.kb_thread = None

4
kb_server/kb_server/controllers/kb_session.py

@ -17,7 +17,7 @@ import threading
KB_SESSIONS = {}
KB_SESSIONS_LOCK = threading.Lock()
class KBSessionManager(object):
class KBSessionManager():
@staticmethod
def has(session_id):
@ -46,7 +46,7 @@ class KBSessionManager(object):
KB_SESSIONS_LOCK.release()
class KBSession(object):
class KBSession():
def __init__(self):
self.kb_status = 'READY'
self.first_run = True

4
kb_server/kb_server/controllers/root.py

@ -19,7 +19,7 @@ from pecan import expose
from pecan import response
class APIController(object):
class APIController():
@expose()
def _lookup(self, primary_key, *remainder):
if primary_key == "config":
@ -30,7 +30,7 @@ class APIController(object):
abort(404)
class RootController(object):
class RootController():
@expose()
def index(self):
response.status = 301

20
kloudbuster/base_compute.py

@ -15,7 +15,7 @@
import os
import time
import log as logging
import kloudbuster.log as logging
from novaclient.exceptions import BadRequest
LOG = logging.getLogger(__name__)
@ -24,7 +24,7 @@ class KBVolAttachException(Exception):
pass
class BaseCompute(object):
class BaseCompute():
"""
The Base class for nova compute resources
1. Creates virtual machines with specific configs
@ -46,13 +46,12 @@ class BaseCompute(object):
self.shared_interface_ip = None
self.vol = None
# Create a server instance with associated
# security group, keypair with a provided public key
def create_server(self, image_name, flavor_type, keyname,
nic, sec_group, avail_zone=None, user_data=None,
config_drive=True, retry_count=100):
"""
Create a server instance with associated security group, keypair with a provided public key.
Create a VM instance given following parameters
1. VM Name
2. Image Name
@ -93,6 +92,7 @@ class BaseCompute(object):
LOG.error('Instance creation error:' + instance.fault['message'])
return None
time.sleep(2)
return None
def attach_vol(self):
if self.vol.status != 'available':
@ -117,7 +117,7 @@ class BaseCompute(object):
def detach_vol(self):
if self.instance and self.vol:
attached_vols = self.novaclient.volumes.get_server_volumes(self.instance.id)
if len(attached_vols):
if attached_vols:
try:
self.novaclient.volumes.delete_server_volume(self.instance.id, self.vol.id)
except BadRequest:
@ -133,7 +133,7 @@ class BaseCompute(object):
return flavor
class SecGroup(object):
class SecGroup():
def __init__(self, novaclient, neutronclient):
self.secgroup = None
@ -238,7 +238,7 @@ class SecGroup(object):
LOG.error('Failed while deleting security group %s.' % self.secgroup['id'])
return False
class KeyPair(object):
class KeyPair():
def __init__(self, novaclient):
self.keypair = None
@ -268,7 +268,7 @@ class KeyPair(object):
if self.keypair:
self.novaclient.keypairs.delete(self.keypair)
class Flavor(object):
class Flavor():
def __init__(self, novaclient):
self.novaclient = novaclient
@ -304,7 +304,7 @@ class Flavor(object):
except Exception:
pass
class NovaQuota(object):
class NovaQuota():
def __init__(self, novaclient, tenant_id):
self.novaclient = novaclient

26
kloudbuster/base_network.py

@ -14,11 +14,11 @@
import time
from perf_instance import PerfInstance
from kloudbuster.perf_instance import PerfInstance
import base_compute
import base_storage
import log as logging
import kloudbuster.base_compute as base_compute
import kloudbuster.base_storage as base_storage
import kloudbuster.log as logging
import netaddr
from neutronclient.common.exceptions import NetworkInUseClient
@ -101,7 +101,7 @@ def find_provider_network(neutron_client, name):
networks = neutron_client.list_networks()['networks']
for network in networks:
if network['provider:physical_network']:
if name == "" or name == network['name']:
if name in ("", network['name']):
return network
if name != "":
LOG.error("The provider network: " + name + " was not found.")
@ -116,11 +116,11 @@ def find_first_network(neutron_client):
If no external network is found return None
"""
networks = neutron_client.list_networks()['networks']
if (len(networks) > 0):
if networks:
return networks[0]
return None
class BaseNetwork(object):
class BaseNetwork():
"""
The Base class for neutron network operations
1. Creates networks with 1 subnet inside each network
@ -177,7 +177,7 @@ class BaseNetwork(object):
vol_size = 0
# Schedule to create the required number of VMs
for instance_count in xrange(vm_total):
for instance_count in range(vm_total):
vm_name = network_prefix + "-I" + str(instance_count)
perf_instance = PerfInstance(vm_name, self, config_scale)
self.instance_list.append(perf_instance)
@ -197,7 +197,8 @@ class BaseNetwork(object):
if config_scale['use_floatingip']:
# Create the floating ip for the instance
# store it and the ip address in perf_instance object
perf_instance.fip = create_floating_ip(self.neutron_client, external_network)
port_id = perf_instance.instance.interface_list()[0].id
perf_instance.fip = create_floating_ip(self.neutron_client, external_network, port_id)
perf_instance.fip_ip = perf_instance.fip['floatingip']['floating_ip_address']
self.res_logger.log('floating_ips',
perf_instance.fip['floatingip']['floating_ip_address'],
@ -270,7 +271,7 @@ class BaseNetwork(object):
if len(self.network['subnets']) > 0:
subnet = self.neutron_client.show_subnet(self.network['subnets'][0])['subnet']
self.network['subnet_ip'] = subnet['cidr']
self.network['is_ipv6'] = True if subnet['ipv6_address_mode'] else False
self.network['is_ipv6'] = bool(subnet['ipv6_address_mode'])
def get_cidr_from_subnet_id(self, subnetID):
sub = self.neutron_client.show_subnet(subnetID)
@ -281,6 +282,7 @@ class BaseNetwork(object):
"""Generate next CIDR for network or subnet, without IP overlapping.
"""
global cidr
# pylint: disable=not-callable
cidr = str(netaddr.IPNetwork(cidr).next())
return cidr
@ -304,7 +306,7 @@ class BaseNetwork(object):
def get_all_instances(self):
return self.instance_list
class Router(object):
class Router():
"""
Router class to create a new routers
Supports addition and deletion
@ -496,7 +498,7 @@ class Router(object):
class NeutronQuota(object):
class NeutronQuota():
def __init__(self, neutronclient, tenant_id):
self.neutronclient = neutronclient

6
kloudbuster/base_storage.py

@ -14,14 +14,14 @@
import time
import log as logging
import kloudbuster.log as logging
LOG = logging.getLogger(__name__)
class KBVolCreationException(Exception):
pass
class BaseStorage(object):
class BaseStorage():
"""
The Base class for cinder storage resources
"""
@ -69,7 +69,7 @@ class BaseStorage(object):
# self.cinderclient.volumes.detach(volume)
class CinderQuota(object):
class CinderQuota():
def __init__(self, cinderclient, tenant_id):
self.cinderclient = cinderclient

3
kloudbuster/cfg.scale.yaml

@ -32,8 +32,9 @@ image_name:
# vm_image_file: /kloudbuster/kloudbuster-7.0.0.qcow2
# If empty, KloudBuster will attempt to locate that file (with the default name)
# under the following directories:
# - root of the kloudbuster package
# - current directory
# - home directory
# - top directory ("/")
vm_image_file:
# Keystone admin role name (default should work in most deployments)

4
kloudbuster/credentials.py

@ -21,11 +21,11 @@ from keystoneauth1 import session
import os
import re
import log as logging
import kloudbuster.log as logging
LOG = logging.getLogger(__name__)
class Credentials(object):
class Credentials():
def get_session(self):
dct = {

10
kloudbuster/fio_tool.py

@ -15,7 +15,7 @@
import json
from perf_tool import PerfTool
from kloudbuster.perf_tool import PerfTool
from hdrh.histogram import HdrHistogram
@ -99,7 +99,7 @@ class FioTool(PerfTool):
histogram.decode_and_add(item['results'][clat])
latency_dict = histogram.get_percentile_to_value_dict(perc_list)
for key, value in latency_dict.iteritems():
for key, value in latency_dict.items():
all_res[clat].append([key, value])
all_res[clat].sort()
@ -108,10 +108,10 @@ class FioTool(PerfTool):
@staticmethod
def consolidate_samples(results, vm_count):
all_res = FioTool.consolidate_results(results)
total_count = float(len(results)) / vm_count
total_count = len(results) // vm_count
if not total_count:
return all_res
all_res['read_iops'] = int(all_res['read_iops'] / total_count)
all_res['write_iops'] = int(all_res['write_iops'] / total_count)
all_res['read_iops'] = all_res['read_iops'] // total_count
all_res['write_iops'] = all_res['write_iops'] // total_count
return all_res

172
kloudbuster/force_cleanup.py

@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
# Copyright 2016 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -26,7 +26,7 @@
# #
# It is safe to use the script with the resource list generated by #
# KloudBuster, usage: #
# $ python force_cleanup.py --file kb_20150807_183001_svr.log #
# $ python3 force_cleanup.py --file kb_20150807_183001_svr.log #
# #
# Note: If running under single-tenant or tenant/user reusing mode, you have #
# to cleanup the server resources first, then client resources. #
@ -57,20 +57,25 @@ import traceback
# openstack python clients
import cinderclient
from keystoneclient import client as keystoneclient
from cinderclient.client import Client as CinderClient
import keystoneclient
from keystoneclient.client import Client as KeystoneClient
import neutronclient
from neutronclient.neutron.client import Client as NeutronClient
from novaclient.client import Client as NovaClient
from novaclient.exceptions import NotFound
from tabulate import tabulate
# kloudbuster base code
import credentials
import kloudbuster.credentials as credentials
resource_name_re = None
def prompt_to_run():
print "Warning: You didn't specify a resource list file as the input. "\
"The script will delete all resources shown above."
answer = raw_input("Are you sure? (y/n) ")
print("Warning: You didn't specify a resource list file as the input. "
"The script will delete all resources shown above.")
answer = input("Are you sure? (y/n) ")
if answer.lower() != 'y':
sys.exit(0)
@ -83,7 +88,7 @@ def fetch_resources(fetcher, options=None):
except Exception as e:
res_list = []
traceback.print_exc()
print "Warning exception while listing resources:" + str(e)
print('Warning exception while listing resources:', str(e))
resources = {}
for res in res_list:
# some objects provide direct access some
@ -98,16 +103,15 @@ def fetch_resources(fetcher, options=None):
resources[resid] = resname
return resources
class AbstractCleaner(object):
__metaclass__ = ABCMeta
class AbstractCleaner(metaclass=ABCMeta):
def __init__(self, res_category, res_desc, resources, dryrun):
self.dryrun = dryrun
self.category = res_category
self.resources = {}
if not resources:
print 'Discovering %s resources...' % (res_category)
for rtype, fetch_args in res_desc.iteritems():
print('Discovering %s resources...' % (res_category))
for rtype, fetch_args in res_desc.items():
if resources:
if rtype in resources:
self.resources[rtype] = resources[rtype]
@ -116,20 +120,20 @@ class AbstractCleaner(object):
def report_deletion(self, rtype, name):
if self.dryrun:
print ' + ' + rtype + ' ' + name + ' should be deleted (but is not deleted: dry run)'
print(' + ' + rtype + ' ' + name + ' should be deleted (but is not deleted: dry run)')
else:
print ' + ' + rtype + ' ' + name + ' is successfully deleted'
print(' + ' + rtype + ' ' + name + ' is successfully deleted')
def report_not_found(self, rtype, name):
print ' ? ' + rtype + ' ' + name + ' not found (already deleted?)'
print(' ? ' + rtype + ' ' + name + ' not found (already deleted?)')
def report_error(self, rtype, name, reason):
print ' - ' + rtype + ' ' + name + ' ERROR:' + reason
print(' - ' + rtype + ' ' + name + ' ERROR:' + reason)
def get_resource_list(self):
result = []
for rtype, rdict in self.resources.iteritems():
for resid, resname in rdict.iteritems():
for rtype, rdict in self.resources.items():
for resid, resname in rdict.items():
result.append([rtype, resname, resid])
return result
@ -139,21 +143,20 @@ class AbstractCleaner(object):
class StorageCleaner(AbstractCleaner):
def __init__(self, sess, resources, dryrun):
from cinderclient import client as cclient
from novaclient import client as nclient
self.nova = nclient.Client('2', endpoint_type='publicURL', session=sess)
self.cinder = cclient.Client('2', endpoint_type='publicURL', session=sess)
self.nova = NovaClient('2', endpoint_type='publicURL', session=sess)
self.cinder = CinderClient('2', endpoint_type='publicURL', session=sess)
res_desc = {'volumes': [self.cinder.volumes.list, {"all_tenants": 1}]}
super(StorageCleaner, self).__init__('Storage', res_desc, resources, dryrun)
def clean(self):
print '*** STORAGE cleanup'
print('*** STORAGE cleanup')
try:
kb_volumes = []
kb_detaching_volumes = []
for id, name in self.resources['volumes'].iteritems():
for id, name in self.resources['volumes'].items():
try:
vol = self.cinder.volumes.get(id)
if vol.attachments:
@ -162,15 +165,15 @@ class StorageCleaner(AbstractCleaner):
if not self.dryrun:
ins_id = vol.attachments[0]['server_id']
self.nova.volumes.delete_server_volume(ins_id, id)
print ' . VOLUME ' + vol.name + ' detaching...'
print(' . VOLUME ' + vol.name + ' detaching...')
else:
print ' . VOLUME ' + vol.name + ' to be detached...'
print(' . VOLUME ' + vol.name + ' to be detached...')
kb_detaching_volumes.append(vol)
except NotFound:
print 'WARNING: Volume %s attached to an instance that no longer '\
'exists (will require manual cleanup of the database)' % (id)
print('WARNING: Volume %s attached to an instance that no longer '
'exists (will require manual cleanup of the database)' % id)
except Exception as e:
print str(e)
print(str(e))
else:
# no attachments
kb_volumes.append(vol)
@ -180,8 +183,8 @@ class StorageCleaner(AbstractCleaner):
# check that the volumes are no longer attached
if kb_detaching_volumes:
if not self.dryrun:
print ' . Waiting for %d volumes to be fully detached...' % \
(len(kb_detaching_volumes))
print(' . Waiting for %d volumes to be fully detached...' %
(len(kb_detaching_volumes)))
retry_count = 5 + len(kb_detaching_volumes)
while True:
retry_count -= 1
@ -190,19 +193,19 @@ class StorageCleaner(AbstractCleaner):
latest_vol = self.cinder.volumes.get(kb_detaching_volumes[0].id)
if self.dryrun or not latest_vol.attachments:
if not self.dryrun:
print ' + VOLUME ' + vol.name + ' detach complete'
print(' + VOLUME ' + vol.name + ' detach complete')
kb_detaching_volumes.remove(vol)
kb_volumes.append(vol)
if kb_detaching_volumes and not self.dryrun:
if retry_count:
print ' . VOLUME %d left to be detached, retries left=%d...' % \
(len(kb_detaching_volumes), retry_count)
print(' . VOLUME %d left to be detached, retries left=%d...' %
len(kb_detaching_volumes), retry_count)
time.sleep(2)
else:
print ' - VOLUME detach timeout, %d volumes left:' % \
(len(kb_detaching_volumes))
print(' - VOLUME detach timeout, %d volumes left:' %
len(kb_detaching_volumes))
for vol in kb_detaching_volumes:
print ' ', vol.name, vol.status, vol.id, vol.attachments
print(' ', vol.name, vol.status, vol.id, vol.attachments)
break
else:
break
@ -213,17 +216,15 @@ class StorageCleaner(AbstractCleaner):
try:
vol.force_delete()
except cinderclient.exceptions.BadRequest as exc:
print str(exc)
print(str(exc))
self.report_deletion('VOLUME', vol.name)
except KeyError:
pass
class ComputeCleaner(AbstractCleaner):
def __init__(self, sess, resources, dryrun):
from neutronclient.neutron import client as nclient
from novaclient import client as novaclient
self.neutron_client = nclient.Client('2.0', endpoint_type='publicURL', session=sess)
self.nova_client = novaclient.Client('2', endpoint_type='publicURL', session=sess)
self.neutron_client = NeutronClient('2.0', endpoint_type='publicURL', session=sess)
self.nova_client = NovaClient('2', endpoint_type='publicURL', session=sess)
res_desc = {
'instances': [self.nova_client.servers.list, {"all_tenants": 1}],
'flavors': [self.nova_client.flavors.list],
@ -232,15 +233,16 @@ class ComputeCleaner(AbstractCleaner):
super(ComputeCleaner, self).__init__('Compute', res_desc, resources, dryrun)
def clean(self):
print '*** COMPUTE cleanup'