Remove PerfKitBenchmarker

PerfKitBenchmarker is no longer supported; it was already disabled on Stein and later releases because it lacks Python 3 support. Remove its Ansible install role, the perfkit workload module and schema, the sample configuration entries, the Elasticsearch templates, and the related documentation.
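After this change the only valid workload types are rally and shaker. For example (a minimal sketch; the venv prompt and config file name are taken from the docs updated below):

(browbeat-venv)[stack@ospd browbeat]$ ./browbeat.py rally -s browbeat-config.yaml   # or: shaker, "all"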

Change-Id: Iae8ff4e0a1f55af67b49df16e8ecf276877f2525
Signed-off-by: Charles Short <chucks@redhat.com>
Charles Short 2019-11-20 14:54:21 -05:00
parent a3a3c51152
commit 0fa8454fd1
47 changed files with 11 additions and 1060 deletions

View File

@ -1,6 +1,6 @@
---
#
# Playbook to install Browbeat (Rally + Shaker + PerfKitBenchmarker) on undercloud
# Playbook to install Browbeat (Rally + Shaker) on undercloud
#
- hosts: browbeat
@ -11,7 +11,6 @@
- stockpile
- { role: browbeat-results, when: browbeat_results_in_httpd}
- firewall
- { role: perfkitbenchmarker, when: ansible_distribution_major_version < '8'}
- rally
- shaker
- { role: flavors, when: browbeat_create_flavors}

View File

@ -39,10 +39,6 @@ shaker_venv: "{{browbeat_path}}/.shaker-venv"
# Shaker version to Install
shaker_version: 1.3.1
# PerfKitBenchmarker Settings
perfkit_venv: "{{browbeat_path}}/.perfkit-venv"
perfkit_version: v1.13.0
# Configuration items to adjust browbeat results served through httpd
browbeat_results_port: 9001
browbeat_results_in_httpd: true

View File

@ -39,10 +39,6 @@ shaker_venv: "{{browbeat_path}}/.shaker-venv"
# Shaker version to Install
shaker_version: 1.1.3
# PerfKitBenchmarker Settings
perfkit_venv: "{{browbeat_path}}/.perfkit-venv"
perfkit_version: v1.13.0
# Configuration items to adjust browbeat results served through httpd
browbeat_results_port: 9001
browbeat_results_in_httpd: true

View File

@ -1,52 +0,0 @@
---
#
# Browbeat's PerfKitBenchmarker Install
#
- name: Create perfkit virtualenv
command: virtualenv {{ perfkit_venv }} creates={{ perfkit_venv }}
- name: Setup perfkit-venv CA certificate path
lineinfile:
dest: "{{ perfkit_venv }}/bin/activate"
line: 'export REQUESTS_CA_BUNDLE={{ overcloud_ca_path }}'
when: overcloud_ca_path is defined
- name: Determine if PerfKitBenchmarker is already cloned
stat:
path: "{{ perfkit_venv }}/PerfKitBenchmarker"
register: perfkit_exists
- debug: msg="PerfKitBenchmarker already exists on the host"
when: perfkit_exists.stat.isdir is defined and perfkit_exists.stat.isdir
- name: Clone PerfKitBenchmarker on undercloud
git:
repo: https://github.com/GoogleCloudPlatform/PerfKitBenchmarker.git
dest: "{{perfkit_venv}}/PerfKitBenchmarker"
version: "{{perfkit_version}}"
when: perfkit_exists.stat.isdir is undefined
- name: Install PerfKitBenchmarker requirements into perfkit-venv
pip:
requirements: "{{perfkit_venv}}/PerfKitBenchmarker/requirements.txt"
virtualenv: "{{perfkit_venv}}"
- name: Install PerfKitBenchmarker Openstack requirements into perfkit-venv
pip:
requirements: "{{ perfkit_venv }}/PerfKitBenchmarker/perfkitbenchmarker/providers/openstack/requirements.txt"
virtualenv: "{{perfkit_venv}}"
# (akrzos) - These requirements are what works for OpenStack Ocata
- name: Fix requirements for (OSP11 Ocata) inside perfkit-venv
pip:
name: "{{item.name}}"
version: "{{item.version}}"
virtualenv: "{{perfkit_venv}}"
with_items:
- name: openstacksdk
version: 0.9.17
- name: python-openstackclient
version: 3.12.0
- name: python-novaclient
version: 9.1.0

View File

@ -9,7 +9,6 @@
- browbeat/stockpile
- browbeat/browbeat
- browbeat/firewall
- browbeat/perfkitbenchmarker
- browbeat/rally
- browbeat/shaker
- browbeat/flavors

View File

@ -1,5 +1,5 @@
---
# Public network that perfkit and shaker utilize
# Public network that shaker utilizes
browbeat_pub_net_name: browbeat_public
browbeat_pub_subnet: 1.1.1.1/22
browbeat_pub_pool_start: 1.1.1.1

View File

@ -26,18 +26,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 5
sleep_after: 5

View File

@ -28,18 +28,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 5
sleep_after: 5

View File

@ -28,18 +28,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 5
sleep_after: 5

View File

@ -27,18 +27,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 5
sleep_after: 5
@ -58,13 +46,6 @@ shaker:
external_host: 2.2.2.2
workloads:
# PerfKitBenchmarker
- name: fio-centos-m1-small
enabled: false
type: perfkit
benchmarks: fio
openstack_volume_size: 1
# Rally
- name: authenticate
enabled: true

View File

@ -31,18 +31,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 5
sleep_after: 5
@ -62,12 +50,6 @@ shaker:
external_host: 2.2.2.2
workloads:
# PerfKitBenchmarker
- name: fio-centos-m1-small
enabled: false
type: perfkit
benchmarks: fio
openstack_volume_size: 1
# file for shaker should be one of https://opendev.org/performa/shaker/src/branch/master/shaker/scenarios/openstack
# Shaker
- name: shaker-l2

View File

@ -24,7 +24,7 @@ from browbeat.workloads import base
from browbeat.config import load_browbeat_config
from browbeat.path import results_path
_workload_opts = ['perfkit', 'rally', 'shaker']
_workload_opts = ['rally', 'shaker']
_config_file = 'browbeat-config.yaml'
debug_log_file = 'log/debug.log'

View File

@ -44,7 +44,7 @@ def load_browbeat_config(path):
def _validate_yaml(schema, config):
"""Raises exception if config is invalid.
:param schema: The schema to validate with (browbeat, perfkit, rally...)
:param schema: The schema to validate with (browbeat, rally...)
:param config: Loaded yaml to validate
"""
check = pykwalify_core.Core(

View File

@ -78,45 +78,6 @@ mapping:
port:
type: int
required: True
perfkit:
required: True
type: map
mapping:
default:
type: map
required: True
mapping:
image:
type: str
required: True
machine_type:
type: str
required: True
os_type:
type: str
required: True
enum: ['rhel', 'debian', 'ubuntu_container', 'windows']
openstack_image_username:
type: str
required: True
openstack_floating_ip_pool:
type: str
required: True
openstack_network:
type: str
required: True
ignore_package_requirements:
type: bool
required: False
timing_measurements:
type: str
required: False
sleep_after:
type: number
required: True
sleep_before:
type: number
required: True
rally:
required: True
type: map
@ -181,4 +142,4 @@ mapping:
type:
type: str
required: True
enum: ['perfkit', 'rally', 'shaker']
enum: ['rally', 'shaker']

View File

@ -1,48 +0,0 @@
# This schema defines how a PerfKitBenchmarker workload is formated
name: PerfKitBenchmarker workload schema
type: map
allowempty: True
mapping:
# Required items to be a PerfKit workload
benchmarks:
type: str
required: True
enabled:
type: bool
required: True
name:
type: str
required: True
type:
type: str
required: True
enum: ["perfkit"]
# Over-ridable defaults:
ignore_package_requirements:
type: bool
required: False
image:
type: str
required: False
machine_type:
type: str
required: False
openstack_floating_ip_pool:
type: str
required: False
openstack_image_username:
type: str
required: False
openstack_network:
type: str
required: False
openstack_volume_size:
type: int
required: False
os_type:
type: str
required: False
enum: ['rhel', 'debian', 'ubuntu_container', 'windows']
timing_measurements:
type: str
required: False

View File

@ -16,7 +16,6 @@ import os
import re
import subprocess
from browbeat.workloads import perfkit
from browbeat.workloads import rally
from browbeat.workloads import shaker
@ -62,9 +61,7 @@ class Tools(object):
:param result_dir_ts: Result directory timestamp
:param run_iteration: Iteration for a specific run
"""
if workload["type"] == "perfkit":
workloads = perfkit.PerfKit(self.config, result_dir_ts)
elif workload["type"] == "rally":
if workload["type"] == "rally":
workloads = rally.Rally(self.config, result_dir_ts)
elif workload["type"] == "shaker":
workloads = shaker.Shaker(self.config, result_dir_ts)
@ -98,8 +95,7 @@ class Tools(object):
def post_process(self, cli):
workloads = {}
workloads['shaker'] = re.compile("shaker")
workloads['perfkit'] = re.compile("perfkit")
workloads['rally'] = re.compile("(?!perfkit)|(?!shaker)")
workloads['rally'] = re.compile("(?!shaker)")
""" Iterate through dir structure """
results = {}
if os.path.isdir(cli.path):
@ -138,6 +134,3 @@ class Tools(object):
if workload is "shaker":
# Stub for Shaker.
continue
if workload is "perfkit":
# Stub for PerfKit.
continue

View File

@ -1,205 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import datetime
import glob
import logging
import os
import shutil
import subprocess
import time
import browbeat.tools
from browbeat import elastic
from browbeat import grafana
from browbeat.path import get_overcloudrc
from browbeat.path import get_workload_venv
from browbeat.path import results_path
from browbeat.workloads import base
import six
class PerfKit(base.WorkloadBase):
def __init__(self, config, result_dir_ts):
self.logger = logging.getLogger('browbeat.perfkit')
self.overcloudrc = get_overcloudrc()
self.config = config
self.result_dir_ts = result_dir_ts
self.tools = browbeat.tools.Tools(self.config)
self.grafana = grafana.Grafana(self.config)
self.elastic = elastic.Elastic(self.config, self.__class__.__name__.lower())
def string_to_dict(self, string):
"""Function for converting "|" quoted hash data into python dictionary."""
dict_data = {}
split_data = string.split('|,|')
split_data[0] = split_data[0][1:]
split_data[-1] = split_data[-1][:-1]
for item in split_data:
split_item = item.replace('.', '_').split(':', 1)
dict_data[split_item[0]] = ast.literal_eval("'" + split_item[1] + "'")
return dict_data
def get_error_details(self, result_dir):
error_details = []
with open('{}/pkb.stderr.log'.format(result_dir)) as perfkit_stderr:
for line in perfkit_stderr:
if 'ERROR' in line or 'Error' in line or 'Exception' in line:
error_details.append(line)
return error_details
def index_results(self, sucessful_run, result_dir, test_name, browbeat_rerun, benchmark_config):
es_ts = datetime.datetime.utcnow()
index_success = True
if sucessful_run:
# PerfKit json is newline delimited and thus each newline json needs to be indexed
with open('{}/perfkitbenchmarker_results.json'.format(result_dir)) \
as perfkit_results_json:
for result_count, json_result in enumerate(perfkit_results_json):
complete_result_json = {'browbeat_scenario': benchmark_config}
complete_result_json['results'] = {'unit': {}, 'value': {}}
single_result = self.elastic.load_json(json_result.strip())
complete_result_json['browbeat_rerun'] = browbeat_rerun
complete_result_json['timestamp'] = str(es_ts).replace(" ", "T")
complete_result_json['grafana_url'] = self.grafana.grafana_urls()
complete_result_json['perfkit_setup'] = \
self.string_to_dict(single_result['labels'])
result_metric = single_result['metric'].lower().replace(' ', '_'). \
replace('.', '_')
complete_result_json['results']['value'][result_metric] = single_result['value']
complete_result_json['results']['unit'][result_metric] = single_result['unit']
result = self.elastic.combine_metadata(complete_result_json)
if not self.elastic.index_result(result, test_name, result_dir,
str(result_count), 'result'):
index_success = False
self.update_index_failures()
else:
complete_result_json = {'browbeat_scenario': benchmark_config}
complete_result_json['perfkit_errors'] = self.get_error_details(result_dir)
complete_result_json['browbeat_rerun'] = browbeat_rerun
complete_result_json['timestamp'] = str(es_ts).replace(" ", "T")
complete_result_json['grafana_url'] = self.grafana.grafana_urls()
result = self.elastic.combine_metadata(complete_result_json)
index_success = self.elastic.index_result(result, test_name, result_dir, _type='error')
return index_success
def run_benchmark(self, benchmark_config, result_dir, test_name, cloud_type="OpenStack"):
self.logger.debug("--------------------------------")
self.logger.debug("Benchmark_config: {}".format(benchmark_config))
self.logger.debug("result_dir: {}".format(result_dir))
self.logger.debug("test_name: {}".format(test_name))
self.logger.debug("--------------------------------")
# Build command to run
if 'enabled' in benchmark_config:
del benchmark_config['enabled']
if 'type' in benchmark_config:
del benchmark_config['type']
cmd = ("source {0}; source {1}; "
"{2}/PerfKitBenchmarker/pkb.py "
"--cloud={3} --run_uri=browbeat".format(
get_workload_venv('perfkit', True),
self.overcloudrc,
get_workload_venv('perfkit', False), cloud_type))
for parameter, value in six.iteritems(benchmark_config):
if not parameter == 'name':
self.logger.debug(
"Parameter: {}, Value: {}".format(parameter, value))
cmd += " --{}={}".format(parameter, value)
# Remove any old results
if os.path.exists("/tmp/perfkitbenchmarker/runs/browbeat"):
shutil.rmtree("/tmp/perfkitbenchmarker/runs/browbeat")
self.logger.info("Running Perfkit Command: {}".format(cmd))
stdout_file = open("{}/pkb.stdout.log".format(result_dir), 'w')
stderr_file = open("{}/pkb.stderr.log".format(result_dir), 'w')
from_ts = time.time()
if 'sleep_before' in self.config['perfkit']:
time.sleep(self.config['perfkit']['sleep_before'])
process = subprocess.Popen(
cmd, shell=True, stdout=stdout_file, stderr=stderr_file)
process.communicate()
if 'sleep_after' in self.config['perfkit']:
time.sleep(self.config['perfkit']['sleep_after'])
to_ts = time.time()
# Determine success
success = False
try:
with open("{}/pkb.stderr.log".format(result_dir), 'r') as stderr:
if any('SUCCEEDED' in line for line in stderr):
self.logger.info("Benchmark completed.")
success = True
else:
self.logger.error("Benchmark failed.")
except IOError:
self.logger.error(
"File missing: {}/pkb.stderr.log".format(result_dir))
# Copy all results
for perfkit_file in glob.glob("/tmp/perfkitbenchmarker/runs/browbeat/*"):
shutil.move(perfkit_file, result_dir)
if os.path.exists("/tmp/perfkitbenchmarker/runs/browbeat"):
shutil.rmtree("/tmp/perfkitbenchmarker/runs/browbeat")
# Grafana integration
self.grafana.create_grafana_urls(
{'from_ts': int(from_ts * 1000),
'to_ts': int(to_ts * 1000)})
self.grafana.print_dashboard_url(test_name)
return success, to_ts, from_ts
def run_workload(self, workload, run_iteration):
self.logger.info("Starting PerfKitBenchmarker Workloads.")
time_stamp = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S")
self.logger.debug("Time Stamp (Prefix): {}".format(time_stamp))
self.logger.info("Benchmark: {}".format(workload['name']))
self.update_total_scenarios()
# Add default parameters as necessary
for default_item, value in six.iteritems(self.config['perfkit']['default']):
if default_item not in workload:
workload[default_item] = value
# Correct iteration/rerun
rerun_range = range(self.config["browbeat"]["rerun"])
if self.config["browbeat"]["rerun_type"] == "complete":
rerun_range = range(run_iteration, run_iteration + 1)
for run in rerun_range:
self.update_total_tests()
result_dir = self.tools.create_results_dir(
results_path, self.result_dir_ts, workload['name'], str(run))
test_name = "{}-{}-{}".format(time_stamp, workload['name'], run)
self.workload_logger(self.__class__.__name__)
success, to_ts, from_ts = self.run_benchmark(workload, result_dir, test_name)
index_success = 'disabled'
if self.config['elasticsearch']['enabled']:
index_success = self.index_results(success, result_dir, test_name, run, workload)
new_test_name = test_name.split('-')
new_test_name = new_test_name[2:]
new_test_name = '-'.join(new_test_name)
if success:
self.update_total_pass_tests()
self.get_time_dict(to_ts, from_ts, workload['benchmarks'],
new_test_name, self.__class__.__name__, "pass",
index_success)
else:
self.update_total_fail_tests()
self.get_time_dict(to_ts, from_ts, workload['benchmarks'],
new_test_name, self.__class__.__name__, "fail",
index_success)

View File

@ -27,18 +27,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 5
sleep_after: 5

View File

@ -28,18 +28,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 5
sleep_after: 5

View File

@ -28,18 +28,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 5
sleep_after: 5

View File

@ -27,18 +27,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 5
sleep_after: 5

View File

@ -27,18 +27,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 5
sleep_after: 5

View File

@ -30,18 +30,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 5
sleep_after: 5

View File

@ -28,18 +28,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 5
sleep_after: 5

View File

@ -1,159 +0,0 @@
# Examples of PerfKit Benchmarks run from Browbeat
#
# Not all benchmark flags are exposed in this file because PerfKit
# ships with many benchmarks and many configuration options for
# each of the benchmarks. The settings for the benchmarks below
# are not tuned for best performance analysis either.
browbeat:
cloud_name: openstack
rerun: 1
rerun_type: iteration
ansible:
hosts: ansible/hosts
metadata_playbook: ansible/gather/stockpile.yml
ssh_config: ansible/ssh-config
elasticsearch:
enabled: false
host: 1.1.1.1
port: 9200
regather: false
metadata_files:
- name: hardware-metadata
file: metadata/hardware-metadata.json
- name: environment-metadata
file: metadata/environment-metadata.json
- name: software-metadata
file: metadata/software-metadata.json
- name: version
file: metadata/version.json
grafana:
enabled: true
host: example.grafana.com
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 5
sleep_after: 5
plugins:
- glance: rally/rally-plugins/glance
- neutron: rally/rally-plugins/neutron
- netcreate-boot: rally/rally-plugins/netcreate-boot
- workloads: rally/rally-plugins/workloads
shaker:
server: 1.1.1.1
port: 5555
flavor: m1.small
join_timeout: 600
sleep_before: 0
sleep_after: 0
shaker_region: regionOne
external_host: 2.2.2.2
workloads:
- name: aerospike-centos-m1-small
enabled: false
type: perfkit
benchmarks: aerospike
- name: block_storage_workload-database-centos-m1-small
enabled: false
type: perfkit
benchmarks: block_storage_workload
openstack_volume_size: 20
workload_mode: database
- name: block_storage_workload-logging-centos-m1-small
enabled: false
type: perfkit
benchmarks: block_storage_workload
openstack_volume_size: 20
workload_mode: logging
- name: block_storage_workload-streaming-centos-m1-small
enabled: false
type: perfkit
benchmarks: block_storage_workload
openstack_volume_size: 20
workload_mode: streaming
- name: cluster_boot-centos-m1-small
enabled: false
type: perfkit
benchmarks: cluster_boot
config_override: "cluster_boot.vm_groups.default.vm_count=4"
- name: copy_throughput-cp-centos-m1-small
enabled: false
type: perfkit
benchmarks: copy_throughput
copy_benchmark_mode: cp
openstack_volume_size: 20
- name: copy_throughput-dd-centos-m1-small
enabled: false
type: perfkit
benchmarks: copy_throughput
copy_benchmark_mode: dd
openstack_volume_size: 20
- name: copy_throughput-scp-centos-m1-small
enabled: false
type: perfkit
benchmarks: copy_throughput
copy_benchmark_mode: scp
openstack_volume_size: 20
- name: fio-centos-m1-small
enabled: false
type: perfkit
benchmarks: fio
openstack_volume_size: 20
- name: fio-centos-m1-small-10m
enabled: false
type: perfkit
benchmarks: fio
openstack_volume_size: 20
fio_generate_scenarios: all
fio_runtime: 600
fio_working_set_size: 4
- name: iperf-centos-m1-small
enabled: false
type: perfkit
benchmarks: iperf
- name: mesh_network-centos-m1-small
enabled: false
type: perfkit
benchmarks: mesh_network
num_vms: 3
- name: netperf-centos-m1-small
enabled: false
type: perfkit
benchmarks: netperf
- name: ping-centos-m1-small
enabled: false
type: perfkit
benchmarks: ping
- name: redis_ycsb-centos-m1-small
enabled: false
type: perfkit
benchmarks: redis_ycsb
ycsb_client_vms: 2
- name: scimark2-centos-m1-small
enabled: false
type: perfkit
benchmarks: scimark2
- name: sysbench_oltp-centos-m1-small
enabled: false
type: perfkit
benchmarks: sysbench_oltp
openstack_volume_size: 20
- name: unixbench-centos-m1-small
enabled: false
type: perfkit
benchmarks: unixbench
openstack_volume_size: 20

View File

@ -27,18 +27,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.tiny-centos
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 5
sleep_after: 5
@ -55,10 +43,6 @@ shaker:
# file for shaker should be one of https://opendev.org/performa/shaker/src/branch/master/shaker/scenarios/openstack
workloads:
- name: ping-m1-tiny-centos
enabled: true
type: perfkit
benchmarks: ping
- name: quickstart-shaker-l2
enabled: true
type: shaker

View File

@ -34,18 +34,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 5
sleep_after: 5

View File

@ -34,18 +34,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 5
sleep_after: 5

View File

@ -34,18 +34,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 5
sleep_after: 5

View File

@ -34,18 +34,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 5
sleep_after: 5

View File

@ -31,18 +31,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 0
sleep_after: 0

View File

@ -31,18 +31,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 0
sleep_after: 0

View File

@ -30,18 +30,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 0
sleep_after: 0

View File

@ -32,18 +32,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 0
sleep_after: 0

View File

@ -30,18 +30,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 0
sleep_after: 0

View File

@ -31,18 +31,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 0
sleep_after: 0

View File

@ -350,13 +350,11 @@ to install Browbeat for usage on the new Tripleo Quickstart cloud.
[akrzos@bithead browbeat]$ . .browbeat-venv/bin/activate
(.browbeat-venv) [akrzos@bithead browbeat]$ ./browbeat.py -s conf/quickstart.yml rally
2017-12-13 15:46:34,648 - browbeat.config - INFO - Config conf/quickstart.yml validated
2017-12-13 15:46:34,655 - browbeat.config - INFO - Workload ping-m1-tiny-centos validated as perfkit
2017-12-13 15:46:34,657 - browbeat.config - INFO - Workload quickstart-shaker-l2 validated as shaker
2017-12-13 15:46:34,665 - browbeat.config - INFO - Workload quickstart-rally validated as rally
2017-12-13 15:46:34,665 - browbeat - INFO - Browbeat test suite kicked off
2017-12-13 15:46:34,665 - browbeat - INFO - Browbeat UUID: 8e869626-a596-4ec7-b0b1-ac7f2bf915a7
2017-12-13 15:46:34,666 - browbeat - INFO - Running workload(s): rally
2017-12-13 15:46:34,666 - browbeat - INFO - perfkit workload ping-m1-tiny-centos disabled via cli
2017-12-13 15:46:34,666 - browbeat - INFO - shaker workload quickstart-shaker-l2 disabled via cli
2017-12-13 15:46:34,666 - browbeat - INFO - rally workload quickstart-rally is enabled
2017-12-13 15:46:34,666 - browbeat.rally - INFO - Running Rally workload: quickstart-rally

View File

@ -12,37 +12,7 @@ Run Browbeat performance tests from Undercloud
[stack@ospd ~]$ cd browbeat/
[stack@ospd browbeat]$ . .browbeat-venv/bin/activate
(browbeat-venv)[stack@ospd browbeat]$ vi browbeat-config.yaml # Edit browbeat-config.yaml to control how many stress tests are run.
(browbeat-venv)[stack@ospd browbeat]$ ./browbeat.py <workload> #perfkit, rally, shaker or "all"
Running PerfKitBenchmarker
---------------------------
Note: PerfKitBenchmarker is disabled for Stein+ due to the lack of python3
support.
Many benchmarks work out of the box with Browbeat. You must ensure that your
network is setup correctly to run those benchmarks. Currently tested benchmarks
include: aerospike, bonnie++, cluster_boot, copy_throughput(cp,dd,scp), fio,
iperf, mesh_network, mongodb_ycsb, netperf, object_storage_service, ping,
scimark2, and sysbench_oltp.
To run Browbeat's PerfKit Benchmarks, you can start by viewing the
tested benchmark's configuration in conf/browbeat-perfkit-complete.yaml.
You must add them to your specific Browbeat config yaml file or
enable/disable the benchmarks you wish to run in the default config file
(browbeat-config.yaml). There are many flags exposed in the
configuration files to tune how those benchmarks run. Additional flags
are exposed in the source code of PerfKitBenchmarker available on the
Google Cloud Github_.
.. _Github: https://github.com/GoogleCloudPlatform/PerfKitBenchmarker
Example running only PerfKitBenchmarker benchmarks with Browbeat from
browbeat-config.yaml:
::
(browbeat-venv)[stack@ospd browbeat]$ ./browbeat.py perfkit -s browbeat-config.yaml
(browbeat-venv)[stack@ospd browbeat]$ ./browbeat.py <workload> #rally, shaker or "all"
Running Shaker
---------------

View File

@ -1,71 +0,0 @@
{
template: "browbeat-perfkit-*",
mappings: {
result: {
properties: {
browbeat_rerun: {
type: "long"
},
browbeat_scenario: {
properties: {
benchmarks: {
index: "not_analyzed",
type: "string"
}
}
},
browbeat_uuid: {
index: "not_analyzed",
type: "string"
},
cloud_name: {
index: "not_analyzed",
type: "string"
},
grafana_url: {
properties: {
openstack-general-system-performance: {
index: "not_analyzed",
type: "string"
}
}
},
perfkit_setup: {
properties: {
cloud: {
index: "not_analyzed",
type: "string"
},
image: {
index: "not_analyzed",
type: "string"
},
machine_instance: {
index: "not_analyzed",
type: "string"
},
machine_type: {
index: "not_analyzed",
type: "string"
},
perfkitbenchmarker_version: {
index: "not_analyzed",
type: "string"
},
vm_count: {
type: "string"
},
zone: {
index: "not_analyzed",
type: "string"
}
}
},
timestamp: {
type: "date",
format: "strict_date_optional_time||epoch_millis"
}
}
}
}
}

View File

@ -1,76 +0,0 @@
{
"template": "browbeat-perfkit-*",
"settings": {
"index.mapping.total_fields.limit": 5000,
"number_of_shards": 1,
"number_of_replicas": 0
},
"mappings": {
"result": {
"properties": {
"browbeat_rerun": {
"type": "long"
},
"browbeat_scenario": {
"properties": {
"benchmarks": {
"index": "not_analyzed",
"type": "string"
}
}
},
"browbeat_uuid": {
"index": "not_analyzed",
"type": "string"
},
"cloud_name": {
"index": "not_analyzed",
"type": "string"
},
"grafana_url": {
"properties": {
"openstack-general-system-performance": {
"index": "not_analyzed",
"type": "string"
}
}
},
"perfkit_setup": {
"properties": {
"cloud": {
"index": "not_analyzed",
"type": "string"
},
"image": {
"index": "not_analyzed",
"type": "string"
},
"machine_instance": {
"index": "not_analyzed",
"type": "string"
},
"machine_type": {
"index": "not_analyzed",
"type": "string"
},
"perfkitbenchmarker_version": {
"index": "not_analyzed",
"type": "string"
},
"vm_count": {
"type": "string"
},
"zone": {
"index": "not_analyzed",
"type": "string"
}
}
},
"timestamp": {
"type": "date",
"format": "strict_date_optional_time||epoch_millis"
}
}
}
}
}

View File

@ -28,18 +28,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 0
sleep_after: 0
@ -54,10 +42,6 @@ shaker:
external_host: 2.2.2.2
workloads:
- name: browbeat-test-perfkit-ping
enabled: false
type: perfkit
benchmarks: ping
- name: browbeat-test-authenticate
enabled: false
type: rally

View File

@ -27,18 +27,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 0
sleep_after: 0

View File

@ -27,18 +27,6 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
perfkit:
sleep_before: 0
sleep_after: 0
default:
image: centos7
machine_type: m1.small
os_type: rhel
openstack_image_username: centos
openstack_floating_ip_pool: browbeat_public
openstack_network: browbeat_private
timing_measurements: runtimes
ignore_package_requirements: true
rally:
sleep_before: 0
sleep_after: 0
@ -53,10 +41,6 @@ shaker:
external_host: 2.2.2.2
workloads:
- name: browbeat-test-perfkit-ping
enabled: false
type: perfkit
benchmarks: ping
- name: browbeat-test-authenticate
enabled: false
type: rally

View File

@ -1,17 +1,4 @@
# Valid and invalid workload schemas for testing per workload
perfkit:
- valid: true
data:
name: valid-test-perfkit-ping
enabled: false
type: perfkit
benchmarks: ping
- valid: false
data:
opps_name: invalid-test-perfkit-ping
enabled: false
type: perfkit
benchmarks: ping
rally:
- valid: true
data:

View File

@ -37,7 +37,7 @@ def test_load_browbeat_config(config):
assert "SchemaError" in str(exception_data)
@pytest.mark.parametrize("schema", ["perfkit", "rally", "shaker"])
@pytest.mark.parametrize("schema", ["rally", "shaker"])
def test__validate_yaml(schema):
"""Tests valid and invalid Browbeat workload configurations."""
with open("tests/data/workloads.yml", "r") as config_file:

View File

@ -84,4 +84,4 @@ show-source = True
ignore = E123,E125,E226,E302,E41,E231,E203,H233,H306,H238,H236,H404,H405,W504
max-line-length = 100
builtins = _
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,ansible/*,.browbeat-venv,.perfkit-venv,.rally-venv,.shaker-venv,browbeat-containers/*
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,ansible/*,.browbeat-venv,.rally-venv,.shaker-venv,browbeat-containers/*

View File

@ -41,7 +41,7 @@ echo "${install_host}" >> ansible/hosts
if [ "$install_host" == "localhost" ]; then
# Clean local environment
rm -rf .browbeat-venv/ .perfkit-venv/ .rally-venv/ .shaker-venv/
rm -rf .browbeat-venv/ .rally-venv/ .shaker-venv/
rm -rf stackrc overcloudrc
# Make sure brovc.10 is up