benchmark: add metric create

This patch adds a "benchmark metric create" command measuring the number of
metrics per second we can create in Gnocchi.

Change-Id: Ib463c221d1cf95334184aeb472f18d0bd45042d4
This commit is contained in:
Julien Danjou
2015-09-30 16:34:57 +02:00
parent 6e4de9b71f
commit 833ee435e9
6 changed files with 201 additions and 9 deletions

133
gnocchiclient/benchmark.py Normal file
View File

@@ -0,0 +1,133 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import logging
import time
import futurist
from oslo_utils import timeutils
import six.moves
from gnocchiclient.v1 import metric_cli
LOG = logging.getLogger(__name__)
def _positive_non_zero_int(argument_value):
if argument_value is None:
return None
try:
value = int(argument_value)
except ValueError:
msg = "%s must be an integer" % argument_value
raise argparse.ArgumentTypeError(msg)
if value <= 0:
msg = "%s must be greater than 0" % argument_value
raise argparse.ArgumentTypeError(msg)
return value
class BenchmarkPool(futurist.ThreadPoolExecutor):
    """Thread pool that times a batch of jobs and reports throughput.

    Tracks a stopwatch and the expected number of jobs so that
    progress and per-second rates can be logged while the pool drains.
    """

    def submit_job(self, times, fn, *args, **kwargs):
        """Submit *fn(\*args, \*\*kwargs)* *times* times.

        Starts the stopwatch and returns the list of futures.
        """
        self.sw = timeutils.StopWatch()
        self.sw.start()
        self.times = times
        return [self.submit(fn, *args, **kwargs)
                for i in six.moves.range(times)]

    def map_job(self, fn, iterable, **kwargs):
        """Submit one *fn(item, \*\*kwargs)* job per item of *iterable*.

        Starts the stopwatch and returns the list of futures; the job
        count is derived from the iterable's length.
        """
        self.sw = timeutils.StopWatch()
        self.sw.start()
        r = []
        self.times = 0
        for item in iterable:
            r.append(self.submit(fn, item, **kwargs))
            self.times += 1
        return r

    def _log_progress(self, verb):
        """Log how many jobs completed so far and the current rate."""
        runtime = self.sw.elapsed()
        done = self.statistics.executed
        rate = done / runtime if runtime != 0 else 0
        LOG.info(
            "%d/%d, "
            "total: %.2f seconds, "
            "rate: %.2f %s/second"
            % (done, self.times, runtime, rate, verb))

    def wait_job(self, verb, futures):
        """Block until all submitted jobs finish, then summarize.

        :param verb: label used in log messages and stats keys
            (e.g. "create" or "delete")
        :param futures: futures returned by submit_job/map_job
        :returns: (results, stats) where *results* holds each
            successful future's value and *stats* is a dict of
            human-readable benchmark figures
        """
        while self.statistics.executed != self.times:
            self._log_progress(verb)
            time.sleep(1)
        self._log_progress(verb)
        self.shutdown(wait=True)
        runtime = self.sw.elapsed()
        executed = self.statistics.executed
        results = []
        for f in futures:
            try:
                results.append(f.result())
            except Exception as e:
                LOG.error("Error with %s metric: %s" % (verb, e))
        # Guard both divisions: runtime can be ~0 for tiny batches and
        # executed can be 0 if every submission failed before running.
        speed = executed / runtime if runtime else 0
        failure_rate = (
            100 * self.statistics.failures / float(executed)
            if executed else 0
        )
        return results, {
            'client workers': self._max_workers,
            verb + ' runtime': "%.2f seconds" % runtime,
            verb + ' executed': executed,
            verb + ' speed': "%.2f metric/s" % speed,
            verb + ' failures': self.statistics.failures,
            verb + ' failures rate': "%.2f %%" % failure_rate,
        }
class CliBenchmarkMetricCreate(metric_cli.CliMetricCreateBase):
    """CLI command benchmarking how fast metrics can be created."""

    def get_parser(self, prog_name):
        """Extend the base metric-create parser with benchmark options."""
        parser = super(CliBenchmarkMetricCreate, self).get_parser(prog_name)
        parser.add_argument("--number", "-n",
                            required=True,
                            type=_positive_non_zero_int,
                            help="Number of metrics to create")
        parser.add_argument("--keep", "-k",
                            action='store_true',
                            help="Keep created metrics")
        parser.add_argument("--workers", "-w",
                            default=None,
                            type=_positive_non_zero_int,
                            help="Number of workers to use")
        return parser

    def _take_action(self, metric, parsed_args):
        """Create (and optionally delete) metrics, returning stats."""
        create_pool = BenchmarkPool(parsed_args.workers)
        LOG.info("Creating metrics")
        # refetch_metric=False skips the extra GET per metric so the
        # benchmark measures creation alone.
        create_futures = create_pool.submit_job(
            parsed_args.number,
            self.app.client.metric.create,
            metric, refetch_metric=False)
        created_metrics, stats = create_pool.wait_job(
            "create", create_futures)

        if not parsed_args.keep:
            LOG.info("Deleting metrics")
            delete_pool = BenchmarkPool(parsed_args.workers)
            delete_futures = delete_pool.map_job(
                self.app.client.metric.delete,
                [m['id'] for m in created_metrics])
            _, delete_stats = delete_pool.wait_job(
                "delete", delete_futures)
            stats.update(delete_stats)

        return self.dict2columns(stats)

View File

@@ -24,6 +24,7 @@ from keystoneauth1 import adapter
from keystoneauth1 import exceptions from keystoneauth1 import exceptions
from keystoneauth1 import loading from keystoneauth1 import loading
from gnocchiclient import benchmark
from gnocchiclient import client from gnocchiclient import client
from gnocchiclient import noauth from gnocchiclient import noauth
from gnocchiclient.v1 import archive_policy_cli from gnocchiclient.v1 import archive_policy_cli
@@ -59,6 +60,7 @@ class GnocchiCommandManager(commandmanager.CommandManager):
"measures add": metric_cli.CliMeasuresAdd, "measures add": metric_cli.CliMeasuresAdd,
"measures aggregation": metric_cli.CliMeasuresAggregation, "measures aggregation": metric_cli.CliMeasuresAggregation,
"capabilities list": capabilities_cli.CliCapabilitiesList, "capabilities list": capabilities_cli.CliCapabilitiesList,
"benchmark metric create": benchmark.CliBenchmarkMetricCreate,
} }
def load_commands(self, namespace): def load_commands(self, namespace):

View File

@@ -0,0 +1,43 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from gnocchiclient.tests.functional import base
class BenchmarkMetricTest(base.ClientTestBase):
    """Functional tests for the "benchmark metric create" command."""

    def test_benchmark_metric_create_wrong_workers(self):
        """An invalid -n value must be rejected with a clear error."""
        result = self.gnocchi(
            u'benchmark', params=u"metric create -n 0",
            fail_ok=True, merge_stderr=True)
        self.assertIn("0 must be greater than 0", result)

    def test_benchmark_metric_create(self):
        """Create 10 metrics, with and without -k (keep)."""
        apname = str(uuid.uuid4())
        # PREPARE AN ARCHIVE POLICY
        self.gnocchi("archive-policy", params="create %s "
                     "--back-window 0 -d granularity:1s,points:86400" % apname)

        # Without -k the benchmark deletes what it created; delete
        # stats must therefore be present in the output.
        result = self.gnocchi(
            u'benchmark', params=u"metric create -n 10 -a %s" % apname)
        result = self.details_multiple(result)[0]
        self.assertEqual(10, int(result['create executed']))
        self.assertLessEqual(int(result['create failures']), 10)
        self.assertLessEqual(int(result['delete executed']),
                             int(result['create executed']))

        # With -k the created metrics are kept, so no delete phase runs.
        result = self.gnocchi(
            u'benchmark', params=u"metric create -k -n 10 -a %s" % apname)
        result = self.details_multiple(result)[0]
        self.assertEqual(10, int(result['create executed']))
        self.assertLessEqual(int(result['create failures']), 10)
        self.assertNotIn('delete executed', result)

View File

@@ -54,7 +54,8 @@ class MetricManager(base.Manager):
url = (self.resource_url % resource_id) + metric url = (self.resource_url % resource_id) + metric
return self._get(url).json() return self._get(url).json()
def create(self, metric): # FIXME(jd): remove refetch_metric when LP#1497171 is fixed
def create(self, metric, refetch_metric=True):
"""Create an metric """Create an metric
:param metric: The metric :param metric: The metric
@@ -68,7 +69,9 @@ class MetricManager(base.Manager):
data=jsonutils.dumps(metric)).json() data=jsonutils.dumps(metric)).json()
# FIXME(sileht): create and get have a # FIXME(sileht): create and get have a
# different output: LP#1497171 # different output: LP#1497171
return self.get(metric["id"]) if refetch_metric:
return self.get(metric["id"])
return metric
metric_name = metric.get('name') metric_name = metric.get('name')

View File

@@ -47,26 +47,36 @@ class CliMetricShow(show.ShowOne):
return self.dict2columns(metric) return self.dict2columns(metric)
class CliMetricCreate(show.ShowOne): class CliMetricCreateBase(show.ShowOne):
def get_parser(self, prog_name): def get_parser(self, prog_name):
parser = super(CliMetricCreate, self).get_parser(prog_name) parser = super(CliMetricCreateBase, self).get_parser(prog_name)
parser.add_argument("--archive-policy-name", "-a", parser.add_argument("--archive-policy-name", "-a",
dest="archive_policy_name", dest="archive_policy_name",
help=("name of the archive policy")) help=("name of the archive policy"))
parser.add_argument("--resource-id", "-r", parser.add_argument("--resource-id", "-r",
dest="resource_id", dest="resource_id",
help="ID of the resource") help="ID of the resource")
parser.add_argument("name", nargs='?',
metavar="METRIC_NAME",
help="Name of the metric")
return parser return parser
def take_action(self, parsed_args): def take_action(self, parsed_args):
metric = utils.dict_from_parsed_args(parsed_args, metric = utils.dict_from_parsed_args(parsed_args,
["archive_policy_name", ["archive_policy_name",
"name",
"resource_id"]) "resource_id"])
return self._take_action(metric, parsed_args)
class CliMetricCreate(CliMetricCreateBase):
def get_parser(self, prog_name):
parser = super(CliMetricCreate, self).get_parser(prog_name)
parser.add_argument("name", nargs='?',
metavar="METRIC_NAME",
help="Name of the metric")
return parser
def _take_action(self, metric, parsed_args):
if parsed_args.name:
metric['name'] = parsed_args.name
metric = self.app.client.metric.create(metric) metric = self.app.client.metric.create(metric)
utils.format_archive_policy(metric["archive_policy"]) utils.format_archive_policy(metric["archive_policy"])
utils.format_move_dict_to_root(metric, "archive_policy") utils.format_move_dict_to_root(metric, "archive_policy")

View File

@@ -10,3 +10,4 @@ oslo.serialization>=1.4.0 # Apache-2.0
oslo.utils>=2.0.0 # Apache-2.0 oslo.utils>=2.0.0 # Apache-2.0
keystoneauth1>=1.0.0 keystoneauth1>=1.0.0
six six
futurist