Add batching for samples creation in context

Performance Ceilometer scenarios require a large amount of pre-created
data, which means that if anything goes wrong while the samples are
being stored, the whole test fails. Batching improves this: storing
samples in batches is as fast as storing the whole sample list in one
request, and choosing a batch size of no more than 1% of all samples
makes the loss of a single batch non-critical for the test.
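
For example, with the new options the Ceilometer context in a task file
could be configured as follows (a sketch based on the sample task files
in this change; values, and batches_allow_lose in particular, are
illustrative):

    ceilometer:
      counter_name: "cpu_util"
      counter_type: "gauge"
      counter_unit: "instance"
      counter_volume: 1.0
      resources_per_tenant: 3
      samples_per_resource: 10
      timestamp_interval: 60
      batch_size: 5            # store samples in batches of 5
      batches_allow_lose: 1    # tolerate the loss of at most one batch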

Change-Id: I96cfbdb6f0f8c20363b4be11cac488f6459f81e2
Igor Degtiarov 2015-10-21 13:31:38 +03:00
parent 4178ec9fa3
commit ce8068a9af
7 changed files with 77 additions and 19 deletions

View File

@@ -495,7 +495,7 @@
           counter_unit: "instance"
           counter_volume: 1.0
           resources_per_tenant: 3
-          samples_per_resource: 3
+          samples_per_resource: 10
           timestamp_interval: 60
           metadata_list:
             - status: "active"
@@ -506,6 +506,7 @@
               name: "fake_resource_1"
               deleted: "False"
               created_at: "2015-09-10T06:55:12.000000"
+          batch_size: 5
       sla:
         failure_rate:
           max: 0

View File

@@ -18,6 +18,7 @@
 from rally.common.i18n import _
 from rally.common import log as logging
 from rally.common import utils as rutils
 from rally import consts
+from rally import exceptions
 from rally.plugins.openstack.scenarios.ceilometer import utils as ceilo_utils
 from rally.task import context
@@ -77,6 +78,14 @@ class CeilometerSampleGenerator(context.Context):
                         }
                     }
                 }
-            }
+            },
+            "batch_size": {
+                "type": "integer",
+                "minimum": 1
+            },
+            "batches_allow_lose": {
+                "type": "integer",
+                "minimum": 0
+            }
         },
         "required": ["counter_name", "counter_type", "counter_unit",
@@ -90,6 +99,22 @@ class CeilometerSampleGenerator(context.Context):
         "timestamp_interval": 60
     }
 
+    def _store_batch_samples(self, scenario, batches, batches_allow_lose):
+        batches_allow_lose = batches_allow_lose or 0
+        unsuccess = 0
+        for i, batch in enumerate(batches, start=1):
+            try:
+                samples = scenario._create_samples(batch)
+            except Exception:
+                unsuccess += 1
+                LOG.warning(_("Failed to store batch %d of Ceilometer samples"
+                              " during context creation") % i)
+        if unsuccess > batches_allow_lose:
+            raise exceptions.ContextSetupFailure(
+                ctx_name=self.get_name(),
+                msg=_("Context failed to store too many batches of samples"))
+        return samples
+
     @logging.log_task_wrapper(LOG.info, _("Enter context: `Ceilometer`"))
     def setup(self):
         new_sample = {
@@ -110,8 +135,12 @@
                     count=self.config["samples_per_resource"],
                     interval=self.config["timestamp_interval"],
                     metadata_list=self.config.get("metadata_list"),
+                    batch_size=self.config.get("batch_size"),
                     **new_sample)
-                samples = scenario._create_samples(samples_to_create)
+                samples = self._store_batch_samples(
+                    scenario, samples_to_create,
+                    self.config.get("batches_allow_lose")
+                )
                 for sample in samples:
                     self.context["tenants"][tenant_id]["samples"].append(
                         sample.to_dict())

View File

@ -28,7 +28,7 @@ class CeilometerScenario(scenario.OpenStackScenario):
def _make_samples(self, count=1, interval=0, counter_name="cpu_util", def _make_samples(self, count=1, interval=0, counter_name="cpu_util",
counter_type="gauge", counter_unit="%", counter_volume=1, counter_type="gauge", counter_unit="%", counter_volume=1,
project_id=None, user_id=None, source=None, project_id=None, user_id=None, source=None,
timestamp=None, metadata_list=None): timestamp=None, metadata_list=None, batch_size=None):
"""Prepare and return a list of samples. """Prepare and return a list of samples.
:param count: specifies number of samples in array :param count: specifies number of samples in array
@@ -43,9 +43,10 @@ class CeilometerScenario(scenario.OpenStackScenario):
         :param source: specifies source for samples
         :param timestamp: specifies timestamp for samples
         :param metadata_list: specifies list of resource metadata
-        :returns: list of samples used to create samples
+        :param batch_size: specifies number of samples to store in one query
+        :returns: generator that produces lists of samples
         """
-        samples = []
+        batch_size = batch_size or count
         sample = {
             "counter_name": counter_name,
             "counter_type": counter_type,
@@ -62,9 +63,13 @@ class CeilometerScenario(scenario.OpenStackScenario):
         for k, v in six.iteritems(opt_fields):
             if v:
                 sample.update({k: v})
-        now = timestamp or datetime.datetime.utcnow()
         len_meta = len(metadata_list) if metadata_list else 0
+        now = timestamp or datetime.datetime.utcnow()
+        samples = []
         for i in six.moves.xrange(count):
+            if i and not (i % batch_size):
+                yield samples
+                samples = []
             sample_item = dict(sample)
             sample_item["timestamp"] = (
                 now - datetime.timedelta(seconds=(interval * i))
@@ -76,8 +81,7 @@ class CeilometerScenario(scenario.OpenStackScenario):
                     i * len_meta // count
                 ]
             samples.append(sample_item)
-
-        return samples
+        yield samples
 
     def _make_query_item(self, field, op="eq", value=None):
         """Create a SimpleQuery item for requests.

View File

@@ -26,7 +26,8 @@
                     {"status": "not_active", "name": "fake_resource_1",
                      "deleted": "False",
                      "created_at": "2015-09-10T06:55:12.000000"}
-                ]
+                ],
+                "batch_size": 5
             }
         },
         "args":{

View File

@@ -26,6 +26,7 @@
               name: "fake_resource_1"
               deleted: "False"
               created_at: "2015-09-10T06:55:12.000000"
+        batch_size: 5
       args:
         limit: 50
         metadata_query:

View File

@@ -115,7 +115,6 @@ class CeilometerSampleGeneratorTestCase(test.TestCase):
             "counter_type": "fake-counter-type",
             "counter_unit": "fake-counter-unit",
             "counter_volume": 100,
-            "resource_id": "fake-resource-id",
             "metadata_list": [
                 {"status": "active", "name": "fake_resource",
                  "deleted": "False",
@@ -128,9 +127,10 @@ class CeilometerSampleGeneratorTestCase(test.TestCase):
         scenario.generate_random_name = mock.Mock(
             return_value="fake_resource-id")
         kwargs = copy.deepcopy(sample)
-        kwargs.pop("resource_id")
-        samples_to_create = scenario._make_samples(count=samples_per_resource,
-                                                   interval=60, **kwargs)
+        samples_to_create = list(
+            scenario._make_samples(count=samples_per_resource, interval=60,
+                                   **kwargs)
+        )[0]
         new_context = copy.deepcopy(real_context)
         for id_ in tenants.keys():
             new_context["tenants"][id_].setdefault("samples", [])

View File

@@ -30,21 +30,43 @@ class CeilometerScenarioTestCase(test.ScenarioTestCase):
         super(CeilometerScenarioTestCase, self).setUp()
         self.scenario = utils.CeilometerScenario(self.context)
 
-    def test__make_samples(self):
+    def test__make_samples_no_batch_size(self):
         self.scenario.generate_random_name = mock.Mock(
             return_value="fake_resource")
         test_timestamp = datetime.datetime(2015, 10, 20, 14, 18, 40)
-        result = self.scenario._make_samples(count=2, interval=60,
-                                             timestamp=test_timestamp)
+        result = list(self.scenario._make_samples(count=2, interval=60,
+                                                  timestamp=test_timestamp))
+        self.assertEqual(1, len(result))
         expected = {"counter_name": "cpu_util",
                     "counter_type": "gauge",
                     "counter_unit": "%",
                     "counter_volume": 1,
                     "resource_id": "fake_resource",
                     "timestamp": test_timestamp.isoformat()}
-        self.assertEqual(expected, result[0])
-        samples_int = (parser.parse(result[0]["timestamp"]) -
-                       parser.parse(result[1]["timestamp"])).seconds
+        self.assertEqual(expected, result[0][0])
+        samples_int = (parser.parse(result[0][0]["timestamp"]) -
+                       parser.parse(result[0][1]["timestamp"])).seconds
+        self.assertEqual(60, samples_int)
+
+    def test__make_samples_batch_size(self):
+        self.scenario.generate_random_name = mock.Mock(
+            return_value="fake_resource")
+        test_timestamp = datetime.datetime(2015, 10, 20, 14, 18, 40)
+        result = list(self.scenario._make_samples(count=4, interval=60,
+                                                  batch_size=2,
+                                                  timestamp=test_timestamp))
+        self.assertEqual(2, len(result))
+        expected = {"counter_name": "cpu_util",
+                    "counter_type": "gauge",
+                    "counter_unit": "%",
+                    "counter_volume": 1,
+                    "resource_id": "fake_resource",
+                    "timestamp": test_timestamp.isoformat()}
+        self.assertEqual(expected, result[0][0])
+        samples_int = (parser.parse(result[0][-1]["timestamp"]) -
+                       parser.parse(result[1][0]["timestamp"])).seconds
+        # NOTE(idegtiarov): here we check that interval between last sample in
+        # first batch and first sample in second batch is equal 60 sec.
         self.assertEqual(60, samples_int)
 
     def test__make_timestamp_query(self):