gnocchi: Use Dynamic Aggregates API

Switch to using the Dynamic Aggregates API, as the Metric Aggregation
API is deprecated.

When using the Dynamic Aggregates API, an aggregation that uses rates
can be computed from the underlying base measures rather than from a
pre-computed rate metric, for example:

    (aggregate rate:mean (metric cpu mean))
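
For illustration, a minimal sketch of what the switch looks like from a
caller's side, assuming an already-authenticated gnocchiclient handle;
the metric name, granularity and time window below are placeholders,
not values taken from this change:

    # Sketch only: `client` is assumed to be a ready
    # gnocchiclient.v1.client.Client; 'cpu', 300 and the time window
    # are placeholders for illustration.
    def fetch_rate_mean(client, start, stop):
        # Deprecated Metric Aggregation API:
        #   client.metric.aggregation(metrics=['cpu'],
        #                             aggregation='rate:mean',
        #                             granularity=300,
        #                             needed_overlap=0,
        #                             start=start, stop=stop)
        # Dynamic Aggregates API: the rate:mean is derived from the
        # base 'mean' measures of the 'cpu' metric.
        return client.aggregates.fetch(
            operations=[
                'aggregate', 'rate:mean',
                ['metric', 'cpu', 'mean'],
            ],
            granularity=300,
            needed_overlap=0,
            start=start, stop=stop)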

The data tuples returned via this API are wrapped in a structure that
records the aggregation used, so adapt the sanitization function to
handle this format as well as that of the metric measures API.
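
As a sketch of the shape change the sanitizer now has to handle (the
payload values below are invented; only the structure mirrors the code):

    # aggregates.fetch() wraps measures in a dict, whereas
    # metric.get_measures() returns a bare list of
    # [timestamp, granularity, value] entries.
    stats = {
        'measures': {
            'aggregated': [
                ['2022-02-18T09:00:00+00:00', 300, 1.2],
                ['2022-02-18T09:05:00+00:00', 300, 1.4],
            ]
        }
    }
    if isinstance(stats, dict):
        # Unwrap to the flat list the pre-existing sanitization code expects.
        stats = stats['measures']['aggregated']
    values = [value for _ts, granularity, value in stats
              if granularity == 300]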

Change-Id: I4f631d224404460138f4050b1b981d577b592544
Closes-Bug: 1946793
(cherry picked from commit 74eadfbd58)
(cherry picked from commit e6d55b1ea9)
(cherry picked from commit 27ee81a37a)
James Page authored on 2022-02-18 09:51:41 +00:00; committed by Yadnesh Kulkarni
parent 85571a65b8
commit 4820d595bf
5 changed files with 230 additions and 124 deletions


@@ -200,10 +200,15 @@ class AggregationMetricByResourcesLookupRule(AlarmGnocchiThresholdRule):
'interface': conf.service_credentials.interface,
'region_name': conf.service_credentials.region_name})
try:
gnocchi_client.metric.aggregation(
metrics=rule.metric,
query=query,
aggregation=rule.aggregation_method,
gnocchi_client.aggregates.fetch(
operations=[
'aggregate', rule.aggregation_method,
[
'metric', rule.metric,
rule.aggregation_method.lstrip('rate:')
]
],
search=query,
needed_overlap=0,
start="-1 day",
stop="now",


@@ -47,6 +47,12 @@ class GnocchiBase(threshold.ThresholdEvaluator):
# but not a stddev-of-stddevs).
# TODO(sileht): support alarm['exclude_outliers']
LOG.debug('sanitize stats %s', statistics)
# NOTE(jamespage)
# Dynamic Aggregates are returned in a dict struct so
# check for this first.
if isinstance(statistics, dict):
# Pop array of measures from aggregated subdict
statistics = statistics['measures']['aggregated']
statistics = [stats[VALUE] for stats in statistics
if stats[GRANULARITY] == rule['granularity']]
if not statistics:
@@ -93,6 +99,16 @@ class GnocchiResourceThresholdEvaluator(GnocchiBase):
class GnocchiAggregationMetricsThresholdEvaluator(GnocchiBase):
def _statistics(self, rule, start, end):
try:
_operations = [
'aggregate', rule['aggregation_method']
]
for metric in rule['metrics']:
_operations.append(
[
'metric', metric,
rule['aggregation_method'].lstrip('rate:')
]
)
# FIXME(sileht): In case of a heat autoscaling stack decide to
# delete an instance, the gnocchi metrics associated to this
# instance will be no more updated and when the alarm will ask
@@ -101,11 +117,10 @@ class GnocchiAggregationMetricsThresholdEvaluator(GnocchiBase):
# So temporary set 'needed_overlap' to 0 to disable the
# gnocchi checks about missing points. For more detail see:
# https://bugs.launchpad.net/gnocchi/+bug/1479429
return self._gnocchi_client.metric.aggregation(
metrics=rule['metrics'],
return self._gnocchi_client.aggregates.fetch(
operations=_operations,
granularity=rule['granularity'],
start=start, stop=end,
aggregation=rule['aggregation_method'],
needed_overlap=0)
except exceptions.MetricNotFound:
raise threshold.InsufficientDataError(
@@ -128,24 +143,28 @@ class GnocchiAggregationMetricsThresholdEvaluator(GnocchiBase):
class GnocchiAggregationResourcesThresholdEvaluator(GnocchiBase):
def _statistics(self, rule, start, end):
# FIXME(sileht): In case of a heat autoscaling stack decide to
# delete an instance, the gnocchi metrics associated to this
# instance will be no more updated and when the alarm will ask
# for the aggregation, gnocchi will raise a 'No overlap'
# exception.
# So temporary set 'needed_overlap' to 0 to disable the
# gnocchi checks about missing points. For more detail see:
# https://bugs.launchpad.net/gnocchi/+bug/1479429
try:
return self._gnocchi_client.metric.aggregation(
metrics=rule['metric'],
# FIXME(sileht): In case of a heat autoscaling stack decide to
# delete an instance, the gnocchi metrics associated to this
# instance will be no more updated and when the alarm will ask
# for the aggregation, gnocchi will raise a 'No overlap'
# exception.
# So temporary set 'needed_overlap' to 0 to disable the
# gnocchi checks about missing points. For more detail see:
# https://bugs.launchpad.net/gnocchi/+bug/1479429
return self._gnocchi_client.aggregates.fetch(
operations=[
'aggregate', rule['aggregation_method'],
[
'metric', rule['metric'],
rule['aggregation_method'].lstrip('rate:')
]
],
granularity=rule['granularity'],
query=json.loads(rule['query']),
search=json.loads(rule['query']),
resource_type=rule["resource_type"],
start=start, stop=end,
aggregation=rule['aggregation_method'],
needed_overlap=0,
)
needed_overlap=0)
except exceptions.MetricNotFound:
raise threshold.InsufficientDataError(
'metric %s does not exists' % rule['metric'], [])


@@ -2505,14 +2505,16 @@ class TestAlarmsRuleGnocchi(TestAlarmsBase):
self.post_json('/alarms', params=json, headers=self.auth_headers)
self.assertEqual([mock.call(
aggregation='count',
metrics='ameter',
operations=[
'aggregate', 'count',
['metric', 'ameter', 'count']
],
needed_overlap=0,
start="-1 day",
stop="now",
query=expected_query,
search=expected_query,
resource_type="instance")],
c.metric.aggregation.mock_calls),
c.aggregates.fetch.mock_calls),
alarms = list(self.alarm_conn.get_alarms(enabled=False))
self.assertEqual(1, len(alarms))


@@ -18,7 +18,6 @@ import fixtures
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from six import moves
from aodh import evaluator
from aodh.evaluator import composite
@@ -37,9 +36,17 @@ class BaseCompositeEvaluate(base.TestEvaluatorBase):
super(BaseCompositeEvaluate, self).setUp()
@staticmethod
def _get_gnocchi_stats(granularity, values):
def _get_gnocchi_stats(granularity, values, aggregated=False):
now = timeutils.utcnow_ts()
return [[six.text_type(now - len(values) * granularity),
if aggregated:
return {
'measures': {
'aggregated':
[[str(now - len(values) * granularity),
granularity, value] for value in values]
}
}
return [[str(now - len(values) * granularity),
granularity, value] for value in values]
@staticmethod
@@ -238,7 +245,7 @@ class CompositeTest(BaseCompositeEvaluate):
def test_simple_insufficient(self):
self._set_all_alarms('ok')
self.client.metric.aggregation.return_value = []
self.client.aggregates.fetch.return_value = []
self.client.metric.get_measures.return_value = []
self._evaluate_all_alarms()
self._assert_all_alarms('insufficient data')
@@ -289,26 +296,36 @@ class CompositeTest(BaseCompositeEvaluate):
# self.sub_rule4: ok
# self.sub_rule5: ok
# self.sub_rule6: alarm
maxs = self._get_gnocchi_stats(60, [self.sub_rule2['threshold'] + v
for v in moves.xrange(1, 5)])
avgs1 = self._get_gnocchi_stats(60, [self.sub_rule3['threshold'] + v
for v in moves.xrange(1, 4)])
avgs2 = self._get_gnocchi_stats(60, [self.sub_rule1['threshold'] - v
for v in moves.xrange(1, 6)])
gavgs1 = self._get_gnocchi_stats(60, [self.sub_rule4['threshold']
- v for v in moves.xrange(1, 6)])
gmaxs = self._get_gnocchi_stats(300, [self.sub_rule5['threshold'] + v
for v in moves.xrange(1, 5)])
gavgs2 = self._get_gnocchi_stats(50, [self.sub_rule6['threshold'] + v
for v in moves.xrange(1, 7)])
maxs = self._get_gnocchi_stats(
60, [self.sub_rule2['threshold'] + v
for v in range(1, 5)],
aggregated=True)
avgs1 = self._get_gnocchi_stats(
60, [self.sub_rule3['threshold'] + v
for v in range(1, 4)])
avgs2 = self._get_gnocchi_stats(
60, [self.sub_rule1['threshold'] - v
for v in range(1, 6)],
aggregated=True)
gavgs1 = self._get_gnocchi_stats(
60, [self.sub_rule4['threshold']
- v for v in range(1, 6)],
aggregated=True)
gmaxs = self._get_gnocchi_stats(
300, [self.sub_rule5['threshold'] + v
for v in range(1, 5)],
aggregated=True)
gavgs2 = self._get_gnocchi_stats(
50, [self.sub_rule6['threshold'] + v
for v in range(1, 7)],
aggregated=True)
self.client.metric.get_measures.side_effect = [gavgs1]
self.client.metric.aggregation.side_effect = [maxs, avgs1, avgs2,
gmaxs, gavgs2]
self.client.aggregates.fetch.side_effect = [maxs, avgs1, avgs2,
gmaxs, gavgs2]
self.evaluator.evaluate(alarm)
self.assertEqual(1, self.client.metric.get_measures.call_count)
self.assertEqual(5, self.client.metric.aggregation.call_count)
self.assertEqual(5, self.client.aggregates.fetch.call_count)
self.assertEqual('alarm', alarm.state)
expected = mock.call(
alarm, 'ok',
@@ -322,12 +339,14 @@ class CompositeTest(BaseCompositeEvaluate):
def test_alarm_with_short_circuit_logic(self):
alarm = self.alarms[1]
# self.sub_rule1: alarm
avgs = self._get_gnocchi_stats(60, [self.sub_rule1['threshold'] + v
for v in moves.xrange(1, 6)])
self.client.metric.aggregation.side_effect = [avgs]
avgs = self._get_gnocchi_stats(
60, [self.sub_rule1['threshold'] + v
for v in range(1, 6)],
aggregated=True)
self.client.aggregates.fetch.side_effect = [avgs]
self.evaluator.evaluate(alarm)
self.assertEqual('alarm', alarm.state)
self.assertEqual(1, self.client.metric.aggregation.call_count)
self.assertEqual(1, self.client.aggregates.fetch.call_count)
expected = mock.call(self.alarms[1], 'insufficient data',
*self._reason(
'alarm',
@@ -338,12 +357,14 @@ class CompositeTest(BaseCompositeEvaluate):
def test_ok_with_short_circuit_logic(self):
alarm = self.alarms[2]
# self.sub_rule1: ok
avgs = self._get_gnocchi_stats(60, [self.sub_rule1['threshold'] - v
for v in moves.xrange(1, 6)])
self.client.metric.aggregation.side_effect = [avgs]
avgs = self._get_gnocchi_stats(
60, [self.sub_rule1['threshold'] - v
for v in range(1, 6)],
aggregated=True)
self.client.aggregates.fetch.side_effect = [avgs]
self.evaluator.evaluate(alarm)
self.assertEqual('ok', alarm.state)
self.assertEqual(1, self.client.metric.aggregation.call_count)
self.assertEqual(1, self.client.aggregates.fetch.call_count)
expected = mock.call(self.alarms[2], 'insufficient data',
*self._reason(
'ok',
@@ -353,13 +374,19 @@ class CompositeTest(BaseCompositeEvaluate):
def test_unknown_state_with_sub_rules_trending_state(self):
alarm = self.alarms[0]
maxs = self._get_gnocchi_stats(60, [self.sub_rule2['threshold'] + v
for v in moves.xrange(-1, 4)])
avgs = self._get_gnocchi_stats(60, [self.sub_rule3['threshold'] + v
for v in moves.xrange(-1, 3)])
avgs2 = self._get_gnocchi_stats(60, [self.sub_rule1['threshold'] - v
for v in moves.xrange(1, 6)])
self.client.metric.aggregation.side_effect = [avgs2, maxs, avgs]
maxs = self._get_gnocchi_stats(
60, [self.sub_rule2['threshold'] + v
for v in range(-1, 4)],
aggregated=True)
avgs = self._get_gnocchi_stats(
60, [self.sub_rule3['threshold'] + v
for v in range(-1, 3)],
aggregated=True)
avgs2 = self._get_gnocchi_stats(
60, [self.sub_rule1['threshold'] - v
for v in range(1, 6)],
aggregated=True)
self.client.aggregates.fetch.side_effect = [avgs2, maxs, avgs]
self.evaluator.evaluate(alarm)
self.assertEqual('alarm', alarm.state)
@@ -376,13 +403,19 @@ class CompositeTest(BaseCompositeEvaluate):
alarm.repeat_actions = True
alarm.state = 'ok'
maxs = self._get_gnocchi_stats(60, [self.sub_rule2['threshold'] + v
for v in moves.xrange(-1, 4)])
avgs = self._get_gnocchi_stats(60, [self.sub_rule3['threshold'] + v
for v in moves.xrange(-1, 3)])
avgs2 = self._get_gnocchi_stats(60, [self.sub_rule1['threshold'] - v
for v in moves.xrange(1, 6)])
self.client.metric.aggregation.side_effect = [avgs2, maxs, avgs]
maxs = self._get_gnocchi_stats(
60, [self.sub_rule2['threshold'] + v
for v in range(-1, 4)],
aggregated=True)
avgs = self._get_gnocchi_stats(
60, [self.sub_rule3['threshold'] + v
for v in range(-1, 3)],
aggregated=True)
avgs2 = self._get_gnocchi_stats(
60, [self.sub_rule1['threshold'] - v
for v in range(1, 6)],
aggregated=True)
self.client.aggregates.fetch.side_effect = [avgs2, maxs, avgs]
self.evaluator.evaluate(alarm)
self.assertEqual('ok', alarm.state)
@@ -398,13 +431,19 @@ class CompositeTest(BaseCompositeEvaluate):
def test_known_state_with_sub_rules_trending_state_and_not_repeat(self):
alarm = self.alarms[2]
alarm.state = 'ok'
maxs = self._get_gnocchi_stats(60, [self.sub_rule2['threshold'] + v
for v in moves.xrange(-1, 4)])
avgs = self._get_gnocchi_stats(60, [self.sub_rule3['threshold'] + v
for v in moves.xrange(-1, 3)])
avgs2 = self._get_gnocchi_stats(60, [self.sub_rule1['threshold'] - v
for v in moves.xrange(1, 6)])
self.client.metric.aggregation.side_effect = [avgs2, maxs, avgs]
maxs = self._get_gnocchi_stats(
60, [self.sub_rule2['threshold'] + v
for v in range(-1, 4)],
aggregated=True)
avgs = self._get_gnocchi_stats(
60, [self.sub_rule3['threshold'] + v
for v in range(-1, 3)],
aggregated=True)
avgs2 = self._get_gnocchi_stats(
60, [self.sub_rule1['threshold'] - v
for v in range(1, 6)],
aggregated=True)
self.client.aggregates.fetch.side_effect = [avgs2, maxs, avgs]
self.evaluator.evaluate(alarm)
self.assertEqual('ok', alarm.state)
self.assertEqual([], self.notifier.notify.mock_calls)


@@ -112,9 +112,9 @@ class TestGnocchiEvaluatorBase(base.TestEvaluatorBase):
comparison_operator='gt',
threshold=80.0,
evaluation_periods=6,
aggregation_method='mean',
aggregation_method='rate:mean',
granularity=50,
metric='cpu_util',
metric='cpu',
resource_type='instance',
query='{"=": {"server_group": '
'"my_autoscaling_group"}}')
@@ -124,9 +124,17 @@ class TestGnocchiEvaluatorBase(base.TestEvaluatorBase):
super(TestGnocchiEvaluatorBase, self).setUp()
@staticmethod
def _get_stats(granularity, values):
def _get_stats(granularity, values, aggregated=False):
now = timeutils.utcnow_ts()
return [[six.text_type(now - len(values) * granularity),
if aggregated:
return {
'measures': {
'aggregated':
[[str(now - len(values) * granularity),
granularity, value] for value in values]
}
}
return [[str(now - len(values) * granularity),
granularity, value] for value in values]
@staticmethod
@@ -438,13 +446,17 @@ class TestGnocchiAggregationMetricsThresholdEvaluate(TestGnocchiEvaluatorBase):
self.alarms = self.prepared_alarms[1:2]
def test_retry_transient_api_failure(self):
maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] + v
for v in moves.xrange(4)])
self.client.metric.aggregation.side_effect = [Exception('boom'), maxs]
maxs = self._get_stats(
300,
[self.alarms[0].rule['threshold'] + v
for v in range(4)],
aggregated=True
)
self.client.aggregates.fetch.side_effect = [Exception('boom'), maxs]
self._test_retry_transient()
def test_simple_insufficient(self):
self.client.metric.aggregation.return_value = []
self.client.aggregates.fetch.return_value = []
self._test_simple_insufficient()
@mock.patch.object(timeutils, 'utcnow')
@@ -452,26 +464,33 @@ class TestGnocchiAggregationMetricsThresholdEvaluate(TestGnocchiEvaluatorBase):
utcnow.return_value = datetime.datetime(2015, 1, 26, 12, 57, 0, 0)
self._set_all_alarms('ok')
maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] - v
for v in moves.xrange(4)])
self.client.metric.aggregation.side_effect = [maxs]
maxs = self._get_stats(
300,
[self.alarms[0].rule['threshold'] - v
for v in range(4)],
aggregated=True
)
self.client.aggregates.fetch.side_effect = [maxs]
self._evaluate_all_alarms()
start_alarm = "2015-01-26T12:32:00"
end = "2015-01-26T12:57:00"
self.assertEqual(
[mock.call.aggregation(aggregation='max',
metrics=[
'0bb1604d-1193-4c0a-b4b8-74b170e35e83',
'9ddc209f-42f8-41e1-b8f1-8804f59c4053'],
granularity=300,
needed_overlap=0,
start=start_alarm, stop=end)],
self.client.metric.mock_calls)
[mock.call.fetch(
operations=[
'aggregate', 'max',
['metric', '0bb1604d-1193-4c0a-b4b8-74b170e35e83', 'max'], # noqa
['metric', '9ddc209f-42f8-41e1-b8f1-8804f59c4053', 'max'], # noqa
],
granularity=300,
needed_overlap=0,
start=start_alarm, stop=end)],
self.client.aggregates.mock_calls)
self._assert_all_alarms('alarm')
expected = [mock.call(alarm) for alarm in self.alarms]
update_calls = self.storage_conn.update_alarm.call_args_list
self.assertEqual(expected, update_calls)
maxs = maxs['measures']['aggregated']
reason = ('Transition to alarm due to 4 samples outside '
'threshold, most recent: %s' % maxs[-1][2])
@@ -482,13 +501,14 @@ class TestGnocchiAggregationMetricsThresholdEvaluate(TestGnocchiEvaluatorBase):
def test_simple_alarm_clear(self):
self._set_all_alarms('alarm')
maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] + v
for v in moves.xrange(1, 5)])
self.client.metric.aggregation.side_effect = [maxs]
for v in range(1, 5)], aggregated=True)
self.client.aggregates.fetch.side_effect = [maxs]
self._evaluate_all_alarms()
self._assert_all_alarms('ok')
expected = [mock.call(alarm) for alarm in self.alarms]
update_calls = self.storage_conn.update_alarm.call_args_list
self.assertEqual(expected, update_calls)
maxs = maxs['measures']['aggregated']
reason = ('Transition to ok due to 4 samples inside '
'threshold, most recent: %s' % maxs[-1][2])
reason_data = self._reason_data('inside', 4, maxs[-1][2])
@@ -498,9 +518,13 @@ class TestGnocchiAggregationMetricsThresholdEvaluate(TestGnocchiEvaluatorBase):
def test_equivocal_from_known_state_ok(self):
self._set_all_alarms('ok')
maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] - v
for v in moves.xrange(-1, 3)])
self.client.metric.aggregation.side_effect = [maxs]
maxs = self._get_stats(
300,
[self.alarms[0].rule['threshold'] - v
for v in range(-1, 3)],
aggregated=True
)
self.client.aggregates.fetch.side_effect = [maxs]
self._evaluate_all_alarms()
self._assert_all_alarms('ok')
self.assertEqual(
@@ -512,18 +536,26 @@ class TestGnocchiAggregationMetricsThresholdEvaluate(TestGnocchiEvaluatorBase):
self._set_all_alarms('ok')
# NOTE(sileht): we add one useless point (81.0) that will break
# the test if the evaluator doesn't remove it.
maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] - v
for v in moves.xrange(-1, 5)])
self.client.metric.aggregation.side_effect = [maxs]
maxs = self._get_stats(
300,
[self.alarms[0].rule['threshold'] - v
for v in range(-1, 5)],
aggregated=True
)
self.client.aggregates.fetch.side_effect = [maxs]
self._evaluate_all_alarms()
self._assert_all_alarms('alarm')
def test_equivocal_from_known_state_and_repeat_actions(self):
self._set_all_alarms('ok')
self.alarms[0].repeat_actions = True
maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] - v
for v in moves.xrange(-1, 3)])
self.client.metric.aggregation.side_effect = [maxs]
maxs = self._get_stats(
300,
[self.alarms[0].rule['threshold'] - v
for v in range(-1, 3)],
aggregated=True
)
self.client.aggregates.fetch.side_effect = [maxs]
self._evaluate_all_alarms()
self._assert_all_alarms('ok')
self.assertEqual([], self.storage_conn.update_alarm.call_args_list)
@@ -537,9 +569,12 @@ class TestGnocchiAggregationMetricsThresholdEvaluate(TestGnocchiEvaluatorBase):
self._set_all_alarms('alarm')
self.alarms[0].repeat_actions = True
maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] - v
for v in moves.xrange(4)])
self.client.metric.aggregation.side_effect = [maxs]
maxs = self._get_stats(
300, [self.alarms[0].rule['threshold'] - v
for v in range(4)],
aggregated=True
)
self.client.aggregates.fetch.side_effect = [maxs]
self._evaluate_all_alarms()
self._assert_all_alarms('alarm')
self.assertEqual([], self.storage_conn.update_alarm.call_args_list)
@@ -560,13 +595,13 @@ class TestGnocchiAggregationResourcesThresholdEvaluate(
def test_retry_transient_api_failure(self):
avgs2 = self._get_stats(50, [self.alarms[0].rule['threshold'] - v
for v in moves.xrange(6)])
self.client.metric.aggregation.side_effect = [
for v in range(6)], aggregated=True)
self.client.aggregates.fetch.side_effect = [
exceptions.ClientException(500, "error"), avgs2]
self._test_retry_transient()
def test_simple_insufficient(self):
self.client.metric.aggregation.return_value = []
self.client.aggregates.fetch.return_value = []
self._test_simple_insufficient()
@mock.patch.object(timeutils, 'utcnow')
@@ -574,25 +609,30 @@ class TestGnocchiAggregationResourcesThresholdEvaluate(
utcnow.return_value = datetime.datetime(2015, 1, 26, 12, 57, 0, 0)
self._set_all_alarms('ok')
avgs = self._get_stats(50, [self.alarms[0].rule['threshold'] + v
for v in moves.xrange(1, 7)])
for v in range(1, 7)], aggregated=True)
self.client.metric.aggregation.side_effect = [avgs]
self.client.aggregates.fetch.side_effect = [avgs]
self._evaluate_all_alarms()
start_alarm = "2015-01-26T12:51:10"
end = "2015-01-26T12:57:00"
self.assertEqual(
[mock.call.aggregation(aggregation='mean', metrics='cpu_util',
granularity=50,
needed_overlap=0,
query={"=": {"server_group":
"my_autoscaling_group"}},
resource_type='instance',
start=start_alarm, stop=end)],
self.client.metric.mock_calls)
[mock.call.fetch(
operations=[
'aggregate', 'rate:mean',
['metric', 'cpu', 'mean'],
],
granularity=50,
search={"=": {"server_group":
"my_autoscaling_group"}},
resource_type='instance',
start=start_alarm, stop=end,
needed_overlap=0)],
self.client.aggregates.mock_calls)
self._assert_all_alarms('alarm')
expected = [mock.call(alarm) for alarm in self.alarms]
update_calls = self.storage_conn.update_alarm.call_args_list
self.assertEqual(expected, update_calls)
avgs = avgs['measures']['aggregated']
reason = ('Transition to alarm due to 6 samples outside '
'threshold, most recent: %s' % avgs[-1][2])
reason_data = self._reason_data('outside', 6, avgs[-1][2])
@@ -602,13 +642,14 @@ class TestGnocchiAggregationResourcesThresholdEvaluate(
def test_simple_alarm_clear(self):
self._set_all_alarms('alarm')
avgs = self._get_stats(50, [self.alarms[0].rule['threshold'] - v
for v in moves.xrange(6)])
self.client.metric.aggregation.side_effect = [avgs]
for v in range(6)], aggregated=True)
self.client.aggregates.fetch.side_effect = [avgs]
self._evaluate_all_alarms()
self._assert_all_alarms('ok')
expected = [mock.call(alarm) for alarm in self.alarms]
update_calls = self.storage_conn.update_alarm.call_args_list
self.assertEqual(expected, update_calls)
avgs = avgs['measures']['aggregated']
reason = ('Transition to ok due to 6 samples inside '
'threshold, most recent: %s' % avgs[-1][2])
reason_data = self._reason_data('inside', 6, avgs[-1][2])
@@ -618,8 +659,8 @@ class TestGnocchiAggregationResourcesThresholdEvaluate(
def test_equivocal_from_known_state_ok(self):
self._set_all_alarms('ok')
avgs = self._get_stats(50, [self.alarms[0].rule['threshold'] + v
for v in moves.xrange(6)])
self.client.metric.aggregation.side_effect = [avgs]
for v in range(6)], aggregated=True)
self.client.aggregates.fetch.side_effect = [avgs]
self._evaluate_all_alarms()
self._assert_all_alarms('ok')
self.assertEqual(