Merge "alarming: add gnocchi alarm rules"
commit df10a87ea7

@@ -13,10 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.

from keystoneclient.v2_0 import client as ksclient
from oslo_config import cfg

from ceilometer.agent import base
from ceilometer import keystone_client
from ceilometer.openstack.common import log

OPTS = [
@@ -31,8 +31,6 @@ OPTS = [
]

cfg.CONF.register_opts(OPTS, group='polling')
cfg.CONF.import_group('service_credentials', 'ceilometer.service')
cfg.CONF.import_opt('http_timeout', 'ceilometer.service')

LOG = log.getLogger(__name__)

@@ -48,17 +46,7 @@ class AgentManager(base.AgentManager):

    def interval_task(self, task):
        try:
            self.keystone = ksclient.Client(
                username=cfg.CONF.service_credentials.os_username,
                password=cfg.CONF.service_credentials.os_password,
                tenant_id=cfg.CONF.service_credentials.os_tenant_id,
                tenant_name=cfg.CONF.service_credentials.os_tenant_name,
                cacert=cfg.CONF.service_credentials.os_cacert,
                auth_url=cfg.CONF.service_credentials.os_auth_url,
                region_name=cfg.CONF.service_credentials.os_region_name,
                insecure=cfg.CONF.service_credentials.insecure,
                timeout=cfg.CONF.http_timeout,)
            self.keystone = keystone_client.get_client()
        except Exception as e:
            self.keystone = e

        super(AgentManager, self).interval_task(task)
ceilometer/alarm/evaluator/gnocchi.py (new file, 226 lines)
@@ -0,0 +1,226 @@
#
# Copyright 2015 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime
import operator

from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import requests
import six.moves

from ceilometer.alarm import evaluator
from ceilometer.i18n import _
from ceilometer import keystone_client
from ceilometer.openstack.common import log

LOG = log.getLogger(__name__)

COMPARATORS = {
    'gt': operator.gt,
    'lt': operator.lt,
    'ge': operator.ge,
    'le': operator.le,
    'eq': operator.eq,
    'ne': operator.ne,
}

OPTS = [
    cfg.StrOpt('gnocchi_url',
               default="http://localhost:8041",
               help='URL to Gnocchi.'),
]

cfg.CONF.register_opts(OPTS, group="alarms")
cfg.CONF.import_opt('http_timeout', 'ceilometer.service')


class GnocchiThresholdEvaluator(evaluator.Evaluator):

    # the sliding evaluation window is extended to allow
    # for reporting/ingestion lag
    look_back = 1

    # minimum number of datapoints within sliding window to
    # avoid unknown state
    quorum = 1

    def __init__(self, notifier):
        super(GnocchiThresholdEvaluator, self).__init__(notifier)
        self.gnocchi_url = cfg.CONF.alarms.gnocchi_url
        self._ks_client = None

    @property
    def ks_client(self):
        if self._ks_client is None:
            self._ks_client = keystone_client.get_client()
        return self._ks_client

    def _get_headers(self, content_type="application/json"):
        return {
            'Content-Type': content_type,
            'X-Auth-Token': self.ks_client.auth_token,
        }

    def _statistics(self, alarm, start, end):
        """Retrieve statistics over the current window."""
        if alarm.type == 'gnocchi_metrics_threshold':
            url = ("%s/v1/metric_aggregation/?"
                   "aggregation=%s&start=%s&end=%s&%s") % (
                self.gnocchi_url,
                alarm.rule['aggregation_method'],
                start, end,
                "&".join("metric=%s" % m
                         for m in alarm.rule['metrics']))

        elif alarm.type == 'gnocchi_resources_threshold':
            url = ("%s/v1/resource/%s/%s/metric/%s/measures?"
                   "aggregation=%s&start=%s&end=%s") % (
                self.gnocchi_url,
                alarm.rule['resource_type'],
                alarm.rule['resource_constraint'],
                alarm.rule['metric'],
                alarm.rule['aggregation_method'],
                start, end)

        LOG.debug(_('stats query %s') % url)
        try:
            r = requests.get(url, headers=self._get_headers())
        except Exception:
            LOG.exception(_('alarm stats retrieval failed'))
            return []
        if int(r.status_code / 100) != 2:
            LOG.exception(_('alarm stats retrieval failed: %s') % r.text)
            return []
        else:
            return jsonutils.loads(r.text)

    @classmethod
    def _bound_duration(cls, alarm):
        """Bound the duration of the statistics query."""
        now = timeutils.utcnow()
        # when exclusion of weak datapoints is enabled, we extend
        # the look-back period so as to allow a clearer sample count
        # trend to be established
        window = (alarm.rule['granularity'] *
                  (alarm.rule['evaluation_periods'] + cls.look_back))
        start = now - datetime.timedelta(seconds=window)
        LOG.debug(_('query stats from %(start)s to '
                    '%(now)s') % {'start': start, 'now': now})
        return start.isoformat(), now.isoformat()

    def _sufficient(self, alarm, statistics):
        """Check for the sufficiency of the data for evaluation.

        Ensure there is sufficient data for evaluation, transitioning to
        unknown otherwise.
        """
        sufficient = len(statistics) >= self.quorum
        if not sufficient and alarm.state != evaluator.UNKNOWN:
            reason = _('%d datapoints are unknown') % alarm.rule[
                'evaluation_periods']
            reason_data = self._reason_data('unknown',
                                            alarm.rule['evaluation_periods'],
                                            None)
            self._refresh(alarm, evaluator.UNKNOWN, reason, reason_data)
        return sufficient

    @staticmethod
    def _reason_data(disposition, count, most_recent):
        """Create a reason data dictionary for this evaluator type."""
        return {'type': 'threshold', 'disposition': disposition,
                'count': count, 'most_recent': most_recent}

    @classmethod
    def _reason(cls, alarm, statistics, distilled, state):
        """Fabricate reason string."""
        count = len(statistics)
        disposition = 'inside' if state == evaluator.OK else 'outside'
        last = statistics[-1]
        transition = alarm.state != state
        reason_data = cls._reason_data(disposition, count, last)
        if transition:
            return (_('Transition to %(state)s due to %(count)d samples'
                      ' %(disposition)s threshold, most recent:'
                      ' %(most_recent)s')
                    % dict(reason_data, state=state)), reason_data
        return (_('Remaining as %(state)s due to %(count)d samples'
                  ' %(disposition)s threshold, most recent: %(most_recent)s')
                % dict(reason_data, state=state)), reason_data

    def _transition(self, alarm, statistics, compared):
        """Transition alarm state if necessary.

        The transition rules are currently hardcoded as:

        - transitioning from a known state requires an unequivocal
          set of datapoints

        - transitioning from unknown is on the basis of the most
          recent datapoint if equivocal

        Ultimately this will be policy-driven.
        """
        distilled = all(compared)
        unequivocal = distilled or not any(compared)
        unknown = alarm.state == evaluator.UNKNOWN
        continuous = alarm.repeat_actions

        if unequivocal:
            state = evaluator.ALARM if distilled else evaluator.OK
            reason, reason_data = self._reason(alarm, statistics,
                                               distilled, state)
            if alarm.state != state or continuous:
                self._refresh(alarm, state, reason, reason_data)
        elif unknown or continuous:
            trending_state = evaluator.ALARM if compared[-1] else evaluator.OK
            state = trending_state if unknown else alarm.state
            reason, reason_data = self._reason(alarm, statistics,
                                               distilled, state)
            self._refresh(alarm, state, reason, reason_data)

    @staticmethod
    def _select_best_granularity(alarm, statistics):
        """Return the datapoints that correspond to the alarm granularity"""
        # TODO(sileht): if there's no direct match, but there is an archive
        # policy with granularity that's an even divisor or the period,
        # we could potentially do a mean-of-means (or max-of-maxes or whatever,
        # but not a stddev-of-stddevs).
        return [stats[2] for stats in statistics
                if stats[1] == alarm.rule['granularity']]

    def evaluate(self, alarm):
        if not self.within_time_constraint(alarm):
            LOG.debug(_('Attempted to evaluate alarm %s, but it is not '
                        'within its time constraint.') % alarm.alarm_id)
            return

        start, end = self._bound_duration(alarm)
        statistics = self._statistics(alarm, start, end)
        statistics = self._select_best_granularity(alarm, statistics)

        if self._sufficient(alarm, statistics):
            def _compare(value):
                op = COMPARATORS[alarm.rule['comparison_operator']]
                limit = alarm.rule['threshold']
                LOG.debug(_('comparing value %(value)s against threshold'
                            ' %(limit)s') %
                          {'value': value, 'limit': limit})
                return op(value, limit)

            self._transition(alarm,
                             statistics,
                             list(six.moves.map(_compare, statistics)))
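The evaluation flow above reduces to: fetch measures, keep the ones stored at the alarm's granularity, compare each value to the threshold, then let _transition() decide. The following is a minimal illustrative sketch of that reduction, not part of the patch; it assumes Gnocchi measures arrive as [timestamp, granularity, value] triples, which is also what the tests further below fake.

# Illustration only -- not part of the patch. A stripped-down version of the
# evaluation above, assuming measures are [timestamp, granularity, value].
import operator

COMPARATORS = {'gt': operator.gt, 'lt': operator.lt, 'ge': operator.ge,
               'le': operator.le, 'eq': operator.eq, 'ne': operator.ne}


def evaluate_measures(rule, measures, quorum=1):
    # keep only datapoints stored at the granularity the alarm asks for
    values = [m[2] for m in measures if m[1] == rule['granularity']]
    if len(values) < quorum:
        return 'insufficient data'
    compared = [COMPARATORS[rule['comparison_operator']](v, rule['threshold'])
                for v in values]
    if all(compared):
        return 'alarm'
    if not any(compared):
        return 'ok'
    return 'equivocal'  # the full _transition() above decides this case


# a hypothetical 'gt 80.0' rule over three 60-second datapoints:
rule = {'granularity': 60, 'comparison_operator': 'gt', 'threshold': 80.0}
print(evaluate_measures(
    rule, [['t0', 60, 85.2], ['t1', 60, 90.1], ['t2', 60, 82.0]]))  # -> alarm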
ceilometer/api/controllers/v2/alarm_rules/gnocchi.py (new file, 146 lines)
@@ -0,0 +1,146 @@
#
# Copyright 2015 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_serialization import jsonutils
import requests
import uuid
import wsme
from wsme import types as wtypes

from ceilometer.api.controllers.v2 import base
from ceilometer.api.controllers.v2 import utils as v2_utils
from ceilometer import keystone_client


cfg.CONF.import_opt('gnocchi_url', 'ceilometer.alarm.evaluator.gnocchi',
                    group="alarms")


class GnocchiUnavailable(Exception):
    code = 503


class AlarmGnocchiThresholdRule(base.AlarmRule):
    comparison_operator = base.AdvEnum('comparison_operator', str,
                                       'lt', 'le', 'eq', 'ne', 'ge', 'gt',
                                       default='eq')
    "The comparison against the alarm threshold"

    threshold = wsme.wsattr(float, mandatory=True)
    "The threshold of the alarm"

    aggregation_method = wsme.wsattr(wtypes.text, mandatory=True)
    "The aggregation_method to compare to the threshold"

    evaluation_periods = wsme.wsattr(wtypes.IntegerType(minimum=1), default=1)
    "The number of historical periods to evaluate the threshold"

    granularity = wsme.wsattr(wtypes.IntegerType(minimum=1), default=60)
    "The time range in seconds over which query"

    @classmethod
    def validate_alarm(cls, alarm):
        alarm_rule = getattr(alarm, "%s_rule" % alarm.type)
        aggregation_method = alarm_rule.aggregation_method
        if aggregation_method not in cls._get_aggregation_methods():
            raise base.ClientSideError(
                'aggregation_method should be in %s not %s' % (
                    cls._get_aggregation_methods(), aggregation_method))

    # NOTE(sileht): once cachetools is in the requirements
    # enable it
    # @cachetools.ttl_cache(maxsize=1, ttl=600)
    @staticmethod
    def _get_aggregation_methods():
        ks_client = keystone_client.get_client()
        gnocchi_url = cfg.CONF.alarms.gnocchi_url
        headers = {'Content-Type': "application/json",
                   'X-Auth-Token': ks_client.auth_token}
        try:
            r = requests.get("%s/v1/capabilities" % gnocchi_url,
                             headers=headers)
        except requests.ConnectionError as e:
            raise GnocchiUnavailable(e)
        if r.status_code // 200 != 1:
            raise GnocchiUnavailable(r.text)

        return jsonutils.loads(r.text).get('aggregation_methods', [])


class AlarmGnocchiMetricOfResourcesThresholdRule(AlarmGnocchiThresholdRule):
    metric = wsme.wsattr(wtypes.text, mandatory=True)
    "The name of the metric"

    resource_constraint = wsme.wsattr(wtypes.text, mandatory=True)
    "The id of a resource or a expression to select multiple resources"

    resource_type = wsme.wsattr(wtypes.text, mandatory=True)
    "The resource type"

    def as_dict(self):
        rule = self.as_dict_from_keys(['granularity', 'comparison_operator',
                                       'threshold', 'aggregation_method',
                                       'evaluation_periods',
                                       'metric',
                                       'resource_constraint',
                                       'resource_type'])
        return rule

    @classmethod
    def validate_alarm(cls, alarm):
        super(AlarmGnocchiMetricOfResourcesThresholdRule,
              cls).validate_alarm(alarm)

        rule = alarm.gnocchi_resources_threshold_rule
        try:
            uuid.UUID(rule.resource_constraint)
        except Exception:
            auth_project = v2_utils.get_auth_project(alarm.project_id)
            if auth_project:
                # NOTE(sileht): when we have more complex query allowed
                # this should be enhanced to ensure the constraint are still
                # scoped to auth_project
                rule.resource_constraint += (
                    u'\u2227project_id=%s' % auth_project)
        else:
            ks_client = keystone_client.get_client()
            gnocchi_url = cfg.CONF.alarms.gnocchi_url
            headers = {'Content-Type': "application/json",
                       'X-Auth-Token': ks_client.auth_token}
            try:
                r = requests.get("%s/v1/resource/%s/%s" % (
                    gnocchi_url, rule.resource_type,
                    rule.resource_constraint),
                    headers=headers)
            except requests.ConnectionError as e:
                raise GnocchiUnavailable(e)
            if r.status_code == 404:
                raise base.EntityNotFound('gnocchi resource',
                                          rule.resource_constraint)
            elif r.status_code // 200 != 1:
                raise base.ClientSideError(r.body, status_code=r.status_code)


class AlarmGnocchiMetricsThresholdRule(AlarmGnocchiThresholdRule):
    metrics = wsme.wsattr([wtypes.text], mandatory=True)
    "A list of metric Ids"

    def as_dict(self):
        rule = self.as_dict_from_keys(['granularity', 'comparison_operator',
                                       'threshold', 'aggregation_method',
                                       'evaluation_periods',
                                       'metrics'])
        return rule
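A note on validate_alarm() in AlarmGnocchiMetricOfResourcesThresholdRule above: whether resource_constraint parses as a UUID decides which validation path is taken. A small illustrative sketch, not part of the patch; the example values are hypothetical (the UUID is borrowed from the tests later in this change).

# Illustration only -- not part of the patch.
import uuid


def constraint_kind(resource_constraint):
    try:
        uuid.UUID(resource_constraint)
    except ValueError:
        # query expression: validate_alarm() appends
        # u'\u2227project_id=<auth project>' to keep it project-scoped
        return 'query expression'
    # plain resource id: validate_alarm() checks it against
    # GET <gnocchi_url>/v1/resource/<resource_type>/<id>
    return 'single resource id'


print(constraint_kind('209ef69c-c10c-4efb-90ff-46f4b2d90d2e'))  # single resource id
print(constraint_kind('server_group=as'))                       # query expression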
@@ -199,8 +199,10 @@ class Alarm(base.Base):

    def get_description(self):
        rule = getattr(self, '%s_rule' % self.type, None)
        if not self._description and rule:
            return six.text_type(rule.default_description)
        if not self._description:
            if hasattr(rule, 'default_description'):
                return six.text_type(rule.default_description)
            return "%s alarm rule" % self.type
        return self._description

    def set_description(self, value):
ceilometer/keystone_client.py (new file, 34 lines)
@@ -0,0 +1,34 @@
#
# Copyright 2015 eNovance <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


from keystoneclient.v2_0 import client as ksclient
from oslo_config import cfg

cfg.CONF.import_group('service_credentials', 'ceilometer.service')
cfg.CONF.import_opt('http_timeout', 'ceilometer.service')


def get_client():
    return ksclient.Client(
        username=cfg.CONF.service_credentials.os_username,
        password=cfg.CONF.service_credentials.os_password,
        tenant_id=cfg.CONF.service_credentials.os_tenant_id,
        tenant_name=cfg.CONF.service_credentials.os_tenant_name,
        cacert=cfg.CONF.service_credentials.os_cacert,
        auth_url=cfg.CONF.service_credentials.os_auth_url,
        region_name=cfg.CONF.service_credentials.os_region_name,
        insecure=cfg.CONF.service_credentials.insecure,
        timeout=cfg.CONF.http_timeout,)
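keystone_client.get_client() simply centralizes the credential handling the agent manager used to build inline. A minimal usage sketch, not part of the patch, assuming the usual service_credentials options are set in ceilometer.conf:

# Illustration only -- not part of the patch.
from ceilometer import keystone_client

ks = keystone_client.get_client()
# the evaluator and the alarm-rule validators above only need the token:
headers = {'Content-Type': 'application/json', 'X-Auth-Token': ks.auth_token}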
@@ -104,6 +104,7 @@ def list_opts():
     itertools.chain(ceilometer.alarm.notifier.rest.OPTS,
                     ceilometer.alarm.service.OPTS,
                     ceilometer.alarm.rpc.OPTS,
                     ceilometer.alarm.evaluator.gnocchi.OPTS,
                     ceilometer.api.controllers.v2.alarms.ALARM_API_OPTS,
                     ceilometer.cmd.alarm.OPTS)),
    ('api',
ceilometer/tests/alarm/evaluator/test_gnocchi.py (new file, 358 lines)
@@ -0,0 +1,358 @@
|
||||
#
|
||||
# Copyright 2015 eNovance
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import datetime
|
||||
import unittest
|
||||
import uuid
|
||||
|
||||
import mock
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_utils import timeutils
|
||||
from oslotest import mockpatch
|
||||
import pytz
|
||||
import six
|
||||
from six import moves
|
||||
|
||||
from ceilometer.alarm.evaluator import gnocchi
|
||||
from ceilometer.alarm.storage import models
|
||||
from ceilometer.tests.alarm.evaluator import base
|
||||
from ceilometer.tests import constants
|
||||
|
||||
|
||||
class FakeResponse(object):
|
||||
def __init__(self, code, data):
|
||||
if code == 200:
|
||||
self.values = [d[2] for d in data]
|
||||
else:
|
||||
self.values = []
|
||||
self.text = jsonutils.dumps(data)
|
||||
self.status_code = code
|
||||
|
||||
|
||||
class TestGnocchiThresholdEvaluate(base.TestEvaluatorBase):
|
||||
EVALUATOR = gnocchi.GnocchiThresholdEvaluator
|
||||
|
||||
def setUp(self):
|
||||
ks_client = mock.Mock(auth_token='fake_token')
|
||||
ks_client.users.find.return_value = 'gnocchi'
|
||||
self.useFixture(mockpatch.Patch(
|
||||
'keystoneclient.v2_0.client.Client',
|
||||
return_value=ks_client))
|
||||
|
||||
super(TestGnocchiThresholdEvaluate, self).setUp()
|
||||
|
||||
self.useFixture(mockpatch.Patch('ceilometerclient.client.get_client',
|
||||
return_value=self.api_client))
|
||||
self.requests = self.useFixture(mockpatch.Patch(
|
||||
'ceilometer.alarm.evaluator.gnocchi.requests')).mock
|
||||
|
||||
def prepare_alarms(self):
|
||||
self.alarms = [
|
||||
models.Alarm(name='instance_running_hot',
|
||||
description='instance_running_hot',
|
||||
type='gnocchi_resources_threshold',
|
||||
enabled=True,
|
||||
user_id='foobar',
|
||||
project_id='snafu',
|
||||
alarm_id=str(uuid.uuid4()),
|
||||
state='insufficient data',
|
||||
state_timestamp=constants.MIN_DATETIME,
|
||||
timestamp=constants.MIN_DATETIME,
|
||||
insufficient_data_actions=[],
|
||||
ok_actions=[],
|
||||
alarm_actions=[],
|
||||
repeat_actions=False,
|
||||
time_constraints=[],
|
||||
rule=dict(
|
||||
comparison_operator='gt',
|
||||
threshold=80.0,
|
||||
evaluation_periods=5,
|
||||
aggregation_method='mean',
|
||||
granularity=60,
|
||||
metric='cpu_util',
|
||||
resource_type='instance',
|
||||
resource_constraint='my_instance')
|
||||
),
|
||||
models.Alarm(name='group_running_idle',
|
||||
description='group_running_idle',
|
||||
type='gnocchi_metrics_threshold',
|
||||
enabled=True,
|
||||
user_id='foobar',
|
||||
project_id='snafu',
|
||||
state='insufficient data',
|
||||
state_timestamp=constants.MIN_DATETIME,
|
||||
timestamp=constants.MIN_DATETIME,
|
||||
insufficient_data_actions=[],
|
||||
ok_actions=[],
|
||||
alarm_actions=[],
|
||||
repeat_actions=False,
|
||||
alarm_id=str(uuid.uuid4()),
|
||||
time_constraints=[],
|
||||
rule=dict(
|
||||
comparison_operator='le',
|
||||
threshold=10.0,
|
||||
evaluation_periods=4,
|
||||
aggregation_method='max',
|
||||
granularity=300,
|
||||
metrics=['0bb1604d-1193-4c0a-b4b8-74b170e35e83',
|
||||
'9ddc209f-42f8-41e1-b8f1-8804f59c4053']),
|
||||
),
|
||||
]
|
||||
|
||||
@staticmethod
|
||||
def _get_stats(granularity, values):
|
||||
now = timeutils.utcnow_ts()
|
||||
return FakeResponse(
|
||||
200, [[six.text_type(now - len(values) * granularity),
|
||||
granularity, value] for value in values])
|
||||
|
||||
@staticmethod
|
||||
def _reason_data(disposition, count, most_recent):
|
||||
return {'type': 'threshold', 'disposition': disposition,
|
||||
'count': count, 'most_recent': most_recent}
|
||||
|
||||
def _set_all_rules(self, field, value):
|
||||
for alarm in self.alarms:
|
||||
alarm.rule[field] = value
|
||||
|
||||
def test_retry_transient_api_failure(self):
|
||||
means = self._get_stats(60, [self.alarms[0].rule['threshold'] - v
|
||||
for v in moves.xrange(5)])
|
||||
maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] + v
|
||||
for v in moves.xrange(1, 4)])
|
||||
self.requests.get.side_effect = [Exception('boom'),
|
||||
FakeResponse(500, "error"),
|
||||
means,
|
||||
maxs]
|
||||
self._evaluate_all_alarms()
|
||||
self._assert_all_alarms('insufficient data')
|
||||
self._evaluate_all_alarms()
|
||||
self._assert_all_alarms('ok')
|
||||
|
||||
def test_simple_insufficient(self):
|
||||
self._set_all_alarms('ok')
|
||||
self.requests.get.return_value = FakeResponse(200, [])
|
||||
self._evaluate_all_alarms()
|
||||
self._assert_all_alarms('insufficient data')
|
||||
expected = [mock.call(alarm.alarm_id, state='insufficient data')
|
||||
for alarm in self.alarms]
|
||||
update_calls = self.api_client.alarms.set_state.call_args_list
|
||||
self.assertEqual(expected, update_calls)
|
||||
expected = [mock.call(
|
||||
alarm,
|
||||
'ok',
|
||||
('%d datapoints are unknown'
|
||||
% alarm.rule['evaluation_periods']),
|
||||
self._reason_data('unknown',
|
||||
alarm.rule['evaluation_periods'],
|
||||
None))
|
||||
for alarm in self.alarms]
|
||||
self.assertEqual(expected, self.notifier.notify.call_args_list)
|
||||
|
||||
@mock.patch.object(timeutils, 'utcnow')
|
||||
def test_simple_alarm_trip(self, utcnow):
|
||||
utcnow.return_value = datetime.datetime(2015, 1, 26, 12, 57, 0, 0)
|
||||
self._set_all_alarms('ok')
|
||||
avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v
|
||||
for v in moves.xrange(1, 6)])
|
||||
maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v
|
||||
for v in moves.xrange(4)])
|
||||
|
||||
self.requests.get.side_effect = [avgs, maxs]
|
||||
self._evaluate_all_alarms()
|
||||
|
||||
expected_headers = {'X-Auth-Token': 'fake_token',
|
||||
'Content-Type': 'application/json'}
|
||||
|
||||
start_alarm1 = "2015-01-26T12:51:00"
|
||||
start_alarm2 = "2015-01-26T12:32:00"
|
||||
end = "2015-01-26T12:57:00"
|
||||
|
||||
self.assertEqual([
|
||||
mock.call('http://localhost:8041/v1/resource/instance/my_instance/'
|
||||
'metric/cpu_util/measures?aggregation=mean'
|
||||
'&start=' + start_alarm1 + '&end=' + end,
|
||||
headers=expected_headers),
|
||||
mock.call('http://localhost:8041/v1/metric_aggregation/?'
|
||||
'aggregation=max&start=' + start_alarm2 + '&end=' + end +
|
||||
'&metric=0bb1604d-1193-4c0a-b4b8-74b170e35e83'
|
||||
'&metric=9ddc209f-42f8-41e1-b8f1-8804f59c4053',
|
||||
headers=expected_headers)],
|
||||
self.requests.get.mock_calls)
|
||||
|
||||
self._assert_all_alarms('alarm')
|
||||
expected = [mock.call(alarm.alarm_id, state='alarm')
|
||||
for alarm in self.alarms]
|
||||
update_calls = self.api_client.alarms.set_state.call_args_list
|
||||
self.assertEqual(expected, update_calls)
|
||||
reasons = ['Transition to alarm due to 5 samples outside'
|
||||
' threshold, most recent: %s' % avgs.values[-1],
|
||||
'Transition to alarm due to 4 samples outside'
|
||||
' threshold, most recent: %s' % maxs.values[-1]]
|
||||
reason_datas = [self._reason_data('outside', 5, avgs.values[-1]),
|
||||
self._reason_data('outside', 4, maxs.values[-1])]
|
||||
expected = [mock.call(alarm, 'ok', reason, reason_data)
|
||||
for alarm, reason, reason_data
|
||||
in zip(self.alarms, reasons, reason_datas)]
|
||||
self.assertEqual(expected, self.notifier.notify.call_args_list)
|
||||
|
||||
def test_simple_alarm_clear(self):
|
||||
self._set_all_alarms('alarm')
|
||||
avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] - v
|
||||
for v in moves.xrange(5)])
|
||||
maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] + v
|
||||
for v in moves.xrange(1, 5)])
|
||||
self.requests.get.side_effect = [avgs, maxs]
|
||||
self._evaluate_all_alarms()
|
||||
self._assert_all_alarms('ok')
|
||||
expected = [mock.call(alarm.alarm_id, state='ok')
|
||||
for alarm in self.alarms]
|
||||
update_calls = self.api_client.alarms.set_state.call_args_list
|
||||
self.assertEqual(expected, update_calls)
|
||||
reasons = ['Transition to ok due to 5 samples inside'
|
||||
' threshold, most recent: %s' % avgs.values[-1],
|
||||
'Transition to ok due to 4 samples inside'
|
||||
' threshold, most recent: %s' % maxs.values[-1]]
|
||||
reason_datas = [self._reason_data('inside', 5, avgs.values[-1]),
|
||||
self._reason_data('inside', 4, maxs.values[-1])]
|
||||
expected = [mock.call(alarm, 'alarm', reason, reason_data)
|
||||
for alarm, reason, reason_data
|
||||
in zip(self.alarms, reasons, reason_datas)]
|
||||
self.assertEqual(expected, self.notifier.notify.call_args_list)
|
||||
|
||||
def test_equivocal_from_known_state(self):
|
||||
self._set_all_alarms('ok')
|
||||
avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v
|
||||
for v in moves.xrange(5)])
|
||||
maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v
|
||||
for v in moves.xrange(-1, 3)])
|
||||
self.requests.get.side_effect = [avgs, maxs]
|
||||
self._evaluate_all_alarms()
|
||||
self._assert_all_alarms('ok')
|
||||
self.assertEqual(
|
||||
[],
|
||||
self.api_client.alarms.set_state.call_args_list)
|
||||
self.assertEqual([], self.notifier.notify.call_args_list)
|
||||
|
||||
def test_equivocal_from_known_state_and_repeat_actions(self):
|
||||
self._set_all_alarms('ok')
|
||||
self.alarms[1].repeat_actions = True
|
||||
avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v
|
||||
for v in moves.xrange(5)])
|
||||
maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v
|
||||
for v in moves.xrange(-1, 3)])
|
||||
self.requests.get.side_effect = [avgs, maxs]
|
||||
self._evaluate_all_alarms()
|
||||
self._assert_all_alarms('ok')
|
||||
self.assertEqual([], self.api_client.alarms.set_state.call_args_list)
|
||||
reason = ('Remaining as ok due to 4 samples inside'
|
||||
' threshold, most recent: 8.0')
|
||||
reason_datas = self._reason_data('inside', 4, 8.0)
|
||||
expected = [mock.call(self.alarms[1], 'ok', reason, reason_datas)]
|
||||
self.assertEqual(expected, self.notifier.notify.call_args_list)
|
||||
|
||||
def test_unequivocal_from_known_state_and_repeat_actions(self):
|
||||
self._set_all_alarms('alarm')
|
||||
self.alarms[1].repeat_actions = True
|
||||
avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v
|
||||
for v in moves.xrange(1, 6)])
|
||||
maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v
|
||||
for v in moves.xrange(4)])
|
||||
self.requests.get.side_effect = [avgs, maxs]
|
||||
self._evaluate_all_alarms()
|
||||
self._assert_all_alarms('alarm')
|
||||
self.assertEqual([], self.api_client.alarms.set_state.call_args_list)
|
||||
reason = ('Remaining as alarm due to 4 samples outside'
|
||||
' threshold, most recent: 7.0')
|
||||
reason_datas = self._reason_data('outside', 4, 7.0)
|
||||
expected = [mock.call(self.alarms[1], 'alarm',
|
||||
reason, reason_datas)]
|
||||
self.assertEqual(expected, self.notifier.notify.call_args_list)
|
||||
|
||||
def test_state_change_and_repeat_actions(self):
|
||||
self._set_all_alarms('ok')
|
||||
self.alarms[0].repeat_actions = True
|
||||
self.alarms[1].repeat_actions = True
|
||||
avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v
|
||||
for v in moves.xrange(1, 6)])
|
||||
maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v
|
||||
for v in moves.xrange(4)])
|
||||
self.requests.get.side_effect = [avgs, maxs]
|
||||
self._evaluate_all_alarms()
|
||||
self._assert_all_alarms('alarm')
|
||||
expected = [mock.call(alarm.alarm_id, state='alarm')
|
||||
for alarm in self.alarms]
|
||||
update_calls = self.api_client.alarms.set_state.call_args_list
|
||||
self.assertEqual(expected, update_calls)
|
||||
reasons = ['Transition to alarm due to 5 samples outside'
|
||||
' threshold, most recent: %s' % avgs.values[-1],
|
||||
'Transition to alarm due to 4 samples outside'
|
||||
' threshold, most recent: %s' % maxs.values[-1]]
|
||||
reason_datas = [self._reason_data('outside', 5, avgs.values[-1]),
|
||||
self._reason_data('outside', 4, maxs.values[-1])]
|
||||
expected = [mock.call(alarm, 'ok', reason, reason_data)
|
||||
for alarm, reason, reason_data
|
||||
in zip(self.alarms, reasons, reason_datas)]
|
||||
self.assertEqual(expected, self.notifier.notify.call_args_list)
|
||||
|
||||
def test_equivocal_from_unknown(self):
|
||||
self._set_all_alarms('insufficient data')
|
||||
avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v
|
||||
for v in moves.xrange(1, 6)])
|
||||
maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v
|
||||
for v in moves.xrange(4)])
|
||||
self.requests.get.side_effect = [avgs, maxs]
|
||||
self._evaluate_all_alarms()
|
||||
self._assert_all_alarms('alarm')
|
||||
expected = [mock.call(alarm.alarm_id, state='alarm')
|
||||
for alarm in self.alarms]
|
||||
update_calls = self.api_client.alarms.set_state.call_args_list
|
||||
self.assertEqual(expected, update_calls)
|
||||
reasons = ['Transition to alarm due to 5 samples outside'
|
||||
' threshold, most recent: %s' % avgs.values[-1],
|
||||
'Transition to alarm due to 4 samples outside'
|
||||
' threshold, most recent: %s' % maxs.values[-1]]
|
||||
reason_datas = [self._reason_data('outside', 5, avgs.values[-1]),
|
||||
self._reason_data('outside', 4, maxs.values[-1])]
|
||||
expected = [mock.call(alarm, 'insufficient data',
|
||||
reason, reason_data)
|
||||
for alarm, reason, reason_data
|
||||
in zip(self.alarms, reasons, reason_datas)]
|
||||
self.assertEqual(expected, self.notifier.notify.call_args_list)
|
||||
|
||||
@unittest.skipIf(six.PY3,
|
||||
"the ceilometer base class is not python 3 ready")
|
||||
@mock.patch.object(timeutils, 'utcnow')
|
||||
def test_no_state_change_outside_time_constraint(self, mock_utcnow):
|
||||
self._set_all_alarms('ok')
|
||||
self.alarms[0].time_constraints = [
|
||||
{'name': 'test',
|
||||
'description': 'test',
|
||||
'start': '0 11 * * *', # daily at 11:00
|
||||
'duration': 10800, # 3 hours
|
||||
'timezone': 'Europe/Ljubljana'}
|
||||
]
|
||||
self.alarms[1].time_constraints = self.alarms[0].time_constraints
|
||||
dt = datetime.datetime(2014, 1, 1, 15, 0, 0,
|
||||
tzinfo=pytz.timezone('Europe/Ljubljana'))
|
||||
mock_utcnow.return_value = dt.astimezone(pytz.UTC)
|
||||
self.requests.get.return_value = []
|
||||
self._evaluate_all_alarms()
|
||||
self._assert_all_alarms('ok')
|
||||
update_calls = self.api_client.alarms.set_state.call_args_list
|
||||
self.assertEqual([], update_calls,
|
||||
"Alarm should not change state if the current "
|
||||
" time is outside its time constraint.")
|
||||
self.assertEqual([], self.notifier.notify.call_args_list)
|
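For reference, the payload shape that FakeResponse and _get_stats above emulate: each Gnocchi measure is a [timestamp, granularity, value] triple, so both the evaluator and the tests pick out index 2. A tiny sketch, not part of the patch, with made-up timestamps and values:

# Illustration only -- not part of the patch.
sample_body = [
    ["2015-01-26T12:51:00", 60, 85.2],
    ["2015-01-26T12:52:00", 60, 90.1],
]
values = [d[2] for d in sample_body]  # what FakeResponse.values stores
most_recent = values[-1]              # what the reason strings report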
@ -20,6 +20,7 @@ import uuid
|
||||
import mock
|
||||
import oslo.messaging.conffixture
|
||||
from oslo_serialization import jsonutils
|
||||
import requests
|
||||
import six
|
||||
from six import moves
|
||||
|
||||
@ -147,12 +148,68 @@ class TestAlarms(v2.FunctionalTest,
|
||||
time_constraints=[],
|
||||
rule=dict(alarm_ids=['a', 'b'],
|
||||
operator='or'),
|
||||
)]:
|
||||
),
|
||||
models.Alarm(name='name5',
|
||||
type='gnocchi_resources_threshold',
|
||||
enabled=True,
|
||||
alarm_id='e',
|
||||
description='e',
|
||||
state='insufficient data',
|
||||
severity='critical',
|
||||
state_timestamp=constants.MIN_DATETIME,
|
||||
timestamp=constants.MIN_DATETIME,
|
||||
ok_actions=[],
|
||||
insufficient_data_actions=[],
|
||||
alarm_actions=[],
|
||||
repeat_actions=True,
|
||||
user_id=self.auth_headers['X-User-Id'],
|
||||
project_id=self.auth_headers['X-Project-Id'],
|
||||
time_constraints=[],
|
||||
rule=dict(comparison_operator='gt',
|
||||
threshold=2.0,
|
||||
aggregation_method='mean',
|
||||
granularity=60,
|
||||
evaluation_periods=1,
|
||||
metric='meter.test',
|
||||
resource_type='instance',
|
||||
resource_constraint=(
|
||||
'6841c175-d7c4-4bc2-bc7a-1c7832271b8f'),
|
||||
)
|
||||
),
|
||||
models.Alarm(name='name6',
|
||||
type='gnocchi_metrics_threshold',
|
||||
enabled=True,
|
||||
alarm_id='f',
|
||||
description='f',
|
||||
state='insufficient data',
|
||||
severity='critical',
|
||||
state_timestamp=constants.MIN_DATETIME,
|
||||
timestamp=constants.MIN_DATETIME,
|
||||
ok_actions=[],
|
||||
insufficient_data_actions=[],
|
||||
alarm_actions=[],
|
||||
repeat_actions=True,
|
||||
user_id=self.auth_headers['X-User-Id'],
|
||||
project_id=self.auth_headers['X-Project-Id'],
|
||||
time_constraints=[],
|
||||
rule=dict(comparison_operator='gt',
|
||||
threshold=2.0,
|
||||
aggregation_method='mean',
|
||||
evaluation_periods=1,
|
||||
granularity=60,
|
||||
metrics=[
|
||||
'41869681-5776-46d6-91ed-cccc43b6e4e3',
|
||||
'a1fb80f4-c242-4f57-87c6-68f47521059e']
|
||||
),
|
||||
),
|
||||
]:
|
||||
|
||||
self.alarm_conn.update_alarm(alarm)
|
||||
|
||||
@staticmethod
|
||||
def _add_default_threshold_rule(alarm):
|
||||
if 'exclude_outliers' not in alarm['threshold_rule']:
|
||||
if (alarm['type'] == 'threshold' and
|
||||
'exclude_outliers' not in alarm['threshold_rule']):
|
||||
alarm['threshold_rule']['exclude_outliers'] = False
|
||||
|
||||
def _verify_alarm(self, json, alarm, expected_name=None):
|
||||
@ -168,8 +225,9 @@ class TestAlarms(v2.FunctionalTest,
|
||||
|
||||
def test_list_alarms(self):
|
||||
data = self.get_json('/alarms')
|
||||
self.assertEqual(4, len(data))
|
||||
self.assertEqual(set(['name1', 'name2', 'name3', 'name4']),
|
||||
self.assertEqual(6, len(data))
|
||||
self.assertEqual(set(['name1', 'name2', 'name3', 'name4', 'name5',
|
||||
'name6']),
|
||||
set(r['name'] for r in data))
|
||||
self.assertEqual(set(['meter.test', 'meter.mine']),
|
||||
set(r['threshold_rule']['meter_name']
|
||||
@ -177,6 +235,10 @@ class TestAlarms(v2.FunctionalTest,
|
||||
self.assertEqual(set(['or']),
|
||||
set(r['combination_rule']['operator']
|
||||
for r in data if 'combination_rule' in r))
|
||||
self.assertEqual(set(['meter.test']),
|
||||
set(r['gnocchi_resources_threshold_rule']['metric']
|
||||
for r in data
|
||||
if 'gnocchi_resources_threshold_rule' in r))
|
||||
|
||||
def test_alarms_query_with_timestamp(self):
|
||||
date_time = datetime.datetime(2012, 7, 2, 10, 41)
|
||||
@ -341,7 +403,7 @@ class TestAlarms(v2.FunctionalTest,
|
||||
q=[{'field': field,
|
||||
'op': 'eq',
|
||||
'value': project}])
|
||||
self.assertEqual(4, len(alarms))
|
||||
self.assertEqual(6, len(alarms))
|
||||
|
||||
_test('project')
|
||||
_test('project_id')
|
||||
@ -406,7 +468,7 @@ class TestAlarms(v2.FunctionalTest,
|
||||
% field.split('/', 1)[-1],
|
||||
resp.json['error_message']['faultstring'])
|
||||
alarms = list(self.alarm_conn.get_alarms())
|
||||
self.assertEqual(4, len(alarms))
|
||||
self.assertEqual(6, len(alarms))
|
||||
|
||||
def test_post_invalid_alarm_time_constraint_start(self):
|
||||
json = {
|
||||
@ -427,7 +489,7 @@ class TestAlarms(v2.FunctionalTest,
|
||||
self.post_json('/alarms', params=json, expect_errors=True, status=400,
|
||||
headers=self.auth_headers)
|
||||
alarms = list(self.alarm_conn.get_alarms())
|
||||
self.assertEqual(4, len(alarms))
|
||||
self.assertEqual(6, len(alarms))
|
||||
|
||||
def test_post_duplicate_time_constraint_name(self):
|
||||
json = {
|
||||
@ -456,7 +518,7 @@ class TestAlarms(v2.FunctionalTest,
|
||||
"Time constraint names must be unique for a given alarm.",
|
||||
resp.json['error_message']['faultstring'])
|
||||
alarms = list(self.alarm_conn.get_alarms())
|
||||
self.assertEqual(4, len(alarms))
|
||||
self.assertEqual(6, len(alarms))
|
||||
|
||||
def test_post_alarm_null_time_constraint(self):
|
||||
json = {
|
||||
@ -490,7 +552,7 @@ class TestAlarms(v2.FunctionalTest,
|
||||
self.post_json('/alarms', params=json, expect_errors=True, status=400,
|
||||
headers=self.auth_headers)
|
||||
alarms = list(self.alarm_conn.get_alarms())
|
||||
self.assertEqual(4, len(alarms))
|
||||
self.assertEqual(6, len(alarms))
|
||||
|
||||
def test_post_invalid_alarm_time_constraint_timezone(self):
|
||||
json = {
|
||||
@ -512,7 +574,7 @@ class TestAlarms(v2.FunctionalTest,
|
||||
self.post_json('/alarms', params=json, expect_errors=True, status=400,
|
||||
headers=self.auth_headers)
|
||||
alarms = list(self.alarm_conn.get_alarms())
|
||||
self.assertEqual(4, len(alarms))
|
||||
self.assertEqual(6, len(alarms))
|
||||
|
||||
def test_post_invalid_alarm_period(self):
|
||||
json = {
|
||||
@ -530,7 +592,7 @@ class TestAlarms(v2.FunctionalTest,
|
||||
self.post_json('/alarms', params=json, expect_errors=True, status=400,
|
||||
headers=self.auth_headers)
|
||||
alarms = list(self.alarm_conn.get_alarms())
|
||||
self.assertEqual(4, len(alarms))
|
||||
self.assertEqual(6, len(alarms))
|
||||
|
||||
def test_post_null_threshold_rule(self):
|
||||
json = {
|
||||
@ -563,7 +625,7 @@ class TestAlarms(v2.FunctionalTest,
|
||||
self.assertIn(expected_err_msg,
|
||||
resp.json['error_message']['faultstring'])
|
||||
alarms = list(self.alarm_conn.get_alarms())
|
||||
self.assertEqual(4, len(alarms))
|
||||
self.assertEqual(6, len(alarms))
|
||||
|
||||
def test_post_invalid_alarm_input_state(self):
|
||||
json = {
|
||||
@ -583,7 +645,7 @@ class TestAlarms(v2.FunctionalTest,
|
||||
self.assertIn(expected_err_msg,
|
||||
resp.json['error_message']['faultstring'])
|
||||
alarms = list(self.alarm_conn.get_alarms())
|
||||
self.assertEqual(4, len(alarms))
|
||||
self.assertEqual(6, len(alarms))
|
||||
|
||||
def test_post_invalid_alarm_input_severity(self):
|
||||
json = {
|
||||
@ -604,7 +666,7 @@ class TestAlarms(v2.FunctionalTest,
|
||||
self.assertIn(expected_err_msg,
|
||||
resp.json['error_message']['faultstring'])
|
||||
alarms = list(self.alarm_conn.get_alarms())
|
||||
self.assertEqual(4, len(alarms))
|
||||
self.assertEqual(6, len(alarms))
|
||||
|
||||
def test_post_invalid_alarm_input_comparison_operator(self):
|
||||
json = {
|
||||
@ -625,7 +687,7 @@ class TestAlarms(v2.FunctionalTest,
|
||||
self.assertIn(expected_err_msg,
|
||||
resp.json['error_message']['faultstring'])
|
||||
alarms = list(self.alarm_conn.get_alarms())
|
||||
self.assertEqual(4, len(alarms))
|
||||
self.assertEqual(6, len(alarms))
|
||||
|
||||
def test_post_invalid_alarm_input_type(self):
|
||||
json = {
|
||||
@ -646,7 +708,7 @@ class TestAlarms(v2.FunctionalTest,
|
||||
self.assertIn(expected_err_msg,
|
||||
resp.json['error_message']['faultstring'])
|
||||
alarms = list(self.alarm_conn.get_alarms())
|
||||
self.assertEqual(4, len(alarms))
|
||||
self.assertEqual(6, len(alarms))
|
||||
|
||||
def test_post_invalid_alarm_input_enabled_str(self):
|
||||
json = {
|
||||
@ -670,7 +732,7 @@ class TestAlarms(v2.FunctionalTest,
|
||||
self.assertEqual(expected_err_msg,
|
||||
resp.json['error_message']['faultstring'])
|
||||
alarms = list(self.alarm_conn.get_alarms())
|
||||
self.assertEqual(4, len(alarms))
|
||||
self.assertEqual(6, len(alarms))
|
||||
|
||||
def test_post_invalid_alarm_input_enabled_int(self):
|
||||
json = {
|
||||
@ -694,7 +756,7 @@ class TestAlarms(v2.FunctionalTest,
|
||||
self.assertEqual(expected_err_msg,
|
||||
resp.json['error_message']['faultstring'])
|
||||
alarms = list(self.alarm_conn.get_alarms())
|
||||
self.assertEqual(4, len(alarms))
|
||||
self.assertEqual(6, len(alarms))
|
||||
|
||||
def test_post_invalid_combination_alarm_input_operator(self):
|
||||
json = {
|
||||
@ -720,7 +782,7 @@ class TestAlarms(v2.FunctionalTest,
|
||||
self.assertIn(expected_err_msg,
|
||||
resp.json['error_message']['faultstring'])
|
||||
alarms = list(self.alarm_conn.get_alarms())
|
||||
self.assertEqual(4, len(alarms))
|
||||
self.assertEqual(6, len(alarms))
|
||||
|
||||
def test_post_invalid_alarm_query(self):
|
||||
json = {
|
||||
@ -739,7 +801,7 @@ class TestAlarms(v2.FunctionalTest,
|
||||
self.post_json('/alarms', params=json, expect_errors=True, status=400,
|
||||
headers=self.auth_headers)
|
||||
alarms = list(self.alarm_conn.get_alarms())
|
||||
self.assertEqual(4, len(alarms))
|
||||
self.assertEqual(6, len(alarms))
|
||||
|
||||
def test_post_invalid_alarm_query_field_type(self):
|
||||
json = {
|
||||
@ -763,7 +825,7 @@ class TestAlarms(v2.FunctionalTest,
|
||||
fault_string = resp_string['error_message']['faultstring']
|
||||
self.assertTrue(fault_string.startswith(expected_error_message))
|
||||
alarms = list(self.alarm_conn.get_alarms())
|
||||
self.assertEqual(4, len(alarms))
|
||||
self.assertEqual(6, len(alarms))
|
||||
|
||||
def test_post_invalid_alarm_have_multiple_rules(self):
|
||||
json = {
|
||||
@ -784,7 +846,7 @@ class TestAlarms(v2.FunctionalTest,
|
||||
resp = self.post_json('/alarms', params=json, expect_errors=True,
|
||||
status=400, headers=self.auth_headers)
|
||||
alarms = list(self.alarm_conn.get_alarms())
|
||||
self.assertEqual(4, len(alarms))
|
||||
self.assertEqual(6, len(alarms))
|
||||
self.assertEqual('threshold_rule and combination_rule cannot '
|
||||
'be set at the same time',
|
||||
resp.json['error_message']['faultstring'])
|
||||
@ -808,7 +870,7 @@ class TestAlarms(v2.FunctionalTest,
|
||||
resp = self.post_json('/alarms', params=json, expect_errors=True,
|
||||
status=400, headers=self.auth_headers)
|
||||
alarms = list(self.alarm_conn.get_alarms())
|
||||
self.assertEqual(4, len(alarms))
|
||||
self.assertEqual(6, len(alarms))
|
||||
self.assertEqual(
|
||||
'Unknown argument: "timestamp": '
|
||||
'not valid for this resource',
|
||||
@ -847,7 +909,7 @@ class TestAlarms(v2.FunctionalTest,
|
||||
resp = self.post_json('/alarms', params=json, status=400,
|
||||
headers=self.auth_headers)
|
||||
alarms = list(self.alarm_conn.get_alarms())
|
||||
self.assertEqual(4, len(alarms))
|
||||
self.assertEqual(6, len(alarms))
|
||||
self.assertEqual(error_message,
|
||||
resp.json['error_message']['faultstring'])
|
||||
|
||||
@ -915,7 +977,7 @@ class TestAlarms(v2.FunctionalTest,
|
||||
self.post_json('/alarms', params=json, status=201,
|
||||
headers=self.auth_headers)
|
||||
alarms = list(self.alarm_conn.get_alarms())
|
||||
self.assertEqual(5, len(alarms))
|
||||
self.assertEqual(7, len(alarms))
|
||||
for alarm in alarms:
|
||||
if alarm.name == 'added_alarm_defaults':
|
||||
for key in to_check:
|
||||
@ -1868,18 +1930,18 @@ class TestAlarms(v2.FunctionalTest,
|
||||
|
||||
def test_delete_alarm(self):
|
||||
data = self.get_json('/alarms')
|
||||
self.assertEqual(4, len(data))
|
||||
self.assertEqual(6, len(data))
|
||||
|
||||
resp = self.delete('/alarms/%s' % data[0]['alarm_id'],
|
||||
headers=self.auth_headers,
|
||||
status=204)
|
||||
self.assertEqual('', resp.body)
|
||||
alarms = list(self.alarm_conn.get_alarms())
|
||||
self.assertEqual(3, len(alarms))
|
||||
self.assertEqual(5, len(alarms))
|
||||
|
||||
def test_get_state_alarm(self):
|
||||
data = self.get_json('/alarms')
|
||||
self.assertEqual(4, len(data))
|
||||
self.assertEqual(6, len(data))
|
||||
|
||||
resp = self.get_json('/alarms/%s/state' % data[0]['alarm_id'],
|
||||
headers=self.auth_headers)
|
||||
@ -1887,7 +1949,7 @@ class TestAlarms(v2.FunctionalTest,
|
||||
|
||||
def test_set_state_alarm(self):
|
||||
data = self.get_json('/alarms')
|
||||
self.assertEqual(4, len(data))
|
||||
self.assertEqual(6, len(data))
|
||||
|
||||
resp = self.put_json('/alarms/%s/state' % data[0]['alarm_id'],
|
||||
headers=self.auth_headers,
|
||||
@ -1899,7 +1961,7 @@ class TestAlarms(v2.FunctionalTest,
|
||||
|
||||
def test_set_invalid_state_alarm(self):
|
||||
data = self.get_json('/alarms')
|
||||
self.assertEqual(4, len(data))
|
||||
self.assertEqual(6, len(data))
|
||||
|
||||
self.put_json('/alarms/%s/state' % data[0]['alarm_id'],
|
||||
headers=self.auth_headers,
|
||||
@ -2299,6 +2361,140 @@ class TestAlarms(v2.FunctionalTest,
|
||||
'project_id', 'timestamp', 'type',
|
||||
'user_id']).issubset(payload.keys()))
|
||||
|
||||
@mock.patch('ceilometer.keystone_client.get_client')
|
||||
def test_post_gnocchi_resources_alarm(self, __):
|
||||
json = {
|
||||
'enabled': False,
|
||||
'name': 'name_post',
|
||||
'state': 'ok',
|
||||
'type': 'gnocchi_resources_threshold',
|
||||
'severity': 'critical',
|
||||
'ok_actions': ['http://something/ok'],
|
||||
'alarm_actions': ['http://something/alarm'],
|
||||
'insufficient_data_actions': ['http://something/no'],
|
||||
'repeat_actions': True,
|
||||
'gnocchi_resources_threshold_rule': {
|
||||
'metric': 'ameter',
|
||||
'comparison_operator': 'le',
|
||||
'aggregation_method': 'count',
|
||||
'threshold': 50,
|
||||
'evaluation_periods': 3,
|
||||
'granularity': 180,
|
||||
'resource_type': 'instance',
|
||||
'resource_constraint': '209ef69c-c10c-4efb-90ff-46f4b2d90d2e',
|
||||
}
|
||||
}
|
||||
|
||||
with mock.patch('requests.get',
|
||||
side_effect=requests.ConnectionError()):
|
||||
resp = self.post_json('/alarms', params=json,
|
||||
headers=self.auth_headers,
|
||||
expect_errors=True)
|
||||
self.assertEqual(503, resp.status_code, resp.body)
|
||||
|
||||
with mock.patch('requests.get',
|
||||
return_value=mock.Mock(status_code=500,
|
||||
body="my_custom_error",
|
||||
text="my_custom_error")):
|
||||
resp = self.post_json('/alarms', params=json,
|
||||
headers=self.auth_headers,
|
||||
expect_errors=True)
|
||||
self.assertEqual(503, resp.status_code, resp.body)
|
||||
self.assertIn('my_custom_error',
|
||||
resp.json['error_message']['faultstring'])
|
||||
|
||||
cap_result = mock.Mock(status_code=201,
|
||||
text=jsonutils.dumps(
|
||||
{'aggregation_methods': ['count']}))
|
||||
resource_result = mock.Mock(status_code=200, text="blob")
|
||||
with mock.patch('requests.get', side_effect=[cap_result,
|
||||
resource_result]
|
||||
) as gnocchi_get:
|
||||
self.post_json('/alarms', params=json, headers=self.auth_headers)
|
||||
|
||||
expected = [mock.call('http://localhost:8041/v1/capabilities',
|
||||
headers=mock.ANY),
|
||||
mock.call('http://localhost:8041/v1/resource/instance/'
|
||||
'209ef69c-c10c-4efb-90ff-46f4b2d90d2e',
|
||||
headers=mock.ANY)]
|
||||
self.assertEqual(expected, gnocchi_get.mock_calls)
|
||||
|
||||
alarms = list(self.alarm_conn.get_alarms(enabled=False))
|
||||
self.assertEqual(1, len(alarms))
|
||||
self._verify_alarm(json, alarms[0])
|
||||
|
||||
@mock.patch('ceilometer.keystone_client.get_client')
|
||||
def test_post_gnocchi_metrics_alarm(self, __):
|
||||
json = {
|
||||
'enabled': False,
|
||||
'name': 'name_post',
|
||||
'state': 'ok',
|
||||
'type': 'gnocchi_metrics_threshold',
|
||||
'severity': 'critical',
|
||||
'ok_actions': ['http://something/ok'],
|
||||
'alarm_actions': ['http://something/alarm'],
|
||||
'insufficient_data_actions': ['http://something/no'],
|
||||
'repeat_actions': True,
|
||||
'gnocchi_metrics_threshold_rule': {
|
||||
'metrics': ['b3d9d8ab-05e8-439f-89ad-5e978dd2a5eb',
|
||||
'009d4faf-c275-46f0-8f2d-670b15bac2b0'],
|
||||
'comparison_operator': 'le',
|
||||
'aggregation_method': 'count',
|
||||
'threshold': 50,
|
||||
'evaluation_periods': 3,
|
||||
'granularity': 180,
|
||||
}
|
||||
}
|
||||
|
||||
cap_result = mock.Mock(status_code=200,
|
||||
text=jsonutils.dumps(
|
||||
{'aggregation_methods': ['count']}))
|
||||
with mock.patch('requests.get', return_value=cap_result):
|
||||
self.post_json('/alarms', params=json, headers=self.auth_headers)
|
||||
|
||||
alarms = list(self.alarm_conn.get_alarms(enabled=False))
|
||||
self.assertEqual(1, len(alarms))
|
||||
self._verify_alarm(json, alarms[0])
|
||||
|
||||
@mock.patch('ceilometer.keystone_client.get_client')
|
||||
def test_post_gnocchi_resources_alarm_project_constraint(self, __):
|
||||
json = {
|
||||
'enabled': False,
|
||||
'name': 'name_post',
|
||||
'state': 'ok',
|
||||
'type': 'gnocchi_resources_threshold',
|
||||
'severity': 'critical',
|
||||
'ok_actions': ['http://something/ok'],
|
||||
'alarm_actions': ['http://something/alarm'],
|
||||
'insufficient_data_actions': ['http://something/no'],
|
||||
'repeat_actions': True,
|
||||
'gnocchi_resources_threshold_rule': {
|
||||
'metric': 'ameter',
|
||||
'comparison_operator': 'le',
|
||||
'aggregation_method': 'count',
|
||||
'threshold': 50,
|
||||
'evaluation_periods': 3,
|
||||
'granularity': 180,
|
||||
'resource_type': 'instance',
|
||||
'resource_constraint': u'server_group=as',
|
||||
}
|
||||
}
|
||||
|
||||
cap_result = mock.Mock(status_code=201,
|
||||
text=jsonutils.dumps(
|
||||
{'aggregation_methods': ['count']}))
|
||||
resource_result = mock.Mock(status_code=200, text="blob")
|
||||
with mock.patch('requests.get',
|
||||
side_effect=[cap_result, resource_result]):
|
||||
self.post_json('/alarms', params=json, headers=self.auth_headers)
|
||||
|
||||
alarms = list(self.alarm_conn.get_alarms(enabled=False))
|
||||
self.assertEqual(1, len(alarms))
|
||||
|
||||
json['gnocchi_resources_threshold_rule']['resource_constraint'] += (
|
||||
u'\u2227project_id=%s' % self.auth_headers['X-Project-Id'])
|
||||
self._verify_alarm(json, alarms[0])
|
||||
|
||||
|
||||
class TestAlarmsQuotas(v2.FunctionalTest,
|
||||
tests_db.MixinTestsWithBackendScenarios):
|
||||
|
@@ -100,6 +100,12 @@ Alarms
.. autotype:: ceilometer.api.controllers.v2.alarm_rules.combination.AlarmCombinationRule
   :members:

.. autotype:: ceilometer.api.controllers.v2.alarm_rules.gnocchi.AlarmGnocchiMetricOfResourcesThresholdRule
   :members:

.. autotype:: ceilometer.api.controllers.v2.alarm_rules.gnocchi.AlarmGnocchiMetricsThresholdRule
   :members:

.. autotype:: ceilometer.api.controllers.v2.alarms.AlarmTimeConstraint
   :members:

@@ -295,10 +295,14 @@ ceilometer.event.publisher =
ceilometer.alarm.rule =
    threshold = ceilometer.api.controllers.v2.alarm_rules.threshold:AlarmThresholdRule
    combination = ceilometer.api.controllers.v2.alarm_rules.combination:AlarmCombinationRule
    gnocchi_resources_threshold = ceilometer.api.controllers.v2.alarm_rules.gnocchi:AlarmGnocchiMetricOfResourcesThresholdRule
    gnocchi_metrics_threshold = ceilometer.api.controllers.v2.alarm_rules.gnocchi:AlarmGnocchiMetricsThresholdRule

ceilometer.alarm.evaluator =
    threshold = ceilometer.alarm.evaluator.threshold:ThresholdEvaluator
    combination = ceilometer.alarm.evaluator.combination:CombinationEvaluator
    gnocchi_resources_threshold = ceilometer.alarm.evaluator.gnocchi:GnocchiThresholdEvaluator
    gnocchi_metrics_threshold = ceilometer.alarm.evaluator.gnocchi:GnocchiThresholdEvaluator

ceilometer.alarm.evaluator_service =
    default = ceilometer.alarm.service:AlarmEvaluationService
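These setup.cfg entry points are how the new rule and evaluator classes get wired in by name. A rough sketch of how such an entry point can be resolved, not part of the patch; it uses the stevedore driver API, and ceilometer's own loading code may differ:

# Illustration only -- not part of the patch.
from stevedore import driver

mgr = driver.DriverManager(namespace='ceilometer.alarm.evaluator',
                           name='gnocchi_resources_threshold',
                           invoke_on_load=False)
evaluator_cls = mgr.driver  # ceilometer.alarm.evaluator.gnocchi:GnocchiThresholdEvaluator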