support for overlapping templates

Change-Id: If92de78596043eba97059d5ab110734c56eb1294
Elisha Rosensweig 2016-05-19 18:27:31 +03:00
parent 01cba4281d
commit dbeed110f6
8 changed files with 608 additions and 44 deletions

View File

@@ -86,6 +86,16 @@ class DatasourceInfoMapper(object):
            self.category_normalizer[category].set_operational_value(
                new_vertex, self.UNDEFINED_DATASOURCE)

    def get_datasource_priorities(self, datasource_name=None):
        if datasource_name:
            datasource_info = self.datasources_state_confs[datasource_name]
            return datasource_info[self.PRIORITY_VALUES]
        else:
            priorities_dict = \
                {key: self.datasources_state_confs[key][self.PRIORITY_VALUES]
                 for key in self.datasources_state_confs.keys()}
            return priorities_dict

    @staticmethod
    def _init_category_normalizer():
        return {
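The two call forms of the new method are worth spelling out. Below is a minimal, self-contained sketch of the return shapes, using invented datasource names and score values (the real data is loaded from the datasource state configuration files):

PRIORITY_VALUES = 'priority_values'  # stand-in for the class constant

# hypothetical state configuration, mirroring datasources_state_confs
datasources_state_confs = {
    'vitrage': {PRIORITY_VALUES: {'CRITICAL': 40, 'WARNING': 30, 'OK': 10}},
    'nova.host': {PRIORITY_VALUES: {'ERROR': 30, 'SUBOPTIMAL': 20,
                                    'AVAILABLE': 10}},
}

def get_datasource_priorities(datasource_name=None):
    if datasource_name:
        return datasources_state_confs[datasource_name][PRIORITY_VALUES]
    return {key: conf[PRIORITY_VALUES]
            for key, conf in datasources_state_confs.items()}

assert get_datasource_priorities('vitrage')['CRITICAL'] == 40
assert set(get_datasource_priorities()) == {'vitrage', 'nova.host'}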

View File

@@ -0,0 +1,59 @@
# Copyright 2016 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from vitrage.common.constants import VertexProperties as VProps
from vitrage.evaluator.template_fields import TemplateFields


class CausalTools(object):

    @staticmethod
    def get_score(action_info):
        return 1  # no priorities

    @staticmethod
    def get_key(action_specs):
        target_ids = {k: v.vertex_id for k, v in action_specs.targets.items()}
        return action_specs.type, hash(tuple(sorted(target_ids.items())))


class RaiseAlarmTools(object):

    def __init__(self, scores):
        self.scores = scores

    def get_score(self, action_info):
        severity = action_info.specs.properties[TemplateFields.SEVERITY]
        return self.scores[severity.upper()]

    @staticmethod
    def get_key(action_specs):
        return action_specs.type, \
            action_specs.properties[TemplateFields.ALARM_NAME], \
            hash(action_specs.targets[TemplateFields.TARGET].vertex_id)


class SetStateTools(object):

    def __init__(self, scores):
        self.scores = scores

    def get_score(self, action_info):
        state = action_info.specs.properties[TemplateFields.STATE].upper()
        target_resource = action_info.specs.targets[TemplateFields.TARGET]
        return self.scores[target_resource[VProps.TYPE]][state]

    @staticmethod
    def get_key(action_specs):
        return action_specs.type, \
            hash(action_specs.targets[TemplateFields.TARGET].vertex_id)
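The split between get_key and get_score is the heart of the overlap support: actions that should supersede one another share a key, and the score picks the winner. A minimal, self-contained sketch with invented severities, scores and ids (get_key and get_score below mirror RaiseAlarmTools, they are not the module's API):

from collections import namedtuple

ActionSpecs = namedtuple('ActionSpecs', ['type', 'targets', 'properties'])
Vertex = namedtuple('Vertex', ['vertex_id'])

scores = {'CRITICAL': 40, 'WARNING': 30}  # hypothetical severity scores
host = Vertex('host-2')

warning = ActionSpecs('raise_alarm', {'target': host},
                      {'alarm_name': 'deduced_alarm', 'severity': 'WARNING'})
critical = ActionSpecs('raise_alarm', {'target': host},
                       {'alarm_name': 'deduced_alarm', 'severity': 'CRITICAL'})

def get_key(specs):
    # same (type, alarm_name, target) -> same key, severity ignored
    return (specs.type, specs.properties['alarm_name'],
            hash(specs.targets['target'].vertex_id))

def get_score(specs):
    return scores[specs.properties['severity'].upper()]

assert get_key(warning) == get_key(critical)     # the two actions overlap
assert get_score(critical) > get_score(warning)  # CRITICAL dominates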

View File

@@ -12,12 +12,18 @@
# License for the specific language governing permissions and limitations
# under the License.

from collections import namedtuple

from oslo_log import log

from vitrage.common.constants import EdgeProperties as EProps
from vitrage.common.constants import VertexProperties as VProps
from vitrage.entity_graph.mappings.datasource_info_mapper \
    import DatasourceInfoMapper
from vitrage.evaluator.actions.action_executor import ActionExecutor
from vitrage.evaluator.actions.base import ActionMode
from vitrage.evaluator.actions.base import ActionType
import vitrage.evaluator.actions.priority_tools as pt
from vitrage.evaluator.template import ActionSpecs
from vitrage.evaluator.template import EdgeDescription
from vitrage.evaluator.template import ENTITY
@@ -26,9 +32,17 @@ from vitrage.graph import create_algorithm
from vitrage.graph import create_graph
from vitrage.graph.driver import Vertex

LOG = log.getLogger(__name__)

# Entry containing action info:
#   specs - ActionSpecs
#   mode - DO or UNDO (the action)
#   scenario_id - the id of the scenario in the scenario_repository
#   trigger_id - a unique identifier, per action, for each match in the
#       graph (i.e., for the subgraph that matched the scenario condition)
ActionInfo = \
    namedtuple('ActionInfo', ['specs', 'mode', 'scenario_id', 'trigger_id'])
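For illustration only, a hypothetical entry (the mode string, scenario id and match dict below are invented; the real code passes ActionMode.DO/UNDO and the repository's scenario id):

from collections import namedtuple

ActionInfo = \
    namedtuple('ActionInfo', ['specs', 'mode', 'scenario_id', 'trigger_id'])

match = {'host': 'host-2', 'warning_alarm': 'alarm-7'}  # hypothetical match
info = ActionInfo(specs='<ActionSpecs>', mode='DO',
                  scenario_id='deduced_state_for_host_alarms-0',
                  trigger_id=hash(tuple(sorted(match.items()))))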
class ScenarioEvaluator(object):
@@ -44,6 +58,7 @@ class ScenarioEvaluator(object):
        self._scenario_repo = scenario_repo
        self._action_executor = ActionExecutor(event_queue)
        self._entity_graph.subscribe(self.process_event)
        self._action_tracker = ActionTracker(DatasourceInfoMapper(self.conf))
        self.enabled = enabled

    def process_event(self, before, current, is_vertex):
@@ -86,10 +101,10 @@ class ScenarioEvaluator(object):
         if actions:
             LOG.debug("Actions to perform: %s", actions.values())
-            for action in actions.values():
-                action_spec = action[0]
-                action_mode = action[1]
-                self._action_executor.execute(action_spec, action_mode)
+            filtered_actions = \
+                self._analyze_and_filter_actions(actions.values())
+            for action in filtered_actions:
+                self._action_executor.execute(action.specs, action.mode)
         LOG.debug('Process event - completed')
@@ -141,7 +156,9 @@ class ScenarioEvaluator(object):
         if matches:
             for match in matches:
                 spec, action_id = self._get_action_spec(action, match)
-                actions[action_id] = (spec, mode)
+                match_hash = hash(tuple(sorted(match.items())))
+                actions[action_id] = \
+                    ActionInfo(spec, mode, scenario.id, match_hash)
         return actions

     @staticmethod
@@ -153,6 +170,7 @@ class ScenarioEvaluator(object):
        revised_spec = ActionSpecs(action_spec.type,
                                   real_items,
                                   action_spec.properties)
        # noinspection PyTypeChecker
        action_id = ScenarioEvaluator._generate_action_id(revised_spec)
        return revised_spec, action_id
@@ -181,7 +199,7 @@ class ScenarioEvaluator(object):
         for term in condition:
             if not term.positive:
                 # todo(erosensw): add support for NOT clauses
-                LOG.error('Unsupported template with NOT operator')
+                LOG.error('Template with NOT operator currently not supported')
                 return []

             if term.type == ENTITY:
@@ -210,3 +228,75 @@ class ScenarioEvaluator(object):
        condition_graph.add_vertex(edge_description.source)
        condition_graph.add_vertex(edge_description.target)
        condition_graph.add_edge(edge_description.edge)

    def _analyze_and_filter_actions(self, actions):
        actions_to_perform = {}
        for action in actions:
            key = self._action_tracker.get_key(action.specs)
            prev_dominant = self._action_tracker.get_dominant_action(key)
            if action.mode == ActionMode.DO:
                self._action_tracker.insert_action(key, action)
            else:
                self._action_tracker.remove_action(key, action)
            new_dominant = self._action_tracker.get_dominant_action(key)

            # todo: (erosensw) improvement - first analyze DOs, then UNDOs
            if not new_dominant:  # removed last entry for key
                undo_action = ActionInfo(prev_dominant.specs,
                                         ActionMode.UNDO,
                                         prev_dominant.scenario_id,
                                         prev_dominant.trigger_id)
                actions_to_perform[key] = undo_action
            elif new_dominant != prev_dominant:
                actions_to_perform[key] = new_dominant
        return actions_to_perform.values()


class ActionTracker(object):
    """Keeps track of all active actions and their relative dominance.

    Actions are organized according to resource id and action details.

    Examples:
      - all set_state actions on a given resource share the same entry,
        regardless of state
      - all raise_alarm actions with a given alarm_name on a given resource
        share the same entry, regardless of severity
    """

    def __init__(self, datasource_info_mapper):
        self._tracker = {}
        alarms_score = \
            datasource_info_mapper.get_datasource_priorities('vitrage')
        all_scores = datasource_info_mapper.get_datasource_priorities()
        self._action_tools = {
            ActionType.SET_STATE: pt.SetStateTools(all_scores),
            ActionType.RAISE_ALARM: pt.RaiseAlarmTools(alarms_score),
            ActionType.ADD_CAUSAL_RELATIONSHIP: pt.CausalTools
        }

    def get_key(self, action_specs):
        return self._action_tools[action_specs.type].get_key(action_specs)

    def insert_action(self, key, action):
        actions = self._tracker.get(key, [])
        actions.append(action)
        scorer = self._action_tools[action.specs.type].get_score
        self._tracker[key] = sorted(actions, key=scorer, reverse=True)

    def remove_action(self, key, action):
        # actions are unique in their trigger_id and scenario_id
        def _is_equivalent(entry):
            return entry.trigger_id == action.trigger_id and \
                entry.scenario_id == action.scenario_id

        try:
            to_remove = next(entry for entry in self._tracker[key]
                             if _is_equivalent(entry))
            self._tracker[key].remove(to_remove)
        except StopIteration:
            LOG.warning('Could not find action entry to remove '
                        'from tracker: {}'.format(action))

    def get_dominant_action(self, key):
        return self._tracker[key][0] if self._tracker.get(key, None) else None
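To make the dominance cycle concrete, here is a self-contained sketch of the insert/remove/get_dominant_action flow (names, scores and ids are invented; in the real tracker the scorer and key come from priority_tools):

from collections import namedtuple

Action = namedtuple('Action', ['state', 'score', 'scenario_id', 'trigger_id'])

tracker = {}  # key -> list of active actions, highest score first

def insert_action(key, action):
    actions = tracker.get(key, [])
    actions.append(action)
    tracker[key] = sorted(actions, key=lambda a: a.score, reverse=True)

def remove_action(key, action):
    tracker[key] = [a for a in tracker[key]
                    if (a.scenario_id, a.trigger_id) !=
                    (action.scenario_id, action.trigger_id)]

def get_dominant_action(key):
    return tracker[key][0] if tracker.get(key) else None

key = ('set_state', 'host-2')
warning = Action('SUBOPTIMAL', 20, 's1', 't1')
critical = Action('ERROR', 30, 's2', 't2')

insert_action(key, warning)
insert_action(key, critical)
assert get_dominant_action(key).state == 'ERROR'       # critical wins

remove_action(key, critical)                           # UNDO the critical
assert get_dominant_action(key).state == 'SUBOPTIMAL'  # warning resurfaces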

View File

@@ -16,16 +16,21 @@ from oslo_config import cfg
from oslo_log import log as logging
from six.moves import queue

from vitrage.common.constants import EdgeProperties as EProps
from vitrage.common.constants import VertexProperties as VProps
from vitrage.datasources.nova.host import NOVA_HOST_DATASOURCE
from vitrage.evaluator.scenario_evaluator import ScenarioEvaluator
from vitrage.evaluator.scenario_repository import ScenarioRepository
from vitrage.tests.functional.base import \
    TestFunctionalBase
import vitrage.tests.mocks.mock_driver as mock_driver
from vitrage.tests.mocks import utils

LOG = logging.getLogger(__name__)

_TARGET_HOST = 'host-2'
_NAGIOS_TEST_INFO = {'resource_name': _TARGET_HOST, 'sync_mode': 'snapshot'}
class TestScenarioEvaluator(TestFunctionalBase):
@@ -51,46 +56,259 @@ class TestScenarioEvaluator(TestFunctionalBase):
     def test_deduced_state(self):
         # Test Setup
-        processor = self._create_processor_with_graph(self.conf)
-        event_queue = queue.Queue()
-        ScenarioEvaluator(self.conf,
-                          processor.entity_graph,
-                          self.scenario_repository,
-                          event_queue,
-                          enabled=True)
+        event_queue, processor, evaluator = self._init_system()

-        target_host = 'host-2'
-        host_v = self._get_host_from_graph(target_host, processor.entity_graph)
+        host_v = self._get_host_from_graph(_TARGET_HOST,
+                                           processor.entity_graph)
         self.assertEqual('AVAILABLE', host_v[VProps.AGGREGATED_STATE],
-                         'host should be RUNNING when starting')
+                         'host should be AVAILABLE when starting')

-        nagios_event = {'last_check': '2016-02-07 15:26:04',
-                        'resource_name': target_host,
-                        'resource_type': NOVA_HOST_DATASOURCE,
-                        'service': 'Check_MK',
-                        'status': 'CRITICAL',
-                        'status_info': 'ok',
-                        'sync_mode': 'snapshot',
-                        'sync_type': 'nagios',
-                        'sample_date': '2016-02-07 15:26:04'}
-        processor.process_event(nagios_event)
-        # The set_state action should have added an event to the queue, so
-        processor.process_event(event_queue.get())
+        # generate nagios alarm to trigger template scenario
+        test_vals = {'status': 'WARNING', 'service': 'cause_suboptimal_state'}
+        test_vals.update(_NAGIOS_TEST_INFO)
+        generator = mock_driver.simple_nagios_alarm_generators(1, 1, test_vals)
+        warning_test = mock_driver.generate_random_events_list(generator)[0]

-        host_v = self._get_host_from_graph(target_host, processor.entity_graph)
+        host_v = self.get_host_after_event(event_queue, warning_test,
+                                           processor, _TARGET_HOST)
         self.assertEqual('SUBOPTIMAL', host_v[VProps.AGGREGATED_STATE],
-                         'host should be SUBOPTIMAL after nagios alarm event')
+                         'host should be SUBOPTIMAL with warning alarm')

         # next disable the alarm
-        nagios_event['status'] = 'OK'
-        processor.process_event(nagios_event)
-        # The set_state action should have added an event to the queue, so
-        processor.process_event(event_queue.get())
-        host_v = self._get_host_from_graph(target_host, processor.entity_graph)
+        warning_test['status'] = 'OK'
+        host_v = self.get_host_after_event(event_queue, warning_test,
+                                           processor, _TARGET_HOST)
         self.assertEqual('AVAILABLE', host_v[VProps.AGGREGATED_STATE],
-                         'host should be RUNNING when starting')
+                         'host should be AVAILABLE when alarm disabled')
    def test_overlapping_deduced_state_1(self):
        event_queue, processor, evaluator = self._init_system()

        host_v = self._get_host_from_graph(_TARGET_HOST,
                                           processor.entity_graph)
        self.assertEqual('AVAILABLE', host_v[VProps.AGGREGATED_STATE],
                         'host should be AVAILABLE when starting')

        # generate WARNING nagios alarm to trigger
        test_vals = {'status': 'WARNING', 'service': 'cause_suboptimal_state'}
        test_vals.update(_NAGIOS_TEST_INFO)
        generator = mock_driver.simple_nagios_alarm_generators(1, 1, test_vals)
        warning_test = mock_driver.generate_random_events_list(generator)[0]

        host_v = self.get_host_after_event(event_queue, warning_test,
                                           processor, _TARGET_HOST)
        self.assertEqual('SUBOPTIMAL', host_v[VProps.AGGREGATED_STATE],
                         'host should be SUBOPTIMAL with warning alarm')

        # generate CRITICAL nagios alarm to trigger
        test_vals = {'status': 'CRITICAL', 'service': 'cause_error_state'}
        test_vals.update(_NAGIOS_TEST_INFO)
        generator = mock_driver.simple_nagios_alarm_generators(1, 1, test_vals)
        critical_test = mock_driver.generate_random_events_list(generator)[0]

        host_v = self.get_host_after_event(event_queue, critical_test,
                                           processor, _TARGET_HOST)
        self.assertEqual('ERROR', host_v[VProps.AGGREGATED_STATE],
                         'host should be ERROR with critical alarm')

        # next disable the critical alarm
        critical_test['status'] = 'OK'
        host_v = self.get_host_after_event(event_queue, critical_test,
                                           processor, _TARGET_HOST)
        self.assertEqual('SUBOPTIMAL', host_v[VProps.AGGREGATED_STATE],
                         'host should be SUBOPTIMAL with only warning alarm')

        # next disable the warning alarm
        warning_test['status'] = 'OK'
        host_v = self.get_host_after_event(event_queue, warning_test,
                                           processor, _TARGET_HOST)
        self.assertEqual('AVAILABLE', host_v[VProps.AGGREGATED_STATE],
                         'host should be AVAILABLE after alarms disabled')

    def test_overlapping_deduced_state_2(self):
        event_queue, processor, evaluator = self._init_system()

        host_v = self._get_host_from_graph(_TARGET_HOST,
                                           processor.entity_graph)
        self.assertEqual('AVAILABLE', host_v[VProps.AGGREGATED_STATE],
                         'host should be AVAILABLE when starting')

        # generate CRITICAL nagios alarm to trigger
        test_vals = {'status': 'CRITICAL', 'service': 'cause_error_state'}
        test_vals.update(_NAGIOS_TEST_INFO)
        generator = mock_driver.simple_nagios_alarm_generators(1, 1, test_vals)
        critical_test = mock_driver.generate_random_events_list(generator)[0]

        host_v = self.get_host_after_event(event_queue, critical_test,
                                           processor, _TARGET_HOST)
        self.assertEqual('ERROR', host_v[VProps.AGGREGATED_STATE],
                         'host should be ERROR with critical alarm')

        # generate WARNING nagios alarm to trigger
        test_vals = {'status': 'WARNING', 'service': 'cause_suboptimal_state'}
        test_vals.update(_NAGIOS_TEST_INFO)
        generator = mock_driver.simple_nagios_alarm_generators(1, 1, test_vals)
        warning_test = mock_driver.generate_random_events_list(generator)[0]

        host_v = self.get_host_after_event(event_queue, warning_test,
                                           processor, _TARGET_HOST)
        self.assertEqual('ERROR', host_v[VProps.AGGREGATED_STATE],
                         'host should still be ERROR with critical alarm')

        # next disable the critical alarm
        critical_test['status'] = 'OK'
        host_v = self.get_host_after_event(event_queue, critical_test,
                                           processor, _TARGET_HOST)
        self.assertEqual('SUBOPTIMAL', host_v[VProps.AGGREGATED_STATE],
                         'host should be SUBOPTIMAL with only warning alarm')
    def test_deduced_alarm(self):
        event_queue, processor, evaluator = self._init_system()

        host_v = self._get_host_from_graph(_TARGET_HOST,
                                           processor.entity_graph)
        self.assertEqual('AVAILABLE', host_v[VProps.AGGREGATED_STATE],
                         'host should be AVAILABLE when starting')

        # generate WARNING nagios alarm to trigger
        test_vals = {'status': 'WARNING',
                     'service': 'cause_warning_deduced_alarm'}
        test_vals.update(_NAGIOS_TEST_INFO)
        generator = mock_driver.simple_nagios_alarm_generators(1, 1, test_vals)
        warning_test = mock_driver.generate_random_events_list(generator)[0]

        host_v = self.get_host_after_event(event_queue, warning_test,
                                           processor, _TARGET_HOST)
        alarms = \
            self._get_deduced_alarms_on_host(host_v, processor.entity_graph)
        self.assertEqual(1, len(alarms))
        self.assertEqual('WARNING', alarms[0]['severity'])
        causes = self._get_alarm_causes(alarms[0], processor.entity_graph)
        self.assertEqual(1, len(causes))

        # next disable the alarm
        warning_test['status'] = 'OK'
        host_v = self.get_host_after_event(event_queue, warning_test,
                                           processor, _TARGET_HOST)
        alarms = \
            self._get_deduced_alarms_on_host(host_v, processor.entity_graph)
        self.assertEqual(0, len(alarms))
    # todo: (erosensw) uncomment this test
    def test_overlapping_deduced_alarm_1(self):
        event_queue, processor, evaluator = self._init_system()

        # generate WARNING nagios alarm
        vals = {'status': 'WARNING', 'service': 'cause_warning_deduced_alarm'}
        vals.update(_NAGIOS_TEST_INFO)
        generator = mock_driver.simple_nagios_alarm_generators(1, 1, vals)
        warning_test = mock_driver.generate_random_events_list(generator)[0]

        host_v = self.get_host_after_event(event_queue, warning_test,
                                           processor, _TARGET_HOST)
        alarms = \
            self._get_deduced_alarms_on_host(host_v, processor.entity_graph)
        self.assertEqual(1, len(alarms))
        self.assertEqual('WARNING', alarms[0]['severity'])
        causes = self._get_alarm_causes(alarms[0], processor.entity_graph)
        self.assertEqual(1, len(causes))

        # generate CRITICAL nagios alarm to trigger
        vals = {'status': 'CRITICAL',
                'service': 'cause_critical_deduced_alarm'}
        vals.update(_NAGIOS_TEST_INFO)
        generator = mock_driver.simple_nagios_alarm_generators(1, 1, vals)
        critical_test = mock_driver.generate_random_events_list(generator)[0]

        host_v = self.get_host_after_event(event_queue, critical_test,
                                           processor, _TARGET_HOST)
        alarms = \
            self._get_deduced_alarms_on_host(host_v, processor.entity_graph)
        self.assertEqual(1, len(alarms))
        self.assertEqual('CRITICAL', alarms[0]['severity'])
        causes = self._get_alarm_causes(alarms[0], processor.entity_graph)
        self.assertEqual(2, len(causes))

        # remove WARNING nagios alarm, leaving only CRITICAL one
        warning_test['status'] = 'OK'
        host_v = self.get_host_after_event(event_queue, warning_test,
                                           processor, _TARGET_HOST)
        alarms = \
            self._get_deduced_alarms_on_host(host_v, processor.entity_graph)
        self.assertEqual(1, len(alarms))
        self.assertEqual('CRITICAL', alarms[0]['severity'])
        causes = self._get_alarm_causes(alarms[0], processor.entity_graph)
        self.assertEqual(1, len(causes))

        # next disable the alarm
        critical_test['status'] = 'OK'
        host_v = self.get_host_after_event(event_queue, critical_test,
                                           processor, _TARGET_HOST)
        alarms = \
            self._get_deduced_alarms_on_host(host_v, processor.entity_graph)
        self.assertEqual(0, len(alarms))
    def test_overlapping_deduced_alarm_2(self):
        event_queue, processor, evaluator = self._init_system()

        # generate CRITICAL nagios alarm to trigger
        test_vals = {'status': 'CRITICAL',
                     'service': 'cause_critical_deduced_alarm'}
        test_vals.update(_NAGIOS_TEST_INFO)
        generator = mock_driver.simple_nagios_alarm_generators(1, 1, test_vals)
        critical_test = mock_driver.generate_random_events_list(generator)[0]

        host_v = self.get_host_after_event(event_queue, critical_test,
                                           processor, _TARGET_HOST)
        alarms = \
            self._get_deduced_alarms_on_host(host_v, processor.entity_graph)
        self.assertEqual(1, len(alarms))
        self.assertEqual('CRITICAL', alarms[0]['severity'])

        # generate WARNING nagios alarm to trigger
        test_vals = {'status': 'WARNING',
                     'service': 'cause_warning_deduced_alarm'}
        test_vals.update(_NAGIOS_TEST_INFO)
        generator = mock_driver.simple_nagios_alarm_generators(1, 1, test_vals)
        warning_test = mock_driver.generate_random_events_list(generator)[0]

        host_v = self.get_host_after_event(event_queue, warning_test,
                                           processor, _TARGET_HOST)
        alarms = \
            self._get_deduced_alarms_on_host(host_v, processor.entity_graph)
        self.assertEqual(1, len(alarms))
        self.assertEqual('CRITICAL', alarms[0]['severity'])

        # remove CRITICAL nagios alarm, leaving only WARNING one
        critical_test['status'] = 'OK'
        host_v = self.get_host_after_event(event_queue, critical_test,
                                           processor, _TARGET_HOST)
        alarms = \
            self._get_deduced_alarms_on_host(host_v, processor.entity_graph)
        self.assertEqual(1, len(alarms))
        self.assertEqual('WARNING', alarms[0]['severity'])
    def get_host_after_event(self, event_queue, nagios_event,
                             processor, target_host):
        processor.process_event(nagios_event)
        while not event_queue.empty():
            processor.process_event(event_queue.get())
        host_v = self._get_host_from_graph(target_host,
                                           processor.entity_graph)
        return host_v

    def _init_system(self):
        processor = self._create_processor_with_graph(self.conf)
        event_queue = queue.Queue()
        evaluator = ScenarioEvaluator(self.conf, processor.entity_graph,
                                      self.scenario_repository, event_queue,
                                      enabled=True)
        return event_queue, processor, evaluator

    @staticmethod
    def _get_host_from_graph(host_name, entity_graph):
@@ -100,3 +318,18 @@ class TestScenarioEvaluator(TestFunctionalBase):
                                              vertex_attr_filter=vertex_attrs)
        assert len(host_vertices) == 1, "incorrect number of vertices"
        return host_vertices[0]

    @staticmethod
    def _get_deduced_alarms_on_host(host_v, entity_graph):
        v_id = host_v.vertex_id
        vertex_attrs = {VProps.NAME: 'deduced_alarm',
                        VProps.IS_DELETED: False, }
        return entity_graph.neighbors(v_id=v_id,
                                      vertex_attr_filter=vertex_attrs)

    @staticmethod
    def _get_alarm_causes(alarm_v, entity_graph):
        v_id = alarm_v.vertex_id
        edge_attrs = {EProps.RELATIONSHIP_TYPE: "causes",
                      EProps.IS_DELETED: False, }
        return entity_graph.neighbors(v_id=v_id, edge_attr_filter=edge_attrs)

View File

@@ -0,0 +1,37 @@
metadata:
  id: basic_causal_links
definitions:
  entities:
    - entity:
        category: ALARM
        type: nagios
        template_id: nagios_alarm
    - entity:
        category: ALARM
        type: vitrage
        name: deduced_alarm
        template_id: deduced_alarm
    - entity:
        category: RESOURCE
        type: nova.host
        template_id: host
  relationships:
    - relationship:
        source: nagios_alarm
        relationship_type: on
        target: host
        template_id: nagios_alarm_on_host
    - relationship:
        source: deduced_alarm
        relationship_type: on
        target: host
        template_id: deduced_alarm_on_host
scenarios:
  - scenario:
      condition: nagios_alarm_on_host and deduced_alarm_on_host
      actions:
        - action:
            action_type: add_causal_relationship
            action_target:
              source: nagios_alarm
              target: deduced_alarm

View File

@@ -0,0 +1,62 @@
metadata:
  id: deduced_alarm_for_host_alarms
definitions:
  entities:
    - entity:
        category: ALARM
        type: nagios
        name: cause_warning_deduced_alarm
        severity: WARNING
        template_id: warning_alarm
    - entity:
        category: ALARM
        type: nagios
        name: cause_critical_deduced_alarm
        severity: CRITICAL
        template_id: critical_alarm
    - entity:
        category: RESOURCE
        type: nova.host
        template_id: host
  relationships:
    - relationship:
        source: warning_alarm
        relationship_type: on
        target: host
        template_id: warning_alarm_on_host
    - relationship:
        source: critical_alarm
        relationship_type: on
        target: host
        template_id: critical_alarm_on_host
scenarios:
  - scenario:
      condition: warning_alarm_on_host
      actions:
        - action:
            action_type: raise_alarm
            properties:
              alarm_name: deduced_alarm
              severity: WARNING
            action_target:
              target: host
  - scenario:
      condition: critical_alarm_on_host
      actions:
        - action:
            action_type: raise_alarm
            properties:
              alarm_name: deduced_alarm
              severity: CRITICAL
            action_target:
              target: host
  - scenario:  # duplicate of previous scenario
      condition: critical_alarm_on_host
      actions:
        - action:
            action_type: raise_alarm
            properties:
              alarm_name: deduced_alarm
              severity: CRITICAL
            action_target:
              target: host
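The last scenario is deliberately identical to the one before it: both raise_alarm actions map to the same tracker key of (action type, alarm_name, target), so the tracker collapses them and only one deduced alarm is raised. A small sketch of that collapsing, with an invented host id (raise_alarm_key is a stand-in for RaiseAlarmTools.get_key):

def raise_alarm_key(action_type, alarm_name, target_vertex_id):
    # mirrors RaiseAlarmTools.get_key: severity is deliberately excluded
    return action_type, alarm_name, hash(target_vertex_id)

key_a = raise_alarm_key('raise_alarm', 'deduced_alarm', 'host-2')
key_b = raise_alarm_key('raise_alarm', 'deduced_alarm', 'host-2')
assert key_a == key_b  # duplicate scenarios share one tracker entry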

View File

@@ -5,20 +5,33 @@ definitions:
     - entity:
         category: ALARM
         type: nagios
-        template_id: alarm
+        name: cause_suboptimal_state
+        severity: WARNING
+        template_id: warning_alarm
+    - entity:
+        category: ALARM
+        type: nagios
+        name: cause_error_state
+        severity: CRITICAL
+        template_id: critical_alarm
     - entity:
         category: RESOURCE
         type: nova.host
         template_id: host
   relationships:
     - relationship:
-        source: alarm
-        target: host
+        source: warning_alarm
         relationship_type: on
-        template_id: alarm_on_host
+        target: host
+        template_id: warning_alarm_on_host
+    - relationship:
+        source: critical_alarm
+        relationship_type: on
+        target: host
+        template_id: critical_alarm_on_host
 scenarios:
   - scenario:
-      condition: alarm_on_host
+      condition: warning_alarm_on_host
       actions:
         - action:
             action_type: set_state
@@ -26,3 +39,12 @@ scenarios:
              state: SUBOPTIMAL
            action_target:
              target: host
  - scenario:
      condition: critical_alarm_on_host
      actions:
        - action:
            action_type: set_state
            properties:
              state: ERROR
            action_target:
              target: host

View File

@@ -0,0 +1,51 @@
metadata:
  id: deduced_state_for_host_alarms_2
  description: replicates deduced_state.yaml, with changes to template_ids, to test overlapping templates
definitions:
  entities:
    - entity:
        category: ALARM
        type: nagios
        name: cause_suboptimal_state
        severity: WARNING
        template_id: warning_alarm_2
    - entity:
        category: ALARM
        type: nagios
        name: cause_error_state
        severity: CRITICAL
        template_id: critical_alarm_2
    - entity:
        category: RESOURCE
        type: nova.host
        template_id: host_2
  relationships:
    - relationship:
        source: warning_alarm_2
        relationship_type: on
        target: host_2
        template_id: warning_alarm_on_host_2
    - relationship:
        source: critical_alarm_2
        relationship_type: on
        target: host_2
        template_id: critical_alarm_on_host_2
scenarios:
  - scenario:
      condition: warning_alarm_on_host_2
      actions:
        - action:
            action_type: set_state
            properties:
              state: SUBOPTIMAL
            action_target:
              target: host_2
  - scenario:
      condition: critical_alarm_on_host_2
      actions:
        - action:
            action_type: set_state
            properties:
              state: ERROR
            action_target:
              target: host_2