NFP - Added periodic HM functionality

Adds support for a periodic health monitor. When a service VM is
launched, the health monitor periodically checks the health of the
service VM and reports the status to the under-cloud NFP orchestrator.

Change-Id: I416e6287a0e0155be69fe7f83a4ea9e6dd62f671
Co-Authored-By: ashutosh mishra <mca.ashu4@gmail.com>
Closes-Bug: 1654254
Author: Shishir Kumar Singh
Date: 2017-01-06 17:25:08 +05:30
Committer: Mahesh Kurund
Parent: 811fcbb6e2
Commit: dcfcef56c0
12 changed files with 260 additions and 65 deletions
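
The change makes periodic health reporting edge-triggered: the orchestrator
is notified only when a service VM transitions between reachable and
unreachable, after MAX_FAIL_COUNT consecutive failures. A minimal standalone
sketch of that logic, mirroring the handle_periodic_hm() handler added below
(illustrative only, not the committed code):

MAX_FAIL_COUNT = 28  # matches the constant introduced by this commit
DEVICE_TO_BECOME_DOWN = 'DEVICE_TO_BECOME_DOWN'  # waiting to report DOWN
DEVICE_TO_BECOME_UP = 'DEVICE_TO_BECOME_UP'      # DOWN reported, awaiting UP

def poll_once(state, healthy):
    """One polling tick; return a notification id only on a transition."""
    if not healthy:
        state['fail_count'] += 1
        if (state['fail_count'] >= MAX_FAIL_COUNT and
                state['reason'] == DEVICE_TO_BECOME_DOWN):
            state['reason'] = DEVICE_TO_BECOME_UP
            return 'PERIODIC_HM_DEVICE_NOT_REACHABLE'
    else:
        state['fail_count'] = 0
        if state['reason'] == DEVICE_TO_BECOME_UP:
            state['reason'] = DEVICE_TO_BECOME_DOWN
            return 'PERIODIC_HM_DEVICE_REACHABLE'
    return None

state = {'fail_count': 0, 'reason': DEVICE_TO_BECOME_DOWN}
for healthy in [False] * 28 + [True]:
    note = poll_once(state, healthy)
    if note:
        print(note)  # DOWN once after 28 misses, then UP on recovery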


@@ -10,8 +10,11 @@
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
from neutron._i18n import _LI
from gbpservice.contrib.nfp.configurator.agents import agent_base
from gbpservice.contrib.nfp.configurator.lib import (
generic_config_constants as gen_cfg_const)
@@ -139,7 +142,9 @@ class GenericConfigRpcManager(agent_base.AgentBaseRPCManager):
Returns: None
"""
LOG.info(_LI("Received configure health monitor api for nfds:"
"%(nfds)s"),
{'nfds': resource_data['nfds']})
resource_data['fail_count'] = 0
self._send_event(context,
resource_data,
@@ -156,11 +161,12 @@ class GenericConfigRpcManager(agent_base.AgentBaseRPCManager):
Returns: None
"""
self._send_event(context,
resource_data,
gen_cfg_const.EVENT_CLEAR_HEALTHMONITOR,
resource_data['nfds'][0]['vmid'])
LOG.info(_LI("Received clear health monitor api for nfds:"
"%(nfds)s"),
{'nfds': resource_data['nfds']})
event_key = resource_data['nfds'][0]['vmid']
poll_event_id = gen_cfg_const.EVENT_CONFIGURE_HEALTHMONITOR
self.sc.stop_poll_event(event_key, poll_event_id)
class GenericConfigEventHandler(agent_base.AgentBaseEventHandler,
@@ -237,6 +243,46 @@ class GenericConfigEventHandler(agent_base.AgentBaseEventHandler,
LOG.error(msg)
return
def send_periodic_hm_notification(self, ev, nfd, result, notification_id):
ev_copy = copy.deepcopy(ev)
ev_copy.data["context"]["notification_data"] = {}
ev_copy.data["context"]["context"]["nfp_context"]["id"] = (
notification_id)
ev_copy.data['context']['context']['nfd_id'] = nfd.get('vmid')
notification_data = self._prepare_notification_data(ev_copy, result)
self.notify._notification(notification_data)
def handle_periodic_hm(self, ev, result):
resource_data = ev.data['resource_data']
nfd = ev.data["resource_data"]['nfds'][0]
periodic_polling_reason = nfd["periodic_polling_reason"]
if result == common_const.FAILED:
"""If health monitoring fails continuously for MAX_FAIL_COUNT times
send fail notification to orchestrator
"""
resource_data['fail_count'] = resource_data.get('fail_count') + 1
if (resource_data.get('fail_count') >=
gen_cfg_const.MAX_FAIL_COUNT):
# REVISIT(Shishir): Remove stateful logic from here,
# need to come up with stateless logic.
if periodic_polling_reason == (
gen_cfg_const.DEVICE_TO_BECOME_DOWN):
notification_id = gen_cfg_const.DEVICE_NOT_REACHABLE
self.send_periodic_hm_notification(ev, nfd, result,
notification_id)
nfd["periodic_polling_reason"] = (
gen_cfg_const.DEVICE_TO_BECOME_UP)
elif result == common_const.SUCCESS:
"""set fail_count to 0 if it had failed earlier even once
"""
resource_data['fail_count'] = 0
if periodic_polling_reason == gen_cfg_const.DEVICE_TO_BECOME_UP:
notification_id = gen_cfg_const.DEVICE_REACHABLE
self.send_periodic_hm_notification(ev, nfd, result,
notification_id)
nfd["periodic_polling_reason"] = (
gen_cfg_const.DEVICE_TO_BECOME_DOWN)
def _process_event(self, ev):
LOG.debug(" Handling event %s " % (ev.data))
# Process single request data blob
@@ -274,38 +320,26 @@ class GenericConfigEventHandler(agent_base.AgentBaseEventHandler,
result == common_const.SUCCESS):
notification_data = self._prepare_notification_data(ev, result)
self.notify._notification(notification_data)
msg = ("VM Health check successful")
LOG.info(msg)
return {'poll': False}
elif resource_data['nfds'][0][
'periodicity'] == gen_cfg_const.FOREVER:
if result == common_const.FAILED:
"""If health monitoring fails continuously for 5 times
send fail notification to orchestrator
"""
resource_data['fail_count'] = resource_data.get(
'fail_count') + 1
if (resource_data.get('fail_count') >=
gen_cfg_const.MAX_FAIL_COUNT):
notification_data = self._prepare_notification_data(
ev,
result)
self.notify._notification(notification_data)
return {'poll': False}
elif result == common_const.SUCCESS:
"""set fail_count to 0 if it had failed earlier even once
"""
resource_data['fail_count'] = 0
elif ev.id == gen_cfg_const.EVENT_CLEAR_HEALTHMONITOR:
"""Stop current poll event. event.key is vmid which will stop
that particular service vm's health monitor
"""
notification_data = self._prepare_notification_data(ev, result)
self.notify._notification(notification_data)
return {'poll': False}
elif resource_data['nfds'][0]['periodicity'] == (
gen_cfg_const.FOREVER):
ev.data["context"]["resource"] = gen_cfg_const.PERIODIC_HM
self.handle_periodic_hm(ev, result)
else:
"""For other events, irrespective of result send notification"""
notification_data = self._prepare_notification_data(ev, result)
self.notify._notification(notification_data)
def prepare_notification_result(self, result):
if result in common_const.SUCCESS:
data = {'status_code': common_const.SUCCESS}
else:
data = {'status_code': common_const.FAILURE,
'error_msg': result}
return data
def _prepare_notification_data(self, ev, result):
"""Prepare notification data as expected by config agent
@@ -326,12 +360,7 @@ class GenericConfigEventHandler(agent_base.AgentBaseEventHandler,
service_type = agent_info['resource_type']
resource = agent_info['resource']
-if result in common_const.SUCCESS:
-data = {'status_code': common_const.SUCCESS}
-else:
-data = {'status_code': common_const.FAILURE,
-'error_msg': result}
+data = self.prepare_notification_result(result)
msg = {'info': {'service_type': service_type,
'context': context},
'notification': [{'resource': resource,

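A note on the result handling above: prepare_notification_result tests
'result in common_const.SUCCESS', which is substring membership rather than
equality, so any result string that is not a substring of 'SUCCESS' falls
into the failure branch. A standalone restatement showing the payload shapes
(the error text is hypothetical):

SUCCESS, FAILURE = 'SUCCESS', 'FAILURE'

def prepare_notification_result(result):
    if result in SUCCESS:  # substring check, as in the committed code
        return {'status_code': SUCCESS}
    return {'status_code': FAILURE, 'error_msg': result}

assert prepare_notification_result('SUCCESS') == {'status_code': 'SUCCESS'}
assert prepare_notification_result('health check timed out') == {
    'status_code': 'FAILURE', 'error_msg': 'health check timed out'}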

@@ -19,10 +19,17 @@ EVENT_CONFIGURE_HEALTHMONITOR = 'CONFIGURE_HEALTHMONITOR'
EVENT_CLEAR_HEALTHMONITOR = 'CLEAR_HEALTHMONITOR'
# REVISIT: Need to make this configurable
-MAX_FAIL_COUNT = 5
+MAX_FAIL_COUNT = 28 # 5 secs delay * 28 = 140 secs
INITIAL = 'initial'
FOREVER = 'forever'
DEVICE_TO_BECOME_DOWN = 'DEVICE_TO_BECOME_DOWN'
DEVICE_TO_BECOME_UP = 'DEVICE_TO_BECOME_UP'
PERIODIC_HM = 'periodic_healthmonitor'
DEVICE_NOT_REACHABLE = 'PERIODIC_HM_DEVICE_NOT_REACHABLE'
DEVICE_REACHABLE = 'PERIODIC_HM_DEVICE_REACHABLE'
# POLLING EVENTS SPACING AND MAXRETRIES
-EVENT_CONFIGURE_HEALTHMONITOR_SPACING = 10
-EVENT_CONFIGURE_HEALTHMONITOR_MAXRETRY = 50
+EVENT_CONFIGURE_HEALTHMONITOR_SPACING = 10 # unit in sec.
+EVENT_CONFIGURE_HEALTHMONITOR_MAXRETRY = 100
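
Worked out, these constants bound the two monitoring phases. The ~5 s
periodic check delay comes from the comment on MAX_FAIL_COUNT; the delay
itself is driver-side and not part of this diff:

periodic_down_after = 28 * 5      # MAX_FAIL_COUNT checks, ~5 s apart: ~140 s
initial_give_up_after = 100 * 10  # MAXRETRY * SPACING: up to 1000 s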


@@ -83,7 +83,7 @@ class ConfiguratorUtils(object):
return driver_objects
-def load_agents(self):
+def load_agents(self, pkgs):
"""Load all the agents inside pkg.
@param pkg : package


@@ -422,7 +422,7 @@ def get_configurator_module_instance(sc, conf):
conf_utils = utils.ConfiguratorUtils(conf)
# Loads all the service agents under AGENT_PKG module path
-cm.imported_sas = conf_utils.load_agents()
+cm.imported_sas = conf_utils.load_agents(const.AGENTS_PKG)
msg = ("Configurator loaded service agents from %s location."
% (cm.imported_sas))
LOG.info(msg)


@@ -12,6 +12,7 @@
import mock
import subprocess
import unittest
from neutron.tests import base
@@ -129,6 +130,7 @@ class GenericConfigRpcManagerTestCase(base.BaseTestCase):
self._test_event_creation(const.EVENT_CONFIGURE_HEALTHMONITOR)
@unittest.skip('not implemented yet')
def test_clear_hm_genericconfigrpcmanager(self):
""" Implements test case for clear healthmonitor method
of generic config agent RPCmanager.


@@ -341,7 +341,8 @@ class DeviceOrchestratorTestCase(unittest.TestCase):
'key': self.event.key}
orig_event_data['tenant_id'] = self.event.data[
'resource_owner_context']['admin_tenant_id']
-ndo_handler.perform_health_check(self.event)
+orig_event_data['periodicity'] = 'initial'
+ndo_handler.perform_initial_health_check(self.event)
ndo_handler.configurator_rpc.create_network_function_device_config.\
assert_called_with(orig_event_data, param_req)


@@ -38,6 +38,11 @@ CREATE = "create"
UPDATE = "update"
DELETE = "delete"
SUCCESS = 'SUCCESS'
FOREVER = 'forever'
INITIAL = 'initial'
ACTIVE_PORT = "ACTIVE"
STANDBY_PORT = "STANDBY"
MASTER_PORT = "MASTER"
@@ -74,6 +79,9 @@ MAXIMUM_INTERFACES = 'maximum_interfaces'
SUPPORTS_SHARING = 'supports_device_sharing'
SUPPORTS_HOTPLUG = 'supports_hotplug'
PERIODIC_HM = 'periodic_healthmonitor'
DEVICE_TO_BECOME_DOWN = 'DEVICE_TO_BECOME_DOWN'
METADATA_SUPPORTED_ATTRIBUTES = [MAXIMUM_INTERFACES,
SUPPORTS_SHARING,
SUPPORTS_HOTPLUG]
@@ -115,13 +123,14 @@ DELETE_USER_CONFIG_IN_PROGRESS_SPACING = 10
DELETE_USER_CONFIG_IN_PROGRESS_MAXRETRY = 20
CHECK_USER_CONFIG_COMPLETE_SPACING = 10
-CHECK_USER_CONFIG_COMPLETE_MAXRETRY = 20
+CHECK_USER_CONFIG_COMPLETE_MAXRETRY = 40
PULL_NOTIFICATIONS_SPACING = 10
#nfp_node_deriver_config
-SERVICE_CREATE_TIMEOUT = 900
-SERVICE_DELETE_TIMEOUT = 300
+# all units in sec.
+SERVICE_CREATE_TIMEOUT = 1500
+SERVICE_DELETE_TIMEOUT = 600
# heat stack creation timeout
STACK_ACTION_WAIT_TIME = 300


@@ -114,7 +114,8 @@ def get_network_function_info(device_data, resource_type):
nfd['svc_mgmt_fixed_ip'] = mgmt_ip
if resource_type == const.HEALTHMONITOR_RESOURCE:
-nfd['periodicity'] = 'initial'
+nfd['periodicity'] = device_data.get('periodicity')
nfd['periodic_polling_reason'] = const.DEVICE_TO_BECOME_DOWN
nfd['vmid'] = device_data['id']
config['config'][0]['resource'] = const.HEALTHMONITOR_RESOURCE
return config
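
For a periodic request, the health-monitor descriptor built above comes out
roughly as below; 'periodicity' now flows in through device_data instead of
being hard-coded to 'initial'. All values are hypothetical:

nfd = {
    'svc_mgmt_fixed_ip': '192.168.20.7',  # management IP of the service VM
    'periodicity': 'forever',             # or 'initial' for the one-shot check
    'periodic_polling_reason': 'DEVICE_TO_BECOME_DOWN',
    'vmid': '3f2a9c10',                   # network function device id
}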


@@ -498,6 +498,21 @@ class NfpController(nfp_launcher.NfpLauncher, NfpService):
# Send to the distributor process.
self.pipe_send(self._pipe, event)
def stop_poll_event(self, key, id):
"""To stop the running poll event
:param key: key of polling event
:param id: id of polling event
"""
key = key + ":" + id
event = self.new_event(id='STOP_POLL_EVENT', data={'key': key})
event.desc.type = nfp_event.POLL_EVENT
event.desc.flag = nfp_event.POLL_EVENT_STOP
if self.PROCESS_TYPE == "worker":
self.pipe_send(self._pipe, event)
else:
self._manager.process_events([event])
def stash_event(self, event):
"""To stash an event.
@@ -629,6 +644,8 @@ def controller_init(conf, nfp_controller):
def nfp_modules_post_init(conf, nfp_modules, nfp_controller):
for module in nfp_modules:
try:
namespace = module.__name__.split(".")[-1]
nfp_logging.store_logging_context(namespace=namespace)
module.nfp_module_post_init(nfp_controller, conf)
except AttributeError:
message = ("(module - %s) - does not implement"

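The agent-side caller of this new API appears earlier in this commit, in
clear_healthmonitor; the calling pattern is:

# From within the agent (mirrors clear_healthmonitor above); the controller
# keys the poll internally as key + ":" + id.
event_key = resource_data['nfds'][0]['vmid']
self.sc.stop_poll_event(event_key,
                        gen_cfg_const.EVENT_CONFIGURE_HEALTHMONITOR)

On the distributor side (the event-manager changes below), the stop event
clears poll_desc on the cached poll event; the next poll timeout then fails
its 'assert event.desc.poll_desc' check, so the event is no longer
dispatched and polling ends.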

@@ -32,6 +32,7 @@ EVENT_EXPIRED = 'event_expired'
EVENT_NEW = 'new_event'
EVENT_COMPLETE = 'event_done'
EVENT_ACK = 'event_ack'
POLL_EVENT_STOP = 'poll_event_stop'
"""Sequencer status. """
SequencerEmpty = nfp_seq.SequencerEmpty


@@ -45,6 +45,10 @@ def IS_EVENT_GRAPH(event):
return event.desc.graph
def IS_POLL_EVENT_STOP(event):
return event.desc.type == nfp_event.POLL_EVENT and (
event.desc.flag == nfp_event.POLL_EVENT_STOP)
"""Manages the forked childs.
Invoked periodically, compares the alive childs with
@@ -252,8 +256,19 @@ class NfpResourceManager(NfpProcessManager, NfpEventManager):
self._event_sequencer.release(event.binding_key, event)
self._graph_event_complete(event)
def _stop_poll_event(self, event):
try:
poll_event = self._event_cache[event.data['key']]
poll_event.desc.poll_desc = None
except KeyError:
message = "(event - uuid=%s) - polling event not in cache" % (
event.data['key'])
LOG.debug(message)
def _non_schedule_event(self, event):
if event.desc.type == nfp_event.POLL_EVENT:
if IS_POLL_EVENT_STOP(event):
self._stop_poll_event(event)
elif event.desc.type == nfp_event.POLL_EVENT:
message = "(event - %s) - polling for event, spacing(%d)" % (
event.identify(), event.desc.poll_desc.spacing)
LOG.debug(message)
@@ -266,6 +281,9 @@ class NfpResourceManager(NfpProcessManager, NfpEventManager):
event.desc.worker = self._resource_map.keys()[0]
self._event_cache[ref_uuid] = event
cached_event = self._event_cache[ref_uuid]
cached_event.desc.poll_desc = event.desc.poll_desc
self._controller.poll_add(
event,
event.desc.poll_desc.spacing,
@@ -407,7 +425,9 @@ class NfpResourceManager(NfpProcessManager, NfpEventManager):
message = "(event - %s) - timedout" % (event.identify())
LOG.debug(message)
try:
assert event.desc.poll_desc
ref_event = self._event_cache[event.desc.poll_desc.ref]
assert ref_event.desc.poll_desc
evmanager = self._get_event_manager(ref_event.desc.worker)
assert evmanager
evmanager.dispatch_event(


@@ -60,10 +60,15 @@ def events_init(controller, config, device_orchestrator):
'DELETE_CONFIGURATION_COMPLETED',
'DEVICE_BEING_DELETED',
'DEVICE_NOT_REACHABLE',
-'DEVICE_CONFIGURATION_FAILED', 'PERFORM_HEALTH_CHECK',
+'DEVICE_CONFIGURATION_FAILED',
'PLUG_INTERFACES', 'UNPLUG_INTERFACES',
'UPDATE_DEVICE_CONFIG_PARAMETERS',
-'DEVICE_CONFIG_PARAMETERS_UPDATED']
+'DEVICE_CONFIG_PARAMETERS_UPDATED',
+'PERIODIC_HM_DEVICE_REACHABLE',
+'PERIODIC_HM_DEVICE_NOT_REACHABLE',
+'PERFORM_INITIAL_HEALTH_CHECK',
+'PERFORM_PERIODIC_HEALTH_CHECK',
+'PERFORM_CLEAR_HM']
events_to_register = []
for event in events:
events_to_register.append(
@@ -86,8 +91,10 @@ class RpcHandler(object):
self._controller = controller
self.rpc_event_mapping = {
nfp_constants.HEALTHMONITOR_RESOURCE: ['HEALTH_MONITOR_COMPLETE',
-'DEVICE_NOT_REACHABLE',
-'DEVICE_NOT_REACHABLE'],
+'DEVICE_NOT_REACHABLE',
+'DEVICE_NOT_REACHABLE',
+'PERIODIC_HM_DEVICE_REACHABLE',
+'PERIODIC_HM_DEVICE_NOT_REACHABLE', ],
nfp_constants.GENERIC_CONFIG: ['DEVICE_CONFIGURED',
'DELETE_CONFIGURATION_COMPLETED',
'DEVICE_CONFIGURATION_FAILED'],
@@ -116,6 +123,15 @@ class RpcHandler(object):
self._controller.post_event(ev)
self._log_event_created(event_id, event_data)
def handle_periodic_hm_resource(self, result):
if result == nfp_constants.SUCCESS:
event_id = self.rpc_event_mapping[
nfp_constants.HEALTHMONITOR_RESOURCE][3]
else:
event_id = self.rpc_event_mapping[
nfp_constants.HEALTHMONITOR_RESOURCE][4]
return event_id
# RPC APIs status notification from Configurator
def network_function_notification(self, context, notification_data):
info = notification_data.get('info')
@@ -129,11 +145,16 @@ class RpcHandler(object):
resource = response.get('resource')
data = response.get('data')
result = data.get('status_code')
-if resource != nfp_constants.HEALTHMONITOR_RESOURCE:
+if resource not in [nfp_constants.HEALTHMONITOR_RESOURCE,
+nfp_constants.PERIODIC_HM]:
resource = nfp_constants.GENERIC_CONFIG
is_delete_request = True if operation == 'delete' else False
if resource == nfp_constants.PERIODIC_HM:
event_id = self.handle_periodic_hm_resource(result)
break
if is_delete_request:
event_id = self.rpc_event_mapping[resource][1]
else:
@@ -235,7 +256,10 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
event_handler_mapping = {
"CREATE_NETWORK_FUNCTION_DEVICE": (
self.create_network_function_device),
"PERFORM_HEALTH_CHECK": self.perform_health_check,
"PERFORM_INITIAL_HEALTH_CHECK": self.perform_initial_health_check,
"PERFORM_PERIODIC_HEALTH_CHECK":
self.perform_periodic_health_check,
"PERFORM_CLEAR_HM": self.perform_clear_hm,
"DEVICE_UP": self.device_up,
"PLUG_INTERFACES": self.plug_interfaces_fast,
"DEVICE_HEALTHY": self.plug_interfaces,
@@ -253,6 +277,10 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
"DELETE_DEVICE": self.delete_device,
"DELETE_CONFIGURATION": self.delete_device_configuration,
"DEVICE_NOT_REACHABLE": self.handle_device_not_reachable,
"PERIODIC_HM_DEVICE_REACHABLE": (
self.periodic_hm_handle_device_reachable),
"PERIODIC_HM_DEVICE_NOT_REACHABLE": (
self.periodic_hm_handle_device_not_reachable),
"PLUG_INTERFACE_FAILED": self.handle_plug_interface_failed,
"DEVICE_CONFIGURATION_FAILED": self.handle_device_config_failed,
"DEVICE_ERROR": self.handle_device_create_error,
@@ -664,9 +692,9 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
key=nf_id + nfi_id,
data=nfp_context)
-hc_event = self._controller.new_event(id="PERFORM_HEALTH_CHECK",
-key=nf_id + nfi_id,
-data=nfp_context)
+hc_event = self._controller.new_event(
+id="PERFORM_INITIAL_HEALTH_CHECK", key=nf_id + nfi_id,
+data=nfp_context)
plug_int_event = self._controller.new_event(id="PLUG_INTERFACES",
key=nf_id + nfi_id,
@@ -718,6 +746,7 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
def _post_configure_device_graph(self, nfp_context, serialize=False):
nf_id = nfp_context['network_function']['id']
nfi_id = nfp_context['network_function_instance']['id']
binding_key = nfp_context['service_details'][
'service_vendor'].lower() + nf_id
device_configure_event = self._controller.new_event(
@@ -740,19 +769,27 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
id='CONFIGURATION_COMPLETE',
key=nf_id,
data=nfp_context)
device_periodic_hm_event = self._controller.new_event(
id='PERFORM_PERIODIC_HEALTH_CHECK',
key=nf_id + nfi_id,
data=nfp_context)
# Start periodic health monitor after device configuration
GRAPH = ({
device_periodic_hm_event: [device_configured_event],
device_configured_event: [device_configure_event,
user_config_event],
user_config_event: [check_heat_config]})
-self._controller.post_graph(GRAPH, device_configured_event,
+self._controller.post_graph(GRAPH, device_periodic_hm_event,
graph_str='DEVICE_CONFIGURATION_GRAPH')
def device_up(self, event, serialize_config=False):
nfp_context = event.data
-# Get the results of PLUG_INTERFACES & PERFORM_HEALTH_CHECK events
-# results.
+# Get the results of PLUG_INTERFACES & PERFORM_INITIAL_HEALTH_CHECK
+# events results.
nf_id = nfp_context['network_function']['id']
nfi_id = nfp_context['network_function_instance']['id']
event_key = nf_id + nfi_id
@@ -790,7 +827,8 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
serialize=serialize_config)
self._controller.event_complete(event)
-def perform_health_check(self, event):
+def prepare_health_check_device_info(self, event, periodicity):
nfp_context = event.data
service_details = nfp_context['service_details']
@@ -810,11 +848,69 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
'mgmt_ip_address': mgmt_ip_address,
'service_details': service_details,
'network_function_id': network_function['id'],
'periodicity': periodicity,
'network_function_instance_id': network_function_instance['id'],
'nfp_context': {'event_desc': nfp_context['event_desc'],
-'id': event.id, 'key': event.key}
+'id': event.id, 'key': event.key},
}
return device, orchestration_driver
def perform_clear_hm(self, event):
nfp_context = event.data
network_function = nfp_context['network_function']
service_details = nfp_context['service_details']
orchestration_driver = self._get_orchestration_driver(
service_details['service_vendor'])
nfp_context['event_desc'] = event.desc.to_dict()
device = {
'id': nfp_context['network_function_device_id'],
'tenant_id': nfp_context['tenant_id'],
'mgmt_ip_address': nfp_context['mgmt_ip_address'],
'service_details': service_details,
'network_function_id': network_function['id'],
'network_function_instance_id': nfp_context[
'network_function_instance_id'],
'nfp_context': {'event_desc': nfp_context['event_desc'],
'id': event.id, 'key': event.key},
}
clear_hm_req = (
orchestration_driver.get_network_function_device_config(
device, nfp_constants.HEALTHMONITOR_RESOURCE))
if not clear_hm_req:
self._controller.event_complete(event, result="FAILED")
return None
self.configurator_rpc.delete_network_function_device_config(
device,
clear_hm_req)
LOG.debug("Clear HM RPC sent to configurator for device: "
"%s with parameters: %s" % (
device['id'], clear_hm_req))
self._controller.event_complete(event, result="SUCCESS")
def perform_periodic_health_check(self, event):
device, orchestration_driver = (
self.prepare_health_check_device_info(event,
nfp_constants.FOREVER))
hm_req = (
orchestration_driver.get_network_function_device_config(
device, nfp_constants.HEALTHMONITOR_RESOURCE))
if not hm_req:
self._controller.event_complete(event, result="FAILED")
return None
self.configurator_rpc.create_network_function_device_config(device,
hm_req)
LOG.debug("Health Check RPC sent to configurator for device: "
"%s with health check parameters: %s" % (
device['id'], hm_req))
self._controller.event_complete(event, result="SUCCESS")
def perform_initial_health_check(self, event):
device, orchestration_driver = (
self.prepare_health_check_device_info(event,
nfp_constants.INITIAL))
hm_req = (
orchestration_driver.get_network_function_device_config(
device, nfp_constants.HEALTHMONITOR_RESOURCE))
@@ -899,11 +995,11 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
nfp_context = event.data['nfp_context']
# Invoke event_complete for original event which is
-# PERFORM_HEALTH_CHECK
+# PERFORM_INITIAL_HEALTH_CHECK
event_desc = nfp_context.pop('event_desc')
nfp_context.pop('id')
key = nfp_context.pop('key')
-event = self._controller.new_event(id="PERFORM_HEALTH_CHECK",
+event = self._controller.new_event(id="PERFORM_INITIAL_HEALTH_CHECK",
key=key, desc_dict=event_desc)
self._controller.event_complete(event, result=result)
@@ -1292,6 +1388,18 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
event_data=device)
self.health_monitor_complete(event, result='FAILED')
def periodic_hm_handle_device_reachable(self, event):
device = event.data
status = nfp_constants.ACTIVE
desc = 'Device is ACTIVE'
self._update_network_function_device_db(device, status, desc)
def periodic_hm_handle_device_not_reachable(self, event):
device = event.data
status = nfp_constants.ERROR
desc = 'Device not reachable, Health Check Failed'
self._update_network_function_device_db(device, status, desc)
def handle_device_config_failed(self, event):
nfp_context = event.data['nfp_context']