Rework notification messages to remove instance tags
Rework the pull status and subscription notifications to remove the top-level
instance tags that were returned with the notification. These tags were
originally introduced to identify which PTP instance a given notification was
associated with. Updates to the ORAN standard have clarified that the instance
identification should instead be included in the Resource Address field.

This commit includes the following changes:
- Update the notification format to place the instance identifier in the
  Resource Address instead of a top-level tag (default behaviour).
- Provide a helm override to enable the legacy notification format with
  top-level instance tags for compatibility. This can be enabled by setting
  the override "notification_format=legacy".
- Ensure the GM clockClass is correctly reported for both BC and GM configs.
- Changes result in new images for notificationclient-base and
  notificationservice-base-v2.
- Pull status notifications are now delivered as a list of dicts when there
  are multiple instances in a request. The "overall sync status" message is
  delivered as a dict because it only ever has a single notification.
- Push notifications are still delivered as individual dicts for each
  notification; this behaviour is unchanged.

Test plan:
Pass: Verify ptp-notification builds and deploys
Pass: Verify container image builds
Pass: Verify ptp-notification operations (pull status, subscribe, list, delete)
Pass: Verify standard and legacy format output for each notification type

Partial-Bug: 2089035

Signed-off-by: Cole Walker <cole.walker@windriver.com>
Change-Id: Ied6674a02b41ed31079a291fc9bace74d95d467a
parent c027954e2a
commit bbeef95744
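Before the diff, a sketch of the two payload shapes this change distinguishes. The field names follow the v2 event structure visible in the hunks below; the UUID, node name, instance name, and state values are invented for illustration.

# Illustrative payloads only; values are examples, not captured output.

# Legacy format (notification_format=legacy): the PTP instance name is a
# top-level tag wrapping the event body.
legacy_notification = {
    'ptp4l-legacy': {
        'id': '3fa85f64-5717-4562-b3fc-2c963f66afa6',
        'specversion': '1.0',
        'source': '/sync/ptp-status/lock-state',
        'time': '2024-01-01T12:00:00123456Z',
        'data': {
            'values': [{
                'ResourceAddress': '/./controller-0/sync/ptp-status/lock-state',
                'value': 'Locked',
            }],
        },
    },
}

# Standard format: no wrapper tag; the instance name is carried inside the
# Resource Address path instead. Pull status responses deliver a list of
# such dicts when several instances are queried.
standard_notification = {
    'id': '3fa85f64-5717-4562-b3fc-2c963f66afa6',
    'specversion': '1.0',
    'source': '/sync/ptp-status/lock-state',
    'time': '2024-01-01T12:00:00123456Z',
    'data': {
        'values': [{
            'ResourceAddress': '/./controller-0/ptp4l-legacy/sync/ptp-status/lock-state',
            'value': 'Locked',
        }],
    },
}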
@@ -176,6 +176,8 @@ spec:
           value: "{{ .Values.ptptrackingv2.log_level }}"
         - name: CONTROL_TIMEOUT
           value: "{{ .Values.ptptrackingv2.control_timeout }}"
+        - name: NOTIFICATION_FORMAT
+          value: "{{ .Values.ptptrackingv2.notification_format }}"
         command: ["python3", "/mnt/ptptracking_start_v2.py"]
 {{- if .Values.ptptrackingv2.endpoint.liveness }}
         livenessProbe:

@@ -103,6 +103,7 @@ ptptracking:
 ptptrackingv2:
   enabled: True
   imagePullSecrets: default-registry-key
+  notification_format: "standard"
   ptp4lSocket: /var/run/ptp4l-ptp4l-legacy
   ptp4lServiceName: True
   ptp4lClockClassLockedList: "6,7,135"

@@ -9,6 +9,7 @@ RUN apt-get -y update \
         gcc \
         python3-dev \
         python3 \
+        curl \
     && apt-get -y clean \
     && rm -rf /var/lib/apt/lists/*
 RUN pip3 install --user pecan \
@@ -1,17 +1,16 @@
 #
-# Copyright (c) 2021-2023 Wind River Systems, Inc.
+# Copyright (c) 2021-2024 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
 
 import json
+import logging
 import re
+from datetime import datetime
 
 import requests
-import logging
-from datetime import datetime
-from notificationclientsdk.common.helpers import constants
-from notificationclientsdk.common.helpers import log_helper
+from notificationclientsdk.common.helpers import constants, log_helper
 from notificationclientsdk.exception import client_exception
 
 LOG = logging.getLogger(__name__)
@@ -33,13 +32,36 @@ def notify(subscriptioninfo, notification, timeout=2, retry=3):
                                      timeout=timeout)
             response.raise_for_status()
         else:
-            # version 2
-            for item in notification:
-                data = format_notification_data(subscriptioninfo, {item: notification[item]})
-                data = json.dumps(data)
-                response = requests.post(url, data=data, headers=headers,
-                                         timeout=timeout)
-                response.raise_for_status()
+            if isinstance(notification, list):
+                # List-type notification response format
+                LOG.debug("Formatting subscription response: list")
+                # Post notification for each list item
+                for item in notification:
+                    data = json.dumps(item)
+                    LOG.info("Notification to post %s", (data))
+                    response = requests.post(url, data=data, headers=headers,
+                                             timeout=timeout)
+                    response.raise_for_status()
+            else:
+                # Dict type notification response format
+                LOG.debug("Formatting subscription response: dict")
+                if notification.get('id', None):
+                    # Not a nested dict, post the data
+                    data = json.dumps(notification)
+                    LOG.info("Notification to post %s", (data))
+                    response = requests.post(url, data=data, headers=headers,
+                                             timeout=timeout)
+                    response.raise_for_status()
+                else:
+                    for item in notification:
+                        # Nested dict with instance tags, post each item
+                        data = format_notification_data(subscriptioninfo, {item: notification[item]})
+                        data = json.dumps(data)
+                        LOG.info("Notification to post %s", (data))
+                        response = requests.post(url, data=data, headers=headers,
+                                                 timeout=timeout)
+                        response.raise_for_status()
 
         if notification == {}:
             if hasattr(subscriptioninfo, 'ResourceType'):
                 resource = "{'ResourceType':'" + \
@@ -64,6 +86,7 @@ def notify(subscriptioninfo, notification, timeout=2, retry=3):
             raise errt
         except requests.exceptions.RequestException as ex:
             LOG.warning("Failed to notify due to: {0}".format(str(ex)))
+            LOG.warning(" %s", (notification))
             raise ex
         except requests.exceptions.HTTPError as ex:
             LOG.warning("Failed to notify due to: {0}".format(str(ex)))
@@ -74,8 +97,11 @@ def notify(subscriptioninfo, notification, timeout=2, retry=3):
 
     return result
 
 
 def format_notification_data(subscriptioninfo, notification):
+    if isinstance(notification, list):
+        return notification
 
+    # Formatting for legacy notification
     if hasattr(subscriptioninfo, 'ResourceType'):
         LOG.debug("format_notification_data: Found v1 subscription, "
                   "no formatting required.")

@@ -112,7 +138,7 @@ def format_notification_data(subscriptioninfo, notification):
                 float(this_delivery_time)).strftime('%Y-%m-%dT%H:%M:%S%fZ')
             instance['time'] = format_time
         else:
-            raise Exception("format_notification_data: No valid source "
-                            "address found in notification")
+            LOG.warning("format_notification_data: No valid source "
+                        "address found in notification")
     LOG.debug("format_notification_data: Added parent key for client "
               "consumption: %s" % formatted_notification)
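The reworked notify() above distinguishes three payload shapes. A runnable sketch of just that dispatch logic, with the HTTP post replaced by a print and the payloads invented:

# Standalone sketch of the notify() dispatch above; assumes the same three
# payload shapes and substitutes printing for the HTTP post.

def dispatch(notification):
    if isinstance(notification, list):
        # Standard format: one post per event in the list
        for item in notification:
            print("post item:", item['id'])
    elif notification.get('id', None):
        # A single flat event dict: post it as-is
        print("post single:", notification['id'])
    else:
        # Legacy nested dict: one post per instance tag
        for tag in notification:
            print("post instance", tag, ":", notification[tag]['id'])


dispatch([{'id': 'a1'}, {'id': 'a2'}])      # standard list
dispatch({'id': 'b1'})                      # flat dict (overall sync status)
dispatch({'ptp4l-legacy': {'id': 'c1'}})    # legacy instance-tagged dict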
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2021-2023 Wind River Systems, Inc.
+# Copyright (c) 2021-2024 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -48,34 +48,47 @@ class NotificationHandler(NotificationHandlerBase):
         resource_address = None
         try:
             self.notification_lock.acquire()
+            LOG.info("Notification handler notification_info %s", notification_info)
             subscription_repo = SubscriptionRepo(autocommit=True)
-            resource_type = notification_info.get('ResourceType', None)
-            # Get nodename from resource address
-            if resource_type:
-                node_name = notification_info.get('ResourceQualifier', {}).get('NodeName', None)
-                if not resource_type:
-                    raise Exception("abnormal notification@{0}".format(node_name))
-                if not resource_type in self.__supported_resource_types:
-                    raise Exception(
-                        "notification with unsupported resource type:{0}".format(resource_type))
-                this_delivery_time = notification_info['EventTimestamp']
-                # Get subscriptions from DB to deliver notification to
-                entries = subscription_repo.get(Status=1, ResourceType=resource_type)
-            else:
-                parent_key = list(notification_info.keys())[0]
-                source = notification_info[parent_key].get('source', None)
-                values = notification_info[parent_key].get('data', {}).get('values', [])
-                resource_address = values[0].get('ResourceAddress', None)
-                this_delivery_time = notification_info[parent_key].get('time')
-                if not resource_address:
-                    raise Exception("No resource address in notification source".format(source))
-                _, node_name, _, _, _ = subscription_helper.parse_resource_address(resource_address)
-                # Get subscriptions from DB to deliver notification to.
-                # Unable to filter on resource_address here because resource_address may contain
-                # either an unknown node name (ie. controller-0) or a '/./' resulting in entries
-                # being missed. Instead, filter these v2 subscriptions in the for loop below once
-                # the resource path has been obtained.
-                entries = subscription_repo.get(Status=1)
+            if isinstance(notification_info, dict):
+                resource_type = notification_info.get('ResourceType', None)
+                # Get nodename from resource address
+                if resource_type:
+                    node_name = notification_info.get('ResourceQualifier', {}).get('NodeName', None)
+                    if not resource_type:
+                        raise Exception("abnormal notification@{0}".format(node_name))
+                    if not resource_type in self.__supported_resource_types:
+                        raise Exception(
+                            "notification with unsupported resource type:{0}".format(resource_type))
+                    this_delivery_time = notification_info['EventTimestamp']
+                    # Get subscriptions from DB to deliver notification to
+                    entries = subscription_repo.get(Status=1, ResourceType=resource_type)
+                else:
+                    parent_key = list(notification_info.keys())[0]
+                    source = notification_info[parent_key].get('source', None)
+                    values = notification_info[parent_key].get('data', {}).get('values', [])
+                    resource_address = values[0].get('ResourceAddress', None)
+                    this_delivery_time = notification_info[parent_key].get('time')
+                    if not resource_address:
+                        raise Exception("No resource address in notification source".format(source))
+                    _, node_name, _, _, _ = subscription_helper.parse_resource_address(resource_address)
+                    # Get subscriptions from DB to deliver notification to.
+                    # Unable to filter on resource_address here because resource_address may contain
+                    # either an unknown node name (ie. controller-0) or a '/./' resulting in entries
+                    # being missed. Instead, filter these v2 subscriptions in the for loop below once
+                    # the resource path has been obtained.
+                    entries = subscription_repo.get(Status=1)
+            elif isinstance(notification_info, list):
+                LOG.debug("Handle list")
+                for item in notification_info:
+                    source = item.get('source', None)
+                    values = item.get('data', {}).get('values', [])
+                    resource_address = values[0].get('ResourceAddress', None)
+                    this_delivery_time = item.get('time')
+                    if not resource_address:
+                        raise Exception("No resource address in notification source".format(source))
+                    _, node_name, _, _, _ = subscription_helper.parse_resource_address(resource_address)
+                    entries = subscription_repo.get(Status=1)
 
             for entry in entries:
                 subscriptionid = entry.SubscriptionId
@@ -106,10 +119,11 @@ class NotificationHandler(NotificationHandlerBase):
                         entry.SubscriptionId))
                     continue
 
-                subscription_helper.notify(subscription_dto2, notification_info)
-                LOG.debug("notification is delivered successfully to {0}".format(
+                notification_to_send = self.__format_timestamps(notification_info)
+                LOG.info("Sending notification to subscribers: %s", notification_to_send)
+                subscription_helper.notify(subscription_dto2, notification_to_send)
+                LOG.info("notification is delivered successfully to {0}".format(
                     entry.SubscriptionId))
 
                 self.update_delivery_timestamp(node_name, subscriptionid, this_delivery_time)
 
         except Exception as ex:
@@ -129,6 +143,28 @@ class NotificationHandler(NotificationHandlerBase):
             if not subscription_repo:
                 del subscription_repo
 
+    def __format_timestamps(self, ptpstatus):
+        if isinstance(ptpstatus, list):
+            LOG.debug("Format timestamps for standard subscription response")
+            for item in ptpstatus:
+                item['time'] = datetime.fromtimestamp(
+                    item['time']).strftime(
+                    '%Y-%m-%dT%H:%M:%S%fZ')
+        elif isinstance(ptpstatus, dict):
+            LOG.debug("Format timestamps for response with instance tags")
+            try:
+                for item in ptpstatus:
+                    # Change time from float to ascii format
+                    ptpstatus[item]['time'] = datetime.fromtimestamp(
+                        ptpstatus[item]['time']).strftime(
+                        '%Y-%m-%dT%H:%M:%S%fZ')
+            except (TypeError, AttributeError):
+                LOG.debug("Format timestamp for single notification")
+                ptpstatus['time'] = datetime.fromtimestamp(
+                    ptpstatus['time']).strftime(
+                    '%Y-%m-%dT%H:%M:%S%fZ')
+        return ptpstatus
+
     def __get_latest_delivery_timestamp(self, node_name, subscriptionid):
         last_delivery_stat = self.notification_stat.get(node_name, {}).get(subscriptionid, {})
         last_delivery_time = last_delivery_stat.get('EventTimestamp', None)
@@ -152,6 +188,7 @@ class NotificationHandler(NotificationHandlerBase):
         else:
             last_delivery_stat = self.notification_stat.get(node_name, {}).get(subscriptionid, {})
             last_delivery_time = last_delivery_stat.get('EventTimestamp', None)
+            LOG.debug("last_delivery_time %s this_delivery_time %s" % (last_delivery_time, this_delivery_time))
             if (last_delivery_time and last_delivery_time >= this_delivery_time):
                 return
             last_delivery_stat['EventTimestamp'] = this_delivery_time
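A note on the '%Y-%m-%dT%H:%M:%S%fZ' pattern used throughout these hunks: %f appends the microseconds directly after the seconds, with no '.' separator. A quick check (the exact output depends on the host timezone, since fromtimestamp() converts to local time):

from datetime import datetime

# Sample float timestamp; shown for a UTC-configured host.
ts = 1704110400.123456
print(datetime.fromtimestamp(ts).strftime('%Y-%m-%dT%H:%M:%S%fZ'))
# e.g. 2024-01-01T12:00:00123456Z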
@@ -1,30 +1,29 @@
 #
-# Copyright (c) 2021-2023 Wind River Systems, Inc.
+# Copyright (c) 2021-2024 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
 
-import oslo_messaging
-import logging
 import json
-import kombu
-import requests
+import logging
 from datetime import datetime
 
-from notificationclientsdk.client.notificationservice \
-    import NotificationServiceClient
-from notificationclientsdk.common.helpers import subscription_helper
-from notificationclientsdk.common.helpers import log_helper
-from notificationclientsdk.common.helpers import constants
+import kombu
+import oslo_messaging
+import requests
+from notificationclientsdk.client.notificationservice import \
+    NotificationServiceClient
+from notificationclientsdk.common.helpers import (constants, log_helper,
+                                                  subscription_helper)
 from notificationclientsdk.common.helpers.nodeinfo_helper import NodeInfoHelper
+from notificationclientsdk.exception import client_exception
 from notificationclientsdk.model.dto.resourcetype import ResourceType
-from notificationclientsdk.model.dto.subscription import SubscriptionInfoV1
-from notificationclientsdk.model.dto.subscription import SubscriptionInfoV2
-from notificationclientsdk.model.orm.subscription \
-    import Subscription as SubscriptionOrm
+from notificationclientsdk.model.dto.subscription import (SubscriptionInfoV1,
+                                                          SubscriptionInfoV2)
+from notificationclientsdk.model.orm.subscription import \
+    Subscription as SubscriptionOrm
 from notificationclientsdk.repository.node_repo import NodeRepo
 from notificationclientsdk.repository.subscription_repo import SubscriptionRepo
-from notificationclientsdk.exception import client_exception
 
 LOG = logging.getLogger(__name__)
 log_helper.config_logger(LOG)
@@ -272,18 +271,33 @@ class PtpService(object):
             timestamp = ptpstatus[constants.PTP_V1_KEY].get(
                 'EventTimestamp', None)
             ptpstatus = ptpstatus[constants.PTP_V1_KEY]
-        else:
-            for item in ptpstatus:
-                timestamp = ptpstatus[item].get('time', None)
-                # Change time from float to ascii format
-                ptpstatus[item]['time'] = datetime.fromtimestamp(
-                    ptpstatus[item]['time']).strftime(
-                    '%Y-%m-%dT%H:%M:%S%fZ')
+        elif isinstance(ptpstatus, list):
+            LOG.debug("Format timestamps for standard subscription response")
+            for item in ptpstatus:
+                timestamp = item.get('time', None)
+                item['time'] = datetime.fromtimestamp(
+                    item['time']).strftime(
+                    '%Y-%m-%dT%H:%M:%S%fZ')
+        elif isinstance(ptpstatus, dict):
+            LOG.debug("Format timestamps for response with instance tags")
+            try:
+                for item in ptpstatus:
+                    timestamp = ptpstatus[item].get('time', None)
+                    # Change time from float to ascii format
+                    ptpstatus[item]['time'] = datetime.fromtimestamp(
+                        ptpstatus[item]['time']).strftime(
+                        '%Y-%m-%dT%H:%M:%S%fZ')
+            except (TypeError, AttributeError):
+                LOG.debug("Format timestamp for single notification")
+                timestamp = ptpstatus.get('time', None)
+                ptpstatus['time'] = datetime.fromtimestamp(
+                    ptpstatus['time']).strftime(
+                    '%Y-%m-%dT%H:%M:%S%fZ')
 
         nodes[default_node_name] = ptpstatus
 
         subscription_orm = SubscriptionOrm(**subscription_dto.to_orm())
         subscription_orm.InitialDeliveryTimestamp = timestamp
+        LOG.debug("Setting initial delivery timestamp %s", timestamp)
         entry = self.subscription_repo.add(subscription_orm)
 
         # Delivery the initial notification of ptp status
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2022 Wind River Systems, Inc.
+# Copyright (c) 2022-2024 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -36,7 +36,7 @@ class ResourceAddressController(object):
                 self.resource_address)
             if nodename == constants.WILDCARD_CURRENT_NODE:
                 nodename = notification_control.get_residing_nodename()
-            LOG.debug('Nodename to query: %s' % nodename)
+            LOG.info('Nodename to query: %s' % nodename)
             if not notification_control.in_service_nodenames(nodename):
                 LOG.warning("Node {} is not available".format(nodename))
                 raise client_exception.NodeNotAvailable(nodename)
@@ -46,11 +46,23 @@ class ResourceAddressController(object):
             ptpservice = PtpService(notification_control)
             ptpstatus = ptpservice.query(nodename,
                                          self.resource_address, optional)
-            LOG.debug('Got ptpstatus: %s' % ptpstatus)
-            for item in ptpstatus:
-                ptpstatus[item]['time'] = datetime.fromtimestamp(
-                    ptpstatus[item]['time']).strftime(
-                    '%Y-%m-%dT%H:%M:%S%fZ')
+            LOG.info('Received ptpstatus: %s', ptpstatus)
+            if isinstance(ptpstatus, dict):
+                try:
+                    for item in ptpstatus:
+                        ptpstatus[item]['time'] = datetime.fromtimestamp(
+                            ptpstatus[item]['time']).strftime(
+                            '%Y-%m-%dT%H:%M:%S%fZ')
+                except TypeError:
+                    # ptpstatus does not have instance tags
+                    ptpstatus['time'] = datetime.fromtimestamp(
+                        ptpstatus['time']).strftime(
+                        '%Y-%m-%dT%H:%M:%S%fZ')
+            elif isinstance(ptpstatus, list):
+                for item in ptpstatus:
+                    item['time'] = datetime.fromtimestamp(
+                        item['time']).strftime(
+                        '%Y-%m-%dT%H:%M:%S%fZ')
             return ptpstatus
         except client_exception.NodeNotAvailable as ex:
             LOG.warning("{0}".format(str(ex)))
@@ -66,7 +78,8 @@ class ResourceAddressController(object):
             # raise ex
             abort(400)
         except TypeError as ex:
-            LOG.error("Resource {0} not found on {1}".format(self.resource_address, nodename))
+            LOG.error("Resource {0} not found on {1}, error: {2}".format(
+                self.resource_address, nodename, ex))
             abort(404)
         except HTTPServerError as ex:
             LOG.error("Server side error:{0},{1}".format(type(ex), str(ex)))
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2021-2023 Wind River Systems, Inc.
+# Copyright (c) 2021-2024 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -16,7 +16,7 @@ import logging
 
 LOG = logging.getLogger(__name__)
 
-from trackingfunctionsdk.common.helpers import log_helper
+from trackingfunctionsdk.common.helpers import log_helper, constants
 
 log_helper.config_logger(LOG)
 
@@ -32,7 +32,8 @@ class PtpEventProducer(object):
         pass
 
     def QueryStatus(self, ctx, **rpc_kwargs):
-        LOG.debug("PtpEventProducer QueryStatus called %s" % rpc_kwargs)
+        # This is where the "GET" commands run through
+        LOG.info("PtpEventProducer QueryStatus called %s" % rpc_kwargs)
         if self.handler:
             return self.handler.query_status(**rpc_kwargs)
         else:
@@ -85,16 +86,16 @@ class PtpEventProducer(object):
             try:
                 self.local_broker_client.cast(
                     topic, 'NotifyStatus', notification=ptpstatus)
-                LOG.debug("Published ptp status:{0}@Topic:{1}".format(ptpstatus, topic))
+                LOG.debug("Published ptp status local:{0}@Topic:{1}".format(ptpstatus, topic))
                 break
             except Exception as ex:
-                LOG.warning("Failed to publish ptp status:{0}@Topic:{1} due to: {2}".format(
+                LOG.warning("Failed to publish ptp status local:{0}@Topic:{1} due to: {2}".format(
                     ptpstatus, topic, str(ex)))
                 retry = retry - 1
                 isretrystopped = False if retry > 0 else True
 
         if isretrystopped:
-            LOG.error("Failed to publish ptp status:{0}@Topic:{1}".format(
+            LOG.error("Failed to publish ptp status local:{0}@Topic:{1}".format(
                 ptpstatus, topic))
         return isretrystopped == False
 

@@ -108,16 +109,16 @@ class PtpEventProducer(object):
             try:
                 self.registration_broker_client.cast(
                     topic_all, 'NotifyStatus', notification=ptpstatus)
-                LOG.debug("Published ptp status:{0}@Topic:{1}".format(ptpstatus, topic_all))
+                LOG.debug("Published ptp status all:{0}@Topic:{1}".format(ptpstatus, topic_all))
                 break
             except Exception as ex:
-                LOG.warning("Failed to publish ptp status:{0}@Topic:{1} due to: {2}".format(
+                LOG.warning("Failed to publish ptp status all:{0}@Topic:{1} due to: {2}".format(
                     ptpstatus, topic_all, str(ex)))
                 retry = retry - 1
                 isretrystopped = False if retry > 0 else True
 
         if isretrystopped:
-            LOG.error("Failed to publish ptp status:{0}@Topic:{1}".format(
+            LOG.error("Failed to publish ptp status all:{0}@Topic:{1}".format(
                 ptpstatus, topic_all))
         return isretrystopped == False
 
@@ -1,10 +1,11 @@
 #
-# Copyright (c) 2021-2023 Wind River Systems, Inc.
+# Copyright (c) 2021-2024 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
 
 from os import path
+import os
 
 # phc states constants
 FREERUN_PHC_STATE = "Freerun"

@@ -41,6 +42,9 @@ GNSS_DPLL_1 = "DPLL1"
 
 UTC_OFFSET = "37"
 
+# Notification formatting
+NOTIFICATION_FORMAT = os.environ.get("NOTIFICATION_FORMAT", 'standard')
+
 if path.exists('/ptp/linuxptp/ptpinstance'):
     LINUXPTP_CONFIG_PATH = '/ptp/linuxptp/ptpinstance/'
 elif path.exists('/ptp/ptpinstance'):
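The helm value ptptrackingv2.notification_format flows into the container as the NOTIFICATION_FORMAT environment variable (see the deployment-template hunk at the top of this change) and is read once at import time here. A minimal sketch of how the toggle is consumed, mirroring the publisher branches later in this change; pick_payload and its arguments are hypothetical names for illustration:

import os

NOTIFICATION_FORMAT = os.environ.get("NOTIFICATION_FORMAT", 'standard')


def pick_payload(new_status_list, legacy_status_dict):
    # 'standard' publishes the list shape; anything else falls back to the
    # legacy instance-tagged dict.
    if NOTIFICATION_FORMAT == 'standard':
        return new_status_list
    return legacy_status_dict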
@@ -15,6 +15,7 @@ from glob import glob
 from trackingfunctionsdk.common.helpers import log_helper
 from trackingfunctionsdk.common.helpers import constants
 from trackingfunctionsdk.model.dto.osclockstate import OsClockState
+from trackingfunctionsdk.common.helpers import ptpsync as utils
 
 LOG = logging.getLogger(__name__)
 log_helper.config_logger(LOG)
@@ -304,12 +305,17 @@ class OsClockMonitor:
 
     def set_os_clock_state(self):
         offset_int = int(self.offset)
+        _, _, phc2sys, _ = \
+            utils.check_critical_resources('', self.phc2sys_instance)
         if offset_int > self.phc2sys_tolerance_high or \
                 offset_int < self.phc2sys_tolerance_low:
             LOG.warning("PHC2SYS offset is outside of tolerance")
             self._state = OsClockState.Freerun
+        elif not phc2sys:
+            LOG.warning("Phc2sys instance %s is not running", self.phc2sys_instance)
+            self._state = OsClockState.Freerun
         else:
-            LOG.info("PHC2SYS offset is within tolerance")
+            LOG.info("PHC2SYS offset is within tolerance: %s", offset_int)
             self._state = OsClockState.Locked
 
         # Perform an extra check for HA Phc2sys to ensure we have a source interface
@@ -341,7 +347,7 @@ class OsClockMonitor:
             self._state = constants.HOLDOVER_PHC_STATE
         elif previous_sync_state == constants.HOLDOVER_PHC_STATE and \
                 time_in_holdover < max_holdover_time:
-            LOG.debug("OS Clock: Time in holdover is %s "
+            LOG.info("OS Clock: Time in holdover is %s "
                      "Max time in holdover is %s"
                      % (time_in_holdover, max_holdover_time))
             self._state = constants.HOLDOVER_PHC_STATE
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2021-2023 Wind River Systems, Inc.
+# Copyright (c) 2021-2024 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -83,7 +83,7 @@ class PtpMonitor:
 
     def set_ptp_clock_class(self):
         try:
-            clock_class = self.pmc_query_results['clockClass']
+            clock_class = self.pmc_query_results['gm.ClockClass']
             # Reset retry counter upon getting clock class
             self._clock_class_retry = 3
         except KeyError:
@@ -132,11 +132,12 @@ class PtpMonitor:
         # max holdover time is calculated to be in a 'safety' zone
         max_holdover_time = (self.holdover_time - self.freq * 2)
 
-        pmc, ptp4l, phc2sys, ptp4lconf = \
+        pmc, ptp4l, _, ptp4lconf = \
             utils.check_critical_resources(self.ptp4l_service_name,
                                            self.phc2sys_service_name)
         # run pmc command if preconditions met
-        if pmc and ptp4l and phc2sys and ptp4lconf:
+        # Removed check for phc2sys, ptp4l status should not depend on it
+        if pmc and ptp4l and ptp4lconf:
             self.pmc_query_results, total_ptp_keywords, port_count = \
                 self.ptpsync()
             try:

@@ -147,12 +148,11 @@ class PtpMonitor:
                 sync_state = previous_sync_state
         else:
             LOG.warning("Missing critical resource: "
-                        "PMC %s PTP4L %s PHC2SYS %s PTP4LCONF %s"
-                        % (pmc, ptp4l, phc2sys, ptp4lconf))
+                        "PMC %s PTP4L %s PTP4LCONF %s"
+                        % (pmc, ptp4l, ptp4lconf))
             sync_state = PtpState.Freerun
-        # determine if transition into holdover mode (cannot be in holdover if
-        # system clock is not in sync)
-        if sync_state == PtpState.Freerun and phc2sys:
+        # determine if transition into holdover mode
+        if sync_state == PtpState.Freerun:
             if previous_sync_state in [constants.UNKNOWN_PHC_STATE,
                                        PtpState.Freerun]:
                 sync_state = PtpState.Freerun
@@ -1,6 +1,6 @@
 #! /usr/bin/python3
 #
-# Copyright (c) 2021-2023 Wind River Systems, Inc.
+# Copyright (c) 2021-2024 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -114,7 +114,12 @@ def parse_resource_address(resource_address):
     return clusterName, nodeName, resource_path
 
 
-def format_resource_address(node_name, resource):
+def format_resource_address(node_name, resource, instance=None):
     # Return a resource_address
-    resource_address = '/./' + node_name + resource
+    resource_address = '/./' + node_name
+    if instance:
+        resource_address = resource_address + '/' + instance + resource
+    else:
+        resource_address = resource_address + resource
+    LOG.debug("format_resource_address %s" % resource_address)
     return resource_address
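For illustration, the updated helper called with and without an instance (node and instance names invented):

resource = '/sync/ptp-status/lock-state'

format_resource_address('controller-0', resource)
# -> '/./controller-0/sync/ptp-status/lock-state'              (legacy shape)

format_resource_address('controller-0', resource, 'ptp4l-legacy')
# -> '/./controller-0/ptp4l-legacy/sync/ptp-status/lock-state' (instance in address)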
@@ -117,7 +117,12 @@ class PtpWatcherDefault:
         return lastStatus
 
     def query_status(self, **rpc_kwargs):
+        # Client PULL status requests come through here
+        # Dict is used for legacy notification format
         lastStatus = {}
+        # List is used for standard notification format
+        newStatus = []
 
         resource_address = rpc_kwargs.get('ResourceAddress', None)
         optional = rpc_kwargs.get('optional', None)
         if resource_address:
@@ -137,8 +142,9 @@ class PtpWatcherDefault:
                         constants.SOURCE_SYNC_GNSS_SYNC_STATUS,
                         last_event_time,
                         utils.format_resource_address(nodename,
-                            constants.SOURCE_SYNC_GNSS_SYNC_STATUS),
+                            constants.SOURCE_SYNC_GNSS_SYNC_STATUS, optional),
                         sync_state)
+                    newStatus.append(lastStatus[optional])
                 elif not optional:
                     for config in self.daemon_context['GNSS_INSTANCES']:
                         sync_state = \

@@ -151,8 +157,9 @@ class PtpWatcherDefault:
                             constants.SOURCE_SYNC_GNSS_SYNC_STATUS,
                             last_event_time,
                             utils.format_resource_address(nodename,
-                                constants.SOURCE_SYNC_GNSS_SYNC_STATUS),
+                                constants.SOURCE_SYNC_GNSS_SYNC_STATUS, config),
                             sync_state)
+                        newStatus.append(lastStatus[config])
                 else:
                     lastStatus = None
                 self.watcher.gnsstracker_context_lock.release()

@@ -170,8 +177,9 @@ class PtpWatcherDefault:
                         constants.SOURCE_SYNC_PTP_CLOCK_CLASS,
                         last_clock_class_event_time,
                         utils.format_resource_address(nodename,
-                            constants.SOURCE_SYNC_PTP_CLOCK_CLASS),
+                            constants.SOURCE_SYNC_PTP_CLOCK_CLASS, optional),
                         clock_class, constants.VALUE_TYPE_METRIC)
+                    newStatus.append(lastStatus[optional])
                 elif not optional:
                     for config in self.daemon_context['PTP4L_INSTANCES']:
                         clock_class = \

@@ -185,8 +193,9 @@ class PtpWatcherDefault:
                             constants.SOURCE_SYNC_PTP_CLOCK_CLASS,
                             last_clock_class_event_time,
                             utils.format_resource_address(nodename,
-                                constants.SOURCE_SYNC_PTP_CLOCK_CLASS),
+                                constants.SOURCE_SYNC_PTP_CLOCK_CLASS, config),
                             clock_class, constants.VALUE_TYPE_METRIC)
+                        newStatus.append(lastStatus[config])
                 else:
                     lastStatus = None
                 self.watcher.ptptracker_context_lock.release()

@@ -204,8 +213,9 @@ class PtpWatcherDefault:
                         constants.SOURCE_SYNC_PTP_LOCK_STATE,
                         last_event_time,
                         utils.format_resource_address(nodename,
-                            constants.SOURCE_SYNC_PTP_LOCK_STATE),
+                            constants.SOURCE_SYNC_PTP_LOCK_STATE, optional),
                         sync_state)
+                    newStatus.append(lastStatus[optional])
                 elif not optional:
                     for config in self.daemon_context['PTP4L_INSTANCES']:
                         sync_state = \

@@ -218,8 +228,9 @@ class PtpWatcherDefault:
                             constants.SOURCE_SYNC_PTP_LOCK_STATE,
                             last_event_time,
                             utils.format_resource_address(nodename,
-                                constants.SOURCE_SYNC_PTP_LOCK_STATE),
+                                constants.SOURCE_SYNC_PTP_LOCK_STATE, config),
                             sync_state)
+                        newStatus.append(lastStatus[config])
                 else:
                     lastStatus = None
                 self.watcher.ptptracker_context_lock.release()

@@ -239,6 +250,7 @@ class PtpWatcherDefault:
                     utils.format_resource_address(nodename,
                         constants.SOURCE_SYNC_OS_CLOCK),
                     sync_state)
+                newStatus.append(lastStatus['os_clock_status'])
             if resource_path == constants.SOURCE_SYNC_SYNC_STATE or \
                     resource_path == constants.SOURCE_SYNC_ALL:
                 self.watcher.overalltracker_context_lock.acquire()
@@ -253,9 +265,23 @@ class PtpWatcherDefault:
                     utils.format_resource_address(nodename,
                         constants.SOURCE_SYNC_SYNC_STATE),
                     sync_state)
-        LOG.debug("query_status: {}".format(lastStatus))
+                if resource_path == constants.SOURCE_SYNC_ALL:
+                    newStatus.append(lastStatus['overall_sync_status'])
+                else:
+                    # Special handling for overall_sync_status
+                    # There will only ever be a single response from
+                    # SOURCE_SYNC_SYNC_STATE.
+                    # Return a dict rather than a list
+                    newStatus = lastStatus['overall_sync_status']
 
-        return lastStatus
+        if constants.NOTIFICATION_FORMAT == 'standard':
+            LOG.info("PULL status returning: %s", newStatus)
+            return newStatus
+        else:
+            LOG.info("PULL status returning: {}".format(lastStatus))
+            return lastStatus
 
     def trigger_delivery(self, **rpc_kwargs):
         self.watcher.forced_publishing = True
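Putting the query_status() change together, these are the pull-status return shapes, sketched with invented values and event bodies trimmed to the fields relevant here:

# Standard format, multiple instances in the request -> a list of event
# dicts, with the instance name inside each ResourceAddress.
standard_pull = [
    {'source': '/sync/ptp-status/lock-state',
     'data': {'values': [{'ResourceAddress':
         '/./controller-0/ptp4l-legacy/sync/ptp-status/lock-state'}]}},
    {'source': '/sync/ptp-status/clock-class',
     'data': {'values': [{'ResourceAddress':
         '/./controller-0/ptp4l-legacy/sync/ptp-status/clock-class'}]}},
]

# Standard format, overall sync status -> a single dict, never a list.
standard_overall = {
    'source': '/sync/sync-status/sync-state',
    'data': {'values': [{'ResourceAddress':
        '/./controller-0/sync/sync-status/sync-state'}]},
}

# Legacy format -> a dict keyed by the instance / status tag.
legacy_pull = {
    'ptp4l-legacy': {
        'source': '/sync/ptp-status/lock-state',
        'data': {'values': [{'ResourceAddress':
            '/./controller-0/sync/ptp-status/lock-state'}]},
    },
}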
@@ -383,6 +409,16 @@ class PtpWatcherDefault:
         notificationservice_health = HealthServer()
         notificationservice_health.run()
 
+        # Need to give the notificationclient sidecar pods
+        # a few seconds to re-connect to the newly started
+        # RabbitMQ. If we don't wait here, the initial
+        # status deliveries can be sent before the clients
+        # are connected and they will never receive the
+        # notification
+        # This approach can probably be improved by
+        # checking the RabbitMQ endpoint
+        time.sleep(10)
+
         while True:
             # announce the location
             forced = self.forced_publishing
@@ -526,6 +562,7 @@ class PtpWatcherDefault:
         last_event_time = self.osclocktracker_context.get('last_event_time',
                                                           time.time())
         lastStatus = {}
+        newStatus = []
 
         new_event, sync_state, new_event_time = self.__get_os_clock_status(
             holdover_time, freq, sync_state, last_event_time)
@@ -559,13 +596,23 @@ class PtpWatcherDefault:
                     ]
                 }
             }
-        self.ptpeventproducer.publish_status(
-            lastStatus, constants.SOURCE_SYNC_OS_CLOCK)
-        self.ptpeventproducer.publish_status(
-            lastStatus, constants.SOURCE_SYNC_ALL)
+        newStatus.append(lastStatus['os_clock_status'])
+        if constants.NOTIFICATION_FORMAT == 'standard':
+            self.ptpeventproducer.publish_status(
+                newStatus, constants.SOURCE_SYNC_OS_CLOCK)
+            self.ptpeventproducer.publish_status(
+                newStatus, constants.SOURCE_SYNC_ALL)
+        else:
+            self.ptpeventproducer.publish_status(
+                lastStatus, constants.SOURCE_SYNC_OS_CLOCK)
+            self.ptpeventproducer.publish_status(
+                lastStatus, constants.SOURCE_SYNC_ALL)
 
     def __publish_overall_sync_status(self, forced=False):
         lastStatus = {}
+        newStatus = []
         holdover_time = float(self.overalltracker_context['holdover_seconds'])
         freq = float(self.overalltracker_context['poll_freq_seconds'])
         sync_state = self.overalltracker_context.get('sync_state', 'Unknown')
@@ -605,14 +652,24 @@ class PtpWatcherDefault:
                     ]
                 }
             }
-        self.ptpeventproducer.publish_status(
-            lastStatus, constants.SOURCE_SYNC_SYNC_STATE)
-        self.ptpeventproducer.publish_status(
-            lastStatus, constants.SOURCE_SYNC_ALL)
+        newStatus.append(lastStatus['overall_sync_status'])
+        if constants.NOTIFICATION_FORMAT == 'standard':
+            self.ptpeventproducer.publish_status(
+                newStatus, constants.SOURCE_SYNC_SYNC_STATE)
+            self.ptpeventproducer.publish_status(
+                newStatus, constants.SOURCE_SYNC_ALL)
+        else:
+            self.ptpeventproducer.publish_status(
+                lastStatus, constants.SOURCE_SYNC_SYNC_STATE)
+            self.ptpeventproducer.publish_status(
+                lastStatus, constants.SOURCE_SYNC_ALL)
 
     def __publish_gnss_status(self, forced=False):
-        lastStatus = {}
         for gnss in self.observer_list:
+            # Ensure that status structs are cleared between each iteration
+            lastStatus = {}
+            newStatus = []
             holdover_time = float(
                 self.gnsstracker_context[
                     gnss.ts2phc_service_name]['holdover_seconds'])
@@ -643,7 +700,9 @@ class PtpWatcherDefault:
 
             # publish new event in API version v2 format
             resource_address = utils.format_resource_address(
-                self.node_name, constants.SOURCE_SYNC_GNSS_SYNC_STATUS)
+                self.node_name,
+                constants.SOURCE_SYNC_GNSS_SYNC_STATUS,
+                gnss.ts2phc_service_name)
             lastStatus[gnss.ts2phc_service_name] = {
                 'id': uuidutils.generate_uuid(),
                 'specversion': constants.SPEC_VERSION,
@@ -663,15 +722,27 @@ class PtpWatcherDefault:
                     ]
                 }
             }
-            self.ptpeventproducer.publish_status(
-                lastStatus, constants.SOURCE_SYNC_GNSS_SYNC_STATUS)
-            self.ptpeventproducer.publish_status(
-                lastStatus, constants.SOURCE_SYNC_ALL)
+            newStatus.append(lastStatus[gnss.ts2phc_service_name])
+            if constants.NOTIFICATION_FORMAT == 'standard':
+                self.ptpeventproducer.publish_status(
+                    newStatus, constants.SOURCE_SYNC_GNSS_SYNC_STATUS)
+                self.ptpeventproducer.publish_status(
+                    newStatus, constants.SOURCE_SYNC_ALL)
+            else:
+                self.ptpeventproducer.publish_status(
+                    lastStatus, constants.SOURCE_SYNC_GNSS_SYNC_STATUS)
+                self.ptpeventproducer.publish_status(
+                    lastStatus, constants.SOURCE_SYNC_ALL)
 
     def __publish_ptpstatus(self, forced=False):
-        lastStatus = {}
-        lastClockClassStatus = {}
         for ptp_monitor in self.ptp_monitor_list:
+            # Ensure that status structs are cleared between each iteration
+            newStatus = []
+            newClockClassStatus = []
+            lastStatus = {}
+            lastClockClassStatus = {}
 
             holdover_time = float(self.ptptracker_context[
                 ptp_monitor.ptp4l_service_name]['holdover_seconds'])
             freq = float(self.ptptracker_context[
|
|||||||
lastStatus = {}
|
lastStatus = {}
|
||||||
# publish new event in API version v2 format
|
# publish new event in API version v2 format
|
||||||
resource_address = utils.format_resource_address(
|
resource_address = utils.format_resource_address(
|
||||||
self.node_name, constants.SOURCE_SYNC_PTP_LOCK_STATE)
|
self.node_name,
|
||||||
|
constants.SOURCE_SYNC_PTP_LOCK_STATE,
|
||||||
|
ptp_monitor.ptp4l_service_name)
|
||||||
lastStatus[ptp_monitor.ptp4l_service_name] = {
|
lastStatus[ptp_monitor.ptp4l_service_name] = {
|
||||||
'id': uuidutils.generate_uuid(),
|
'id': uuidutils.generate_uuid(),
|
||||||
'specversion': constants.SPEC_VERSION,
|
'specversion': constants.SPEC_VERSION,
|
||||||
@@ -737,10 +810,18 @@ class PtpWatcherDefault:
                     }
                 }
             self.ptptracker_context_lock.release()
-            self.ptpeventproducer.publish_status(
-                lastStatus, constants.SOURCE_SYNC_PTP_LOCK_STATE)
-            self.ptpeventproducer.publish_status(
-                lastStatus, constants.SOURCE_SYNC_ALL)
+            newStatus.append(lastStatus[ptp_monitor.ptp4l_service_name])
+
+            if constants.NOTIFICATION_FORMAT == 'standard':
+                self.ptpeventproducer.publish_status(
+                    newStatus, constants.SOURCE_SYNC_PTP_LOCK_STATE)
+                self.ptpeventproducer.publish_status(
+                    newStatus, constants.SOURCE_SYNC_ALL)
+            else:
+                self.ptpeventproducer.publish_status(
+                    lastStatus, constants.SOURCE_SYNC_PTP_LOCK_STATE)
+                self.ptpeventproducer.publish_status(
+                    lastStatus, constants.SOURCE_SYNC_ALL)
 
             if new_clock_class_event or forced:
                 # update context
@@ -752,7 +833,9 @@ class PtpWatcherDefault:
                     = clock_class_event_time
 
                 resource_address = utils.format_resource_address(
-                    self.node_name, constants.SOURCE_SYNC_PTP_CLOCK_CLASS)
+                    self.node_name,
+                    constants.SOURCE_SYNC_PTP_CLOCK_CLASS,
+                    ptp_monitor.ptp4l_service_name)
 
                 lastClockClassStatus[ptp_monitor.ptp4l_service_name] = {
                     'id': uuidutils.generate_uuid(),
@@ -772,14 +855,23 @@ class PtpWatcherDefault:
                         ]
                     }
                 }
+                newClockClassStatus.append(lastClockClassStatus[ptp_monitor.ptp4l_service_name])
                 self.ptptracker_context_lock.release()
                 LOG.info("Publishing clockClass for %s: %s"
                          % (ptp_monitor.ptp4l_service_name, clock_class))
-                self.ptpeventproducer.publish_status(
-                    lastClockClassStatus,
-                    constants.SOURCE_SYNC_PTP_CLOCK_CLASS)
-                self.ptpeventproducer.publish_status(lastClockClassStatus,
-                                                     constants.SOURCE_SYNC_ALL)
+                if constants.NOTIFICATION_FORMAT == 'standard':
+                    self.ptpeventproducer.publish_status(
+                        newClockClassStatus,
+                        constants.SOURCE_SYNC_PTP_CLOCK_CLASS)
+                    self.ptpeventproducer.publish_status(newClockClassStatus,
+                                                         constants.SOURCE_SYNC_ALL)
+                else:
+                    self.ptpeventproducer.publish_status(
+                        lastClockClassStatus,
+                        constants.SOURCE_SYNC_PTP_CLOCK_CLASS)
+                    self.ptpeventproducer.publish_status(lastClockClassStatus,
+                                                         constants.SOURCE_SYNC_ALL)
 
 
 class DaemonControl(object):
@@ -85,7 +85,9 @@ class OsClockMonitorTests(unittest.TestCase):
         self.clockmon.get_os_clock_offset()
         assert self.clockmon.offset == '37000000015'
 
-    def test_set_os_closck_state(self):
+    @mock.patch('trackingfunctionsdk.common.helpers.ptpsync.check_critical_resources',
+                side_effect=[b'True'])
+    def test_set_os_clock_state(self, critical_patched):
         self.clockmon = OsClockMonitor(phc2sys_config=phc2sys_test_config, init=False)
         self.clockmon.offset = '37000000015'
         self.clockmon.set_os_clock_state()
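One subtlety in the patched test worth spelling out: set_os_clock_state() unpacks four values from check_critical_resources(), and the mocked return of b'True' satisfies that only because a 4-byte bytes object iterates as four integers:

# Why side_effect=[b'True'] works for a 4-value unpack: bytes of length 4
# yield four integers, so the third slot (phc2sys) receives ord('u') == 117.
_, _, phc2sys, _ = b'True'
print(phc2sys)  # 117, which is truthy, so the 'elif not phc2sys' branch is skipped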