Rename to monasca, setup for tox, removed legacy bits

Removed manual tests which are no longer valid with a modern mini-mon
Removed debian packaging bits; all distribution is via pypi now
Minor pep8 fixes

Change-Id: I1f2fc4d0ad6375f4c39446f9627247945066e4ad
Tim Kuhlman 2014-07-16 13:51:48 -06:00
parent 2065cb1bad
commit e6e54c6576
34 changed files with 75 additions and 119 deletions

.gitignore vendored

@@ -1,2 +1,3 @@
 .idea
 *.pyc
+.tox

.gitreview Normal file

@@ -0,0 +1,4 @@
[gerrit]
host=review.openstack.org
port=29418
project=stackforge/monasca-notification.git


@@ -1,4 +1,4 @@
-Mon-notification Style Commandments
+Monasca-notification Style Commandments
 ===============================================
 Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/


@@ -51,7 +51,7 @@ sent out multiple times. To minimize this risk a number of techniques are used:
 time has been set and exceeded when a new finished alarm comes in the offset is updated regardless of gaps.
 # Operation
-Yaml config file by default is in '/etc/mon/notification.yaml', a sample is in this project.
+Yaml config file by default is in '/etc/monasca/notification.yaml', a sample is in this project.
 ## Monitoring
 statsd is incorporated into the daemon and will send all stats to localhost on udp port 8125. In many cases the stats
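
Note on the monitoring hook referenced in this README hunk: the daemon uses the python-statsd client (listed in setup.py below), and each metric ultimately leaves the process as a small UDP datagram on port 8125. A protocol-level sketch only, with a hypothetical metric name rather than one the daemon is known to emit:

```python
# Protocol-level illustration only (not daemon code): a statsd counter is a
# small UDP datagram of the form "<metric>:<value>|c". The metric name here
# is hypothetical.
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(b"monasca.notification.alarms_processed:1|c", ("localhost", 8125))
sock.close()
```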

debian/changelog vendored

@@ -1,6 +0,0 @@
mon-notification (0.0.1) precise; urgency=low
* Initial Package creation
-- Tim Kuhlman <tim.kuhlman@hp.com> Thu, 27 Feb 2014 04:12:44 -0600

debian/compat vendored

@@ -1 +0,0 @@
7

debian/control vendored

@@ -1,16 +0,0 @@
Source: mon-notification
Section: python
Priority: optional
Maintainer: HPCloud Monitoring <hpcs-mon@hp.com>
Build-Depends: debhelper (>= 7),
python (>= 2.6.6-3~),
python-setuptools
Standards-Version: 3.9.3
X-Python-Version: >= 2.6
Package: mon-notification
Architecture: all
Section: python
Depends: ${misc:Depends}, ${python:Depends}, libpython2.7, python-pkg-resources, kafka-python, python-yaml, python-mysqldb, python-kazoo, python-statsd
Description: Notification engine for monitoring.
Consumes alarms from Kafka and sends notifications appropriately.

debian/copyright vendored

@@ -1,4 +0,0 @@
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Files: *
Copyright: Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
License: Apache License, Version 2.0

debian/rules vendored

@@ -1,4 +0,0 @@
#!/usr/bin/make -f
%:
dh $@ --with python2


@@ -90,31 +90,31 @@ def main(argv=None):
 config_file = argv[1]
 elif len(argv) > 2:
 print("Usage: " + argv[0] + " <config_file>")
-print("Config file defaults to /etc/mon/notification.yaml")
+print("Config file defaults to /etc/monasca/notification.yaml")
 return 1
 else:
-config_file = '/etc/mon/notification.yaml'
+config_file = '/etc/monasca/notification.yaml'
 config = yaml.load(open(config_file, 'r'))
 # Setup logging
 logging.config.dictConfig(config['logging'])
-#Create the queues
+# Create the queues
 alarms = multiprocessing.Queue(config['queues']['alarms_size'])
 notifications = multiprocessing.Queue(config['queues']['notifications_size']) # [notification_object, ]
 sent_notifications = multiprocessing.Queue(config['queues']['sent_notifications_size']) # [notification_object, ]
 finished = multiprocessing.Queue(config['queues']['finished_size']) # Data is of the form (partition, offset)
-#State Tracker - Used for tracking the progress of fully processed alarms and the zookeeper lock
+# State Tracker - Used for tracking the progress of fully processed alarms and the zookeeper lock
 global tracker # Set to global for use in the cleanup function
 tracker = ZookeeperStateTracker(
 config['zookeeper']['url'], config['kafka']['alarm_topic'], finished, config['zookeeper']['max_offset_lag'])
 tracker.lock(clean_exit) # Only begin if we have the processing lock
 tracker_thread = threading.Thread(target=tracker.run)
-## Define processors
+# Define processors
-#KafkaConsumer
+# KafkaConsumer
 kafka = multiprocessing.Process(
 target=KafkaConsumer(
 alarms,
@@ -126,7 +126,7 @@ def main(argv=None):
 )
 processors.append(kafka)
-#AlarmProcessors
+# AlarmProcessors
 alarm_processors = []
 for i in range(config['processors']['alarm']['number']):
 alarm_processors.append(multiprocessing.Process(
@@ -143,7 +143,7 @@ def main(argv=None):
 )
 processors.extend(alarm_processors)
-#NotificationProcessors
+# NotificationProcessors
 notification_processors = []
 for i in range(config['processors']['notification']['number']):
 notification_processors.append(multiprocessing.Process(
@@ -156,7 +156,7 @@ def main(argv=None):
 )
 processors.extend(notification_processors)
-#SentNotificationProcessor
+# SentNotificationProcessor
 sent_notification_processor = multiprocessing.Process(
 target=SentNotificationProcessor(
 sent_notifications,
@@ -167,7 +167,7 @@ def main(argv=None):
 )
 processors.append(sent_notification_processor)
-## Start
+# Start
 try:
 log.info('Starting processes')
 for process in processors:
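
Although these hunks only touch comments, they show the shape of main(): bounded multiprocessing queues connect a Kafka consumer to pools of alarm and notification processors. A stripped-down sketch of that fan-out pattern; fake_consumer and fake_worker are placeholders, not monasca-notification code:

```python
# A minimal sketch of the pipeline pattern main() wires up: a bounded
# multiprocessing.Queue connecting a consumer stage to a worker stage.
import multiprocessing


def fake_consumer(queue):
    """Stand-in for the KafkaConsumer stage: feeds work into the queue."""
    for offset in range(10):
        queue.put(('alarm', offset))
    queue.put(None)  # sentinel so the worker knows to stop


def fake_worker(queue):
    """Stand-in for an AlarmProcessor stage: drains the queue."""
    while True:
        item = queue.get()
        if item is None:
            break
        print('processing %r' % (item,))


if __name__ == '__main__':
    alarms = multiprocessing.Queue(256)  # bounded, like config['queues']['alarms_size']
    processes = [multiprocessing.Process(target=fake_consumer, args=(alarms,)),
                 multiprocessing.Process(target=fake_worker, args=(alarms,))]
    for process in processes:
        process.start()
    for process in processes:
        process.join()
```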


@@ -12,5 +12,3 @@
 # implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from base import BaseProcessor


@@ -20,9 +20,9 @@ import MySQLdb
 import statsd
 import time
-from mon_notification.notification import Notification
+from monasca_notification.notification import Notification
-from mon_notification.notification_exceptions import AlarmFormatError
+from monasca_notification.notification_exceptions import AlarmFormatError
-from mon_notification.processors import BaseProcessor
+from monasca_notification.processors.base import BaseProcessor
 log = logging.getLogger(__name__)
@@ -60,7 +60,7 @@ class AlarmProcessor(BaseProcessor):
 for field in expected_fields:
 if field not in alarm:
 raise AlarmFormatError('Alarm data missing field %s' % field)
-if (not 'tenantId' in alarm) or (not 'alarmId' in alarm):
+if ('tenantId' not in alarm) or ('alarmId' not in alarm):
 raise AlarmFormatError
 return alarm
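
For reference, an alarm dict that satisfies the membership checks above looks like the JSON test fixtures removed later in this commit; treat the field list as illustrative rather than the processor's full expected_fields:

```python
# Hypothetical alarm dict based on the removed test fixtures in this commit.
alarm = {
    'tenantId': '0',
    'alarmId': '4',
    'alarmName': 'test Alarm',
    'oldState': 'OK',
    'newState': 'ALARM',
    'stateChangeReason': 'I am alarming!',
    'timestamp': 1394641255,
}

assert 'tenantId' in alarm and 'alarmId' in alarm  # mirrors the check above
```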


@@ -18,7 +18,7 @@ import kafka.consumer
 import logging
 import statsd
-from mon_notification.processors import BaseProcessor
+from monasca_notification.processors.base import BaseProcessor
 log = logging.getLogger(__name__)


@@ -20,7 +20,7 @@ import smtplib
 import statsd
 import time
-from mon_notification.processors import BaseProcessor
+from monasca_notification.processors.base import BaseProcessor
 log = logging.getLogger(__name__)


@@ -18,7 +18,7 @@ import kafka.producer
 import logging
 import statsd
-from mon_notification.processors import BaseProcessor
+from monasca_notification.processors.base import BaseProcessor
 log = logging.getLogger(__name__)


@@ -21,7 +21,7 @@ import Queue
 import statsd
 import time
-from mon_notification import notification_exceptions
+from monasca_notification import notification_exceptions
 log = logging.getLogger(__name__)
@@ -46,10 +46,10 @@ class ZookeeperStateTracker(object):
 self.zookeeper = kazoo.client.KazooClient(url)
 self.zookeeper.start()
-self.topic_path = '/consumers/mon-notification/%s' % topic
+self.topic_path = '/consumers/monasca-notification/%s' % topic
 self.lock_retry_time = 15 # number of seconds to wait for retrying for the lock
-self.lock_path = '/locks/mon-notification/%s' % topic
+self.lock_path = '/locks/monasca-notification/%s' % topic
 self._offsets = None
 # This is a dictionary of sets used for tracking finished offsets when there is a gap and the committed offset
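
The offset and lock znodes move from mon-notification to monasca-notification paths here. A small kazoo sketch of what holding a lock at the new path looks like; this is not the tracker's implementation, and the ZooKeeper address is an assumption in the mini-mon style:

```python
# Illustration of a kazoo lock under the renamed path; 'example-holder' is an
# arbitrary identifier and 192.168.10.10:2181 is an assumed ZooKeeper address.
from kazoo.client import KazooClient
from kazoo.exceptions import LockTimeout

zk = KazooClient(hosts='192.168.10.10:2181')
zk.start()
lock = zk.Lock('/locks/monasca-notification/alarm-state-transitions', 'example-holder')
try:
    lock.acquire(timeout=15)  # roughly the lock_retry_time used above
    print('holding the notification engine lock')
    lock.release()
except LockTimeout:
    print('another notification engine already holds the lock')
zk.stop()
```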


@@ -1,6 +1,6 @@
 kafka:
 url: 192.168.10.10:9092 # or comma seperated list of multiple hosts
-group: mon-notification
+group: monasca-notification
 alarm_topic: alarm-state-transitions
 notification_topic: alarm-notifications
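
The renamed consumer group shows up wherever this file is loaded. A short sketch mirroring the yaml.load(open(...)) call in main.py above; the path is the new default from the README change, and the printed values assume this sample config:

```python
# Mirrors how main.py reads the config file after the rename.
import yaml

with open('/etc/monasca/notification.yaml') as config_file:
    config = yaml.load(config_file)

print(config['kafka']['group'])        # 'monasca-notification' after this change
print(config['kafka']['alarm_topic'])  # 'alarm-state-transitions'
```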


@@ -18,11 +18,11 @@
 import setuptools
 setuptools.setup(
-name="mon-notification",
+name="monasca-notification",
-version="1.0.1",
+version="1.0.0",
 author="Tim Kuhlman",
 author_email="tim.kuhlman@hp.com",
-description="Notification engine used in the monitoring system",
+description="Notification engine used in the monasca monitoring system",
 classifiers=[
 "Development Status :: 5 - Production/Stable",
 "License :: OSI Approved :: Apache Software License",
@@ -30,15 +30,15 @@ setuptools.setup(
 ],
 license="Apache",
 keywords="openstack monitoring email",
-url="https://github.com/hpcloud-mon/mon-notification",
+url="https://github.com/stackforge/monasca-notification",
 # possibly preferable to have the OS precompiled mysql version, python-mysqldb package on Ubuntu
 install_requires=["kafka-python>=0.9.0", "kazoo>=1.3", "MySQL-python", "python-statsd>=1.6.3", "PyYAML"],
 packages=setuptools.find_packages(exclude=['tests']),
 entry_points={
 'console_scripts': [
-'mon-notification = mon_notification.main:main'
+'monasca-notification = monasca_notification.main:main'
 ],
 },
-scripts=['tools/mon_notification_offsets.py'],
+scripts=['tools/monasca_notification_offsets.py'],
 test_suite='nose.collector'
 )
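
With the entry_points rename, installing the package generates a `monasca-notification` wrapper script. Its behavior is roughly equivalent to the following; this illustrates the setuptools console_scripts mechanism and is not project code:

```python
# Rough equivalent of the generated 'monasca-notification' console script.
import sys

from monasca_notification.main import main

if __name__ == '__main__':
    sys.exit(main())
```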


@@ -1 +1,5 @@
+flake8
+pyflakes
+pep8
+nose
 mock>=1.0.1


@@ -1,14 +0,0 @@
Basic testing.
Note that mini-mon is set to use smtp3.hp.com which must be used from within the HP corporate network.
# Load some notifications into the db.
- First edit the last line of test_notifications.sql to add in your email
- From the mysql vm run `mysql -uroot -ppassword mon < sample_notifications.sql`
# Feed alarm transition messages into kafka
- It is helpful to watch the log file, `tail -f /var/log/mon-notification/notification.log`
- If desired edit /etc/mon/notification.yaml to change logging options.
- Then for all of the json files or just some run
`/opt/kafka/bin/kafka-console-producer.sh --broker 192.168.10.10:9092 --topic alarm-state-transitions < *.json`
- Note with the alarm_ttl now implemented you may need to update the timestamp in the json files.
- `python -c "import time; print time.time()"` will give a current timestamp


@@ -1,2 +0,0 @@
not json
{ 'invalid_json': true }


@@ -1,3 +0,0 @@
{"alarm-transitioned":{"tenantId":"0","alarmId":"4","alarmName":"test Alarm","oldState":"OK","newState":"ALARM","stateChangeReason":"I am alarming!","timestamp":1394641255}}
{"alarm-transitioned":{"tenantId":"0","alarmId":"4","alarmName":"test Okay","oldState":"ALARM","newState":"OK","stateChangeReason":"Okay, I am not alarming.","timestamp":1394641255}}
{"alarm-transitioned":{"tenantId":"0","alarmId":"4","alarmName":"test Undetermined","oldState":"OK","newState":"UNDETERMINED","stateChangeReason":"Well, maybe I am alarming.","timestamp":1394641255}}


@@ -1,3 +0,0 @@
{"alarm-transitioned":{"tenantId":"0","alarmId":"0","alarmName":"test Alarm","oldState":"OK","newState":"ALARM","stateChangeReason":"I am alarming!","timestamp":1395346830}}
{"alarm-transitioned":{"tenantId":"0","alarmId":"1","alarmName":"test Okay","oldState":"ALARM","newState":"OK","stateChangeReason":"Okay, I am not alarming.","timestamp":1395346830}}
{"alarm-transitioned":{"tenantId":"0","alarmId":"2","alarmName":"test Undetermined","oldState":"OK","newState":"UNDETERMINED","stateChangeReason":"Well, maybe I am alarming.","timestamp":1395346830}}


@@ -1,17 +0,0 @@
INSERT INTO alarm (id, tenant_id, name, state, created_at, updated_at) VALUES ('0', '0', 'test Alarm', 'ALARM', NOW(), NOW());
INSERT INTO alarm (id, tenant_id, name, state, created_at, updated_at) VALUES ('1', '0', 'test Okay', 'OK', NOW(), NOW());
INSERT INTO alarm (id, tenant_id, name, state, created_at, updated_at) VALUES ('2', '0', 'test Undetermined', 'UNDETERMINED', NOW(), NOW());
INSERT INTO notification_method (id, tenant_id, name, type, address, created_at, updated_at)
VALUES ('0', '0', 'test notification', 'EMAIL', 'me@here.com', NOW(), NOW());
INSERT INTO alarm_action (alarm_id, alarm_state, action_id) VALUES ('0', 'OK', '0');
INSERT INTO alarm_action (alarm_id, alarm_state, action_id) VALUES ('0', 'ALARM', '0');
INSERT INTO alarm_action (alarm_id, alarm_state, action_id) VALUES ('0', 'UNDETERMINED', '0');
INSERT INTO alarm_action (alarm_id, alarm_state, action_id) VALUES ('1', 'OK', '0');
INSERT INTO alarm_action (alarm_id, alarm_state, action_id) VALUES ('1', 'ALARM', '0');
INSERT INTO alarm_action (alarm_id, alarm_state, action_id) VALUES ('1', 'UNDETERMINED', '0');
INSERT INTO alarm_action (alarm_id, alarm_state, action_id) VALUES ('2', 'OK', '0');
INSERT INTO alarm_action (alarm_id, alarm_state, action_id) VALUES ('2', 'ALARM', '0');
INSERT INTO alarm_action (alarm_id, alarm_state, action_id) VALUES ('2', 'UNDETERMINED', '0');


@@ -23,8 +23,8 @@ import Queue
 import time
 import unittest
-from mon_notification.notification import Notification
+from monasca_notification.notification import Notification
-from mon_notification.processors import alarm_processor
+from monasca_notification.processors import alarm_processor
 alarm_tuple = collections.namedtuple('alarm_tuple', ['offset', 'message'])
 message_tuple = collections.namedtuple('message_tuple', ['value'])
@@ -45,7 +45,7 @@ class TestAlarmProcessor(unittest.TestCase):
 return [partition, alarm_tuple(offset, msg_tuple)]
 @mock.patch('MySQLdb.connect')
-@mock.patch('mon_notification.processors.alarm_processor.log')
+@mock.patch('monasca_notification.processors.alarm_processor.log')
 def _run_alarm_processor(self, queue, sql_response, mock_log, mock_mysql):
 """Runs a mocked alarm processor reading from queue while running, returns (queue_message, log_message)
 """


@@ -16,7 +16,7 @@
 """Tests the notification class."""
 import json
-from mon_notification import notification
+from monasca_notification import notification
 def test_json():


@@ -20,8 +20,8 @@ import multiprocessing
 import time
 import unittest
-from mon_notification.notification import Notification
+from monasca_notification.notification import Notification
-from mon_notification.processors import notification_processor
+from monasca_notification.processors import notification_processor
 class TestStateTracker(unittest.TestCase):
@@ -38,8 +38,8 @@ class TestStateTracker(unittest.TestCase):
 'timeout': 60,
 'from_addr': 'hpcs.mon@hp.com'}
-@mock.patch('mon_notification.processors.notification_processor.smtplib')
+@mock.patch('monasca_notification.processors.notification_processor.smtplib')
-@mock.patch('mon_notification.processors.notification_processor.log')
+@mock.patch('monasca_notification.processors.notification_processor.log')
 def _start_processor(self, mock_log, mock_smtp):
 """Start the processor with the proper mocks
 """


@@ -21,7 +21,7 @@ import threading
 import time
 import unittest
-from mon_notification import state_tracker
+from monasca_notification import state_tracker
 class TestStateTracker(unittest.TestCase):


@@ -15,7 +15,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""Used for manually querying/setting offsets associated with the mon_notification daemon.
+"""Used for manually querying/setting offsets associated with the monasca_notification daemon.
 This should only be used in unusual circumstances to force reprocessing or skip alarms.
 """
@@ -25,7 +25,7 @@ import logging
 import sys
 import yaml
-from mon_notification import state_tracker
+from monasca_notification import state_tracker
 def listener():
@@ -36,10 +36,10 @@ def listener():
 def main():
 # Parse args
-parser = argparse.ArgumentParser(description="Query and set(DANGEROUS) mon_notification kafka consumer offsets\n")
+parser = argparse.ArgumentParser(description="Query and set(DANGEROUS) monasca_notification kafka offsets\n")
-parser.add_argument('--config', '-c', default='/etc/mon/notification.yaml', help='Configuration File')
+parser.add_argument('--config', '-c', default='/etc/monasca/notification.yaml', help='Configuration File')
-## Either list or set not both
+# Either list or set not both
 group = parser.add_mutually_exclusive_group(required=True)
 group.add_argument('--list', '-l', action='store_true')
 group.add_argument('--set-offsets', '-s',
@@ -48,7 +48,7 @@ def main():
 args = parser.parse_args()
-# Silence most logging from mon_notification
+# Silence most logging from monasca_notification
 logging.basicConfig(level=logging.CRITICAL)
 # Parse config and setup state tracker
@@ -63,9 +63,9 @@ def main():
 offsets = json.loads(args.set_offsets)
 raw_input("Warning setting offset will affect the behavior of the next notification engine to run.\n" +
 "\tCtrl-C to exit, enter to continue")
-print("All running mon_notification daemons must be shutdown to allow this process to grab the lock.")
+print("All running monasca_notification daemons must be shutdown to allow this process to grab the lock.")
-log = logging.getLogger('mon_notification.state_tracker')
+log = logging.getLogger('monasca_notification.state_tracker')
 log.setLevel(logging.DEBUG)
 tracker.lock(listener)
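
Usage note: after the rename this helper reads `/etc/monasca/notification.yaml` by default, so listing stored offsets looks roughly like `python tools/monasca_notification_offsets.py --list`, while `--set-offsets`/`-s` should only be used with every monasca_notification daemon stopped, as the warning text above says.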

tox.ini Normal file

@@ -0,0 +1,19 @@
[tox]
envlist = py27,pypy,pep8
skipsdist = True
[testenv]
commands = nosetests
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
[testenv:pep8]
commands = flake8
[tox:jenkins]
downloadcache = ~/cache/pip
[flake8]
max-line-length = 120
ignore = F821
exclude=.venv,.git,.tox,dist,*openstack/common*,*egg,build
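
Usage note: with this file in place, running `tox` exercises the py27, pypy and pep8 environments from envlist (installing requirements.txt and test-requirements.txt into each virtualenv and running `nosetests`), and `tox -e pep8` runs only the flake8 check with the 120-character line limit and F821 ignore configured above.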