move to static LOG

There is no reason for this to be an instance-level variable; it
should instead be static.

Change-Id: I47276856b8a0504ce6bf5483b251e48145329f8b
Sean Dague 2014-01-14 07:34:33 -05:00
parent 9567161544
commit e75b996e60
2 changed files with 34 additions and 34 deletions
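
For context, the change applies the standard Python module-level logger
pattern. A minimal before/after sketch (illustrative only, not part of the
commit; the class and method names here are hypothetical):

import logging

# Module-level ("static") logger, created once at import time.
LOG = logging.getLogger('recheckwatchbot')


class Before(object):
    def __init__(self):
        # logging.getLogger() already returns the same singleton for a
        # given name, so caching it on every instance adds an attribute
        # without adding any value.
        self.log = logging.getLogger('recheckwatchbot')

    def greet(self):
        self.log.info('hello')


class After(object):
    def greet(self):
        # One shared logger for the whole module.
        LOG.info('hello')

A side benefit visible in the diff: staticmethods such as
Stream.parse_jenkins_failure, which have no self, can now log through the
shared LOG.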

@@ -49,7 +49,6 @@ import yaml
 
 import irc.bot
 
-
 try:
     import daemon.pidlockfile
     pid_file_module = daemon.pidlockfile
@@ -59,6 +58,8 @@ except Exception:
     import daemon.pidfile
     pid_file_module = daemon.pidfile
 
+LOG = logging.getLogger('recheckwatchbot')
+
 
 class RecheckWatchBot(irc.bot.SingleServerIRCBot):
     def __init__(self, channels, nickname, password, server, port=6667,
@@ -68,29 +69,28 @@ class RecheckWatchBot(irc.bot.SingleServerIRCBot):
         self.channel_list = channels
         self.nickname = nickname
         self.password = password
-        self.log = logging.getLogger('recheckwatchbot')
 
     def on_nicknameinuse(self, c, e):
-        self.log.info('Nick previously in use, recovering.')
+        LOG.info('Nick previously in use, recovering.')
         c.nick(c.get_nickname() + "_")
         c.privmsg("nickserv", "identify %s " % self.password)
         c.privmsg("nickserv", "ghost %s %s" % (self.nickname, self.password))
         c.privmsg("nickserv", "release %s %s" % (self.nickname, self.password))
         time.sleep(1)
         c.nick(self.nickname)
-        self.log.info('Nick previously in use, recovered.')
+        LOG.info('Nick previously in use, recovered.')
 
     def on_welcome(self, c, e):
-        self.log.info('Identifying with IRC server.')
+        LOG.info('Identifying with IRC server.')
         c.privmsg("nickserv", "identify %s " % self.password)
-        self.log.info('Identified with IRC server.')
+        LOG.info('Identified with IRC server.')
         for channel in self.channel_list:
             c.join(channel)
-            self.log.info('Joined channel %s' % channel)
+            LOG.info('Joined channel %s' % channel)
             time.sleep(0.5)
 
     def send(self, channel, msg):
-        self.log.info('Sending "%s" to %s' % (msg, channel))
+        LOG.info('Sending "%s" to %s' % (msg, channel))
         self.connection.privmsg(channel, msg)
         time.sleep(0.5)
@@ -101,7 +101,6 @@ class RecheckWatch(threading.Thread):
         super(RecheckWatch, self).__init__()
         self.ircbot = ircbot
         self.channel_config = channel_config
-        self.log = logging.getLogger('recheckwatchbot')
         self.username = username
         self.queries = queries
         self.host = host
@@ -113,7 +112,7 @@
         msg = '%s change: %s failed tempest with an unrecognized error' % (
             data['change']['project'],
             data['change']['url'])
-        self.log.info('Compiled Message %s: %s' % (channel, msg))
+        LOG.info('Compiled Message %s: %s' % (channel, msg))
         self.ircbot.send(channel, msg)
 
     def error_found(self, channel, data):
@@ -123,7 +122,7 @@
         bug_urls = ['https://bugs.launchpad.net/bugs/%s' % x for x
                     in data['bug_numbers']]
         msg += ' and '.join(bug_urls)
-        self.log.info('Compiled Message %s: %s' % (channel, msg))
+        LOG.info('Compiled Message %s: %s' % (channel, msg))
         self.ircbot.send(channel, msg)
 
     def _read(self, data):
@@ -156,7 +155,7 @@
                 if self.commenting:
                     stream.leave_comment(project, change_id, bug_numbers)
             except Exception:
-                self.log.exception("Uncaught exception processing event.")
+                LOG.exception("Uncaught exception processing event.")
 
 
 class ChannelConfig(object):

@@ -30,7 +30,7 @@ import elastic_recheck.loader as loader
 import elastic_recheck.query_builder as qb
 from elastic_recheck import results
 
 logging.basicConfig()
-
+LOG = logging.getLogger("recheckwatchbot")
 
 ES_URL = "http://logstash.openstack.org/elasticsearch"
@@ -57,9 +57,6 @@
     Monitors gerrit stream looking for tempest-devstack failures.
     """
 
-    log = logging.getLogger("recheckwatchbot")
-
-
     def __init__(self, user, host, key, thread=True):
         port = 29418
         self.gerrit = gerritlib.gerrit.Gerrit(host, user, port, key)
@@ -71,14 +68,20 @@ class Stream(object):
     def parse_jenkins_failure(event):
         """Is this comment a jenkins failure comment."""
         if event.get('type', '') != 'comment-added':
+            LOG.debug("Skipping event type %s" % event.get('type', ''))
             return False
 
         username = event['author'].get('username', '')
         if (username != 'jenkins'):
+            LOG.debug("Skipping comment from %s" %
+                      event['author'].get('username', ''))
             return False
 
         if not ("Build failed. For information on how to proceed" in
                 event['comment']):
+            change = event['change']['number']
+            rev = event['patchSet']['number']
+            LOG.debug("Skipping passing job %s,%s" % (change, rev))
             return False
 
         failed_tests = {}
@@ -94,8 +97,8 @@
         if len(r) == 0:
             raise ResultNotReady()
         else:
-            self.log.debug("Console ready for %s %s,%s" %
-                           (name, change, patch))
+            LOG.debug("Console ready for %s %s,%s" %
+                      (name, change, patch))
 
     def _has_required_files(self, change, patch, name):
         query = qb.files_ready(change, patch)
@@ -124,26 +127,26 @@
                 break
             except ResultNotReady:
-                self.log.debug("Console logs not ready for %s %s,%s" %
-                               (job_name, change_number, patch_number))
+                LOG.debug("Console logs not ready for %s %s,%s" %
+                          (job_name, change_number, patch_number))
                 time.sleep(SLEEP_TIME)
                 continue
             except pyelasticsearch.exceptions.InvalidJsonResponseError:
                 # If ElasticSearch returns an error code, sleep and retry
                 # TODO(jogo): if this works pull out search into a helper
                 # function that does this.
-                self.log.exception(
+                LOG.exception(
                     "Elastic Search not responding on attempt %d" % i)
                 time.sleep(NUMBER_OF_RETRIES)
                 continue
             if i == NUMBER_OF_RETRIES - 1:
                 elapsed = datetime.datetime.now() - started_at
-                self.log.warn("Console logs not available after %ss for %s %s,%s" %
-                              (elapsed, job_name, change_number, patch_number))
+                LOG.warn("Console logs not available after %ss for %s %s,%s" %
+                         (elapsed, job_name, change_number, patch_number))
                 return False
 
-            self.log.debug(
+            LOG.debug(
                 "Found hits for change_number: %s, patch_number: %s"
                 % (change_number, patch_number))
@@ -152,7 +155,7 @@
             for job_name in job_fails:
                 self._has_required_files(
                     change_number, patch_number, job_name)
-            self.log.info(
+            LOG.info(
                 "All files present for change_number: %s, patch_number: %s"
                 % (change_number, patch_number))
             time.sleep(10)
@@ -162,12 +165,12 @@
         # if we get to the end, we're broken
         elapsed = datetime.datetime.now() - started_at
-        self.log.warn("Required files not ready after %ss for %s %d,%d" %
-                      (elapsed, job_name, change_number, patch_number))
+        LOG.warn("Required files not ready after %ss for %s %d,%d" %
+                 (elapsed, job_name, change_number, patch_number))
         return False
 
     def get_failed_tempest(self):
-        self.log.debug("entering get_failed_tempest")
+        LOG.debug("entering get_failed_tempest")
         while True:
             event = self.gerrit.getEvent()
@@ -182,8 +185,8 @@
             change = event['change']['number']
             rev = event['patchSet']['number']
-            self.log.info("Looking for failures in %s,%s on %s" %
-                          (change, rev, ", ".join(failed_jobs)))
+            LOG.info("Looking for failures in %s,%s on %s" %
+                     (change, rev, ", ".join(failed_jobs)))
             if self._does_es_have_data(change, rev, failed_jobs):
                 return event
@@ -219,8 +222,6 @@ class Classifier():
     Given a change and revision, query logstash with a list of known queries
     that are mapped to specific bugs.
    """
-    log = logging.getLogger("recheckwatchbot")
-
    queries = None
 
     def __init__(self, queries_dir):
@@ -234,12 +235,12 @@
     def classify(self, change_number, patch_number, skip_resolved=True):
         """Returns either empty list or list with matched bugs."""
-        self.log.debug("Entering classify")
+        LOG.debug("Entering classify")
         #Reload each time
         self.queries = loader.load(self.queries_dir, skip_resolved)
         bug_matches = []
         for x in self.queries:
-            self.log.debug(
+            LOG.debug(
                 "Looking for bug: https://bugs.launchpad.net/bugs/%s"
                 % x['bug'])
             query = qb.single_patch(x['query'], change_number, patch_number)