diff --git a/elastic_recheck/cmd/graph.py b/elastic_recheck/cmd/graph.py index 1ff9ae77..d317ed26 100755 --- a/elastic_recheck/cmd/graph.py +++ b/elastic_recheck/cmd/graph.py @@ -22,6 +22,7 @@ import os import sys from launchpadlib import launchpad +import pyelasticsearch import pytz import requests @@ -50,10 +51,10 @@ LOG = logging.getLogger('ergraph') def get_launchpad_bug(bug): - lp = launchpad.Launchpad.login_anonymously('grabbing bugs', - 'production', - LPCACHEDIR) try: + lp = launchpad.Launchpad.login_anonymously('grabbing bugs', + 'production', + LPCACHEDIR) lp_bug = lp.bugs[bug] bugdata = {'name': lp_bug.title} projects = ", ".join(map(lambda x: "(%s - %s)" % @@ -65,6 +66,10 @@ def get_launchpad_bug(bug): # if someone makes a bug private, we lose access to it. bugdata = dict(name='Unknown (Private Bug)', affects='Unknown (Private Bug)', reviews=[]) + except requests.exceptions.RequestException: + LOG.exception("Failed to get Launchpad data for bug %s" % bug) + bugdata = dict(name='Unable to get launchpad data', + affects='Unknown', reviews=[]) return bugdata @@ -175,9 +180,18 @@ def main(): fails24=0, data=[]) buglist.append(bug) - results = classifier.hits_by_query(query['query'], - args.queue, - size=3000) + try: + results = classifier.hits_by_query(query['query'], + args.queue, + size=3000) + except pyelasticsearch.exceptions.InvalidJsonResponseError: + LOG.exception("Invalid Json while collecting metrics for query %s" + % query['query']) + continue + except requests.exceptions.ReadTimeout: + LOG.exception("Timeout while collecting metrics for query %s" % + query['query']) + continue facets_for_fail = er_results.FacetSet() facets_for_fail.detect_facets(results, diff --git a/elastic_recheck/cmd/uncategorized_fails.py b/elastic_recheck/cmd/uncategorized_fails.py index 74e1324e..c84a3b53 100755 --- a/elastic_recheck/cmd/uncategorized_fails.py +++ b/elastic_recheck/cmd/uncategorized_fails.py @@ -18,8 +18,10 @@ import argparse import collections import 
ConfigParser import datetime +import logging import operator import re +import requests import dateutil.parser as dp import jinja2 @@ -42,6 +44,8 @@ EXCLUDED_JOBS = ( EXCLUDED_JOBS_REGEX = re.compile('(' + '|'.join(EXCLUDED_JOBS) + ')') +LOG = logging.getLogger('eruncategorized') + def get_options(): parser = argparse.ArgumentParser( @@ -253,16 +257,19 @@ def _failure_percentage(hits, fails): def collect_metrics(classifier, fails): data = {} for q in classifier.queries: - results = classifier.hits_by_query(q['query'], size=30000) - hits = _status_count(results) - data[q['bug']] = { - 'fails': _failure_count(hits), - 'hits': hits, - 'percentages': _failure_percentage(results, fails), - 'query': q['query'], - 'failed_jobs': _failed_jobs(results) - } - + try: + results = classifier.hits_by_query(q['query'], size=30000) + hits = _status_count(results) + data[q['bug']] = { + 'fails': _failure_count(hits), + 'hits': hits, + 'percentages': _failure_percentage(results, fails), + 'query': q['query'], + 'failed_jobs': _failed_jobs(results) + } + except requests.exceptions.ReadTimeout: + LOG.exception("Failed to collect metrics for query %s" % + q['query']) return data