diff --git a/boartty/search/__init__.py b/boartty/search/__init__.py
index 4b584e6..4ea7e51 100644
--- a/boartty/search/__init__.py
+++ b/boartty/search/__init__.py
@@ -51,6 +51,11 @@ class SearchCompiler(object):
             result = and_(boartty.db.story_table.c.project_key == boartty.db.project_table.c.key,
                           result)
             tables.remove(boartty.db.project_table)
+        if boartty.db.tag_table in tables:
+            result = and_(boartty.db.story_tag_table.c.tag_key == boartty.db.tag_table.c.key,
+                          boartty.db.story_tag_table.c.story_key == boartty.db.story_table.c.key,
+                          result)
+            tables.remove(boartty.db.tag_table)
         if boartty.db.user_table in tables:
             result = and_(boartty.db.story_table.c.user_key == boartty.db.user_table.c.key,
                           result)
@@ -67,7 +72,7 @@ class SearchCompiler(object):
 if __name__ == '__main__':
     class Dummy(object):
         pass
-    query = 'recentlyseen:24 hours'
+    query = 'tag:zuulv3'
     lexer = tokenizer.SearchTokenizer()
     lexer.input(query)
     while True:
diff --git a/boartty/search/parser.py b/boartty/search/parser.py
index 82a4898..ded6dba 100644
--- a/boartty/search/parser.py
+++ b/boartty/search/parser.py
@@ -86,7 +86,7 @@ def SearchParser():
                 | projects_term
                 | project_key_term
                 | branch_term
-                | topic_term
+                | tag_term
                 | ref_term
                 | label_term
                 | message_term
@@ -195,12 +195,12 @@ def SearchParser():
         else:
             p[0] = boartty.db.story_table.c.branch == p[2]
 
-    def p_topic_term(p):
-        '''topic_term : OP_TOPIC string'''
+    def p_tag_term(p):
+        '''tag_term : OP_TAG string'''
         if p[2].startswith('^'):
-            p[0] = func.matches(p[2], boartty.db.story_table.c.topic)
+            p[0] = func.matches(p[2], boartty.db.tag_table.c.name)
         else:
-            p[0] = boartty.db.story_table.c.topic == p[2]
+            p[0] = boartty.db.tag_table.c.name == p[2]
 
     def p_ref_term(p):
         '''ref_term : OP_REF string'''
diff --git a/boartty/search/tokenizer.py b/boartty/search/tokenizer.py
index 6b39ab4..32d09f4 100644
--- a/boartty/search/tokenizer.py
+++ b/boartty/search/tokenizer.py
@@ -28,7 +28,7 @@ operators = {
     'projects': 'OP_PROJECTS',
     '_project_key': 'OP_PROJECT_KEY',  # internal boartty use only
     'branch': 'OP_BRANCH',
-    'topic': 'OP_TOPIC',
+    'tag': 'OP_TAG',
     'ref': 'OP_REF',
     #'tr': 'OP_TR',  # needs trackingids
     #'bug': 'OP_BUG',  # needs trackingids
@@ -56,7 +56,6 @@ tokens = [
     'LPAREN',
     'RPAREN',
     'NUMBER',
-    'STORY_ID',
     'SSTRING',
     'DSTRING',
     'USTRING',
@@ -75,10 +74,6 @@ def SearchTokenizer():
         t.type = operators.get(t.value[:-1], 'OP')
         return t
 
-    def t_STORY_ID(t):
-        r'I[a-fA-F0-9]{7,40}'
-        return t
-
     def t_SSTRING(t):
         r"'([^\\']+|\\'|\\\\)*'"
         t.value=t.value[1:-1].decode("string-escape")
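
For reference, the tag term added above compiles to a join across a story/tag association table rather than a simple column comparison, which is why the SearchCompiler hunk adds the two extra join conditions. Below is a minimal, self-contained sketch of the resulting SQLAlchemy expression; the table definitions are illustrative stand-ins for boartty.db (the real schema may differ), and 'zuulv3' is just the example tag used in the __main__ block.

# Sketch only: hypothetical tables mirroring the shape implied by the diff
# (story, tag, and a story_tag association table). Not boartty's actual schema.
from sqlalchemy import (MetaData, Table, Column, Integer, String,
                        ForeignKey, and_)

metadata = MetaData()
story_table = Table('story', metadata,
                    Column('key', Integer, primary_key=True),
                    Column('title', String(255)))
tag_table = Table('tag', metadata,
                  Column('key', Integer, primary_key=True),
                  Column('name', String(255)))
story_tag_table = Table('story_tag', metadata,
                        Column('story_key', Integer, ForeignKey('story.key')),
                        Column('tag_key', Integer, ForeignKey('tag.key')))

# The parser turns "tag:zuulv3" into a comparison on tag.name; the compiler
# then joins it back to story through the association table, as in the hunk above.
term = tag_table.c.name == 'zuulv3'
filter_expr = and_(story_tag_table.c.tag_key == tag_table.c.key,
                   story_tag_table.c.story_key == story_table.c.key,
                   term)

# Printing the clause shows the generated SQL condition with bound parameters.
print(filter_expr)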