"""
ldap.schema.tokenizer - Low-level parsing functions for LDAP schema
element strings (see RFC 4512, section 4.1).

Reconstructed from a mangled patch: the original payload had every
``<``…``>`` span stripped (HTML-tag removal), so all comparison
operators and the code between them were lost.  The behavior below
follows the documented contract of python-ldap's
``ldap.schema.tokenizer`` as of the 2009-04-29 commit whose stated
purpose was: accept a single DOLLAR ("$") as a value separator.
"""


def split_tokens(s, keywordDict):
    """
    Split the schema element description *s* into a list of tokens.

    Rules:
      * runs of spaces separate tokens and are discarded;
      * a ``$`` acts as a separator exactly like a space (the fix this
        patch introduced) and is discarded;
      * ``(`` and ``)`` are always emitted as standalone tokens;
      * a single-quoted string is emitted as one token with the
        surrounding quotes stripped (it may contain spaces);
      * everything else is accumulated into plain-word tokens.

    *keywordDict* is accepted for interface compatibility with callers
    but is not consulted here.

    Returns a list of strings.
    """
    result = []
    result_append = result.append  # hoisted bound method for the hot loop
    s_len = len(s)
    i = 0
    while i < s_len:
        c = s[i]
        if c == " " or c == "$":
            # Separator: consume it (and any following spaces) silently.
            i += 1
        elif c == "(" or c == ")":
            # Parentheses are tokens in their own right.
            result_append(c)
            i += 1
        elif c == "'":
            # Quoted string: everything up to the closing quote is one
            # token; the quotes themselves are stripped.  An unterminated
            # quote consumes the rest of the string.
            i += 1
            start = i
            while i < s_len and s[i] != "'":
                i += 1
            result_append(s[start:i])
            i += 1  # consume the closing quote (or step past end)
        else:
            # Plain word: runs until the next separator/special char.
            start = i
            while i < s_len and s[i] not in " $()'":
                i += 1
            result_append(s[start:i])
    return result  # split_tokens()


def extract_tokens(l, known_tokens):
    """
    Scan token list *l* (as produced by split_tokens()) and return a
    dict mapping each keyword found in *known_tokens* to a tuple of its
    values.

    Value forms recognized after a keyword:
      * another keyword (or end of list)  -> empty tuple (flag keyword);
      * ``(`` v1 [$] v2 ... ``)``         -> tuple of the values, with
        any stray "$" separators filtered out;
      * a single token                    -> one-element tuple.

    Keywords listed in *known_tokens* but absent from *l* keep the
    default value given in *known_tokens*.

    Raises AssertionError (carrying ValueError(l)) unless *l* starts
    with "(" and ends with ")".
    """
    assert l[0].strip() == "(" and l[-1].strip() == ")", ValueError(l)
    result = {}
    result.update(known_tokens)
    i = 0
    l_len = len(l)
    while i < l_len:
        if l[i] in result:
            token = l[i]
            i += 1  # consume the keyword itself
            if i < l_len:
                if l[i] in result:
                    # Next token is itself a keyword: flag-style, no values.
                    result[token] = ()
                elif l[i] == "(":
                    # Multi-valued: collect everything up to the matching ")".
                    i += 1  # consume "("
                    start = i
                    while i < l_len and l[i] != ")":
                        i += 1
                    # Drop "$" separators that survived tokenization.
                    result[token] = tuple(v for v in l[start:i] if v != "$")
                    i += 1  # consume ")"
                else:
                    # Single-valued keyword.
                    result[token] = (l[i],)
                    i += 1  # consume the value
        else:
            i += 1  # consume unrecognized item
    return result  # extract_tokens()