
Commit e19d0c2

Author: stroeder
Commit message: ldap.schema.tokenizer: PEP-8 and pylint
Parent: 03cbc30


Lib/ldap/schema/tokenizer.py
37 additions, 36 deletions
@@ -20,13 +20,13 @@
 ).findall
 
 
-def split_tokens(s):
+def split_tokens(sch_str):
     """
     Returns list of syntax elements with quotes and spaces stripped.
     """
     parts = []
     parens = 0
-    for opar, cpar, unquoted, quoted, residue in TOKENS_FINDALL(s):
+    for opar, cpar, unquoted, quoted, residue in TOKENS_FINDALL(sch_str):
         if unquoted:
             parts.append(unquoted)
         elif quoted:
@@ -39,42 +39,43 @@ def split_tokens(s):
             parts.append(cpar)
         elif residue == '$':
             if not parens:
-                raise ValueError("'$' outside parenthesis in %r" % (s))
+                raise ValueError("'$' outside parenthesis in %r" % (sch_str))
         else:
-            raise ValueError(residue, s)
+            raise ValueError(residue, sch_str)
     if parens:
-        raise ValueError("Unbalanced parenthesis in %r" % (s))
+        raise ValueError("Unbalanced parenthesis in %r" % (sch_str))
     return parts
 
-def extract_tokens(l,known_tokens):
-  """
-  Returns dictionary of known tokens with all values
-  """
-  assert l[0].strip()=="(" and l[-1].strip()==")",ValueError(l)
-  result = {}
-  result.update(known_tokens)
-  i = 0
-  l_len = len(l)
-  while i<l_len:
-    if l[i] in result:
-      token = l[i]
-      i += 1 # Consume token
-      if i<l_len:
-        if l[i] in result:
-          # non-valued
-          result[token] = (())
-        elif l[i]=="(":
-          # multi-valued
-          i += 1 # Consume left parentheses
-          start = i
-          while i<l_len and l[i]!=")":
-            i += 1
-          result[token] = tuple(filter(lambda v:v!='$',l[start:i]))
-          i += 1 # Consume right parentheses
+def extract_tokens(tkl, known_tokens):
+    """
+    Returns dictionary of known tokens with all values
+    """
+    assert tkl[0].strip() == "(" and tkl[-1].strip() == ")", ValueError(tkl)
+    result = dict(known_tokens)
+    i = 0
+    l_len = len(tkl)
+    while i < l_len:
+        if tkl[i] in result:
+            token = tkl[i]
+            i += 1 # Consume token
+            if i < l_len:
+                if tkl[i] in result:
+                    # non-valued
+                    result[token] = (())
+                elif tkl[i] == "(":
+                    # multi-valued
+                    i += 1 # Consume left parentheses
+                    start = i
+                    while i < l_len and tkl[i] != ")":
+                        i += 1
+                    result[token] = tuple([
+                        v for v in tkl[start:i] if v != '$'
+                    ])
+                    i += 1 # Consume right parentheses
+                else:
+                    # single-valued
+                    result[token] = tkl[i],
+                    i += 1 # Consume single value
         else:
-          # single-valued
-          result[token] = l[i],
-          i += 1 # Consume single value
-    else:
-      i += 1 # Consume unrecognized item
-  return result
+            i += 1 # Consume unrecognized item
+    return result
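
Note: the extract_tokens() changes are purely stylistic. A small self-contained Python check (illustrative, not part of the commit) showing that the rewritten constructs behave exactly like the ones they replace:

# The commit replaces an empty-dict-plus-update() idiom with a single
# dict() call, and a filter()/lambda with a list comprehension.
known_tokens = {'NAME': (), 'DESC': ()}

old_result = {}
old_result.update(known_tokens)      # old style
new_result = dict(known_tokens)      # new style
assert old_result == new_result

values = ['top', '$', 'person', '$', 'uidObject']
old_tuple = tuple(filter(lambda v: v != '$', values))   # old style
new_tuple = tuple([v for v in values if v != '$'])      # new style
assert old_tuple == new_tuple == ('top', 'person', 'uidObject')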

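For context, a minimal usage sketch of the two functions, assuming python-ldap is installed so the module imports as ldap.schema.tokenizer; the sample schema string and keyword dict below are illustrative assumptions, not taken from the commit:

from ldap.schema.tokenizer import split_tokens, extract_tokens

# An LDAP attribute type description as it might appear in a
# subschema subentry.
ATTR_TYPE = "( 2.5.4.3 NAME 'cn' SUP name )"

tokens = split_tokens(ATTR_TYPE)
# Expected shape: ['(', '2.5.4.3', 'NAME', 'cn', 'SUP', 'name', ')']

# extract_tokens() collects the values of the keywords it is given;
# anything it does not recognize is skipped.
known = {'NAME': (), 'SUP': (), 'DESC': ()}
result = extract_tokens(tokens, known)
# Expected shape: {'NAME': ('cn',), 'SUP': ('name',), 'DESC': ()}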