Skip to content

Commit 10a428b

Browse files
holymonson authored and benjaminp committed
closes bpo-34515: Support non-ASCII identifiers in lib2to3. (GH-8950)
1 parent d206731 commit 10a428b

File tree

3 files changed

+16
-6
lines changed

3 files changed

+16
-6
lines changed

Lib/lib2to3/pgen2/tokenize.py

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ def _combinations(*l):
5656
Whitespace = r'[ \f\t]*'
5757
Comment = r'#[^\r\n]*'
5858
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
59-
Name = r'[a-zA-Z_]\w*'
59+
Name = r'\w+'
6060

6161
Binnumber = r'0[bB]_?[01]+(?:_[01]+)*'
6262
Hexnumber = r'0[xX]_?[\da-fA-F]+(?:_[\da-fA-F]+)*[lL]?'
@@ -107,8 +107,8 @@ def _combinations(*l):
107107
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
108108
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
109109

110-
tokenprog, pseudoprog, single3prog, double3prog = list(map(
111-
re.compile, (Token, PseudoToken, Single3, Double3)))
110+
tokenprog, pseudoprog, single3prog, double3prog = map(
111+
re.compile, (Token, PseudoToken, Single3, Double3))
112112

113113
_strprefixes = (
114114
_combinations('r', 'R', 'f', 'F') |
@@ -349,7 +349,6 @@ def generate_tokens(readline):
349349
logical line; continuation lines are included.
350350
"""
351351
lnum = parenlev = continued = 0
352-
namechars, numchars = string.ascii_letters + '_', '0123456789'
353352
contstr, needcont = '', 0
354353
contline = None
355354
indents = [0]
@@ -451,7 +450,7 @@ def generate_tokens(readline):
451450
spos, epos, pos = (lnum, start), (lnum, end), end
452451
token, initial = line[start:end], line[start]
453452

454-
if initial in numchars or \
453+
if initial in string.digits or \
455454
(initial == '.' and token != '.'): # ordinary number
456455
yield (NUMBER, token, spos, epos, line)
457456
elif initial in '\r\n':
@@ -501,7 +500,7 @@ def generate_tokens(readline):
501500
yield stashed
502501
stashed = None
503502
yield (STRING, token, spos, epos, line)
504-
elif initial in namechars: # ordinary name
503+
elif initial.isidentifier(): # ordinary name
505504
if token in ('async', 'await'):
506505
if async_def:
507506
yield (ASYNC if token == 'async' else AWAIT,

Lib/lib2to3/tests/test_parser.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -529,6 +529,16 @@ def test_4(self):
529529
self.validate("""x = {2, 3, 4,}""")
530530

531531

532+
# Adapted from Python 3's Lib/test/test_unicode_identifiers.py and
533+
# Lib/test/test_tokenize.py:TokenizeTest.test_non_ascii_identifiers
534+
class TestIdentfier(GrammarTest):
535+
def test_non_ascii_identifiers(self):
536+
self.validate("Örter = 'places'\ngrün = 'green'")
537+
self.validate("蟒 = a蟒 = 锦蛇 = 1")
538+
self.validate("µ = aµ = µµ = 1")
539+
self.validate("𝔘𝔫𝔦𝔠𝔬𝔡𝔢 = a_𝔘𝔫𝔦𝔠𝔬𝔡𝔢 = 1")
540+
541+
532542
class TestNumericLiterals(GrammarTest):
533543
def test_new_octal_notation(self):
534544
self.validate("""0o7777777777777""")
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
Fix parsing non-ASCII identifiers in :mod:`lib2to3.pgen2.tokenize` (PEP 3131).

0 commit comments

Comments (0)