Commit 6715f91
gh-102856: Python tokenizer implementation for PEP 701 (#104323)
This commit replaces the pure-Python implementation of the tokenize module with one that reuses the real C tokenizer via a private extension module. The tokenize module now implements a compatibility layer that transforms tokens from the C tokenizer into Python tokenize tokens for backward compatibility.

As the C tokenizer does not emit some tokens that the Python tokenizer provides (such as comments and non-semantic newlines), a new special mode has been added to the C tokenizer that is currently only used via the extension module exposing it to the Python layer. This mode forces the C tokenizer to emit these extra tokens and to attach the metadata needed to match the old Python implementation.

Co-authored-by: Pablo Galindo <pablogsal@gmail.com>
1 parent 3ed57e4 · commit 6715f91
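As a quick illustration of the compatibility layer (not part of the commit; this uses only the public tokenize API), COMMENT and NL tokens still appear at the Python level even though the plain C tokenizer would not emit them:

```python
import io
import tokenize

# A comment and a blank line exist only as COMMENT/NL tokens; the C
# tokenizer produces them solely in the new extra-tokens mode that the
# tokenize module now drives internally.
source = "x = 1  # set x\n\n"

for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))
```

On a build with this change, the stream should still include COMMENT '# set x' and an NL for the blank line, matching the old pure-Python output.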

22 files changed: +426, −376 lines

Doc/library/token-list.inc (+4): generated file, diff not rendered.

Doc/library/token.rst (+2)

```diff
@@ -50,11 +50,13 @@ The following token type values aren't used by the C tokenizer but are needed for
    the :mod:`tokenize` module.
 
 .. data:: COMMENT
+   :noindex:
 
    Token value used to indicate a comment.
 
 
 .. data:: NL
+   :noindex:
 
    Token value used to indicate a non-terminating newline. The
    :data:`NEWLINE` token indicates the end of a logical line of Python code;
```

Grammar/Tokens (+2, −2)

```diff
@@ -64,9 +64,9 @@ SOFT_KEYWORD
 FSTRING_START
 FSTRING_MIDDLE
 FSTRING_END
+COMMENT
+NL
 ERRORTOKEN
 
 # These aren't used by the C tokenizer but are needed for tokenize.py
-COMMENT
-NL
 ENCODING
```

Include/internal/pycore_global_objects_fini_generated.h (+1): generated file, diff not rendered.

Include/internal/pycore_global_strings.h (+1)

```diff
@@ -406,6 +406,7 @@ struct _Py_global_strings {
     STRUCT_FOR_ID(exception)
     STRUCT_FOR_ID(exp)
     STRUCT_FOR_ID(extend)
+    STRUCT_FOR_ID(extra_tokens)
     STRUCT_FOR_ID(facility)
     STRUCT_FOR_ID(factory)
     STRUCT_FOR_ID(false)
```
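The new interned string matches the extra_tokens keyword of the private extension module that exposes the C tokenizer to tokenize.py. A rough sketch of the call, with the module name and signature taken as assumptions from this commit (private API, not for general use):

```python
# Assumed private API from this commit: _tokenize.TokenizerIter(source,
# extra_tokens=...) -- illustrative only; use the tokenize module instead.
from _tokenize import TokenizerIter

# extra_tokens=True enables the special C-tokenizer mode that also
# yields COMMENT and NL tokens with tokenize-compatible metadata.
for info in TokenizerIter("x = 1  # c\n", extra_tokens=True):
    print(info)
```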

Include/internal/pycore_runtime_init_generated.h (+1): generated file, diff not rendered.

Include/internal/pycore_token.h (+3, −1)

```diff
@@ -77,7 +77,9 @@ extern "C" {
 #define FSTRING_START 61
 #define FSTRING_MIDDLE 62
 #define FSTRING_END 63
-#define ERRORTOKEN 64
+#define COMMENT 64
+#define NL 65
+#define ERRORTOKEN 66
 #define N_TOKENS 68
 #define NT_OFFSET 256
 
```
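Because COMMENT and NL move ahead of ERRORTOKEN, the generated numeric values shift (ERRORTOKEN goes from 64 to 66); only code comparing against the named constants keeps working. A quick sanity check against the regenerated token module, using the values shown in the header above:

```python
import token

# Values introduced by this commit: COMMENT and NL are now produced by
# the C tokenizer and sit directly before ERRORTOKEN in the numbering.
assert (token.COMMENT, token.NL, token.ERRORTOKEN) == (64, 65, 66)
print(token.tok_name[64], token.tok_name[65], token.tok_name[66])
```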

Include/internal/pycore_unicodeobject_generated.h (+3): generated file, diff not rendered.

Lib/inspect.py (+2, −2)

```diff
@@ -2187,15 +2187,15 @@ def _signature_strip_non_python_syntax(signature):
         if string == ',':
             current_parameter += 1
 
-        if (type == ERRORTOKEN) and (string == '$'):
+        if (type == OP) and (string == '$'):
             assert self_parameter is None
             self_parameter = current_parameter
             continue
 
         add(string)
         if (string == ','):
             add(' ')
-    clean_signature = ''.join(text)
+    clean_signature = ''.join(text).strip()
     return clean_signature, self_parameter
```
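This change tracks a behavioural difference of the new tokenizer: the '$' marker used in __text_signature__ strings (e.g. '($self, obj, /)') previously surfaced as ERRORTOKEN from the pure-Python tokenizer but now arrives as OP, and the joined text can carry stray surrounding whitespace, hence the added .strip(). A small probe (illustrative, not part of the commit):

```python
import io
import tokenize
from token import tok_name

# '$' separates the "self" group in __text_signature__ strings.
sig = "($self, /, value)"
for tok in tokenize.generate_tokens(io.StringIO(sig).readline):
    if tok.string == "$":
        # ERRORTOKEN with the old pure-Python tokenizer; OP with the
        # C-tokenizer-backed implementation this commit introduces.
        print(tok_name[tok.type])
```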

Lib/tabnanny.py (+10)

```diff
@@ -107,6 +107,10 @@ def check(file):
         errprint("%r: Token Error: %s" % (file, msg))
         return
 
+    except SyntaxError as msg:
+        errprint("%r: Token Error: %s" % (file, msg))
+        return
+
     except IndentationError as msg:
         errprint("%r: Indentation Error: %s" % (file, msg))
         return
@@ -272,6 +276,12 @@ def format_witnesses(w):
     return prefix + " " + ', '.join(firsts)
 
 def process_tokens(tokens):
+    try:
+        _process_tokens(tokens)
+    except TabError as e:
+        raise NannyNag(e.lineno, e.msg, e.text)
+
+def _process_tokens(tokens):
     INDENT = tokenize.INDENT
     DEDENT = tokenize.DEDENT
     NEWLINE = tokenize.NEWLINE
```
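With tokens now coming from the C tokenizer, bad indentation surfaces as exceptions (TabError, SyntaxError) rather than being diagnosed token by token, so process_tokens() translates TabError back into the NannyNag that check() already reports. A small demonstration (the file name is illustrative):

```python
import tabnanny

# Indent one line with a tab and the next with eight spaces: equal at
# tab size 8 but not at tab size 1, so the tokenizer raises TabError,
# which process_tokens() now converts into a NannyNag that check()
# reports as a diagnostic instead of crashing.
with open("mixed_indent.py", "w") as f:
    f.write("def f():\n\tx = 1\n        y = 2\n")

tabnanny.verbose = 1
tabnanny.check("mixed_indent.py")
```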

Lib/test/test_tabnanny.py (+2, −2)

```diff
@@ -223,7 +223,7 @@ def test_when_nannynag_error_verbose(self):
         with TemporaryPyFile(SOURCE_CODES["nannynag_errored"]) as file_path:
             out = f"{file_path!r}: *** Line 3: trouble in tab city! ***\n"
             out += "offending line: '\\tprint(\"world\")\\n'\n"
-            out += "indent not equal e.g. at tab size 1\n"
+            out += "inconsistent use of tabs and spaces in indentation\n"
 
             tabnanny.verbose = 1
             self.verify_tabnanny_check(file_path, out=out)
@@ -315,7 +315,7 @@ def validate_cmd(self, *args, stdout="", stderr="", partial=False, expect_failure
     def test_with_errored_file(self):
         """Should displays error when errored python file is given."""
         with TemporaryPyFile(SOURCE_CODES["wrong_indented"]) as file_path:
-            stderr = f"{file_path!r}: Indentation Error: "
+            stderr = f"{file_path!r}: Token Error: "
             stderr += ('unindent does not match any outer indentation level'
                        ' (<tokenize>, line 3)')
             self.validate_cmd(file_path, stderr=stderr, expect_failure=True)
```
