@@ -3,7 +3,8 @@
 from tokenize import (tokenize, untokenize, NUMBER, NAME, OP,
                      STRING, ENDMARKER, ENCODING, tok_name, detect_encoding,
                      open as tokenize_open, Untokenizer, generate_tokens,
-                     NEWLINE, _generate_tokens_from_c_tokenizer, DEDENT, TokenInfo)
+                     NEWLINE, _generate_tokens_from_c_tokenizer, DEDENT, TokenInfo,
+                     TokenError)
 from io import BytesIO, StringIO
 import unittest
 from textwrap import dedent
@@ -286,7 +287,7 @@ def number_token(s):
         for lit in INVALID_UNDERSCORE_LITERALS:
             try:
                 number_token(lit)
-            except SyntaxError:
+            except TokenError:
                 continue
             self.assertNotEqual(number_token(lit), lit)
 
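The hunk above pins down the new contract: under the C tokenizer, an invalid underscore literal now surfaces `tokenize.TokenError` rather than `SyntaxError`, or else simply tokenizes to a NUMBER token shorter than the literal. A minimal sketch of that contract, assuming Python 3.12+ where this change landed; `number_token` below is reconstructed from the hunk header's context, not copied verbatim from the file:

```python
from io import BytesIO
from tokenize import tokenize, NUMBER, TokenError

def number_token(s):
    # Reconstruction of the test helper: return the first NUMBER token
    # produced while tokenizing s, or a placeholder if none appears.
    f = BytesIO(s.encode('utf-8'))
    for toktype, token, start, end, line in tokenize(f.readline):
        if toktype == NUMBER:
            return token
    return 'invalid token'

for lit in ["1_000", "1__0"]:       # one valid, one invalid literal
    try:
        print(lit, "->", number_token(lit))
    except TokenError as exc:       # was `except SyntaxError` before this change
        print(lit, "-> TokenError:", exc)
```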
@@ -1379,7 +1380,7 @@ def test_latin1_normalization(self):
         self.assertEqual(found, "iso-8859-1")
 
     def test_syntaxerror_latin1(self):
-        # Issue 14629: need to raise SyntaxError if the first
+        # Issue 14629: need to raise TokenError if the first
         # line(s) have non-UTF-8 characters
         lines = (
             b'print("\xdf")', # Latin-1: LATIN SMALL LETTER SHARP S
@@ -2754,7 +2755,7 @@ def get_tokens(string):
             "]",
         ]:
             with self.subTest(case=case):
-                self.assertRaises(SyntaxError, get_tokens, case)
+                self.assertRaises(TokenError, get_tokens, case)
 
     def test_max_indent(self):
         MAXINDENT = 100
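A sketch of what the assertion just changed above now exercises, under the same Python 3.12+ assumption: draining the C tokenizer over unbalanced input raises `TokenError`. A lone `"]"` is literally one of the cases the surrounding loop feeds in, and the helper mirrors the `get_tokens` named in the hunk header:

```python
from io import StringIO
from tokenize import _generate_tokens_from_c_tokenizer, TokenError

def get_tokens(string):
    # Same shape as the test helper: drain the C tokenizer over the input.
    return list(_generate_tokens_from_c_tokenizer(StringIO(string).readline))

try:
    get_tokens("]")                 # unbalanced closing bracket
except TokenError as exc:           # previously surfaced as SyntaxError
    print("TokenError:", exc)
```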
@@ -2773,7 +2774,7 @@ def generate_source(indents):
 
         invalid = generate_source(MAXINDENT)
         the_input = StringIO(invalid)
-        self.assertRaises(SyntaxError, lambda: list(_generate_tokens_from_c_tokenizer(the_input.readline)))
+        self.assertRaises(IndentationError, lambda: list(_generate_tokens_from_c_tokenizer(the_input.readline)))
         self.assertRaises(
             IndentationError, compile, invalid, "<string>", "exec"
         )
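Finally, the last hunk separates indentation failures from generic tokenizer errors: exceeding the tokenizer's maximum nesting depth (MAXINDENT = 100 in the test) now raises IndentationError from the C tokenizer path, the same exception compile() raises for that source. A sketch under the same assumptions; the diff elides the body of `generate_source`, so the helper below is a hypothetical stand-in that nests one block per indentation level:

```python
from io import StringIO
from tokenize import _generate_tokens_from_c_tokenizer

MAXINDENT = 100

def generate_source(indents):
    # Hypothetical stand-in for the test's elided helper: open one nested
    # `if True:` block per level, then close with a deeply indented `pass`.
    source = "".join("  " * level + "if True:\n" for level in range(indents))
    return source + "  " * indents + "pass\n"

invalid = generate_source(MAXINDENT)
the_input = StringIO(invalid)
try:
    list(_generate_tokens_from_c_tokenizer(the_input.readline))
except IndentationError as exc:     # was asserted as SyntaxError before
    print("tokenizer:", exc)

try:
    compile(invalid, "<string>", "exec")
except IndentationError as exc:     # compile() rejects the same source the same way
    print("compile:", exc)
```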