Diffstat (limited to 'Lib/test/test_tokenize.py')
-rw-r--r--  Lib/test/test_tokenize.py | 17 +++++++++++++++--
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 3520a67bd4..93e40de96e 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -1,8 +1,8 @@
 from test import support
 from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP,
                      STRING, ENDMARKER, ENCODING, tok_name, detect_encoding,
-                     open as tokenize_open, Untokenizer)
-from io import BytesIO
+                     open as tokenize_open, Untokenizer, generate_tokens)
+from io import BytesIO, StringIO
 import unittest
 from unittest import TestCase, mock
 from test.test_grammar import (VALID_UNDERSCORE_LITERALS,
@@ -919,6 +919,19 @@ async def f():
     DEDENT     ''            (7, 0) (7, 0)
     """)
 
+class GenerateTokensTest(TokenizeTest):
+    def check_tokenize(self, s, expected):
+        # Format the tokens in s in a table format.
+        # The ENDMARKER is omitted.
+        result = []
+        f = StringIO(s)
+        for type, token, start, end, line in generate_tokens(f.readline):
+            if type == ENDMARKER:
+                break
+            type = tok_name[type]
+            result.append(f"    {type:10} {token!r:13} {start} {end}")
+        self.assertEqual(result, expected.rstrip().splitlines())
+
 
 def decistmt(s):
     result = []
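Not part of the patch: a minimal sketch of the generate_tokens() API that the new GenerateTokensTest exercises. Unlike tokenize(), which reads bytes, generate_tokens() takes a readline callable that returns str lines, which is why the patch adds StringIO alongside BytesIO in the io import. The source string below is a hypothetical input, not taken from the test suite.

# Minimal sketch (not from the patch): drive tokenize.generate_tokens()
# the same way the new check_tokenize() helper does.
from io import StringIO
from tokenize import generate_tokens, tok_name, ENDMARKER

source = "x = 1 + 2\n"  # hypothetical example input
for type_, token, start, end, line in generate_tokens(StringIO(source).readline):
    if type_ == ENDMARKER:
        break
    print(f"{tok_name[type_]:10} {token!r:13} {start} {end}")

For this input the loop prints one row each for NAME 'x', OP '=', NUMBER '1', OP '+', NUMBER '2', and NEWLINE, mirroring the table layout that check_tokenize() compares against the expected string.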