From 5b8d2c3af76e704926cf5915ad0e6af59a232e61 Mon Sep 17 00:00:00 2001
From: Terry Jan Reedy
Date: Mon, 17 Feb 2014 23:12:16 -0500
Subject: Issue #8478: Untokenizer.compat now processes first token from
 iterator input.  Patch based on lines from Georg Brandl, Eric Snow, and
 Gareth Rees.

---
 Lib/test/test_tokenize.py | 13 +++++++++++++
 1 file changed, 13 insertions(+)

(limited to 'Lib/test/test_tokenize.py')

diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 476ed761ba..7008d0e46f 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -1165,6 +1165,19 @@ class UntokenizeTest(TestCase):
                 'start (1,3) precedes previous end (2,2)')
         self.assertRaises(ValueError, u.add_whitespace, (2,1))
 
+    def test_iter_compat(self):
+        u = Untokenizer()
+        token = (NAME, 'Hello')
+        tokens = [(ENCODING, 'utf-8'), token]
+        u.compat(token, iter([]))
+        self.assertEqual(u.tokens, ["Hello "])
+        u = Untokenizer()
+        self.assertEqual(u.untokenize(iter([token])), 'Hello ')
+        u = Untokenizer()
+        self.assertEqual(u.untokenize(iter(tokens)), 'Hello ')
+        self.assertEqual(u.encoding, 'utf-8')
+        self.assertEqual(untokenize(iter(tokens)), b'Hello ')
+
 
 __test__ = {"doctests" : doctests, 'decistmt': decistmt}
 
-- 
cgit v1.2.1
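
For context (not part of the patch above): the behavior under test is that tokenize.untokenize(), via Untokenizer.compat(), no longer drops the first token when given an iterator of 2-tuples instead of a list. A minimal sketch of that round trip, assuming Python 3 with this fix applied; the names 'source' and 'two_tuples' are illustrative:

    # Illustrative only -- not part of the patch.  Assumes Python 3 with
    # this fix applied; 'source' and 'two_tuples' are made-up names.
    import io
    from tokenize import generate_tokens, untokenize

    source = "x = 1 + 2\n"
    # 2-tuples of (type, string) send untokenize() down the compat() path
    # that the new test exercises.
    two_tuples = [(tok[0], tok[1])
                  for tok in generate_tokens(io.StringIO(source).readline)]

    # Before the fix, feeding an *iterator* (rather than a list) silently
    # lost the first token; with the fix the round trip keeps it.
    print(untokenize(iter(two_tuples)))  # roughly 'x =1 +2 \n' (spacing normalized)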