Diffstat (limited to 'docutils/test/test_readers/test_python/showtok')

 docutils/test/test_readers/test_python/showtok (-rwxr-xr-x) | 40 ----------------------------------------
 1 file changed, 0 insertions(+), 40 deletions(-)
diff --git a/docutils/test/test_readers/test_python/showtok b/docutils/test/test_readers/test_python/showtok
deleted file mode 100755
index efd250ce1..000000000
--- a/docutils/test/test_readers/test_python/showtok
+++ /dev/null
@@ -1,40 +0,0 @@
-#! /usr/bin/env python
-
-
-"""
-This is a tool for exploring token lists generated by
-``tokenize.generate_tokens()`` from test data in
-docutils/test/test_readers/test_python/test_parser or stdin.
-
-Usage::
-
-    showtok <key> <index>
-
-    showtok < <module.py>
-
-Where ``<key>`` is the key to the ``totest`` dictionary, and ``<index>`` is
-the index of the list ``totest[key]``. If no arguments are given, stdin is
-used for input.
-"""
-
-import sys
-import tokenize
-import pprint
-from token import tok_name
-import test_parser
-
-def name_tokens(tokens):
-    for i in range(len(tokens)):
-        tup = tokens[i]
-        tokens[i] = (tok_name[tup[0]], tup)
-
-if len(sys.argv) > 1:
-    key, caseno = sys.argv[1:]
-    print 'totest["%s"][%s][0]:\n' % (key, caseno)
-    input_text = test_parser.totest[key][int(caseno)][0]
-else:
-    input_text = sys.stdin.read()
-print input_text
-tokens = list(tokenize.generate_tokens(iter(input_text.splitlines(1)).next))
-name_tokens(tokens)
-pprint.pprint(tokens)
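The deleted script is Python 2 code (``print`` statements, the ``.next()``
iterator method), which is presumably why it was dropped. For readers who
want to explore token lists the same way today, a minimal Python 3 sketch of
the stdin path might look like the following; this is an illustrative rewrite,
not part of the Docutils repository, and it omits the ``test_parser`` lookup
branch::

    #!/usr/bin/env python3
    """Tokenize a module read from stdin and pretty-print the named tokens."""

    import io
    import pprint
    import sys
    import tokenize
    from token import tok_name

    input_text = sys.stdin.read()
    # generate_tokens() expects a readline callable yielding source lines.
    readline = io.StringIO(input_text).readline
    # Pair each token with its symbolic name, mirroring name_tokens() above;
    # in Python 3, generate_tokens() yields TokenInfo named tuples.
    tokens = [(tok_name[tok.type], tok)
              for tok in tokenize.generate_tokens(readline)]
    pprint.pprint(tokens)

Invoked as ``python3 showtok.py < module.py``, this prints each token as a
``(NAME, TokenInfo)`` pair, which is the same exploration aid the deleted
tool provided.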
