author    Ned Batchelder <ned@nedbatchelder.com>    2009-11-15 11:12:12 -0500
committer Ned Batchelder <ned@nedbatchelder.com>    2009-11-15 11:12:12 -0500
commit    7b49eec72eef2f3c8fda53fc8f3893759257bdf1 (patch)
tree      e5efb7eab6a3765cbcb75bd18e16c7a376cb2312 /test
parent    4316cdbd6b9bc05dddf842d9986560bf208a6f28 (diff)
download  python-coveragepy-git-7b49eec72eef2f3c8fda53fc8f3893759257bdf1.tar.gz
Fix a problem with syntax coloring of continued lines, refactor for testability, and add tests. Fixes issue #30.
Diffstat (limited to 'test')
-rw-r--r--  test/stress_phystoken.txt  35
-rw-r--r--  test/test_phystokens.py    58
2 files changed, 93 insertions(+), 0 deletions(-)
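
For context: source_token_lines, the function these tests exercise, yields one list per physical line of source, where each element is a (category, text) pair; the categories ('com', 'key', 'nam', 'op', 'str', 'num', 'ws') appear in test_simple below. A minimal sketch of driving it by hand, assuming a coverage.py checkout is on the import path:

    from coverage.phystokens import source_token_lines

    source = "def foo():\n    say('two = %d' % 2)\n"
    for line in source_token_lines(source):
        # Each `line` is a list of (category, text) pairs for one
        # physical source line; joining the texts reproduces the line.
        print("".join(text for _, text in line))
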
diff --git a/test/stress_phystoken.txt b/test/stress_phystoken.txt
new file mode 100644
index 00000000..bd6a453a
--- /dev/null
+++ b/test/stress_phystoken.txt
@@ -0,0 +1,35 @@
+# Here's some random Python so that test_tokenize_myself will have some
+# stressful stuff to try. This file is .txt instead of .py so pylint won't
+# complain about it.
+
+first_back = """\
+hey there!
+"""
+
+other_back = """
+hey \
+there
+"""
+
+lots_of_back = """\
+hey \
+there
+"""
+fake_back = """\
+ouch
+"""
+
+class C(object):
+    def there():
+        this = 5 + \
+            7
+        that = \
+            "a continued line"
+
+cont1 = "one line of text" + \
+    "another line of text"
+
+def hello():
+    print("Hello world!")
+
+hello()
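
The stress file above deliberately mixes backslash continuations inside triple-quoted strings with continuations in ordinary code, the combination issue #30 is about. The underlying difficulty can be seen with nothing but the standard library's tokenize module (a sketch, not coverage.py's own code): a multi-line string comes back as a single token spanning several physical lines, so a line-oriented colorizer has to split such tokens apart again.

    import io
    import tokenize

    src = 'lots_of_back = """\\\nhey \\\nthere\n"""\n'
    for tok in tokenize.generate_tokens(io.StringIO(src).readline):
        # For the STRING token, the start and end rows differ:
        # one token, four physical lines.
        print(tok.type, tok.start, tok.end)
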
diff --git a/test/test_phystokens.py b/test/test_phystokens.py
new file mode 100644
index 00000000..03f2a929
--- /dev/null
+++ b/test/test_phystokens.py
@@ -0,0 +1,58 @@
+"""Tests for Coverage.py's improved tokenizer."""
+
+import os, re, sys
+
+sys.path.insert(0, os.path.split(__file__)[0]) # Force relative import for Py3k
+from coveragetest import CoverageTest
+
+from coverage.phystokens import source_token_lines
+
+
+SIMPLE = """\
+# yay!
+def foo():
+    say('two = %d' % 2)
+"""
+
+HERE = os.path.split(__file__)[0]
+
+class PhysTokensTest(CoverageTest):
+ """Tests for Coverage.py's improver tokenizer."""
+
+    def check_tokenization(self, source):
+        """Tokenize `source`, then put it back together, should be the same."""
+        tokenized = ""
+        for line in source_token_lines(source):
+            text = "".join([t for _, t in line])
+            tokenized += text + "\n"
+        source = re.sub("(?m)[ \t]+$", "", source)
+        tokenized = re.sub("(?m)[ \t]+$", "", tokenized)
+        #if source != tokenized:
+        #    open(r"c:\foo\0.py", "w").write(source)
+        #    open(r"c:\foo\1.py", "w").write(tokenized)
+        self.assertEqual(source, tokenized)
+
+    def check_file_tokenization(self, fname):
+        """Use the contents of `fname` for `check_tokenization`."""
+        self.check_tokenization(open(fname).read())
+
+    def test_simple(self):
+        self.assertEqual(list(source_token_lines(SIMPLE)),
+            [
+                [('com', "# yay!")],
+                [('key', 'def'), ('ws', ' '), ('nam', 'foo'), ('op', '('),
+                    ('op', ')'), ('op', ':')],
+                [('ws', '    '), ('nam', 'say'), ('op', '('),
+                    ('str', "'two = %d'"), ('ws', ' '), ('op', '%'),
+                    ('ws', ' '), ('num', '2'), ('op', ')')]
+            ]
+        )
+        self.check_tokenization(SIMPLE)
+
+    def test_tokenize_real_file(self):
+        real_file = os.path.join(HERE, "test_coverage.py")
+        self.check_file_tokenization(real_file)
+
+    def test_stress(self):
+        stress = os.path.join(HERE, "stress_phystoken.txt")
+        self.check_file_tokenization(stress)
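
A note on the design of check_tokenization above: the tests assert a round-trip property, i.e. concatenating the token texts of every line must reproduce the original source, modulo trailing whitespace, which both sides strip before comparing. A compact standalone version of the same check (a sketch; round_trips is a name invented here, and coverage.py is assumed importable):

    import re
    from coverage.phystokens import source_token_lines

    def _trim(s):
        # Drop trailing blanks on each line, as the tests do.
        return re.sub(r"(?m)[ \t]+$", "", s)

    def round_trips(source):
        # Rebuild the source from the token stream, one physical
        # line at a time, then compare.
        rebuilt = "".join(
            "".join(text for _, text in line) + "\n"
            for line in source_token_lines(source)
        )
        return _trim(source) == _trim(rebuilt)

    assert round_trips('this = 5 + \\\n    7\n')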