Diffstat (limited to 'sphinx/pycode')
-rw-r--r--  sphinx/pycode/__init__.py         11
-rw-r--r--  sphinx/pycode/pgen2/driver.py      2
-rw-r--r--  sphinx/pycode/pgen2/grammar.py    13
-rw-r--r--  sphinx/pycode/pgen2/literals.py    3
-rw-r--r--  sphinx/pycode/pgen2/pgen.py       15
-rw-r--r--  sphinx/pycode/pgen2/tokenize.py    6
6 files changed, 28 insertions, 22 deletions
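The commit below is a Python 2-to-3 syntax modernization pass over sphinx/pycode: `except E, err` handlers become `except E as err`, and print statements become `print()` calls backed by `from __future__ import print_function`. A minimal before/after sketch of the two idioms (the helper and names here are illustrative, not taken from the diff):

    # Python 2-only spellings, rejected by the Python 3 parser:
    #     try:
    #         fileobj = open(filename, 'rb')
    #     except IOError, err:
    #         print "failed:", err

    # Spellings valid on both Python 2.6+ and Python 3:
    from __future__ import print_function

    def read_file(filename):          # illustrative helper, not from the patch
        try:
            return open(filename, 'rb').read()
        except IOError as err:        # 'as' form works on 2.6+ and 3.x
            print("failed:", err)
            raise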
diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py
index 54e79da6b..ca5e8b095 100644
--- a/sphinx/pycode/__init__.py
+++ b/sphinx/pycode/__init__.py
@@ -8,6 +8,7 @@
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
import sys
from os import path
@@ -182,7 +183,7 @@ class ModuleAnalyzer(object):
return cls.cache['file', filename]
try:
fileobj = open(filename, 'rb')
- except Exception, err:
+ except Exception as err:
raise PycodeError('error opening %r' % filename, err)
obj = cls(fileobj, modname, filename)
cls.cache['file', filename] = obj
@@ -202,7 +203,7 @@ class ModuleAnalyzer(object):
obj = cls.for_string(source, modname)
else:
obj = cls.for_file(source, modname)
- except PycodeError, err:
+ except PycodeError as err:
cls.cache['module', modname] = err
raise
cls.cache['module', modname] = obj
@@ -245,7 +246,7 @@ class ModuleAnalyzer(object):
return
try:
self.tokens = list(tokenize.generate_tokens(self.source.readline))
- except tokenize.TokenError, err:
+ except tokenize.TokenError as err:
raise PycodeError('tokenizing failed', err)
self.source.close()
@@ -256,7 +257,7 @@ class ModuleAnalyzer(object):
self.tokenize()
try:
self.parsetree = pydriver.parse_tokens(self.tokens)
- except parse.ParseError, err:
+ except parse.ParseError as err:
raise PycodeError('parsing failed', err)
def find_attr_docs(self, scope=''):
@@ -344,4 +345,4 @@ if __name__ == '__main__':
pprint.pprint(ma.find_tags())
x3 = time.time()
#print nodes.nice_repr(ma.parsetree, number2name)
- print "tokenizing %.4f, parsing %.4f, finding %.4f" % (x1-x0, x2-x1, x3-x2)
+ print("tokenizing %.4f, parsing %.4f, finding %.4f" % (x1-x0, x2-x1, x3-x2))
diff --git a/sphinx/pycode/pgen2/driver.py b/sphinx/pycode/pgen2/driver.py
index 422671dbc..c531edb34 100644
--- a/sphinx/pycode/pgen2/driver.py
+++ b/sphinx/pycode/pgen2/driver.py
@@ -131,7 +131,7 @@ def load_grammar(gt="Grammar.txt", gp=None,
logger.info("Writing grammar tables to %s", gp)
try:
g.dump(gp)
- except IOError, e:
+ except IOError as e:
logger.info("Writing failed:"+str(e))
else:
g = grammar.Grammar()
diff --git a/sphinx/pycode/pgen2/grammar.py b/sphinx/pycode/pgen2/grammar.py
index 01d843461..91874fa23 100644
--- a/sphinx/pycode/pgen2/grammar.py
+++ b/sphinx/pycode/pgen2/grammar.py
@@ -11,6 +11,7 @@ token module; the Python tokenize module reports all operators as the
fallback token code OP, but the parser needs the actual token code.
"""
+from __future__ import print_function
# Python imports
import pickle
@@ -100,17 +101,17 @@ class Grammar(object):
def report(self):
"""Dump the grammar tables to standard output, for debugging."""
from pprint import pprint
- print "s2n"
+ print("s2n")
pprint(self.symbol2number)
- print "n2s"
+ print("n2s")
pprint(self.number2symbol)
- print "states"
+ print("states")
pprint(self.states)
- print "dfas"
+ print("dfas")
pprint(self.dfas)
- print "labels"
+ print("labels")
pprint(self.labels)
- print "start", self.start
+ print("start", self.start)
# Map from operator to number (since tokenize doesn't do this)
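A note on why each file that gains `print(...)` calls also gains `from __future__ import print_function`: without the future import, Python 2 still parses `print` as a statement, so a two-argument call such as the new `print("start", self.start)` in `report()` would print a tuple rather than two space-separated values. A small standalone sketch of the difference, not part of the patch:

    # Plain Python 2 (no future import): print is a statement, so the
    # parenthesized form prints a tuple:
    #     >>> print("start", 5)
    #     ('start', 5)

    # With the future import, print is the builtin function on 2 and 3 alike:
    from __future__ import print_function
    print("start", 5)   # -> start 5   on both interpreters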
diff --git a/sphinx/pycode/pgen2/literals.py b/sphinx/pycode/pgen2/literals.py
index d48937028..ce4a0ebc3 100644
--- a/sphinx/pycode/pgen2/literals.py
+++ b/sphinx/pycode/pgen2/literals.py
@@ -4,6 +4,7 @@
# Extended to handle raw and unicode literals by Georg Brandl.
"""Safely evaluate Python string literals without using eval()."""
+from __future__ import print_function
import re
@@ -89,7 +90,7 @@ def test():
s = repr(c)
e = evalString(s)
if e != c:
- print i, c, s, e
+ print(i, c, s, e)
if __name__ == "__main__":
diff --git a/sphinx/pycode/pgen2/pgen.py b/sphinx/pycode/pgen2/pgen.py
index 0a04447d0..f572ad588 100644
--- a/sphinx/pycode/pgen2/pgen.py
+++ b/sphinx/pycode/pgen2/pgen.py
@@ -2,6 +2,7 @@
# Licensed to PSF under a Contributor Agreement.
# Pgen imports
+from __future__ import print_function
from sphinx.pycode.pgen2 import grammar, token, tokenize
class PgenGrammar(grammar.Grammar):
@@ -203,10 +204,10 @@ class ParserGenerator(object):
return states # List of DFAState instances; first one is start
def dump_nfa(self, name, start, finish):
- print "Dump of NFA for", name
+ print("Dump of NFA for", name)
todo = [start]
for i, state in enumerate(todo):
- print " State", i, state is finish and "(final)" or ""
+ print(" State", i, state is finish and "(final)" or "")
for label, next in state.arcs:
if next in todo:
j = todo.index(next)
@@ -214,16 +215,16 @@ class ParserGenerator(object):
j = len(todo)
todo.append(next)
if label is None:
- print " -> %d" % j
+ print(" -> %d" % j)
else:
- print " %s -> %d" % (label, j)
+ print(" %s -> %d" % (label, j))
def dump_dfa(self, name, dfa):
- print "Dump of DFA for", name
+ print("Dump of DFA for", name)
for i, state in enumerate(dfa):
- print " State", i, state.isfinal and "(final)" or ""
+ print(" State", i, state.isfinal and "(final)" or "")
for label, next in state.arcs.iteritems():
- print " %s -> %d" % (label, dfa.index(next))
+ print(" %s -> %d" % (label, dfa.index(next)))
def simplify_dfa(self, dfa):
# This is not theoretically optimal, but works well enough.
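One thing the pgen.py hunks above leave alone: `dump_dfa()` still iterates with `state.arcs.iteritems()`, which only exists on Python 2 dictionaries. If these debug dumps were ever run under Python 3 they would also need the cross-version `items()` spelling, roughly as sketched here (an assumption about a possible follow-up, not part of this commit):

    # Hypothetical cross-version variant of dump_dfa(); the commit itself
    # only converts the print statements and leaves iteritems() in place.
    def dump_dfa(self, name, dfa):
        print("Dump of DFA for", name)
        for i, state in enumerate(dfa):
            print("  State", i, state.isfinal and "(final)" or "")
            # items() exists on both Python 2 and 3; iteritems() is 2-only
            for label, next in state.arcs.items():
                print("    %s -> %d" % (label, dfa.index(next)))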
diff --git a/sphinx/pycode/pgen2/tokenize.py b/sphinx/pycode/pgen2/tokenize.py
index 7ad9f012c..93fd6578f 100644
--- a/sphinx/pycode/pgen2/tokenize.py
+++ b/sphinx/pycode/pgen2/tokenize.py
@@ -25,6 +25,8 @@ are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
+from __future__ import print_function
+
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = \
'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
@@ -146,8 +148,8 @@ class StopTokenizing(Exception): pass
def printtoken(type, token, scell, ecell, line): # for testing
srow, scol = scell
erow, ecol = ecell
- print "%d,%d-%d,%d:\t%s\t%s" % \
- (srow, scol, erow, ecol, tok_name[type], repr(token))
+ print("%d,%d-%d,%d:\t%s\t%s" %
+ (srow, scol, erow, ecol, tok_name[type], repr(token)))
def tokenize(readline, tokeneater=printtoken):
"""