Diffstat (limited to 'sphinx/pycode')
-rw-r--r--  sphinx/pycode/__init__.py        47
-rw-r--r--  sphinx/pycode/nodes.py           10
-rw-r--r--  sphinx/pycode/pgen2/grammar.py   18
-rw-r--r--  sphinx/pycode/pgen2/parse.py     15
-rw-r--r--  sphinx/pycode/pgen2/pgen.py      25
-rw-r--r--  sphinx/pycode/pgen2/tokenize.py  16
6 files changed, 82 insertions, 49 deletions
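
The change is dominated by one idiom: PEP 484 comment-style annotations, which keep the code valid Python 2 syntax while still giving a static checker such as mypy full type information. The typing imports sit behind `if False:` so they never execute at runtime but are still read by the checker. A minimal sketch of the idiom, with hypothetical names, not code from this patch:

    if False:
        # For type annotation only; this import never runs,
        # but a static checker still picks up the names.
        from typing import Dict, List  # NOQA

    def count_words(words):
        # type: (List[str]) -> Dict[str, int]
        counts = {}  # type: Dict[str, int]
        for word in words:
            counts[word] = counts.get(word, 0) + 1
        return counts
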
diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py
index 8dbc95da8..eabcc8188 100644
--- a/sphinx/pycode/__init__.py
+++ b/sphinx/pycode/__init__.py
@@ -24,6 +24,10 @@ from sphinx.util import get_module_source, detect_encoding
 from sphinx.util.pycompat import TextIOWrapper
 from sphinx.util.docstrings import prepare_docstring, prepare_commentdoc
 
+if False:
+    # For type annotation
+    from typing import Any, Dict, List, Tuple  # NOQA
+
 # load the Python grammar
 _grammarfile = path.join(package_dir, 'pycode',
@@ -48,7 +52,7 @@ number2name.update(token.tok_name)
 
 _eq = nodes.Leaf(token.EQUAL, '=')
 
-emptyline_re = re.compile('^\s*(#.*)?$')
+emptyline_re = re.compile(r'^\s*(#.*)?$')
 
 
 class AttrDocVisitor(nodes.NodeVisitor):
@@ -63,10 +67,10 @@ class AttrDocVisitor(nodes.NodeVisitor):
         self.scope = scope
         self.in_init = 0
         self.encoding = encoding
-        self.namespace = []
-        self.collected = {}
+        self.namespace = []  # type: List[unicode]
+        self.collected = {}  # type: Dict[Tuple[unicode, unicode], unicode]
         self.tagnumber = 0
-        self.tagorder = {}
+        self.tagorder = {}  # type: Dict[unicode, int]
 
     def add_tag(self, name):
         name = '.'.join(self.namespace + [name])
@@ -102,10 +106,10 @@ class AttrDocVisitor(nodes.NodeVisitor):
         parent = node.parent
         idx = parent.children.index(node) + 1
         while idx < len(parent):
-            if parent[idx].type == sym.SEMI:
+            if parent[idx].type == sym.SEMI:  # type: ignore
                 idx += 1
                 continue  # skip over semicolon
-            if parent[idx].type == sym.NEWLINE:
+            if parent[idx].type == sym.NEWLINE:  # type: ignore
                 prefix = parent[idx].get_prefix()
                 if not isinstance(prefix, text_type):
                     prefix = prefix.decode(self.encoding)
@@ -138,8 +142,8 @@ class AttrDocVisitor(nodes.NodeVisitor):
         prev = node.get_prev_sibling()
         if not prev:
             return
-        if prev.type == sym.simple_stmt and \
-           prev[0].type == sym.expr_stmt and _eq in prev[0].children:
+        if (prev.type == sym.simple_stmt and  # type: ignore
+                prev[0].type == sym.expr_stmt and _eq in prev[0].children):  # type: ignore
             # need to "eval" the string because it's returned in its
             # original form
             docstring = literals.evalString(node[0].value, self.encoding)
@@ -178,7 +182,7 @@ class AttrDocVisitor(nodes.NodeVisitor):
 
 class ModuleAnalyzer(object):
     # cache for analyzer objects -- caches both by module and file name
-    cache = {}
+    cache = {}  # type: Dict[Tuple[unicode, unicode], Any]
 
     @classmethod
     def for_string(cls, string, modname, srcname='<string>'):
@@ -240,14 +244,14 @@ class ModuleAnalyzer(object):
             self.source.seek(pos)
 
         # will be filled by tokenize()
-        self.tokens = None
+        self.tokens = None  # type: List[unicode]
         # will be filled by parse()
-        self.parsetree = None
+        self.parsetree = None  # type: Any
         # will be filled by find_attr_docs()
-        self.attr_docs = None
-        self.tagorder = None
+        self.attr_docs = None  # type: List[unicode]
+        self.tagorder = None  # type: Dict[unicode, int]
         # will be filled by find_tags()
-        self.tags = None
+        self.tags = None  # type: List[unicode]
 
     def tokenize(self):
         """Generate tokens from the source."""
@@ -289,9 +293,10 @@ class ModuleAnalyzer(object):
             return self.tags
         self.tokenize()
         result = {}
-        namespace = []
-        stack = []
+        namespace = []  # type: List[unicode]
+        stack = []  # type: List[Tuple[unicode, unicode, unicode, int]]
         indent = 0
+        decopos = None
         defline = False
         expect_indent = False
         emptylines = 0
@@ -301,7 +306,7 @@
                 if tokentup[0] not in ignore:
                     yield tokentup
         tokeniter = tokeniter()
-        for type, tok, spos, epos, line in tokeniter:
+        for type, tok, spos, epos, line in tokeniter:  # type: ignore
             if expect_indent and type != token.NL:
                 if type != token.INDENT:
                     # no suite -- one-line definition
@@ -312,11 +317,15 @@
                     result[fullname] = (dtype, startline, endline - emptylines)
                 expect_indent = False
             if tok in ('def', 'class'):
-                name = next(tokeniter)[1]
+                name = next(tokeniter)[1]  # type: ignore
                 namespace.append(name)
                 fullname = '.'.join(namespace)
-                stack.append((tok, fullname, spos[0], indent))
+                stack.append((tok, fullname, decopos or spos[0], indent))
                 defline = True
+                decopos = None
+            elif type == token.OP and tok == '@':
+                if decopos is None:
+                    decopos = spos[0]
             elif type == token.INDENT:
                 expect_indent = False
                 indent += 1
diff --git a/sphinx/pycode/nodes.py b/sphinx/pycode/nodes.py
index 2dae4f6ee..cecde9bd0 100644
--- a/sphinx/pycode/nodes.py
+++ b/sphinx/pycode/nodes.py
@@ -9,12 +9,16 @@
     :license: BSD, see LICENSE for details.
 """
 
+if False:
+    # For type annotation
+    from typing import Callable  # NOQA
+
 
 class BaseNode(object):
     """
     Node superclass for both terminal and nonterminal nodes.
     """
-    parent = None
+    parent = None  # type: BaseNode
 
     def _eq(self, other):
         raise NotImplementedError
@@ -29,7 +33,7 @@ class BaseNode(object):
             return NotImplemented
         return not self._eq(other)
 
-    __hash__ = None
+    __hash__ = None  # type: Callable[[object], int]
 
     def get_prev_sibling(self):
         """Return previous child in parent's children, or None."""
@@ -204,5 +208,5 @@ class NodeVisitor(object):
     def generic_visit(self, node):
         """Called if no explicit visitor function exists for a node."""
         if isinstance(node, Node):
-            for child in node:
+            for child in node:  # type: ignore
                 self.visit(child)
diff --git a/sphinx/pycode/pgen2/grammar.py b/sphinx/pycode/pgen2/grammar.py
index 42e6d72ee..ac276776e 100644
--- a/sphinx/pycode/pgen2/grammar.py
+++ b/sphinx/pycode/pgen2/grammar.py
@@ -19,6 +19,10 @@ import pickle
 # Local imports
 from sphinx.pycode.pgen2 import token
 
+if False:
+    # For type annotation
+    from typing import Dict, List, Tuple  # NOQA
+
 
 class Grammar(object):
     """Pgen parsing tables conversion class.
@@ -75,14 +79,14 @@ class Grammar(object):
     """
 
     def __init__(self):
-        self.symbol2number = {}
-        self.number2symbol = {}
-        self.states = []
-        self.dfas = {}
+        self.symbol2number = {}  # type: Dict[unicode, int]
+        self.number2symbol = {}  # type: Dict[int, unicode]
+        self.states = []  # type: List[List[List[Tuple[int, int]]]]
+        self.dfas = {}  # type: Dict[int, Tuple[List[List[Tuple[int, int]]], unicode]]
         self.labels = [(0, "EMPTY")]
-        self.keywords = {}
-        self.tokens = {}
-        self.symbol2label = {}
+        self.keywords = {}  # type: Dict[unicode, unicode]
+        self.tokens = {}  # type: Dict[unicode, unicode]
+        self.symbol2label = {}  # type: Dict[unicode, unicode]
         self.start = 256
 
     def dump(self, filename):
diff --git a/sphinx/pycode/pgen2/parse.py b/sphinx/pycode/pgen2/parse.py
index 60eec05ea..660a47e68 100644
--- a/sphinx/pycode/pgen2/parse.py
+++ b/sphinx/pycode/pgen2/parse.py
@@ -13,6 +13,10 @@ how this parsing engine works.
 # Local imports
 from sphinx.pycode.pgen2 import token
 
+if False:
+    # For type annotation
+    from typing import Any, List, Set, Tuple  # NOQA
+
 
 class ParseError(Exception):
     """Exception to signal the parser is stuck."""
@@ -104,11 +108,12 @@ class Parser(object):
         # Each stack entry is a tuple: (dfa, state, node).
         # A node is a tuple: (type, value, context, children),
         # where children is a list of nodes or None, and context may be None.
-        newnode = (start, None, None, [])
+        newnode = (start, None, None, [])  # type: Tuple[unicode, unicode, unicode, List]
         stackentry = (self.grammar.dfas[start], 0, newnode)
         self.stack = [stackentry]
-        self.rootnode = None
-        self.used_names = set()  # Aliased to self.rootnode.used_names in pop()
+        self.rootnode = None  # type: Any
+        self.used_names = set()  # type: Set[unicode]
+        # Aliased to self.rootnode.used_names in pop()
 
     def addtoken(self, type, value, context):
         """Add a token; return True iff this is the end of the program."""
@@ -175,7 +180,7 @@ class Parser(object):
     def shift(self, type, value, newstate, context):
         """Shift a token.  (Internal)"""
         dfa, state, node = self.stack[-1]
-        newnode = (type, value, context, None)
+        newnode = (type, value, context, None)  # type: Tuple[unicode, unicode, unicode, List]
         newnode = self.convert(self.grammar, newnode)
         if newnode is not None:
             node[-1].append(newnode)
@@ -184,7 +189,7 @@ class Parser(object):
     def push(self, type, newdfa, newstate, context):
         """Push a nonterminal.  (Internal)"""
         dfa, state, node = self.stack[-1]
-        newnode = (type, None, context, [])
+        newnode = (type, None, context, [])  # type: Tuple[unicode, unicode, unicode, List]
         self.stack[-1] = (dfa, newstate, node)
         self.stack.append((newdfa, 0, newnode))
diff --git a/sphinx/pycode/pgen2/pgen.py b/sphinx/pycode/pgen2/pgen.py
index 7598e6abc..8d9cc786a 100644
--- a/sphinx/pycode/pgen2/pgen.py
+++ b/sphinx/pycode/pgen2/pgen.py
@@ -7,9 +7,13 @@ from six import iteritems
 from collections import OrderedDict
 
 # Pgen imports
-
 from sphinx.pycode.pgen2 import grammar, token, tokenize
 
+if False:
+    # For type annotation
+    from typing import Any, Dict, List, Tuple  # NOQA
+
+
 class PgenGrammar(grammar.Grammar):
     pass
@@ -27,7 +31,8 @@ class ParserGenerator(object):
         self.dfas, self.startsymbol = self.parse()
         if close_stream is not None:
             close_stream()
-        self.first = {}  # map from symbol name to set of tokens
+        self.first = {}  # type: Dict[unicode, List[unicode]]
+        # map from symbol name to set of tokens
         self.addfirstsets()
 
     def make_grammar(self):
@@ -42,7 +47,7 @@ class ParserGenerator(object):
             c.number2symbol[i] = name
         for name in names:
             dfa = self.dfas[name]
-            states = []
+            states = []  # type: List[List[Tuple[int, int]]]
             for state in dfa:
                 arcs = []
                 for label, next in iteritems(state.arcs):
@@ -122,7 +127,7 @@ class ParserGenerator(object):
         dfa = self.dfas[name]
         self.first[name] = None  # dummy to detect left recursion
         state = dfa[0]
-        totalset = {}
+        totalset = {}  # type: Dict[unicode, int]
         overlapcheck = {}
         for label, next in iteritems(state.arcs):
             if label in self.dfas:
@@ -138,7 +143,7 @@ class ParserGenerator(object):
             else:
                 totalset[label] = 1
                 overlapcheck[label] = {label: 1}
-        inverse = {}
+        inverse = {}  # type: Dict[unicode, unicode]
         for label, itsfirst in sorted(overlapcheck.items()):
             for symbol in sorted(itsfirst):
                 if symbol in inverse:
@@ -180,7 +185,7 @@ class ParserGenerator(object):
         assert isinstance(start, NFAState)
         assert isinstance(finish, NFAState)
         def closure(state):
-            base = {}
+            base = {}  # type: Dict
             addclosure(state, base)
             return base
         def addclosure(state, base):
@@ -193,7 +198,7 @@ class ParserGenerator(object):
                     addclosure(next, base)
         states = [DFAState(closure(start), finish)]
         for state in states:  # NB states grows while we're iterating
-            arcs = {}
+            arcs = {}  # type: Dict[unicode, Dict]
             for nfastate in state.nfaset:
                 for label, next in nfastate.arcs:
                     if label is not None:
@@ -343,7 +348,8 @@ class ParserGenerator(object):
 
 class NFAState(object):
     def __init__(self):
-        self.arcs = []  # list of (label, NFAState) pairs
+        self.arcs = []  # type: List[Tuple[unicode, Any]]
+        # list of (label, NFAState) pairs
 
     def addarc(self, next, label=None):
         assert label is None or isinstance(label, str)
@@ -361,7 +367,8 @@ class DFAState(object):
         assert isinstance(final, NFAState)
         self.nfaset = nfaset
         self.isfinal = final in nfaset
-        self.arcs = OrderedDict()  # map from label to DFAState
+        self.arcs = OrderedDict()  # type: OrderedDict
+        # map from label to DFAState
 
     def __hash__(self):
         return hash(tuple(self.arcs))
diff --git a/sphinx/pycode/pgen2/tokenize.py b/sphinx/pycode/pgen2/tokenize.py
index c7013bf91..8b533d422 100644
--- a/sphinx/pycode/pgen2/tokenize.py
+++ b/sphinx/pycode/pgen2/tokenize.py
@@ -37,6 +37,10 @@ from six import PY3
 from sphinx.pycode.pgen2.token import *
 from sphinx.pycode.pgen2 import token
 
+if False:
+    # For type annotation
+    from typing import List  # NOQA
+
 __all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
            "generate_tokens", "untokenize"]
 del token
@@ -183,7 +187,7 @@ def tokenize_loop(readline, tokeneater):
 
 class Untokenizer:
 
     def __init__(self):
-        self.tokens = []
+        self.tokens = []  # type: List[unicode]
         self.prev_row = 1
         self.prev_col = 0
@@ -294,17 +298,17 @@ def generate_tokens(readline):
         if contstr:                            # continued string
             if not line:
-                raise TokenError("EOF in multi-line string", strstart)
-            endmatch = endprog.match(line)
+                raise TokenError("EOF in multi-line string", strstart)  # type: ignore
+            endmatch = endprog.match(line)  # type: ignore
             if endmatch:
                 pos = end = endmatch.end(0)
                 yield (STRING, contstr + line[:end],
-                       strstart, (lnum, end), contline + line)
+                       strstart, (lnum, end), contline + line)  # type: ignore
                 contstr, needcont = '', 0
                 contline = None
             elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                 yield (ERRORTOKEN, contstr + line,
-                       strstart, (lnum, len(line)), contline)
+                       strstart, (lnum, len(line)), contline)  # type: ignore
                 contstr = ''
                 contline = None
                 continue
@@ -333,7 +337,7 @@ def generate_tokens(readline):
                     yield (NL, line[nl_pos:],
                            (lnum, nl_pos), (lnum, len(line)), line)
                 else:
-                    yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
+                    yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],  # type: ignore
                            (lnum, pos), (lnum, len(line)), line)
                 continue
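
Beyond the annotations, the patch carries one behavioral fix: find_tags() in sphinx/pycode/__init__.py now remembers the line of the first '@' decorator token (decopos) and records it, rather than the 'def'/'class' line, as the start of the following definition. A rough usage sketch, assuming a Sphinx checkout with this patch applied (the module name and source are made up):

    from sphinx.pycode import ModuleAnalyzer

    source = ("@decorator\n"     # line 1: tag starts here after the fix
              "def func():\n"    # line 2: tag started here before the fix
              "    return 1\n")
    analyzer = ModuleAnalyzer.for_string(source, 'example_mod')
    tags = analyzer.find_tags()
    # tags maps a dotted name to (type, start line, end line);
    # with the fix the entry for 'func' starts at the decorator line.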

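The deeply nested types added in grammar.py are easier to read with the table layout spelled out: an arc is an (i, j) pair of a label index and a next-state index, a state is the list of arcs leaving it, a DFA is a list of states, and Grammar.states collects one DFA per nonterminal. A hypothetical fragment, only to make the nesting concrete:

    arc = (5, 1)     # Tuple[int, int]: (label index, next state index)
    state = [arc]    # List[Tuple[int, int]]: all arcs leaving one state
    dfa = [state]    # List[List[Tuple[int, int]]]: one DFA
    states = [dfa]   # List[List[List[Tuple[int, int]]]]: Grammar.states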