Diffstat (limited to 'sphinx/pycode')
-rw-r--r-- | sphinx/pycode/__init__.py | 26
-rw-r--r-- | sphinx/pycode/parser.py   | 51
2 files changed, 36 insertions, 41 deletions
diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py
index fe6b5f7e1..e3e80772c 100644
--- a/sphinx/pycode/__init__.py
+++ b/sphinx/pycode/__init__.py
@@ -8,14 +8,11 @@
     :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
-from __future__ import print_function
 
 import re
-from io import BytesIO
+from io import BytesIO, StringIO
 from zipfile import ZipFile
 
-from six import StringIO
-
 from sphinx.errors import PycodeError
 from sphinx.pycode.parser import Parser
 from sphinx.util import get_module_source, detect_encoding
@@ -23,23 +20,22 @@ from sphinx.util import get_module_source, detect_encoding
 if False:
     # For type annotation
     from typing import Any, Dict, IO, List, Tuple  # NOQA
-    from sphinx.util.typing import unicode  # NOQA
 
 
 class ModuleAnalyzer:
     # cache for analyzer objects -- caches both by module and file name
-    cache = {}  # type: Dict[Tuple[unicode, unicode], Any]
+    cache = {}  # type: Dict[Tuple[str, str], Any]
 
     @classmethod
     def for_string(cls, string, modname, srcname='<string>'):
-        # type: (unicode, unicode, unicode) -> ModuleAnalyzer
+        # type: (str, str, str) -> ModuleAnalyzer
         if isinstance(string, bytes):
             return cls(BytesIO(string), modname, srcname)
         return cls(StringIO(string), modname, srcname, decoded=True)
 
     @classmethod
     def for_file(cls, filename, modname):
-        # type: (unicode, unicode) -> ModuleAnalyzer
+        # type: (str, str) -> ModuleAnalyzer
         if ('file', filename) in cls.cache:
             return cls.cache['file', filename]
         try:
@@ -55,7 +51,7 @@ class ModuleAnalyzer:
 
     @classmethod
     def for_egg(cls, filename, modname):
-        # type: (unicode, unicode) -> ModuleAnalyzer
+        # type: (str, str) -> ModuleAnalyzer
         eggpath, relpath = re.split('(?<=\\.egg)/', filename)
         try:
             with ZipFile(eggpath) as egg:
@@ -86,7 +82,7 @@ class ModuleAnalyzer:
         return obj
 
     def __init__(self, source, modname, srcname, decoded=False):
-        # type: (IO, unicode, unicode, bool) -> None
+        # type: (IO, str, str, bool) -> None
         self.modname = modname  # name of the module
         self.srcname = srcname  # name of the source file
 
@@ -101,9 +97,9 @@ class ModuleAnalyzer:
             self.code = source.read()
 
         # will be filled by parse()
-        self.attr_docs = None  # type: Dict[Tuple[unicode, unicode], List[unicode]]
-        self.tagorder = None  # type: Dict[unicode, int]
-        self.tags = None  # type: Dict[unicode, Tuple[unicode, int, int]]
+        self.attr_docs = None  # type: Dict[Tuple[str, str], List[str]]
+        self.tagorder = None  # type: Dict[str, int]
+        self.tags = None  # type: Dict[str, Tuple[str, int, int]]
 
     def parse(self):
         # type: () -> None
@@ -125,7 +121,7 @@ class ModuleAnalyzer:
             raise PycodeError('parsing %r failed: %r' % (self.srcname, exc))
 
     def find_attr_docs(self):
-        # type: () -> Dict[Tuple[unicode, unicode], List[unicode]]
+        # type: () -> Dict[Tuple[str, str], List[str]]
         """Find class and module-level attributes and their documentation."""
         if self.attr_docs is None:
             self.parse()
@@ -133,7 +129,7 @@ class ModuleAnalyzer:
         return self.attr_docs
 
     def find_tags(self):
-        # type: () -> Dict[unicode, Tuple[unicode, int, int]]
+        # type: () -> Dict[str, Tuple[str, int, int]]
         """Find class, function and method definitions and their location."""
         if self.tags is None:
             self.parse()
diff --git a/sphinx/pycode/parser.py b/sphinx/pycode/parser.py
index e0ebe164e..71d7df781 100644
--- a/sphinx/pycode/parser.py
+++ b/sphinx/pycode/parser.py
@@ -22,7 +22,6 @@ from six import text_type
 if False:
     # For type annotation
     from typing import Any, Dict, IO, List, Tuple  # NOQA
-    from sphinx.util.typing import unicode  # NOQA
 
 comment_re = re.compile(u'^\\s*#: ?(.*)\r?\n?$')
 indent_re = re.compile(u'^\\s*$')
@@ -36,7 +35,7 @@ else:
 
 
 def filter_whitespace(code):
-    # type: (unicode) -> unicode
+    # type: (str) -> str
     return code.replace('\f', ' ')  # replace FF (form feed) with whitespace
 
 
@@ -50,7 +49,7 @@ def get_assign_targets(node):
 
 
 def get_lvar_names(node, self=None):
-    # type: (ast.AST, ast.arg) -> List[unicode]
+    # type: (ast.AST, ast.arg) -> List[str]
     """Convert assignment-AST to variable names.
 
     This raises `TypeError` if the assignment does not create new variable::
@@ -93,7 +92,7 @@ def get_lvar_names(node, self=None):
 
 
 def dedent_docstring(s):
-    # type: (unicode) -> unicode
+    # type: (str) -> str
     """Remove common leading indentation from docstring."""
     def dummy():
         # type: () -> None
@@ -109,7 +108,7 @@ class Token:
     """Better token wrapper for tokenize module."""
 
     def __init__(self, kind, value, start, end, source):
-        # type: (int, Any, Tuple[int, int], Tuple[int, int], unicode) -> None  # NOQA
+        # type: (int, Any, Tuple[int, int], Tuple[int, int], str) -> None
         self.kind = kind
         self.value = value
         self.start = start
@@ -141,7 +140,7 @@ class Token:
 
 class TokenProcessor:
     def __init__(self, buffers):
-        # type: (List[unicode]) -> None
+        # type: (List[str]) -> None
         lines = iter(buffers)
         self.buffers = buffers
         self.tokens = tokenize.generate_tokens(lambda: next(lines))
@@ -149,7 +148,7 @@ class TokenProcessor:
         self.previous = None  # type: Token
 
     def get_line(self, lineno):
-        # type: (int) -> unicode
+        # type: (int) -> str
         """Returns specified line."""
         return self.buffers[lineno - 1]
 
@@ -196,9 +195,9 @@ class AfterCommentParser(TokenProcessor):
     """
 
     def __init__(self, lines):
-        # type: (List[unicode]) -> None
+        # type: (List[str]) -> None
         super(AfterCommentParser, self).__init__(lines)
-        self.comment = None  # type: unicode
+        self.comment = None  # type: str
 
     def fetch_rvalue(self):
         # type: () -> List[Token]
@@ -240,20 +239,20 @@ class VariableCommentPicker(ast.NodeVisitor):
     """Python source code parser to pick up variable comments."""
 
     def __init__(self, buffers, encoding):
-        # type: (List[unicode], unicode) -> None
+        # type: (List[str], str) -> None
         self.counter = itertools.count()
         self.buffers = buffers
         self.encoding = encoding
-        self.context = []  # type: List[unicode]
-        self.current_classes = []  # type: List[unicode]
+        self.context = []  # type: List[str]
+        self.current_classes = []  # type: List[str]
         self.current_function = None  # type: ast.FunctionDef
-        self.comments = {}  # type: Dict[Tuple[unicode, unicode], unicode]
+        self.comments = {}  # type: Dict[Tuple[str, str], str]
         self.previous = None  # type: ast.AST
-        self.deforders = {}  # type: Dict[unicode, int]
+        self.deforders = {}  # type: Dict[str, int]
         super(VariableCommentPicker, self).__init__()
 
     def add_entry(self, name):
-        # type: (unicode) -> None
+        # type: (str) -> None
         if self.current_function:
             if self.current_classes and self.context[-1] == "__init__":
                 # store variable comments inside __init__ method of classes
@@ -266,7 +265,7 @@ class VariableCommentPicker(ast.NodeVisitor):
         self.deforders[".".join(definition)] = next(self.counter)
 
     def add_variable_comment(self, name, comment):
-        # type: (unicode, unicode) -> None
+        # type: (str, str) -> None
         if self.current_function:
             if self.current_classes and self.context[-1] == "__init__":
                 # store variable comments inside __init__ method of classes
@@ -287,7 +286,7 @@ class VariableCommentPicker(ast.NodeVisitor):
         return None
 
     def get_line(self, lineno):
-        # type: (int) -> unicode
+        # type: (int) -> str
         """Returns specified line."""
         return self.buffers[lineno - 1]
 
@@ -388,15 +387,15 @@ class VariableCommentPicker(ast.NodeVisitor):
 
 class DefinitionFinder(TokenProcessor):
     def __init__(self, lines):
-        # type: (List[unicode]) -> None
+        # type: (List[str]) -> None
         super(DefinitionFinder, self).__init__(lines)
         self.decorator = None  # type: Token
-        self.context = []  # type: List[unicode]
+        self.context = []  # type: List[str]
         self.indents = []  # type: List
-        self.definitions = {}  # type: Dict[unicode, Tuple[unicode, int, int]]
+        self.definitions = {}  # type: Dict[str, Tuple[str, int, int]]
 
     def add_definition(self, name, entry):
-        # type: (unicode, Tuple[unicode, int, int]) -> None
+        # type: (str, Tuple[str, int, int]) -> None
         if self.indents and self.indents[-1][0] == 'def' and entry[0] == 'def':
             # ignore definition of inner function
             pass
@@ -425,7 +424,7 @@ class DefinitionFinder(TokenProcessor):
                 self.finalize_block()
 
     def parse_definition(self, typ):
-        # type: (unicode) -> None
+        # type: (str) -> None
         name = self.fetch_token()
         self.context.append(name.value)
         funcname = '.'.join(self.context)
@@ -465,12 +464,12 @@ class Parser:
     """
 
     def __init__(self, code, encoding='utf-8'):
-        # type: (unicode, unicode) -> None
+        # type: (str, str) -> None
         self.code = filter_whitespace(code)
         self.encoding = encoding
-        self.comments = {}  # type: Dict[Tuple[unicode, unicode], unicode]
-        self.deforders = {}  # type: Dict[unicode, int]
-        self.definitions = {}  # type: Dict[unicode, Tuple[unicode, int, int]]
+        self.comments = {}  # type: Dict[Tuple[str, str], str]
+        self.deforders = {}  # type: Dict[str, int]
+        self.definitions = {}  # type: Dict[str, Tuple[str, int, int]]
 
     def parse(self):
         # type: () -> None