Diffstat (limited to 'sphinx/ext')
-rw-r--r--  sphinx/ext/__init__.py                    1
-rw-r--r--  sphinx/ext/apidoc.py                     93
-rw-r--r--  sphinx/ext/autodoc/__init__.py          454
-rw-r--r--  sphinx/ext/autodoc/directive.py          33
-rw-r--r--  sphinx/ext/autodoc/importer.py          103
-rw-r--r--  sphinx/ext/autodoc/inspector.py         187
-rw-r--r--  sphinx/ext/autosectionlabel.py           10
-rw-r--r--  sphinx/ext/autosummary/__init__.py      178
-rw-r--r--  sphinx/ext/autosummary/generate.py       47
-rw-r--r--  sphinx/ext/coverage.py                   37
-rw-r--r--  sphinx/ext/doctest.py                   125
-rw-r--r--  sphinx/ext/extlinks.py                   10
-rw-r--r--  sphinx/ext/githubpages.py                 3
-rw-r--r--  sphinx/ext/graphviz.py                  148
-rw-r--r--  sphinx/ext/ifconfig.py                    7
-rw-r--r--  sphinx/ext/imgconverter.py               53
-rw-r--r--  sphinx/ext/imgmath.py                    90
-rw-r--r--  sphinx/ext/inheritance_diagram.py        88
-rw-r--r--  sphinx/ext/intersphinx.py               120
-rw-r--r--  sphinx/ext/jsmath.py                     79
-rw-r--r--  sphinx/ext/linkcode.py                   22
-rw-r--r--  sphinx/ext/mathbase.py                   11
-rw-r--r--  sphinx/ext/mathjax.py                    21
-rw-r--r--  sphinx/ext/napoleon/__init__.py          72
-rw-r--r--  sphinx/ext/napoleon/docstring.py        254
-rw-r--r--  sphinx/ext/napoleon/iterators.py          5
-rw-r--r--  sphinx/ext/todo.py                       66
-rw-r--r--  sphinx/ext/viewcode.py                   45
28 files changed, 978 insertions, 1384 deletions
diff --git a/sphinx/ext/__init__.py b/sphinx/ext/__init__.py
index 5d1f08ee1..729148a03 100644
--- a/sphinx/ext/__init__.py
+++ b/sphinx/ext/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext
~~~~~~~~~~
diff --git a/sphinx/ext/apidoc.py b/sphinx/ext/apidoc.py
index 591c640b8..94f07d91f 100644
--- a/sphinx/ext/apidoc.py
+++ b/sphinx/ext/apidoc.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.apidoc
~~~~~~~~~~~~~~~~~
@@ -15,8 +14,6 @@
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
-
import argparse
import glob
import locale
@@ -25,14 +22,12 @@ import sys
from fnmatch import fnmatch
from os import path
-from six import binary_type
-
import sphinx.locale
from sphinx import __display_version__, package_dir
from sphinx.cmd.quickstart import EXTENSIONS
from sphinx.locale import __
from sphinx.util import rst
-from sphinx.util.osutil import FileAvoidWrite, ensuredir, walk
+from sphinx.util.osutil import FileAvoidWrite, ensuredir
if False:
# For type annotation
@@ -54,7 +49,7 @@ PY_SUFFIXES = set(['.py', '.pyx'])
def makename(package, module):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
"""Join package and module with a dot."""
# Both package and module can be None/empty.
if package:
@@ -67,7 +62,7 @@ def makename(package, module):
def write_file(name, text, opts):
- # type: (unicode, unicode, Any) -> None
+ # type: (str, str, Any) -> None
"""Write the output file for module/package <name>."""
fname = path.join(opts.destdir, '%s.%s' % (name, opts.suffix))
if opts.dryrun:
@@ -82,7 +77,7 @@ def write_file(name, text, opts):
def format_heading(level, text, escape=True):
- # type: (int, unicode, bool) -> unicode
+ # type: (int, str, bool) -> str
"""Create a heading of <level> [1, 2 or 3 supported]."""
if escape:
text = rst.escape(text)
@@ -91,7 +86,7 @@ def format_heading(level, text, escape=True):
def format_directive(module, package=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
"""Create the automodule directive and add the options."""
directive = '.. automodule:: %s\n' % makename(package, module)
for option in OPTIONS:
@@ -100,7 +95,7 @@ def format_directive(module, package=None):
def create_module_file(package, module, opts):
- # type: (unicode, unicode, Any) -> None
+ # type: (str, str, Any) -> None
"""Build the text of the file and write the file."""
if not opts.noheadings:
text = format_heading(1, '%s module' % module)
@@ -112,7 +107,7 @@ def create_module_file(package, module, opts):
def create_package_file(root, master_package, subroot, py_files, opts, subs, is_namespace, excludes=[]): # NOQA
- # type: (unicode, unicode, unicode, List[unicode], Any, List[unicode], bool, List[unicode]) -> None # NOQA
+ # type: (str, str, str, List[str], Any, List[str], bool, List[str]) -> None
"""Build the text of the file and write the file."""
text = format_heading(1, ('%s package' if not is_namespace else "%s namespace")
% makename(master_package, subroot))
@@ -172,14 +167,14 @@ def create_package_file(root, master_package, subroot, py_files, opts, subs, is_
def create_modules_toc_file(modules, opts, name='modules'):
- # type: (List[unicode], Any, unicode) -> None
+ # type: (List[str], Any, str) -> None
"""Create the module's index."""
text = format_heading(1, '%s' % opts.header, escape=False)
text += '.. toctree::\n'
text += ' :maxdepth: %s\n\n' % opts.maxdepth
modules.sort()
- prev_module = '' # type: unicode
+ prev_module = ''
for module in modules:
# look if the module is a subpackage and, if yes, ignore it
if module.startswith(prev_module + '.'):
@@ -191,7 +186,7 @@ def create_modules_toc_file(modules, opts, name='modules'):
def shall_skip(module, opts, excludes=[]):
- # type: (unicode, Any, List[unicode]) -> bool
+ # type: (str, Any, List[str]) -> bool
"""Check if we want to skip this module."""
# skip if the file doesn't exist and not using implicit namespaces
if not opts.implicit_namespaces and not path.exists(module):
@@ -218,7 +213,7 @@ def shall_skip(module, opts, excludes=[]):
def recurse_tree(rootpath, excludes, opts):
- # type: (unicode, List[unicode], Any) -> List[unicode]
+ # type: (str, List[str], Any) -> List[str]
"""
Look for every file in the directory tree and create the corresponding
ReST files.
@@ -235,7 +230,7 @@ def recurse_tree(rootpath, excludes, opts):
root_package = None
toplevels = []
- for root, subs, files in walk(rootpath, followlinks=followlinks):
+ for root, subs, files in os.walk(rootpath, followlinks=followlinks):
# document only Python module files (that aren't excluded)
py_files = sorted(f for f in files
if path.splitext(f)[1] in PY_SUFFIXES and
@@ -253,7 +248,7 @@ def recurse_tree(rootpath, excludes, opts):
# remove hidden ('.') and private ('_') directories, as well as
# excluded dirs
if includeprivate:
- exclude_prefixes = ('.',) # type: Tuple[unicode, ...]
+ exclude_prefixes = ('.',) # type: Tuple[str, ...]
else:
exclude_prefixes = ('.', '_')
subs[:] = sorted(sub for sub in subs if not sub.startswith(exclude_prefixes) and
@@ -283,7 +278,7 @@ def recurse_tree(rootpath, excludes, opts):
def is_excluded(root, excludes):
- # type: (unicode, List[unicode]) -> bool
+ # type: (str, List[str]) -> bool
"""Check if the directory is in the exclude list.
Note: by having trailing slashes, we avoid common prefix issues, like
@@ -376,6 +371,8 @@ Note: By default this script will not overwrite already created files."""))
'defaults to --doc-version'))
group = parser.add_argument_group(__('extension options'))
+ group.add_argument('--extensions', metavar='EXTENSIONS', dest='extensions',
+ action='append', help=__('enable arbitrary extensions'))
for ext in EXTENSIONS:
group.add_argument('--ext-%s' % ext, action='append_const',
const='sphinx.ext.%s' % ext, dest='extensions',
@@ -412,46 +409,42 @@ def main(argv=sys.argv[1:]):
if args.full:
from sphinx.cmd import quickstart as qs
modules.sort()
- prev_module = '' # type: unicode
+ prev_module = ''
text = ''
for module in modules:
if module.startswith(prev_module + '.'):
continue
prev_module = module
text += ' %s\n' % module
- d = dict(
- path = args.destdir,
- sep = False,
- dot = '_',
- project = args.header,
- author = args.author or 'Author',
- version = args.version or '',
- release = args.release or args.version or '',
- suffix = '.' + args.suffix,
- master = 'index',
- epub = True,
- extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode',
- 'sphinx.ext.todo'],
- makefile = True,
- batchfile = True,
- make_mode = True,
- mastertocmaxdepth = args.maxdepth,
- mastertoctree = text,
- language = 'en',
- module_path = rootpath,
- append_syspath = args.append_syspath,
- )
+ d = {
+ 'path': args.destdir,
+ 'sep': False,
+ 'dot': '_',
+ 'project': args.header,
+ 'author': args.author or 'Author',
+ 'version': args.version or '',
+ 'release': args.release or args.version or '',
+ 'suffix': '.' + args.suffix,
+ 'master': 'index',
+ 'epub': True,
+ 'extensions': ['sphinx.ext.autodoc', 'sphinx.ext.viewcode',
+ 'sphinx.ext.todo'],
+ 'makefile': True,
+ 'batchfile': True,
+ 'make_mode': True,
+ 'mastertocmaxdepth': args.maxdepth,
+ 'mastertoctree': text,
+ 'language': 'en',
+ 'module_path': rootpath,
+ 'append_syspath': args.append_syspath,
+ }
if args.extensions:
d['extensions'].extend(args.extensions)
- if isinstance(args.header, binary_type):
- d['project'] = d['project'].decode('utf-8')
- if isinstance(args.author, binary_type):
- d['author'] = d['author'].decode('utf-8')
- if isinstance(args.version, binary_type):
- d['version'] = d['version'].decode('utf-8')
- if isinstance(args.release, binary_type):
- d['release'] = d['release'].decode('utf-8')
+ for ext in d['extensions'][:]:
+ if ',' in ext:
+ d['extensions'].remove(ext)
+ d['extensions'].extend(ext.split(','))
if not args.dryrun:
qs.generate(d, silent=True, overwrite=args.force)
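
For reference, a standalone sketch (not part of the diff) of how the comma-splitting loop added above behaves for the new --extensions option; the extension names below are made up for illustration:

    extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.todo',
                  'sphinx.ext.napoleon,sphinx.ext.intersphinx']
    for ext in extensions[:]:          # iterate over a copy while mutating the list
        if ',' in ext:
            extensions.remove(ext)
            extensions.extend(ext.split(','))
    print(extensions)
    # ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.todo',
    #  'sphinx.ext.napoleon', 'sphinx.ext.intersphinx']
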
diff --git a/sphinx/ext/autodoc/__init__.py b/sphinx/ext/autodoc/__init__.py
index df3319695..86f80de8f 100644
--- a/sphinx/ext/autodoc/__init__.py
+++ b/sphinx/ext/autodoc/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.autodoc
~~~~~~~~~~~~~~~~~~
@@ -13,23 +12,19 @@
import inspect
import re
-import sys
import warnings
from typing import Any
-from docutils.statemachine import ViewList
-from six import iteritems, itervalues, text_type, class_types, string_types
+from docutils.statemachine import StringList
import sphinx
-from sphinx.deprecation import RemovedInSphinx20Warning, RemovedInSphinx30Warning
-from sphinx.errors import ExtensionError
+from sphinx.deprecation import RemovedInSphinx30Warning, RemovedInSphinx40Warning
from sphinx.ext.autodoc.importer import mock, import_object, get_object_members
from sphinx.ext.autodoc.importer import _MockImporter # to keep compatibility # NOQA
-from sphinx.ext.autodoc.inspector import format_annotation, formatargspec # to keep compatibility # NOQA
from sphinx.locale import _, __
from sphinx.pycode import ModuleAnalyzer, PycodeError
from sphinx.util import logging
-from sphinx.util import rpartition, force_decode
+from sphinx.util import rpartition
from sphinx.util.docstrings import prepare_docstring
from sphinx.util.inspect import Signature, isdescriptor, safe_getmembers, \
safe_getattr, object_description, is_builtin_class_method, \
@@ -75,7 +70,7 @@ INSTANCEATTR = object()
def members_option(arg):
- # type: (Any) -> Union[object, List[unicode]]
+ # type: (Any) -> Union[object, List[str]]
"""Used to convert the :members: option to auto directives."""
if arg is None:
return ALL
@@ -83,7 +78,7 @@ def members_option(arg):
def members_set_option(arg):
- # type: (Any) -> Union[object, Set[unicode]]
+ # type: (Any) -> Union[object, Set[str]]
"""Used to convert the :members: option to auto directives."""
if arg is None:
return ALL
@@ -124,62 +119,10 @@ def merge_special_members_option(options):
options['members'] = options['special-members']
-class AutodocReporter(object):
- """
- A reporter replacement that assigns the correct source name
- and line number to a system message, as recorded in a ViewList.
- """
- def __init__(self, viewlist, reporter):
- # type: (ViewList, Reporter) -> None
- warnings.warn('AutodocReporter is now deprecated. '
- 'Use sphinx.util.docutils.switch_source_input() instead.',
- RemovedInSphinx20Warning, stacklevel=2)
- self.viewlist = viewlist
- self.reporter = reporter
-
- def __getattr__(self, name):
- # type: (unicode) -> Any
- return getattr(self.reporter, name)
-
- def system_message(self, level, message, *children, **kwargs):
- # type: (int, unicode, Any, Any) -> nodes.system_message
- if 'line' in kwargs and 'source' not in kwargs:
- try:
- source, line = self.viewlist.items[kwargs['line']]
- except IndexError:
- pass
- else:
- kwargs['source'] = source
- kwargs['line'] = line
- return self.reporter.system_message(level, message,
- *children, **kwargs)
-
- def debug(self, *args, **kwargs):
- # type: (Any, Any) -> nodes.system_message
- if self.reporter.debug_flag:
- return self.system_message(0, *args, **kwargs)
-
- def info(self, *args, **kwargs):
- # type: (Any, Any) -> nodes.system_message
- return self.system_message(1, *args, **kwargs)
-
- def warning(self, *args, **kwargs):
- # type: (Any, Any) -> nodes.system_message
- return self.system_message(2, *args, **kwargs)
-
- def error(self, *args, **kwargs):
- # type: (Any, Any) -> nodes.system_message
- return self.system_message(3, *args, **kwargs)
-
- def severe(self, *args, **kwargs):
- # type: (Any, Any) -> nodes.system_message
- return self.system_message(4, *args, **kwargs)
-
-
# Some useful event listener factories for autodoc-process-docstring.
def cut_lines(pre, post=0, what=None):
- # type: (int, int, unicode) -> Callable
+ # type: (int, int, str) -> Callable
"""Return a listener that removes the first *pre* and last *post*
lines of every docstring. If *what* is a sequence of strings,
only docstrings of a type in *what* will be processed.
@@ -192,7 +135,7 @@ def cut_lines(pre, post=0, what=None):
This can (and should) be used in place of :confval:`automodule_skip_lines`.
"""
def process(app, what_, name, obj, options, lines):
- # type: (Sphinx, unicode, unicode, Any, Any, List[unicode]) -> None
+ # type: (Sphinx, str, str, Any, Any, List[str]) -> None
if what and what_ not in what:
return
del lines[:pre]
@@ -208,7 +151,7 @@ def cut_lines(pre, post=0, what=None):
def between(marker, what=None, keepempty=False, exclude=False):
- # type: (unicode, Sequence[unicode], bool, bool) -> Callable
+ # type: (str, Sequence[str], bool, bool) -> Callable
"""Return a listener that either keeps, or if *exclude* is True excludes,
lines between lines that match the *marker* regular expression. If no line
matches, the resulting docstring would be empty, so no change will be made
@@ -220,7 +163,7 @@ def between(marker, what=None, keepempty=False, exclude=False):
marker_re = re.compile(marker)
def process(app, what_, name, obj, options, lines):
- # type: (Sphinx, unicode, unicode, Any, Any, List[unicode]) -> None
+ # type: (Sphinx, str, str, Any, Any, List[str]) -> None
if what and what_ not in what:
return
deleted = 0
@@ -248,14 +191,14 @@ def between(marker, what=None, keepempty=False, exclude=False):
class Options(dict):
"""A dict/attribute hybrid that returns None on nonexisting keys."""
def __getattr__(self, name):
- # type: (unicode) -> Any
+ # type: (str) -> Any
try:
return self[name.replace('_', '-')]
except KeyError:
return None
-class Documenter(object):
+class Documenter:
"""
A Documenter knows how to autodocument a single object type. When
registered with the AutoDirective, it will be used to document objects
@@ -274,7 +217,7 @@ class Documenter(object):
#: generated directive name
objtype = 'object'
#: indentation by which to indent the directive content
- content_indent = u' '
+ content_indent = ' '
#: priority if multiple documenters return True from can_document_member
priority = 0
#: order if autodoc_member_order is set to 'groupwise'
@@ -282,21 +225,21 @@ class Documenter(object):
#: true if the generated content may contain titles
titles_allowed = False
- option_spec = {'noindex': bool_option} # type: Dict[unicode, Callable]
+ option_spec = {'noindex': bool_option} # type: Dict[str, Callable]
def get_attr(self, obj, name, *defargs):
- # type: (Any, unicode, Any) -> Any
+ # type: (Any, str, Any) -> Any
"""getattr() override for types such as Zope interfaces."""
return autodoc_attrgetter(self.env.app, obj, name, *defargs)
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
+ # type: (Any, str, bool, Any) -> bool
"""Called to see if a member can be documented by this documenter."""
raise NotImplementedError('must be implemented in subclasses')
- def __init__(self, directive, name, indent=u''):
- # type: (DocumenterBridge, unicode, unicode) -> None
+ def __init__(self, directive, name, indent=''):
+ # type: (DocumenterBridge, str, str) -> None
self.directive = directive
self.env = directive.env # type: BuildEnvironment
self.options = directive.genopt
@@ -306,33 +249,33 @@ class Documenter(object):
# qualified name (all set after resolve_name succeeds)
self.modname = None # type: str
self.module = None # type: ModuleType
- self.objpath = None # type: List[unicode]
- self.fullname = None # type: unicode
+ self.objpath = None # type: List[str]
+ self.fullname = None # type: str
# extra signature items (arguments and return annotation,
# also set after resolve_name succeeds)
- self.args = None # type: unicode
- self.retann = None # type: unicode
+ self.args = None # type: str
+ self.retann = None # type: str
# the object to document (set after import_object succeeds)
self.object = None # type: Any
- self.object_name = None # type: unicode
+ self.object_name = None # type: str
# the parent/owner of the object to document
self.parent = None # type: Any
# the module analyzer to get at attribute docs, or None
- self.analyzer = None # type: Any
+ self.analyzer = None # type: ModuleAnalyzer
@property
def documenters(self):
- # type: () -> Dict[unicode, Type[Documenter]]
+ # type: () -> Dict[str, Type[Documenter]]
"""Returns registered Documenter classes"""
return get_documenters(self.env.app)
def add_line(self, line, source, *lineno):
- # type: (unicode, unicode, int) -> None
+ # type: (str, str, int) -> None
"""Append one line of generated reST to the output."""
self.directive.result.append(self.indent + line, source, *lineno)
def resolve_name(self, modname, parents, path, base):
- # type: (str, Any, str, Any) -> Tuple[str, List[unicode]]
+ # type: (str, Any, str, Any) -> Tuple[str, List[str]]
"""Resolve the module and name of the object to document given by the
arguments and the current module/class.
@@ -356,7 +299,8 @@ class Documenter(object):
explicit_modname, path, base, args, retann = \
py_ext_sig_re.match(self.name).groups()
except AttributeError:
- logger.warning(__('invalid signature for auto%s (%r)') % (self.objtype, self.name))
+ logger.warning(__('invalid signature for auto%s (%r)') % (self.objtype, self.name),
+ type='autodoc')
return False
# support explicit module and class name separation via ::
@@ -367,7 +311,7 @@ class Documenter(object):
modname = None
parents = []
- self.modname, self.objpath = self.resolve_name(modname, parents, path, base) # type: ignore # NOQA
+ self.modname, self.objpath = self.resolve_name(modname, parents, path, base)
if not self.modname:
return False
@@ -393,7 +337,7 @@ class Documenter(object):
self.module, self.parent, self.object_name, self.object = ret
return True
except ImportError as exc:
- logger.warning(exc.args[0])
+ logger.warning(exc.args[0], type='autodoc', subtype='import_object')
self.env.note_reread()
return False
@@ -422,7 +366,7 @@ class Documenter(object):
return True
def format_args(self):
- # type: () -> unicode
+ # type: () -> str
"""Format the argument signature of *self.object*.
Should return None if the object does not have a signature.
@@ -430,7 +374,7 @@ class Documenter(object):
return None
def format_name(self):
- # type: () -> unicode
+ # type: () -> str
"""Format the name of *self.object*.
This normally should be something that can be parsed by the generated
@@ -442,21 +386,21 @@ class Documenter(object):
return '.'.join(self.objpath) or self.modname
def format_signature(self):
- # type: () -> unicode
+ # type: () -> str
"""Format the signature (arguments and return annotation) of the object.
Let the user process it via the ``autodoc-process-signature`` event.
"""
if self.args is not None:
# signature given explicitly
- args = "(%s)" % self.args # type: unicode
+ args = "(%s)" % self.args
else:
# try to introspect the signature
try:
args = self.format_args()
except Exception as err:
logger.warning(__('error while formatting arguments for %s: %s') %
- (self.fullname, err))
+ (self.fullname, err), type='autodoc')
args = None
retann = self.retann
@@ -473,38 +417,36 @@ class Documenter(object):
return ''
def add_directive_header(self, sig):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Add the directive header and options to the generated content."""
domain = getattr(self, 'domain', 'py')
directive = getattr(self, 'directivetype', self.objtype)
name = self.format_name()
sourcename = self.get_sourcename()
- self.add_line(u'.. %s:%s:: %s%s' % (domain, directive, name, sig),
+ self.add_line('.. %s:%s:: %s%s' % (domain, directive, name, sig),
sourcename)
if self.options.noindex:
- self.add_line(u' :noindex:', sourcename)
+ self.add_line(' :noindex:', sourcename)
if self.objpath:
# Be explicit about the module, this is necessary since .. class::
# etc. don't support a prepended module name
- self.add_line(u' :module: %s' % self.modname, sourcename)
+ self.add_line(' :module: %s' % self.modname, sourcename)
def get_doc(self, encoding=None, ignore=1):
- # type: (unicode, int) -> List[List[unicode]]
+ # type: (str, int) -> List[List[str]]
"""Decode and return lines of the docstring(s) for the object."""
+ if encoding is not None:
+ warnings.warn("The 'encoding' argument to autodoc.%s.get_doc() is deprecated."
+ % self.__class__.__name__,
+ RemovedInSphinx40Warning)
docstring = getdoc(self.object, self.get_attr,
self.env.config.autodoc_inherit_docstrings)
- # make sure we have Unicode docstrings, then sanitize and split
- # into lines
- if isinstance(docstring, text_type):
+ if docstring:
return [prepare_docstring(docstring, ignore)]
- elif isinstance(docstring, str): # this will not trigger on Py3
- return [prepare_docstring(force_decode(docstring, encoding),
- ignore)]
- # ... else it is something strange, let's ignore it
return []
def process_doc(self, docstrings):
- # type: (List[List[unicode]]) -> Iterator[unicode]
+ # type: (List[List[str]]) -> Iterator[str]
"""Let the user process the docstrings before adding them."""
for docstringlines in docstrings:
if self.env.app:
@@ -512,20 +454,13 @@ class Documenter(object):
self.env.app.emit('autodoc-process-docstring',
self.objtype, self.fullname, self.object,
self.options, docstringlines)
- for line in docstringlines:
- yield line
+ yield from docstringlines
def get_sourcename(self):
- # type: () -> unicode
+ # type: () -> str
if self.analyzer:
- # prevent encoding errors when the file name is non-ASCII
- if not isinstance(self.analyzer.srcname, text_type):
- filename = text_type(self.analyzer.srcname,
- sys.getfilesystemencoding(), 'replace')
- else:
- filename = self.analyzer.srcname
- return u'%s:docstring of %s' % (filename, self.fullname)
- return u'docstring of %s' % self.fullname
+ return '%s:docstring of %s' % (self.analyzer.srcname, self.fullname)
+ return 'docstring of %s' % self.fullname
def add_content(self, more_content, no_docstring=False):
# type: (Any, bool) -> None
@@ -544,8 +479,7 @@ class Documenter(object):
# add content from docstrings
if not no_docstring:
- encoding = self.analyzer and self.analyzer.encoding
- docstrings = self.get_doc(encoding)
+ docstrings = self.get_doc()
if not docstrings:
# append at least a dummy docstring, so that the event
# autodoc-process-docstring is fired and can add some
@@ -560,7 +494,7 @@ class Documenter(object):
self.add_line(line, src[0], src[1])
def get_object_members(self, want_all):
- # type: (bool) -> Tuple[bool, List[Tuple[unicode, Any]]]
+ # type: (bool) -> Tuple[bool, List[Tuple[str, Any]]]
"""Return `(members_check_module, members)` where `members` is a
list of `(membername, member)` pairs of the members of *self.object*.
@@ -578,16 +512,16 @@ class Documenter(object):
selected.append((name, members[name].value))
else:
logger.warning(__('missing attribute %s in object %s') %
- (name, self.fullname))
+ (name, self.fullname), type='autodoc')
return False, sorted(selected)
elif self.options.inherited_members:
- return False, sorted((m.name, m.value) for m in itervalues(members))
+ return False, sorted((m.name, m.value) for m in members.values())
else:
- return False, sorted((m.name, m.value) for m in itervalues(members)
+ return False, sorted((m.name, m.value) for m in members.values()
if m.directly_defined)
def filter_members(self, members, want_all):
- # type: (List[Tuple[unicode, Any]], bool) -> List[Tuple[unicode, Any, bool]]
+ # type: (List[Tuple[str, Any]], bool) -> List[Tuple[str, Any, bool]]
"""Filter the given member list.
Members are skipped if
@@ -667,7 +601,7 @@ class Documenter(object):
except Exception as exc:
logger.warning(__('autodoc: failed to determine %r to be documented.'
'the following exception was raised:\n%s'),
- member, exc)
+ member, exc, type='autodoc')
keep = False
if keep:
@@ -705,7 +639,7 @@ class Documenter(object):
# document non-skipped members
memberdocumenters = [] # type: List[Tuple[Documenter, bool]]
for (mname, member, isattr) in self.filter_members(members, want_all):
- classes = [cls for cls in itervalues(self.documenters)
+ classes = [cls for cls in self.documenters.values()
if cls.can_document_member(member, mname, isattr, self)]
if not classes:
# don't know how to document this member
@@ -760,7 +694,7 @@ class Documenter(object):
__('don\'t know which module to import for autodocumenting '
'%r (try placing a "module" or "currentmodule" directive '
'in the document, or giving an explicit module name)') %
- self.name)
+ self.name, type='autodoc')
return
# now, import the module and get object to document
@@ -800,14 +734,14 @@ class Documenter(object):
# make sure that the result starts with an empty line. This is
# necessary for some situations where another directive preprocesses
# reST and no starting newline is present
- self.add_line(u'', sourcename)
+ self.add_line('', sourcename)
# format the object's signature, if any
sig = self.format_signature()
# generate the directive header and options, if applicable
self.add_directive_header(sig)
- self.add_line(u'', sourcename)
+ self.add_line('', sourcename)
# e.g. the module directive doesn't have content
self.indent += self.content_indent
@@ -824,7 +758,7 @@ class ModuleDocumenter(Documenter):
Specialized Documenter subclass for modules.
"""
objtype = 'module'
- content_indent = u''
+ content_indent = ''
titles_allowed = True
option_spec = {
@@ -835,51 +769,51 @@ class ModuleDocumenter(Documenter):
'member-order': identity, 'exclude-members': members_set_option,
'private-members': bool_option, 'special-members': members_option,
'imported-members': bool_option, 'ignore-module-all': bool_option
- } # type: Dict[unicode, Callable]
+ } # type: Dict[str, Callable]
def __init__(self, *args):
# type: (Any) -> None
- super(ModuleDocumenter, self).__init__(*args)
+ super().__init__(*args)
merge_special_members_option(self.options)
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
+ # type: (Any, str, bool, Any) -> bool
# don't document submodules automatically
return False
def resolve_name(self, modname, parents, path, base):
- # type: (str, Any, str, Any) -> Tuple[str, List[unicode]]
+ # type: (str, Any, str, Any) -> Tuple[str, List[str]]
if modname is not None:
- logger.warning(__('"::" in automodule name doesn\'t make sense'))
+ logger.warning(__('"::" in automodule name doesn\'t make sense'),
+ type='autodoc')
return (path or '') + base, []
def parse_name(self):
# type: () -> bool
- ret = Documenter.parse_name(self)
+ ret = super().parse_name()
if self.args or self.retann:
logger.warning(__('signature arguments or return annotation '
- 'given for automodule %s') % self.fullname)
+ 'given for automodule %s') % self.fullname,
+ type='autodoc')
return ret
def add_directive_header(self, sig):
- # type: (unicode) -> None
+ # type: (str) -> None
Documenter.add_directive_header(self, sig)
sourcename = self.get_sourcename()
# add some module-specific options
if self.options.synopsis:
- self.add_line(
- u' :synopsis: ' + self.options.synopsis, sourcename)
+ self.add_line(' :synopsis: ' + self.options.synopsis, sourcename)
if self.options.platform:
- self.add_line(
- u' :platform: ' + self.options.platform, sourcename)
+ self.add_line(' :platform: ' + self.options.platform, sourcename)
if self.options.deprecated:
- self.add_line(u' :deprecated:', sourcename)
+ self.add_line(' :deprecated:', sourcename)
def get_object_members(self, want_all):
- # type: (bool) -> Tuple[bool, List[Tuple[unicode, object]]]
+ # type: (bool) -> Tuple[bool, List[Tuple[str, object]]]
if want_all:
if (self.options.ignore_module_all or not
hasattr(self.object, '__all__')):
@@ -890,11 +824,13 @@ class ModuleDocumenter(Documenter):
memberlist = self.object.__all__
# Sometimes __all__ is broken...
if not isinstance(memberlist, (list, tuple)) or not \
- all(isinstance(entry, string_types) for entry in memberlist):
+ all(isinstance(entry, str) for entry in memberlist):
logger.warning(
__('__all__ should be a list of strings, not %r '
'(in module %s) -- ignoring __all__') %
- (memberlist, self.fullname))
+ (memberlist, self.fullname),
+ type='autodoc'
+ )
# fall back to all members
return True, safe_getmembers(self.object)
else:
@@ -907,7 +843,9 @@ class ModuleDocumenter(Documenter):
logger.warning(
__('missing attribute mentioned in :members: or __all__: '
'module %s, attribute %s') %
- (safe_getattr(self.object, '__name__', '???'), mname))
+ (safe_getattr(self.object, '__name__', '???'), mname),
+ type='autodoc'
+ )
return False, ret
@@ -917,7 +855,7 @@ class ModuleLevelDocumenter(Documenter):
classes, data/constants).
"""
def resolve_name(self, modname, parents, path, base):
- # type: (str, Any, str, Any) -> Tuple[str, List[unicode]]
+ # type: (str, Any, str, Any) -> Tuple[str, List[str]]
if modname is None:
if path:
modname = path.rstrip('.')
@@ -938,7 +876,7 @@ class ClassLevelDocumenter(Documenter):
attributes).
"""
def resolve_name(self, modname, parents, path, base):
- # type: (str, Any, str, Any) -> Tuple[str, List[unicode]]
+ # type: (str, Any, str, Any) -> Tuple[str, List[str]]
if modname is None:
if path:
mod_cls = path.rstrip('.')
@@ -954,7 +892,7 @@ class ClassLevelDocumenter(Documenter):
# ... if still None, there's no way to know
if mod_cls is None:
return None, []
- modname, cls = rpartition(mod_cls, '.') # type: ignore
+ modname, cls = rpartition(mod_cls, '.')
parents = [cls]
# if the module name is still missing, get it like above
if not modname:
@@ -965,15 +903,19 @@ class ClassLevelDocumenter(Documenter):
return modname, parents + [base]
-class DocstringSignatureMixin(object):
+class DocstringSignatureMixin:
"""
Mixin for FunctionDocumenter and MethodDocumenter to provide the
feature of reading the signature from the docstring.
"""
def _find_signature(self, encoding=None):
- # type: (unicode) -> Tuple[str, str]
- docstrings = self.get_doc(encoding)
+ # type: (str) -> Tuple[str, str]
+ if encoding is not None:
+ warnings.warn("The 'encoding' argument to autodoc.%s._find_signature() is "
+ "deprecated." % self.__class__.__name__,
+ RemovedInSphinx40Warning)
+ docstrings = self.get_doc()
self._new_docstrings = docstrings[:]
result = None
for i, doclines in enumerate(docstrings):
@@ -998,24 +940,28 @@ class DocstringSignatureMixin(object):
result = args, retann
# don't look any further
break
- return result # type: ignore
+ return result
def get_doc(self, encoding=None, ignore=1):
- # type: (unicode, int) -> List[List[unicode]]
+ # type: (str, int) -> List[List[str]]
+ if encoding is not None:
+ warnings.warn("The 'encoding' argument to autodoc.%s.get_doc() is deprecated."
+ % self.__class__.__name__,
+ RemovedInSphinx40Warning)
lines = getattr(self, '_new_docstrings', None)
if lines is not None:
return lines
- return Documenter.get_doc(self, encoding, ignore) # type: ignore
+ return super().get_doc(None, ignore) # type: ignore
def format_signature(self):
- # type: () -> unicode
+ # type: () -> str
if self.args is None and self.env.config.autodoc_docstring_signature: # type: ignore
# only act if a signature is not explicitly given already, and if
# the feature is enabled
result = self._find_signature()
if result is not None:
self.args, self.retann = result
- return Documenter.format_signature(self) # type: ignore
+ return super().format_signature() # type: ignore
class DocstringStripSignatureMixin(DocstringSignatureMixin):
@@ -1024,7 +970,7 @@ class DocstringStripSignatureMixin(DocstringSignatureMixin):
feature of stripping any function signature from the docstring.
"""
def format_signature(self):
- # type: () -> unicode
+ # type: () -> str
if self.args is None and self.env.config.autodoc_docstring_signature: # type: ignore
# only act if a signature is not explicitly given already, and if
# the feature is enabled
@@ -1034,7 +980,7 @@ class DocstringStripSignatureMixin(DocstringSignatureMixin):
# DocstringSignatureMixin.format_signature.
# Documenter.format_signature use self.args value to format.
_args, self.retann = result
- return Documenter.format_signature(self) # type: ignore
+ return super().format_signature()
class FunctionDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type: ignore
@@ -1046,11 +992,11 @@ class FunctionDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # typ
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
+ # type: (Any, str, bool, Any) -> bool
return isfunction(member) or isbuiltin(member)
def format_args(self):
- # type: () -> unicode
+ # type: () -> str
if isbuiltin(self.object) or inspect.ismethoddescriptor(self.object):
# cannot introspect arguments of a C function or method
return None
@@ -1098,21 +1044,21 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type:
'show-inheritance': bool_option, 'member-order': identity,
'exclude-members': members_set_option,
'private-members': bool_option, 'special-members': members_option,
- } # type: Dict[unicode, Callable]
+ } # type: Dict[str, Callable]
def __init__(self, *args):
# type: (Any) -> None
- super(ClassDocumenter, self).__init__(*args)
+ super().__init__(*args)
merge_special_members_option(self.options)
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
- return isinstance(member, class_types)
+ # type: (Any, str, bool, Any) -> bool
+ return isinstance(member, type)
def import_object(self):
# type: () -> Any
- ret = ModuleLevelDocumenter.import_object(self)
+ ret = super().import_object()
# if the class is documented under another name, document it
# as data/attribute
if ret:
@@ -1123,7 +1069,7 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type:
return ret
def format_args(self):
- # type: () -> unicode
+ # type: () -> str
# for classes, the relevant signature is the __init__ method's
initmeth = self.get_attr(self.object, '__init__', None)
# classes without __init__ method, default __init__ or
@@ -1140,32 +1086,36 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type:
return None
def format_signature(self):
- # type: () -> unicode
+ # type: () -> str
if self.doc_as_attr:
return ''
- return DocstringSignatureMixin.format_signature(self)
+ return super().format_signature()
def add_directive_header(self, sig):
- # type: (unicode) -> None
+ # type: (str) -> None
if self.doc_as_attr:
self.directivetype = 'attribute'
- Documenter.add_directive_header(self, sig)
+ super().add_directive_header(sig)
# add inheritance info, if wanted
if not self.doc_as_attr and self.options.show_inheritance:
sourcename = self.get_sourcename()
- self.add_line(u'', sourcename)
+ self.add_line('', sourcename)
if hasattr(self.object, '__bases__') and len(self.object.__bases__):
bases = [b.__module__ in ('__builtin__', 'builtins') and
- u':class:`%s`' % b.__name__ or
- u':class:`%s.%s`' % (b.__module__, b.__name__)
+ ':class:`%s`' % b.__name__ or
+ ':class:`%s.%s`' % (b.__module__, b.__name__)
for b in self.object.__bases__]
- self.add_line(u' ' + _(u'Bases: %s') % ', '.join(bases),
+ self.add_line(' ' + _('Bases: %s') % ', '.join(bases),
sourcename)
def get_doc(self, encoding=None, ignore=1):
- # type: (unicode, int) -> List[List[unicode]]
+ # type: (str, int) -> List[List[str]]
+ if encoding is not None:
+ warnings.warn("The 'encoding' argument to autodoc.%s.get_doc() is deprecated."
+ % self.__class__.__name__,
+ RemovedInSphinx40Warning)
lines = getattr(self, '_new_docstrings', None)
if lines is not None:
return lines
@@ -1201,14 +1151,7 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type:
docstrings = [initdocstring]
else:
docstrings.append(initdocstring)
- doc = []
- for docstring in docstrings:
- if isinstance(docstring, text_type):
- doc.append(prepare_docstring(docstring, ignore))
- elif isinstance(docstring, str): # this will not trigger on Py3
- doc.append(prepare_docstring(force_decode(docstring, encoding),
- ignore))
- return doc
+ return [prepare_docstring(docstring, ignore) for docstring in docstrings]
def add_content(self, more_content, no_docstring=False):
# type: (Any, bool) -> None
@@ -1220,19 +1163,17 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type:
module = safe_getattr(self.object, '__module__', None)
parentmodule = safe_getattr(self.parent, '__module__', None)
if module and module != parentmodule:
- classname = str(module) + u'.' + str(classname)
- content = ViewList(
- [_('alias of :class:`%s`') % classname], source='')
- ModuleLevelDocumenter.add_content(self, content,
- no_docstring=True)
+ classname = str(module) + '.' + str(classname)
+ content = StringList([_('alias of :class:`%s`') % classname], source='')
+ super().add_content(content, no_docstring=True)
else:
- ModuleLevelDocumenter.add_content(self, more_content)
+ super().add_content(more_content)
def document_members(self, all_members=False):
# type: (bool) -> None
if self.doc_as_attr:
return
- ModuleLevelDocumenter.document_members(self, all_members)
+ super().document_members(all_members)
def generate(self, more_content=None, real_modname=None,
check_module=False, all_members=False):
@@ -1242,9 +1183,9 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type:
# If a class gets imported into the module real_modname
# the analyzer won't find the source of the class, if
# it looks in real_modname.
- return super(ClassDocumenter, self).generate(more_content=more_content,
- check_module=check_module,
- all_members=all_members)
+ return super().generate(more_content=more_content,
+ check_module=check_module,
+ all_members=all_members)
class ExceptionDocumenter(ClassDocumenter):
@@ -1259,9 +1200,8 @@ class ExceptionDocumenter(ClassDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
- return isinstance(member, class_types) and \
- issubclass(member, BaseException) # type: ignore
+ # type: (Any, str, bool, Any) -> bool
+ return isinstance(member, type) and issubclass(member, BaseException)
class DataDocumenter(ModuleLevelDocumenter):
@@ -1276,12 +1216,12 @@ class DataDocumenter(ModuleLevelDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
+ # type: (Any, str, bool, Any) -> bool
return isinstance(parent, ModuleDocumenter) and isattr
def add_directive_header(self, sig):
- # type: (unicode) -> None
- ModuleLevelDocumenter.add_directive_header(self, sig)
+ # type: (str) -> None
+ super().add_directive_header(sig)
sourcename = self.get_sourcename()
if not self.options.annotation:
try:
@@ -1289,11 +1229,11 @@ class DataDocumenter(ModuleLevelDocumenter):
except ValueError:
pass
else:
- self.add_line(u' :annotation: = ' + objrepr, sourcename)
+ self.add_line(' :annotation: = ' + objrepr, sourcename)
elif self.options.annotation is SUPPRESS:
pass
else:
- self.add_line(u' :annotation: %s' % self.options.annotation,
+ self.add_line(' :annotation: %s' % self.options.annotation,
sourcename)
def document_members(self, all_members=False):
@@ -1316,13 +1256,13 @@ class MethodDocumenter(DocstringSignatureMixin, ClassLevelDocumenter): # type:
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
+ # type: (Any, str, bool, Any) -> bool
return inspect.isroutine(member) and \
not isinstance(parent, ModuleDocumenter)
def import_object(self):
# type: () -> Any
- ret = ClassLevelDocumenter.import_object(self)
+ ret = super().import_object()
if not ret:
return ret
@@ -1344,7 +1284,7 @@ class MethodDocumenter(DocstringSignatureMixin, ClassLevelDocumenter): # type:
return ret
def format_args(self):
- # type: () -> unicode
+ # type: () -> str
if isbuiltin(self.object) or inspect.ismethoddescriptor(self.object):
# can never get arguments of a C function or method
return None
@@ -1381,7 +1321,7 @@ class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
+ # type: (Any, str, bool, Any) -> bool
non_attr_types = (type, MethodDescriptorType)
isdatadesc = isdescriptor(member) and not \
cls.is_function_or_method(member) and not \
@@ -1392,7 +1332,7 @@ class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter):
# exported anywhere by Python
return isdatadesc or (not isinstance(parent, ModuleDocumenter) and
not inspect.isroutine(member) and
- not isinstance(member, class_types))
+ not isinstance(member, type))
def document_members(self, all_members=False):
# type: (bool) -> None
@@ -1400,7 +1340,7 @@ class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter):
def import_object(self):
# type: () -> Any
- ret = ClassLevelDocumenter.import_object(self)
+ ret = super().import_object()
if isenumattribute(self.object):
self.object = self.object.value
if isdescriptor(self.object) and \
@@ -1417,8 +1357,8 @@ class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter):
or self.modname
def add_directive_header(self, sig):
- # type: (unicode) -> None
- ClassLevelDocumenter.add_directive_header(self, sig)
+ # type: (str) -> None
+ super().add_directive_header(sig)
sourcename = self.get_sourcename()
if not self.options.annotation:
if not self._datadescriptor:
@@ -1427,12 +1367,11 @@ class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter):
except ValueError:
pass
else:
- self.add_line(u' :annotation: = ' + objrepr, sourcename)
+ self.add_line(' :annotation: = ' + objrepr, sourcename)
elif self.options.annotation is SUPPRESS:
pass
else:
- self.add_line(u' :annotation: %s' % self.options.annotation,
- sourcename)
+ self.add_line(' :annotation: %s' % self.options.annotation, sourcename)
def add_content(self, more_content, no_docstring=False):
# type: (Any, bool) -> None
@@ -1440,7 +1379,7 @@ class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter):
# if it's not a data descriptor, its docstring is very probably the
# wrong thing to display
no_docstring = True
- ClassLevelDocumenter.add_content(self, more_content, no_docstring)
+ super().add_content(more_content, no_docstring)
class InstanceAttributeDocumenter(AttributeDocumenter):
@@ -1457,7 +1396,7 @@ class InstanceAttributeDocumenter(AttributeDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
+ # type: (Any, str, bool, Any) -> bool
"""This documents only INSTANCEATTR members."""
return isattr and (member is INSTANCEATTR)
@@ -1472,90 +1411,19 @@ class InstanceAttributeDocumenter(AttributeDocumenter):
def add_content(self, more_content, no_docstring=False):
# type: (Any, bool) -> None
"""Never try to get a docstring from the object."""
- AttributeDocumenter.add_content(self, more_content, no_docstring=True)
-
-
-class DeprecatedDict(dict):
- def __init__(self, message):
- # type: (str) -> None
- self.message = message
- super(DeprecatedDict, self).__init__()
-
- def __setitem__(self, key, value):
- # type: (unicode, Any) -> None
- warnings.warn(self.message, RemovedInSphinx20Warning, stacklevel=2)
- super(DeprecatedDict, self).__setitem__(key, value)
-
- def setdefault(self, key, default=None):
- # type: (unicode, Any) -> None
- warnings.warn(self.message, RemovedInSphinx20Warning, stacklevel=2)
- super(DeprecatedDict, self).setdefault(key, default)
-
- def update(self, other=None): # type: ignore
- # type: (Dict) -> None
- warnings.warn(self.message, RemovedInSphinx20Warning, stacklevel=2)
- super(DeprecatedDict, self).update(other)
-
-
-class AutodocRegistry(object):
- """
- A registry of Documenters and attrgetters.
-
- Note: When importing an object, all items along the import chain are
- accessed using the descendant's *_special_attrgetters*, thus this
- dictionary should include all necessary functions for accessing
- attributes of the parents.
- """
- # a registry of objtype -> documenter class (Deprecated)
- _registry = DeprecatedDict(
- 'AutoDirective._registry has been deprecated. '
- 'Please use app.add_autodocumenter() instead.'
- ) # type: Dict[unicode, Type[Documenter]]
-
- # a registry of type -> getattr function
- _special_attrgetters = DeprecatedDict(
- 'AutoDirective._special_attrgetters has been deprecated. '
- 'Please use app.add_autodoc_attrgetter() instead.'
- ) # type: Dict[Type, Callable]
-
-
-AutoDirective = AutodocRegistry # for backward compatibility
-
-
-def add_documenter(cls):
- # type: (Type[Documenter]) -> None
- """Register a new Documenter."""
- warnings.warn('sphinx.ext.autodoc.add_documenter() has been deprecated. '
- 'Please use app.add_autodocumenter() instead.',
- RemovedInSphinx20Warning, stacklevel=2)
-
- if not issubclass(cls, Documenter):
- raise ExtensionError('autodoc documenter %r must be a subclass '
- 'of Documenter' % cls)
- # actually, it should be possible to override Documenters
- # if cls.objtype in AutoDirective._registry:
- # raise ExtensionError('autodoc documenter for %r is already '
- # 'registered' % cls.objtype)
- AutoDirective._registry[cls.objtype] = cls
+ super().add_content(more_content, no_docstring=True)
def get_documenters(app):
- # type: (Sphinx) -> Dict[unicode, Type[Documenter]]
+ # type: (Sphinx) -> Dict[str, Type[Documenter]]
"""Returns registered Documenter classes"""
- classes = dict(AutoDirective._registry) # registered directly
- if app:
- classes.update(app.registry.documenters) # registered by API
- return classes
+ return app.registry.documenters
def autodoc_attrgetter(app, obj, name, *defargs):
- # type: (Sphinx, Any, unicode, Any) -> Any
+ # type: (Sphinx, Any, str, Any) -> Any
"""Alternative getattr() for types"""
- candidates = dict(AutoDirective._special_attrgetters)
- if app:
- candidates.update(app.registry.autodoc_attrgettrs)
-
- for typ, func in iteritems(candidates):
+ for typ, func in app.registry.autodoc_attrgettrs.items():
if isinstance(obj, typ):
return func(obj, name, *defargs)
@@ -1577,17 +1445,17 @@ def merge_autodoc_default_flags(app, config):
RemovedInSphinx30Warning, stacklevel=2)
for option in config.autodoc_default_flags:
- if isinstance(option, string_types):
+ if isinstance(option, str):
config.autodoc_default_options[option] = None
else:
logger.warning(
__("Ignoring invalid option in autodoc_default_flags: %r"),
- option
+ option, type='autodoc'
)
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_autodocumenter(ModuleDocumenter)
app.add_autodocumenter(ClassDocumenter)
app.add_autodocumenter(ExceptionDocumenter)
diff --git a/sphinx/ext/autodoc/directive.py b/sphinx/ext/autodoc/directive.py
index 3a3434fc8..57ba12017 100644
--- a/sphinx/ext/autodoc/directive.py
+++ b/sphinx/ext/autodoc/directive.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.autodoc.directive
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -8,7 +7,7 @@
"""
from docutils import nodes
-from docutils.statemachine import ViewList
+from docutils.statemachine import StringList
from docutils.utils import assemble_option_dict
from sphinx.ext.autodoc import Options, get_documenters
@@ -18,8 +17,9 @@ from sphinx.util.nodes import nested_parse_with_titles
if False:
# For type annotation
- from typing import Any, Dict, List, Set, Type # NOQA
- from docutils.statemachine import State, StateMachine, StringList # NOQA
+ from typing import Any, Callable, Dict, List, Set, Type # NOQA
+ from docutils.parsers.rst.state import RSTState # NOQA
+ from docutils.statemachine import StateMachine, StringList # NOQA
from docutils.utils import Reporter # NOQA
from sphinx.config import Config # NOQA
from sphinx.environment import BuildEnvironment # NOQA
@@ -31,18 +31,23 @@ logger = logging.getLogger(__name__)
# common option names for autodoc directives
AUTODOC_DEFAULT_OPTIONS = ['members', 'undoc-members', 'inherited-members',
'show-inheritance', 'private-members', 'special-members',
- 'ignore-module-all', 'exclude-members']
+ 'ignore-module-all', 'exclude-members', 'member-order']
-class DummyOptionSpec(object):
+class DummyOptionSpec(dict):
"""An option_spec allows any options."""
+ def __bool__(self):
+ # type: () -> bool
+ """Behaves like some options are defined."""
+ return True
+
def __getitem__(self, key):
- # type: (Any) -> Any
+ # type: (str) -> Callable[[str], str]
return lambda x: x
-class DocumenterBridge(object):
+class DocumenterBridge:
"""A parameters container for Documenters."""
def __init__(self, env, reporter, options, lineno):
@@ -51,11 +56,11 @@ class DocumenterBridge(object):
self.reporter = reporter
self.genopt = options
self.lineno = lineno
- self.filename_set = set() # type: Set[unicode]
- self.result = ViewList()
+ self.filename_set = set() # type: Set[str]
+ self.result = StringList()
def warn(self, msg):
- # type: (unicode) -> None
+ # type: (str) -> None
logger.warning(msg, location=(self.env.docname, self.lineno))
@@ -74,11 +79,11 @@ def process_documenter_options(documenter, config, options):
def parse_generated_content(state, content, documenter):
- # type: (State, StringList, Documenter) -> List[nodes.Node]
+ # type: (RSTState, StringList, Documenter) -> List[nodes.Node]
"""Parse a generated content by Documenter."""
with switch_source_input(state, content):
if documenter.titles_allowed:
- node = nodes.section()
+ node = nodes.section() # type: nodes.Element
# necessary so that the child nodes get the right source/line set
node.document = state.document
nested_parse_with_titles(state, content, node)
@@ -107,7 +112,7 @@ class AutodocDirective(SphinxDirective):
reporter = self.state.document.reporter
try:
- source, lineno = reporter.get_source_and_line(self.lineno)
+ source, lineno = reporter.get_source_and_line(self.lineno) # type: ignore
except AttributeError:
source, lineno = (None, None)
logger.debug('[autodoc] %s:%s: input:\n%s', source, lineno, self.block_text)
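
A quick illustration (assuming a Sphinx checkout matching this diff) of the DummyOptionSpec change above: as a dict subclass it now reports itself as truthy and hands back an identity converter for any option name:

    from sphinx.ext.autodoc.directive import DummyOptionSpec

    spec = DummyOptionSpec()
    assert bool(spec)                        # __bool__ pretends options are defined
    assert spec['members']('foo') == 'foo'   # any key maps to lambda x: x
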
diff --git a/sphinx/ext/autodoc/importer.py b/sphinx/ext/autodoc/importer.py
index 09f4be156..309415e9f 100644
--- a/sphinx/ext/autodoc/importer.py
+++ b/sphinx/ext/autodoc/importer.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.autodoc.importer
~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -10,25 +9,27 @@
"""
import contextlib
+import os
import sys
import traceback
import warnings
from collections import namedtuple
+from importlib.abc import Loader, MetaPathFinder
+from importlib.machinery import ModuleSpec
from types import FunctionType, MethodType, ModuleType
-from six import PY2, iteritems
-
+from sphinx.deprecation import RemovedInSphinx30Warning
from sphinx.util import logging
from sphinx.util.inspect import isenumclass, safe_getattr
if False:
# For type annotation
- from typing import Any, Callable, Dict, Generator, Iterator, List, Optional, Tuple # NOQA
+ from typing import Any, Callable, Dict, Generator, Iterator, List, Optional, Sequence, Tuple, Union # NOQA
logger = logging.getLogger(__name__)
-class _MockObject(object):
+class _MockObject:
"""Used by autodoc_mock_imports."""
def __new__(cls, *args, **kwargs):
@@ -77,15 +78,18 @@ class _MockObject(object):
class _MockModule(ModuleType):
"""Used by autodoc_mock_imports."""
- __file__ = '/dev/null'
+ __file__ = os.devnull
- def __init__(self, name, loader):
+ def __init__(self, name, loader=None):
# type: (str, _MockImporter) -> None
- self.__name__ = self.__package__ = name
- self.__loader__ = loader
+ super().__init__(name)
self.__all__ = [] # type: List[str]
self.__path__ = [] # type: List[str]
+ if loader is not None:
+ warnings.warn('The loader argument for _MockModule is deprecated.',
+ RemovedInSphinx30Warning)
+
def __getattr__(self, name):
# type: (str) -> _MockObject
o = _MockObject()
@@ -93,7 +97,7 @@ class _MockModule(ModuleType):
return o
-class _MockImporter(object):
+class _MockImporter(MetaPathFinder):
def __init__(self, names):
# type: (List[str]) -> None
self.names = names
@@ -101,6 +105,9 @@ class _MockImporter(object):
# enable hook by adding itself to meta_path
sys.meta_path.insert(0, self)
+ warnings.warn('_MockImporter is now deprecated.',
+ RemovedInSphinx30Warning)
+
def disable(self):
# type: () -> None
# remove `self` from `sys.meta_path` to disable import hook
@@ -112,7 +119,7 @@ class _MockImporter(object):
del sys.modules[m]
def find_module(self, name, path=None):
- # type: (str, str) -> Any
+ # type: (str, Sequence[Union[bytes, str]]) -> Any
# check if name is (or is a descendant of) one of our base_packages
for n in self.names:
if n == name or name.startswith(n + '.'):
@@ -132,14 +139,66 @@ class _MockImporter(object):
return module
+class MockLoader(Loader):
+ """A loader for mocking."""
+ def __init__(self, finder):
+ # type: (MockFinder) -> None
+ super().__init__()
+ self.finder = finder
+
+ def create_module(self, spec):
+ # type: (ModuleSpec) -> ModuleType
+ logger.debug('[autodoc] adding a mock module as %s!', spec.name)
+ self.finder.mocked_modules.append(spec.name)
+ return _MockModule(spec.name)
+
+ def exec_module(self, module):
+ # type: (ModuleType) -> None
+ pass # nothing to do
+
+
+class MockFinder(MetaPathFinder):
+ """A finder for mocking."""
+
+ def __init__(self, modnames):
+ # type: (List[str]) -> None
+ super().__init__()
+ self.modnames = modnames
+ self.loader = MockLoader(self)
+ self.mocked_modules = [] # type: List[str]
+
+ def find_spec(self, fullname, path, target=None):
+ # type: (str, Sequence[Union[bytes, str]], ModuleType) -> ModuleSpec
+ for modname in self.modnames:
+ # check if fullname is (or is a descendant of) one of our targets
+ if modname == fullname or fullname.startswith(modname + '.'):
+ return ModuleSpec(fullname, self.loader)
+
+ return None
+
+ def invalidate_caches(self):
+ # type: () -> None
+ """Invalidate mocked modules on sys.modules."""
+ for modname in self.mocked_modules:
+ sys.modules.pop(modname, None)
+
+
@contextlib.contextmanager
-def mock(names):
- # type: (List[str]) -> Generator
+def mock(modnames):
+ # type: (List[str]) -> Generator[None, None, None]
+ """Insert mock modules during context::
+
+ with mock(['target.module.name']):
+ # mock modules are enabled here
+ ...
+ """
try:
- importer = _MockImporter(names)
+ finder = MockFinder(modnames)
+ sys.meta_path.insert(0, finder)
yield
finally:
- importer.disable()
+ sys.meta_path.remove(finder)
+ finder.invalidate_caches()
def import_module(modname, warningiserror=False):
@@ -160,7 +219,7 @@ def import_module(modname, warningiserror=False):
def import_object(modname, objpath, objtype='', attrgetter=safe_getattr, warningiserror=False):
- # type: (str, List[unicode], str, Callable[[Any, unicode], Any], bool) -> Any
+ # type: (str, List[str], str, Callable[[Any, str], Any], bool) -> Any
if objpath:
logger.debug('[autodoc] from %s import %s', modname, '.'.join(objpath))
else:
@@ -219,8 +278,6 @@ def import_object(modname, objpath, objtype='', attrgetter=safe_getattr, warning
else:
errmsg += '; the following exception was raised:\n%s' % traceback.format_exc()
- if PY2:
- errmsg = errmsg.decode('utf-8') # type: ignore
logger.debug(errmsg)
raise ImportError(errmsg)
@@ -229,17 +286,11 @@ Attribute = namedtuple('Attribute', ['name', 'directly_defined', 'value'])
def get_object_members(subject, objpath, attrgetter, analyzer=None):
- # type: (Any, List[unicode], Callable, Any) -> Dict[str, Attribute] # NOQA
+ # type: (Any, List[str], Callable, Any) -> Dict[str, Attribute] # NOQA
"""Get members and attributes of target object."""
# the members directly defined in the class
obj_dict = attrgetter(subject, '__dict__', {})
- # Py34 doesn't have enum members in __dict__.
- if sys.version_info[:2] == (3, 4) and isenumclass(subject):
- obj_dict = dict(obj_dict)
- for name, value in subject.__members__.items():
- obj_dict[name] = value
-
members = {} # type: Dict[str, Attribute]
# enum members
@@ -249,7 +300,7 @@ def get_object_members(subject, objpath, attrgetter, analyzer=None):
members[name] = Attribute(name, True, value)
superclass = subject.__mro__[1]
- for name, value in iteritems(obj_dict):
+ for name, value in obj_dict.items():
if name not in superclass.__dict__:
members[name] = Attribute(name, True, value)
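A small sketch of calling get_object_members() with the default attribute getter (safe_getattr is assumed to live in sphinx.util.inspect, as in import_object() above; the objpath value is only consulted when an analyzer is passed):

    from sphinx.ext.autodoc.importer import get_object_members
    from sphinx.util.inspect import safe_getattr

    members = get_object_members(dict, ['dict'], safe_getattr)
    print(members['items'])   # Attribute(name='items', directly_defined=True, value=...)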
diff --git a/sphinx/ext/autodoc/inspector.py b/sphinx/ext/autodoc/inspector.py
deleted file mode 100644
index 378048f7e..000000000
--- a/sphinx/ext/autodoc/inspector.py
+++ /dev/null
@@ -1,187 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- sphinx.ext.autodoc.inspector
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- Inspect utilities for autodoc
-
- :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import typing
-import warnings
-
-from six import StringIO, string_types
-
-from sphinx.deprecation import RemovedInSphinx20Warning
-from sphinx.util.inspect import object_description
-
-if False:
- # For type annotation
- from typing import Any, Callable, Dict, Tuple # NOQA
-
-
-def format_annotation(annotation):
- # type: (Any) -> str
- """Return formatted representation of a type annotation.
-
- Show qualified names for types and additional details for types from
- the ``typing`` module.
-
- Displaying complex types from ``typing`` relies on its private API.
- """
- warnings.warn('format_annotation() is now deprecated. '
- 'Please use sphinx.util.inspect.Signature instead.',
- RemovedInSphinx20Warning, stacklevel=2)
- if isinstance(annotation, typing.TypeVar): # type: ignore
- return annotation.__name__
- if annotation == Ellipsis:
- return '...'
- if not isinstance(annotation, type):
- return repr(annotation)
-
- qualified_name = (annotation.__module__ + '.' + annotation.__qualname__ # type: ignore
- if annotation else repr(annotation))
-
- if annotation.__module__ == 'builtins':
- return annotation.__qualname__ # type: ignore
- else:
- if hasattr(typing, 'GenericMeta') and \
- isinstance(annotation, typing.GenericMeta):
- # In Python 3.5.2+, all arguments are stored in __args__,
- # whereas __parameters__ only contains generic parameters.
- #
- # Prior to Python 3.5.2, __args__ is not available, and all
- # arguments are in __parameters__.
- params = None
- if hasattr(annotation, '__args__'):
- if annotation.__args__ is None or len(annotation.__args__) <= 2: # type: ignore # NOQA
- params = annotation.__args__ # type: ignore
- else: # typing.Callable
- args = ', '.join(format_annotation(a) for a in annotation.__args__[:-1]) # type: ignore # NOQA
- result = format_annotation(annotation.__args__[-1]) # type: ignore
- return '%s[[%s], %s]' % (qualified_name, args, result)
- elif hasattr(annotation, '__parameters__'):
- params = annotation.__parameters__ # type: ignore
- if params is not None:
- param_str = ', '.join(format_annotation(p) for p in params)
- return '%s[%s]' % (qualified_name, param_str)
- elif (hasattr(typing, 'UnionMeta') and
- isinstance(annotation, typing.UnionMeta) and # type: ignore
- hasattr(annotation, '__union_params__')):
- params = annotation.__union_params__
- if params is not None:
- param_str = ', '.join(format_annotation(p) for p in params)
- return '%s[%s]' % (qualified_name, param_str)
- elif (hasattr(typing, 'CallableMeta') and
- isinstance(annotation, typing.CallableMeta) and # type: ignore
- getattr(annotation, '__args__', None) is not None and
- hasattr(annotation, '__result__')):
- # Skipped in the case of plain typing.Callable
- args = annotation.__args__
- if args is None:
- return qualified_name
- elif args is Ellipsis:
- args_str = '...'
- else:
- formatted_args = (format_annotation(a) for a in args)
- args_str = '[%s]' % ', '.join(formatted_args)
- return '%s[%s, %s]' % (qualified_name,
- args_str,
- format_annotation(annotation.__result__))
- elif (hasattr(typing, 'TupleMeta') and
- isinstance(annotation, typing.TupleMeta) and # type: ignore
- hasattr(annotation, '__tuple_params__') and
- hasattr(annotation, '__tuple_use_ellipsis__')):
- params = annotation.__tuple_params__
- if params is not None:
- param_strings = [format_annotation(p) for p in params]
- if annotation.__tuple_use_ellipsis__:
- param_strings.append('...')
- return '%s[%s]' % (qualified_name,
- ', '.join(param_strings))
- return qualified_name
-
-
-def formatargspec(function, args, varargs=None, varkw=None, defaults=None,
- kwonlyargs=(), kwonlydefaults={}, annotations={}):
- # type: (Callable, Tuple[str, ...], str, str, Any, Tuple, Dict, Dict[str, Any]) -> str
- """Return a string representation of an ``inspect.FullArgSpec`` tuple.
-
- An enhanced version of ``inspect.formatargspec()`` that handles typing
- annotations better.
- """
- warnings.warn('formatargspec() is now deprecated. '
- 'Please use sphinx.util.inspect.Signature instead.',
- RemovedInSphinx20Warning, stacklevel=2)
-
- def format_arg_with_annotation(name):
- # type: (str) -> str
- if name in annotations:
- return '%s: %s' % (name, format_annotation(get_annotation(name)))
- return name
-
- def get_annotation(name):
- # type: (str) -> str
- value = annotations[name]
- if isinstance(value, string_types):
- return introspected_hints.get(name, value)
- else:
- return value
-
- try:
- introspected_hints = (typing.get_type_hints(function) # type: ignore
- if typing and hasattr(function, '__code__') else {})
- except Exception:
- introspected_hints = {}
-
- fd = StringIO()
- fd.write('(')
-
- formatted = []
- defaults_start = len(args) - len(defaults) if defaults else len(args)
-
- for i, arg in enumerate(args):
- arg_fd = StringIO()
- if isinstance(arg, list):
- # support tupled arguments list (only for py2): def foo((x, y))
- arg_fd.write('(')
- arg_fd.write(format_arg_with_annotation(arg[0]))
- for param in arg[1:]:
- arg_fd.write(', ')
- arg_fd.write(format_arg_with_annotation(param))
- arg_fd.write(')')
- else:
- arg_fd.write(format_arg_with_annotation(arg))
- if defaults and i >= defaults_start:
- arg_fd.write(' = ' if arg in annotations else '=')
- arg_fd.write(object_description(defaults[i - defaults_start])) # type: ignore
- formatted.append(arg_fd.getvalue())
-
- if varargs:
- formatted.append('*' + format_arg_with_annotation(varargs))
-
- if kwonlyargs:
- if not varargs:
- formatted.append('*')
-
- for kwarg in kwonlyargs:
- arg_fd = StringIO()
- arg_fd.write(format_arg_with_annotation(kwarg))
- if kwonlydefaults and kwarg in kwonlydefaults:
- arg_fd.write(' = ' if kwarg in annotations else '=')
- arg_fd.write(object_description(kwonlydefaults[kwarg])) # type: ignore
- formatted.append(arg_fd.getvalue())
-
- if varkw:
- formatted.append('**' + format_arg_with_annotation(varkw))
-
- fd.write(', '.join(formatted))
- fd.write(')')
-
- if 'return' in annotations:
- fd.write(' -> ')
- fd.write(format_annotation(get_annotation('return')))
-
- return fd.getvalue()
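The removed helpers point to sphinx.util.inspect.Signature as their replacement; a rough sketch of that API, with the method name assumed from the deprecation messages above and the Sphinx codebase of this era:

    from sphinx.util.inspect import Signature

    def greet(name: str, excited: bool = False) -> str:
        return name + ('!' if excited else '')

    sig = Signature(greet)
    print(sig.format_args())   # roughly "(name: str, excited: bool = False) -> str"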
diff --git a/sphinx/ext/autosectionlabel.py b/sphinx/ext/autosectionlabel.py
index f6702bc5b..f20864fe2 100644
--- a/sphinx/ext/autosectionlabel.py
+++ b/sphinx/ext/autosectionlabel.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.autosectionlabel
~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -9,6 +8,8 @@
:license: BSD, see LICENSE for details.
"""
+from typing import cast
+
from docutils import nodes
from sphinx.locale import __
@@ -36,12 +37,13 @@ def register_sections_as_label(app, document):
for node in document.traverse(nodes.section):
labelid = node['ids'][0]
docname = app.env.docname
- ref_name = getattr(node[0], 'rawsource', node[0].astext())
+ title = cast(nodes.title, node[0])
+ ref_name = getattr(title, 'rawsource', title.astext())
if app.config.autosectionlabel_prefix_document:
name = nodes.fully_normalize_name(docname + ':' + ref_name)
else:
name = nodes.fully_normalize_name(ref_name)
- sectname = clean_astext(node[0])
+ sectname = clean_astext(title)
if name in labels:
logger.warning(__('duplicate label %s, other instance in %s'),
@@ -53,7 +55,7 @@ def register_sections_as_label(app, document):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_config_value('autosectionlabel_prefix_document', False, 'env')
app.connect('doctree-read', register_sections_as_label)
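For context, the conf.py side of this extension is small; a sketch using the config value registered above:

    # conf.py
    extensions = ['sphinx.ext.autosectionlabel']
    # with the prefix enabled, a section is referenced as :ref:`docname:Section Title`
    autosectionlabel_prefix_document = True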
diff --git a/sphinx/ext/autosummary/__init__.py b/sphinx/ext/autosummary/__init__.py
index c2a498fec..a18ce2a6d 100644
--- a/sphinx/ext/autosummary/__init__.py
+++ b/sphinx/ext/autosummary/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.autosummary
~~~~~~~~~~~~~~~~~~~~~~
@@ -60,17 +59,16 @@ import re
import sys
import warnings
from types import ModuleType
+from typing import List, cast
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.parsers.rst.states import RSTStateMachine, state_classes
-from docutils.statemachine import ViewList
-from six import string_types
-from six import text_type
+from docutils.statemachine import StringList
import sphinx
from sphinx import addnodes
-from sphinx.deprecation import RemovedInSphinx20Warning
+from sphinx.deprecation import RemovedInSphinx40Warning
from sphinx.environment.adapters.toctree import TocTree
from sphinx.ext.autodoc import get_documenters
from sphinx.ext.autodoc.directive import DocumenterBridge, Options
@@ -85,11 +83,12 @@ from sphinx.util.matching import Matcher
if False:
# For type annotation
- from typing import Any, Dict, List, Tuple, Type, Union # NOQA
- from docutils.utils import Inliner # NOQA
+ from typing import Any, Dict, Tuple, Type # NOQA
+ from docutils.parsers.rst.states import Inliner # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.ext.autodoc import Documenter # NOQA
+ from sphinx.writers.html import HTMLTranslator # NOQA
logger = logging.getLogger(__name__)
@@ -105,7 +104,7 @@ class autosummary_toc(nodes.comment):
def process_autosummary_toc(app, doctree):
- # type: (Sphinx, nodes.Node) -> None
+ # type: (Sphinx, nodes.document) -> None
"""Insert items described in autosummary:: to the TOC tree, but do
not generate the toctree:: list.
"""
@@ -113,7 +112,7 @@ def process_autosummary_toc(app, doctree):
crawled = {}
def crawl_toc(node, depth=1):
- # type: (nodes.Node, int) -> None
+ # type: (nodes.Element, int) -> None
crawled[node] = True
for j, subnode in enumerate(node):
try:
@@ -148,17 +147,19 @@ class autosummary_table(nodes.comment):
def autosummary_table_visit_html(self, node):
- # type: (nodes.NodeVisitor, autosummary_table) -> None
+ # type: (HTMLTranslator, autosummary_table) -> None
"""Make the first column of the table non-breaking."""
try:
- tbody = node[0][0][-1]
- for row in tbody:
- col1_entry = row[0]
- par = col1_entry[0]
+ table = cast(nodes.table, node[0])
+ tgroup = cast(nodes.tgroup, table[0])
+ tbody = cast(nodes.tbody, tgroup[-1])
+ rows = cast(List[nodes.row], tbody)
+ for row in rows:
+ col1_entry = cast(nodes.entry, row[0])
+ par = cast(nodes.paragraph, col1_entry[0])
for j, subnode in enumerate(list(par)):
if isinstance(subnode, nodes.Text):
- new_text = text_type(subnode.astext())
- new_text = new_text.replace(u" ", u"\u00a0")
+ new_text = subnode.astext().replace(" ", "\u00a0")
par[j] = nodes.Text(new_text)
except IndexError:
pass
@@ -173,11 +174,11 @@ _app = None # type: Sphinx
class FakeDirective(DocumenterBridge):
def __init__(self):
# type: () -> None
- super(FakeDirective, self).__init__({}, None, Options(), 0) # type: ignore
+ super().__init__({}, None, Options(), 0) # type: ignore
-def get_documenter(*args):
- # type: (Any) -> Type[Documenter]
+def get_documenter(app, obj, parent):
+ # type: (Sphinx, Any, Any) -> Type[Documenter]
"""Get an autodoc.Documenter class suitable for documenting the given
object.
@@ -186,16 +187,6 @@ def get_documenter(*args):
belongs to.
"""
from sphinx.ext.autodoc import DataDocumenter, ModuleDocumenter
- if len(args) == 3:
- # new style arguments: (app, obj, parent)
- app, obj, parent = args
- else:
- # old style arguments: (obj, parent)
- app = _app
- obj, parent = args
- warnings.warn('the interface of get_documenter() has been changed. '
- 'Please give application object as first argument.',
- RemovedInSphinx20Warning, stacklevel=2)
if inspect.ismodule(obj):
# ModuleDocumenter.can_document_member always returns False
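With the old two-argument form removed above, get_documenter() now always takes the application object first; a minimal calling sketch in which app, obj, parent and bridge are placeholders for whatever the caller already has:

    from sphinx.ext.autosummary import get_documenter

    doccls = get_documenter(app, obj, parent)      # e.g. ClassDocumenter for a class
    documenter = doccls(bridge, 'mypkg.MyClass')   # bridge: a DocumenterBridge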
@@ -241,16 +232,10 @@ class Autosummary(SphinxDirective):
'template': directives.unchanged,
}
- def warn(self, msg):
- # type: (unicode) -> None
- self.warnings.append(self.state.document.reporter.warning(
- msg, line=self.lineno))
-
def run(self):
# type: () -> List[nodes.Node]
- self.genopt = Options()
- self.warnings = [] # type: List[nodes.Node]
- self.result = ViewList()
+ self.bridge = DocumenterBridge(self.env, self.state.document.reporter,
+ Options(), self.lineno)
names = [x.strip().split()[0] for x in self.content
if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])]
@@ -268,11 +253,9 @@ class Autosummary(SphinxDirective):
docname = posixpath.normpath(posixpath.join(dirname, docname))
if docname not in self.env.found_docs:
if excluded(self.env.doc2path(docname, None)):
- self.warn('toctree references excluded document %r'
- % docname)
+ logger.warning(__('toctree references excluded document %r'), docname)
else:
- self.warn('toctree references unknown document %r'
- % docname)
+ logger.warning(__('toctree references unknown document %r'), docname)
docnames.append(docname)
tocnode = addnodes.toctree()
@@ -281,19 +264,18 @@ class Autosummary(SphinxDirective):
tocnode['maxdepth'] = -1
tocnode['glob'] = None
- tocnode = autosummary_toc('', '', tocnode)
- nodes.append(tocnode)
+ nodes.append(autosummary_toc('', '', tocnode))
- return self.warnings + nodes
+ return nodes
def get_items(self, names):
- # type: (List[unicode]) -> List[Tuple[unicode, unicode, unicode, unicode]]
+ # type: (List[str]) -> List[Tuple[str, str, str, str]]
"""Try to import the given names, and return a list of
``[(name, signature, summary_string, real_name), ...]``.
"""
prefixes = get_import_prefixes_from_env(self.env)
- items = [] # type: List[Tuple[unicode, unicode, unicode, unicode]]
+ items = [] # type: List[Tuple[str, str, str, str]]
max_item_chars = 50
@@ -306,11 +288,11 @@ class Autosummary(SphinxDirective):
try:
real_name, obj, parent, modname = import_by_name(name, prefixes=prefixes)
except ImportError:
- self.warn('failed to import %s' % name)
+ logger.warning(__('failed to import %s'), name)
items.append((name, '', '', name))
continue
- self.result = ViewList() # initialize for each documenter
+ self.bridge.result = StringList() # initialize for each documenter
full_name = real_name
if not isinstance(obj, ModuleType):
# give explicitly separated module name, so that members
@@ -318,13 +300,14 @@ class Autosummary(SphinxDirective):
full_name = modname + '::' + full_name[len(modname) + 1:]
# NB. using full_name here is important, since Documenters
# handle module prefixes slightly differently
- documenter = get_documenter(self.env.app, obj, parent)(self, full_name)
+ doccls = get_documenter(self.env.app, obj, parent)
+ documenter = doccls(self.bridge, full_name)
if not documenter.parse_name():
- self.warn('failed to parse name %s' % real_name)
+ logger.warning(__('failed to parse name %s'), real_name)
items.append((display_name, '', '', real_name))
continue
if not documenter.import_object():
- self.warn('failed to import object %s' % real_name)
+ logger.warning(__('failed to import object %s'), real_name)
items.append((display_name, '', '', real_name))
continue
if documenter.options.members and not documenter.check_module():
@@ -354,14 +337,14 @@ class Autosummary(SphinxDirective):
# -- Grab the summary
documenter.add_content(None)
- summary = extract_summary(self.result.data[:], self.state.document)
+ summary = extract_summary(self.bridge.result.data[:], self.state.document)
items.append((display_name, sig, summary, real_name))
return items
def get_table(self, items):
- # type: (List[Tuple[unicode, unicode, unicode, unicode]]) -> List[Union[addnodes.tabular_col_spec, autosummary_table]] # NOQA
+ # type: (List[Tuple[str, str, str, str]]) -> List[nodes.Node]
"""Generate a proper list of table nodes for autosummary:: directive.
*items* is a list produced by :meth:`get_items`.
@@ -380,12 +363,12 @@ class Autosummary(SphinxDirective):
group.append(body)
def append_row(*column_texts):
- # type: (unicode) -> None
+ # type: (str) -> None
row = nodes.row('')
source, line = self.state_machine.get_source_and_line()
for text in column_texts:
node = nodes.paragraph('')
- vl = ViewList()
+ vl = StringList()
vl.append(text, '%s:%d:<autosummary>' % (source, line))
with switch_source_input(self.state, vl):
self.state.nested_parse(vl, 0, node)
@@ -400,7 +383,7 @@ class Autosummary(SphinxDirective):
for name, sig, summary, real_name in items:
qualifier = 'obj'
if 'nosignatures' not in self.options:
- col1 = ':%s:`%s <%s>`\\ %s' % (qualifier, name, real_name, rst.escape(sig)) # type: unicode # NOQA
+ col1 = ':%s:`%s <%s>`\\ %s' % (qualifier, name, real_name, rst.escape(sig))
else:
col1 = ':%s:`%s <%s>`' % (qualifier, name, real_name)
col2 = summary
@@ -408,15 +391,42 @@ class Autosummary(SphinxDirective):
return [table_spec, table]
+ def warn(self, msg):
+ # type: (str) -> None
+ warnings.warn('Autosummary.warn() is deprecated',
+ RemovedInSphinx40Warning, stacklevel=2)
+ logger.warning(msg)
+
+ @property
+ def genopt(self):
+ # type: () -> Options
+ warnings.warn('Autosummary.genopt is deprecated',
+ RemovedInSphinx40Warning, stacklevel=2)
+ return self.bridge.genopt
+
+ @property
+ def warnings(self):
+ # type: () -> List[nodes.Node]
+ warnings.warn('Autosummary.warnings is deprecated',
+ RemovedInSphinx40Warning, stacklevel=2)
+ return []
+
+ @property
+ def result(self):
+ # type: () -> StringList
+ warnings.warn('Autosummary.result is deprecated',
+ RemovedInSphinx40Warning, stacklevel=2)
+ return self.bridge.result
+
def strip_arg_typehint(s):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Strip a type hint from argument definition."""
return s.split(':')[0].strip()
def mangle_signature(sig, max_chars=30):
- # type: (unicode, int) -> unicode
+ # type: (str, int) -> str
"""Reformat a function signature to a more compact form."""
# Strip return type annotation
s = re.sub(r"\)\s*->\s.*$", ")", sig)
@@ -430,8 +440,8 @@ def mangle_signature(sig, max_chars=30):
s = re.sub(r"'[^']*'", "", s)
# Parse the signature to arguments + options
- args = [] # type: List[unicode]
- opts = [] # type: List[unicode]
+ args = [] # type: List[str]
+ opts = [] # type: List[str]
opt_re = re.compile(r"^(.*, |)([a-zA-Z0-9_*]+)=")
while s:
@@ -460,11 +470,11 @@ def mangle_signature(sig, max_chars=30):
sig += "[, %s]" % limited_join(", ", opts,
max_chars=max_chars - len(sig) - 4 - 2)
- return u"(%s)" % sig
+ return "(%s)" % sig
def extract_summary(doc, document):
- # type: (List[unicode], Any) -> unicode
+ # type: (List[str], Any) -> str
"""Extract summary from docstring."""
# Skip a blank lines at the top
@@ -513,7 +523,7 @@ def extract_summary(doc, document):
def limited_join(sep, items, max_chars=30, overflow_marker="..."):
- # type: (unicode, List[unicode], int, unicode) -> unicode
+ # type: (str, List[str], int, str) -> str
"""Join a number of strings to one, limiting the length to *max_chars*.
If the string overflows this limit, replace the last fitting item by
@@ -540,12 +550,12 @@ def limited_join(sep, items, max_chars=30, overflow_marker="..."):
# -- Importing items -----------------------------------------------------------
def get_import_prefixes_from_env(env):
- # type: (BuildEnvironment) -> List
+ # type: (BuildEnvironment) -> List[str]
"""
Obtain current Python import prefixes (for `import_by_name`)
from ``document.env``
"""
- prefixes = [None] # type: List
+ prefixes = [None] # type: List[str]
currmodule = env.ref_context.get('py:module')
if currmodule:
@@ -562,7 +572,7 @@ def get_import_prefixes_from_env(env):
def import_by_name(name, prefixes=[None]):
- # type: (unicode, List) -> Tuple[unicode, Any, Any, unicode]
+ # type: (str, List[str]) -> Tuple[str, Any, Any, str]
"""Import a Python object that has the given *name*, under one of the
*prefixes*. The first name that succeeds is used.
"""
@@ -581,7 +591,7 @@ def import_by_name(name, prefixes=[None]):
def _import_by_name(name):
- # type: (str) -> Tuple[Any, Any, unicode]
+ # type: (str) -> Tuple[Any, Any, str]
"""Import a Python object given its full name."""
try:
name_parts = name.split('.')
@@ -625,40 +635,42 @@ def _import_by_name(name):
# -- :autolink: (smart default role) -------------------------------------------
def autolink_role(typ, rawtext, etext, lineno, inliner, options={}, content=[]):
- # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA
+ # type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
"""Smart linking role.
Expands to ':obj:`text`' if `text` is an object that can be imported;
otherwise expands to '*text*'.
"""
env = inliner.document.settings.env
- r = None # type: Tuple[List[nodes.Node], List[nodes.Node]]
- r = env.get_domain('py').role('obj')(
- 'obj', rawtext, etext, lineno, inliner, options, content)
- pnode = r[0][0]
+ pyobj_role = env.get_domain('py').role('obj')
+ objects, msg = pyobj_role('obj', rawtext, etext, lineno, inliner, options, content)
+ if msg != []:
+ return objects, msg
+ assert len(objects) == 1
+ pending_xref = cast(addnodes.pending_xref, objects[0])
prefixes = get_import_prefixes_from_env(env)
try:
- name, obj, parent, modname = import_by_name(pnode['reftarget'], prefixes)
+ name, obj, parent, modname = import_by_name(pending_xref['reftarget'], prefixes)
except ImportError:
- content_node = pnode[0]
- r[0][0] = nodes.emphasis(rawtext, content_node[0].astext(),
- classes=content_node['classes'])
- return r
+ literal = cast(nodes.literal, pending_xref[0])
+ objects[0] = nodes.emphasis(rawtext, literal.astext(), classes=literal['classes'])
+
+ return objects, msg
def get_rst_suffix(app):
- # type: (Sphinx) -> unicode
+ # type: (Sphinx) -> str
def get_supported_format(suffix):
- # type: (unicode) -> Tuple[unicode]
+ # type: (str) -> Tuple[str, ...]
parser_class = app.registry.get_source_parsers().get(suffix)
if parser_class is None:
return ('restructuredtext',)
- if isinstance(parser_class, string_types):
- parser_class = import_object(parser_class, 'source parser') # type: ignore
+ if isinstance(parser_class, str):
+ parser_class = import_object(parser_class, 'source parser')
return parser_class.supported
- suffix = None # type: unicode
+ suffix = None # type: str
for suffix in app.config.source_suffix:
if 'restructuredtext' in get_supported_format(suffix):
return suffix
@@ -697,7 +709,7 @@ def process_generate_options(app):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
# I need autodoc
app.setup_extension('sphinx.ext.autodoc')
app.add_node(autosummary_toc,
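Among the helpers retyped in the hunks above, mangle_signature() is the one with visible behaviour; a rough sketch of what it does to a signature string (exact output can shift with max_chars):

    from sphinx.ext.autosummary import mangle_signature

    print(mangle_signature('(app, obj, parent=None) -> Documenter'))
    # roughly '(app, obj[, parent])': defaults and the return annotation are dropped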
diff --git a/sphinx/ext/autosummary/generate.py b/sphinx/ext/autosummary/generate.py
index a95cbbf13..83918e998 100644
--- a/sphinx/ext/autosummary/generate.py
+++ b/sphinx/ext/autosummary/generate.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.autosummary.generate
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -17,10 +16,8 @@
:copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import argparse
-import codecs
import locale
import os
import pydoc
@@ -43,15 +40,14 @@ from sphinx.util.rst import escape as rst_escape
if False:
# For type annotation
- from typing import Any, Callable, Dict, List, Tuple, Type # NOQA
- from jinja2 import BaseLoader # NOQA
+ from typing import Any, Callable, Dict, List, Tuple, Type, Union # NOQA
from sphinx import addnodes # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.ext.autodoc import Documenter # NOQA
-class DummyApplication(object):
+class DummyApplication:
"""Dummy Application class for sphinx-autogen command."""
def __init__(self):
@@ -76,17 +72,17 @@ def setup_documenters(app):
def _simple_info(msg):
- # type: (unicode) -> None
+ # type: (str) -> None
print(msg)
def _simple_warn(msg):
- # type: (unicode) -> None
+ # type: (str) -> None
print('WARNING: ' + msg, file=sys.stderr)
def _underline(title, line='='):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
if '\n' in title:
raise ValueError('Can only underline single lines')
return title + '\n' + line * len(title)
@@ -98,7 +94,7 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
warn=_simple_warn, info=_simple_info,
base_path=None, builder=None, template_dir=None,
imported_members=False, app=None):
- # type: (List[unicode], unicode, unicode, Callable, Callable, unicode, Builder, unicode, bool, Any) -> None # NOQA
+ # type: (List[str], str, str, Callable, Callable, str, Builder, str, bool, Any) -> None
showed_sources = list(sorted(sources))
if len(showed_sources) > 20:
@@ -113,11 +109,11 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
sources = [os.path.join(base_path, filename) for filename in sources]
# create our own templating environment
- template_dirs = None # type: List[unicode]
+ template_dirs = None # type: List[str]
template_dirs = [os.path.join(package_dir, 'ext',
'autosummary', 'templates')]
- template_loader = None # type: BaseLoader
+ template_loader = None # type: Union[BuiltinTemplateLoader, FileSystemLoader]
if builder is not None:
# allow the user to override the templates
template_loader = BuiltinTemplateLoader()
@@ -176,8 +172,8 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
template = template_env.get_template('autosummary/base.rst')
def get_members(obj, typ, include_public=[], imported=True):
- # type: (Any, unicode, List[unicode], bool) -> Tuple[List[unicode], List[unicode]] # NOQA
- items = [] # type: List[unicode]
+ # type: (Any, str, List[str], bool) -> Tuple[List[str], List[str]]
+ items = [] # type: List[str]
for name in dir(obj):
try:
value = safe_getattr(obj, name)
@@ -192,7 +188,7 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
if x in include_public or not x.startswith('_')]
return public, items
- ns = {} # type: Dict[unicode, Any]
+ ns = {} # type: Dict[str, Any]
if doc.objtype == 'module':
ns['members'] = dir(obj)
@@ -229,7 +225,7 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
ns['underline'] = len(name) * '='
rendered = template.render(**ns)
- f.write(rendered) # type: ignore
+ f.write(rendered)
# descend recursively to new files
if new_files:
@@ -242,22 +238,21 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
# -- Finding documented entries in files ---------------------------------------
def find_autosummary_in_files(filenames):
- # type: (List[unicode]) -> List[Tuple[unicode, unicode, unicode]]
+ # type: (List[str]) -> List[Tuple[str, str, str]]
"""Find out what items are documented in source/*.rst.
See `find_autosummary_in_lines`.
"""
- documented = [] # type: List[Tuple[unicode, unicode, unicode]]
+ documented = [] # type: List[Tuple[str, str, str]]
for filename in filenames:
- with codecs.open(filename, 'r', encoding='utf-8', # type: ignore
- errors='ignore') as f:
+ with open(filename, encoding='utf-8', errors='ignore') as f:
lines = f.read().splitlines()
documented.extend(find_autosummary_in_lines(lines, filename=filename))
return documented
def find_autosummary_in_docstring(name, module=None, filename=None):
- # type: (unicode, Any, unicode) -> List[Tuple[unicode, unicode, unicode]]
+ # type: (str, Any, str) -> List[Tuple[str, str, str]]
"""Find out what items are documented in the given object's docstring.
See `find_autosummary_in_lines`.
@@ -265,7 +260,7 @@ def find_autosummary_in_docstring(name, module=None, filename=None):
try:
real_name, obj, parent, modname = import_by_name(name)
lines = pydoc.getdoc(obj).splitlines()
- return find_autosummary_in_lines(lines, module=name, filename=filename) # type: ignore
+ return find_autosummary_in_lines(lines, module=name, filename=filename)
except AttributeError:
pass
except ImportError as e:
@@ -277,7 +272,7 @@ def find_autosummary_in_docstring(name, module=None, filename=None):
def find_autosummary_in_lines(lines, module=None, filename=None):
- # type: (List[unicode], Any, unicode) -> List[Tuple[unicode, unicode, unicode]]
+ # type: (List[str], Any, str) -> List[Tuple[str, str, str]]
"""Find out what items appear in autosummary:: directives in the
given lines.
@@ -297,13 +292,13 @@ def find_autosummary_in_lines(lines, module=None, filename=None):
toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$')
template_arg_re = re.compile(r'^\s+:template:\s*(.*?)\s*$')
- documented = [] # type: List[Tuple[unicode, unicode, unicode]]
+ documented = [] # type: List[Tuple[str, str, str]]
- toctree = None # type: unicode
+ toctree = None # type: str
template = None
current_module = module
in_autosummary = False
- base_indent = "" # type: unicode
+ base_indent = ""
for line in lines:
if in_autosummary:
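A short sketch of the scanning entry point whose types were tightened above; it returns (name, toctree, template) triples for every autosummary entry found in the given reST files (the file name below is illustrative):

    from sphinx.ext.autosummary.generate import find_autosummary_in_files

    for name, toctree, template in find_autosummary_in_files(['docs/api.rst']):
        print(name, toctree, template)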
diff --git a/sphinx/ext/coverage.py b/sphinx/ext/coverage.py
index 9a50fc260..15e738497 100644
--- a/sphinx/ext/coverage.py
+++ b/sphinx/ext/coverage.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.coverage
~~~~~~~~~~~~~~~~~~~
@@ -12,12 +11,10 @@
import glob
import inspect
+import pickle
import re
from os import path
-from six import iteritems
-from six.moves import cPickle as pickle
-
import sphinx
from sphinx.builders import Builder
from sphinx.locale import __
@@ -34,13 +31,13 @@ logger = logging.getLogger(__name__)
# utility
def write_header(f, text, char='-'):
- # type:(IO, unicode, unicode) -> None
+ # type:(IO, str, str) -> None
f.write(text + '\n')
f.write(char * len(text) + '\n')
def compile_regex_list(name, exps):
- # type: (unicode, unicode) -> List[Pattern]
+ # type: (str, str) -> List[Pattern]
lst = []
for exp in exps:
try:
@@ -60,20 +57,20 @@ class CoverageBuilder(Builder):
def init(self):
# type: () -> None
- self.c_sourcefiles = [] # type: List[unicode]
+ self.c_sourcefiles = [] # type: List[str]
for pattern in self.config.coverage_c_path:
pattern = path.join(self.srcdir, pattern)
self.c_sourcefiles.extend(glob.glob(pattern))
- self.c_regexes = [] # type: List[Tuple[unicode, Pattern]]
+ self.c_regexes = [] # type: List[Tuple[str, Pattern]]
for (name, exp) in self.config.coverage_c_regexes.items():
try:
self.c_regexes.append((name, re.compile(exp)))
except Exception:
logger.warning(__('invalid regex %r in coverage_c_regexes'), exp)
- self.c_ignorexps = {} # type: Dict[unicode, List[Pattern]]
- for (name, exps) in iteritems(self.config.coverage_ignore_c_items):
+ self.c_ignorexps = {} # type: Dict[str, List[Pattern]]
+ for (name, exps) in self.config.coverage_ignore_c_items.items():
self.c_ignorexps[name] = compile_regex_list('coverage_ignore_c_items',
exps)
self.mod_ignorexps = compile_regex_list('coverage_ignore_modules',
@@ -84,16 +81,16 @@ class CoverageBuilder(Builder):
self.config.coverage_ignore_functions)
def get_outdated_docs(self):
- # type: () -> unicode
+ # type: () -> str
return 'coverage overview'
def write(self, *ignored):
# type: (Any) -> None
- self.py_undoc = {} # type: Dict[unicode, Dict[unicode, Any]]
+ self.py_undoc = {} # type: Dict[str, Dict[str, Any]]
self.build_py_coverage()
self.write_py_coverage()
- self.c_undoc = {} # type: Dict[unicode, Set[Tuple[unicode, unicode]]]
+ self.c_undoc = {} # type: Dict[str, Set[Tuple[str, str]]]
self.build_c_coverage()
self.write_c_coverage()
@@ -102,8 +99,8 @@ class CoverageBuilder(Builder):
# Fetch all the info from the header files
c_objects = self.env.domaindata['c']['objects']
for filename in self.c_sourcefiles:
- undoc = set() # type: Set[Tuple[unicode, unicode]]
- with open(filename, 'r') as f:
+ undoc = set() # type: Set[Tuple[str, str]]
+ with open(filename) as f:
for line in f:
for key, regex in self.c_regexes:
match = regex.match(line)
@@ -127,7 +124,7 @@ class CoverageBuilder(Builder):
write_header(op, 'Undocumented C API elements', '=')
op.write('\n')
- for filename, undoc in iteritems(self.c_undoc):
+ for filename, undoc in self.c_undoc.items():
write_header(op, filename)
for typ, name in sorted(undoc):
op.write(' * %-50s [%9s]\n' % (name, typ))
@@ -157,7 +154,7 @@ class CoverageBuilder(Builder):
continue
funcs = []
- classes = {} # type: Dict[unicode, List[unicode]]
+ classes = {} # type: Dict[str, List[str]]
for name, obj in inspect.getmembers(mod):
# diverse module attributes are ignored:
@@ -194,7 +191,7 @@ class CoverageBuilder(Builder):
classes[name] = []
continue
- attrs = [] # type: List[unicode]
+ attrs = [] # type: List[str]
for attr_name in dir(obj):
if attr_name not in obj.__dict__:
@@ -247,7 +244,7 @@ class CoverageBuilder(Builder):
if undoc['classes']:
op.write('Classes:\n')
for name, methods in sorted(
- iteritems(undoc['classes'])):
+ undoc['classes'].items()):
if not methods:
op.write(' * %s\n' % name)
else:
@@ -268,7 +265,7 @@ class CoverageBuilder(Builder):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_builder(CoverageBuilder)
app.add_config_value('coverage_ignore_modules', [], False)
app.add_config_value('coverage_ignore_functions', [], False)
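A conf.py sketch for the builder above; only option names that appear in these hunks are used, and the regex values are illustrative (each C regex must capture the name in its first group, as build_c_coverage() expects):

    # conf.py
    extensions = ['sphinx.ext.coverage']
    coverage_ignore_modules = [r'mypkg\.vendor\..*']
    coverage_ignore_functions = ['main']
    coverage_c_regexes = {'function': r'^PyAPI_FUNC\(.*\)\s+([A-Za-z_0-9]+)'}

Running sphinx-build with the 'coverage' builder then writes the undocumented-API report into the output directory.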
diff --git a/sphinx/ext/doctest.py b/sphinx/ext/doctest.py
index f0c418ccf..23f3063cd 100644
--- a/sphinx/ext/doctest.py
+++ b/sphinx/ext/doctest.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.doctest
~~~~~~~~~~~~~~~~~~
@@ -9,33 +8,33 @@
:copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import absolute_import
-import codecs
import doctest
import re
import sys
import time
+import warnings
+from io import StringIO
from os import path
from docutils import nodes
from docutils.parsers.rst import directives
from packaging.specifiers import SpecifierSet, InvalidSpecifier
from packaging.version import Version
-from six import itervalues, StringIO, binary_type, text_type, PY2
import sphinx
from sphinx.builders import Builder
+from sphinx.deprecation import RemovedInSphinx40Warning
from sphinx.locale import __
-from sphinx.util import force_decode, logging
+from sphinx.util import logging
from sphinx.util.console import bold # type: ignore
from sphinx.util.docutils import SphinxDirective
from sphinx.util.nodes import set_source_info
-from sphinx.util.osutil import fs_encoding, relpath
+from sphinx.util.osutil import relpath
if False:
# For type annotation
- from typing import Any, Callable, Dict, IO, Iterable, List, Optional, Sequence, Set, Tuple # NOQA
+ from typing import Any, Callable, Dict, IO, Iterable, List, Optional, Sequence, Set, Tuple, Type # NOQA
from sphinx.application import Sphinx # NOQA
logger = logging.getLogger(__name__)
@@ -43,22 +42,16 @@ logger = logging.getLogger(__name__)
blankline_re = re.compile(r'^\s*<BLANKLINE>', re.MULTILINE)
doctestopt_re = re.compile(r'#\s*doctest:.+$', re.MULTILINE)
-if PY2:
- def doctest_encode(text, encoding):
- # type: (str, unicode) -> unicode
- if isinstance(text, text_type):
- text = text.encode(encoding)
- if text.startswith(codecs.BOM_UTF8):
- text = text[len(codecs.BOM_UTF8):]
- return text
-else:
- def doctest_encode(text, encoding):
- # type: (unicode, unicode) -> unicode
- return text
+
+def doctest_encode(text, encoding):
+ # type: (str, str) -> str
+ warnings.warn('doctest_encode() is deprecated.',
+ RemovedInSphinx40Warning)
+ return text
def is_allowed_version(spec, version):
- # type: (unicode, unicode) -> bool
+ # type: (str, str) -> bool
"""Check `spec` satisfies `version` or not.
This obeys PEP-440 specifiers:
@@ -113,7 +106,7 @@ class TestDirective(SphinxDirective):
if not test:
test = code
code = doctestopt_re.sub('', code)
- nodetype = nodes.literal_block
+ nodetype = nodes.literal_block # type: Type[nodes.TextElement]
if self.name in ('testsetup', 'testcleanup') or 'hide' in self.options:
nodetype = nodes.comment
if self.arguments:
@@ -126,9 +119,15 @@ class TestDirective(SphinxDirective):
# only save if it differs from code
node['test'] = test
if self.name == 'doctest':
- node['language'] = 'pycon'
+ if self.config.highlight_language in ('py', 'python'):
+ node['language'] = 'pycon'
+ else:
+ node['language'] = 'pycon3' # default
elif self.name == 'testcode':
- node['language'] = 'python'
+ if self.config.highlight_language in ('py', 'python'):
+ node['language'] = 'python'
+ else:
+ node['language'] = 'python3' # default
elif self.name == 'testoutput':
# don't try to highlight output
node['language'] = 'none'
@@ -203,9 +202,9 @@ parser = doctest.DocTestParser()
# helper classes
-class TestGroup(object):
+class TestGroup:
def __init__(self, name):
- # type: (unicode) -> None
+ # type: (str) -> None
self.name = name
self.setup = [] # type: List[TestCode]
self.tests = [] # type: List[List[TestCode]]
@@ -230,23 +229,23 @@ class TestGroup(object):
else:
raise RuntimeError(__('invalid TestCode type'))
- def __repr__(self): # type: ignore
- # type: () -> unicode
+ def __repr__(self):
+ # type: () -> str
return 'TestGroup(name=%r, setup=%r, cleanup=%r, tests=%r)' % (
self.name, self.setup, self.cleanup, self.tests)
-class TestCode(object):
+class TestCode:
def __init__(self, code, type, filename, lineno, options=None):
- # type: (unicode, unicode, Optional[str], int, Optional[Dict]) -> None
+ # type: (str, str, Optional[str], int, Optional[Dict]) -> None
self.code = code
self.type = type
self.filename = filename
self.lineno = lineno
self.options = options or {}
- def __repr__(self): # type: ignore
- # type: () -> unicode
+ def __repr__(self):
+ # type: () -> str
return 'TestCode(%r, %r, filename=%r, lineno=%r, options=%r)' % (
self.code, self.type, self.filename, self.lineno, self.options)
@@ -258,7 +257,7 @@ class SphinxDocTestRunner(doctest.DocTestRunner):
old_stdout = sys.stdout
sys.stdout = string_io
try:
- res = doctest.DocTestRunner.summarize(self, verbose)
+ res = super().summarize(verbose)
finally:
sys.stdout = old_stdout
out(string_io.getvalue())
@@ -266,7 +265,7 @@ class SphinxDocTestRunner(doctest.DocTestRunner):
def _DocTestRunner__patched_linecache_getlines(self, filename,
module_globals=None):
- # type: (unicode, Any) -> Any
+ # type: (str, Any) -> Any
# this is overridden from DocTestRunner adding the try-except below
m = self._DocTestRunner__LINECACHE_FILENAME_RE.match(filename) # type: ignore
if m and m.group('name') == self.test.name:
@@ -317,41 +316,37 @@ class DocTestBuilder(Builder):
date = time.strftime('%Y-%m-%d %H:%M:%S')
- self.outfile = None # type: IO
- self.outfile = codecs.open(path.join(self.outdir, 'output.txt'), # type: ignore
- 'w', encoding='utf-8')
+ self.outfile = open(path.join(self.outdir, 'output.txt'), 'w', encoding='utf-8')
self.outfile.write(('Results of doctest builder run on %s\n'
'==================================%s\n') %
(date, '=' * len(date)))
def _out(self, text):
- # type: (unicode) -> None
+ # type: (str) -> None
logger.info(text, nonl=True)
self.outfile.write(text)
def _warn_out(self, text):
- # type: (unicode) -> None
+ # type: (str) -> None
if self.app.quiet or self.app.warningiserror:
logger.warning(text)
else:
logger.info(text, nonl=True)
- if isinstance(text, binary_type):
- text = force_decode(text, None)
self.outfile.write(text)
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
return ''
def get_outdated_docs(self):
- # type: () -> Set[unicode]
+ # type: () -> Set[str]
return self.env.found_docs
def finish(self):
# type: () -> None
# write executive summary
def s(v):
- # type: (int) -> unicode
+ # type: (int) -> str
return v != 1 and 's' or ''
repl = (self.total_tries, s(self.total_tries),
self.total_failures, s(self.total_failures),
@@ -371,7 +366,7 @@ Doctest summary
self.app.statuscode = 1
def write(self, build_docnames, updated_docnames, method='update'):
- # type: (Iterable[unicode], Sequence[unicode], unicode) -> None
+ # type: (Iterable[str], Sequence[str], str) -> None
if build_docnames is None:
build_docnames = sorted(self.env.all_docs)
@@ -382,7 +377,7 @@ Doctest summary
self.test_doc(docname, doctree)
def get_filename_for_node(self, node, docname):
- # type: (nodes.Node, unicode) -> str
+ # type: (nodes.Node, str) -> str
"""Try to get the file which actually contains the doctest, not the
filename of the document it's included in."""
try:
@@ -390,8 +385,6 @@ Doctest summary
.rsplit(':docstring of ', maxsplit=1)[0]
except Exception:
filename = self.env.doc2path(docname, base=None)
- if PY2:
- return filename.encode(fs_encoding)
return filename
@staticmethod
@@ -412,8 +405,8 @@ Doctest summary
return None
def test_doc(self, docname, doctree):
- # type: (unicode, nodes.Node) -> None
- groups = {} # type: Dict[unicode, TestGroup]
+ # type: (str, nodes.Node) -> None
+ groups = {} # type: Dict[str, TestGroup]
add_to_all_groups = []
self.setup_runner = SphinxDocTestRunner(verbose=False,
optionflags=self.opt)
@@ -436,7 +429,8 @@ Doctest summary
# type: (nodes.Node) -> bool
return isinstance(node, (nodes.literal_block, nodes.comment)) \
and 'testnodetype' in node
- for node in doctree.traverse(condition):
+
+ for node in doctree.traverse(condition): # type: nodes.Element
source = node['test'] if 'test' in node else node.astext()
filename = self.get_filename_for_node(node, docname)
line_number = self.get_line_number(node)
@@ -456,24 +450,24 @@ Doctest summary
groups[groupname] = TestGroup(groupname)
groups[groupname].add_code(code)
for code in add_to_all_groups:
- for group in itervalues(groups):
+ for group in groups.values():
group.add_code(code)
if self.config.doctest_global_setup:
code = TestCode(self.config.doctest_global_setup,
'testsetup', filename=None, lineno=0)
- for group in itervalues(groups):
+ for group in groups.values():
group.add_code(code, prepend=True)
if self.config.doctest_global_cleanup:
code = TestCode(self.config.doctest_global_cleanup,
'testcleanup', filename=None, lineno=0)
- for group in itervalues(groups):
+ for group in groups.values():
group.add_code(code)
if not groups:
return
self._out('\nDocument: %s\n----------%s\n' %
(docname, '-' * len(docname)))
- for group in itervalues(groups):
+ for group in groups.values():
self.test_group(group)
# Separately count results from setup code
res_f, res_t = self.setup_runner.summarize(self._out, verbose=False)
@@ -490,7 +484,7 @@ Doctest summary
self.cleanup_tries += res_t
def compile(self, code, name, type, flags, dont_inherit):
- # type: (unicode, unicode, unicode, Any, bool) -> Any
+ # type: (str, str, str, Any, bool) -> Any
return compile(code, name, self.type, flags, dont_inherit)
def test_group(self, group):
@@ -501,9 +495,8 @@ Doctest summary
# type: (Any, List[TestCode], Any) -> bool
examples = []
for testcode in testcodes:
- examples.append(doctest.Example( # type: ignore
- doctest_encode(testcode.code, self.env.config.source_encoding), '', # type: ignore # NOQA
- lineno=testcode.lineno))
+ example = doctest.Example(testcode.code, '', lineno=testcode.lineno)
+ examples.append(example)
if not examples:
return True
# simulate a doctest with the code
@@ -528,9 +521,8 @@ Doctest summary
if len(code) == 1:
# ordinary doctests (code/output interleaved)
try:
- test = parser.get_doctest( # type: ignore
- doctest_encode(code[0].code, self.env.config.source_encoding), {}, # type: ignore # NOQA
- group.name, code[0].filename, code[0].lineno)
+ test = parser.get_doctest(code[0].code, {}, group.name, # type: ignore
+ code[0].filename, code[0].lineno)
except Exception:
logger.warning(__('ignoring invalid doctest code: %r'), code[0].code,
location=(code[0].filename, code[0].lineno))
@@ -555,12 +547,9 @@ Doctest summary
exc_msg = m.group('msg')
else:
exc_msg = None
- example = doctest.Example( # type: ignore
- doctest_encode(code[0].code, self.env.config.source_encoding), output, # type: ignore # NOQA
- exc_msg=exc_msg,
- lineno=code[0].lineno,
- options=options)
- test = doctest.DocTest([example], {}, group.name, # type: ignore
+ example = doctest.Example(code[0].code, output, exc_msg=exc_msg,
+ lineno=code[0].lineno, options=options)
+ test = doctest.DocTest([example], {}, group.name,
code[0].filename, code[0].lineno, None)
self.type = 'exec' # multiple statements again
# DocTest.__init__ copies the globs namespace, which we don't want
@@ -573,7 +562,7 @@ Doctest summary
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_directive('testsetup', TestsetupDirective)
app.add_directive('testcleanup', TestcleanupDirective)
app.add_directive('doctest', DoctestDirective)
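The language selection added above keys off highlight_language; a conf.py sketch that keeps the legacy lexers instead of the new 'pycon3'/'python3' defaults:

    # conf.py
    extensions = ['sphinx.ext.doctest']
    highlight_language = 'python'   # doctest/testcode blocks then use 'pycon'/'python'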
diff --git a/sphinx/ext/extlinks.py b/sphinx/ext/extlinks.py
index b91aae23a..7ad721b8f 100644
--- a/sphinx/ext/extlinks.py
+++ b/sphinx/ext/extlinks.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.extlinks
~~~~~~~~~~~~~~~~~~~
@@ -25,7 +24,6 @@
"""
from docutils import nodes, utils
-from six import iteritems
import sphinx
from sphinx.util.nodes import split_explicit_title
@@ -39,9 +37,9 @@ if False:
def make_link_role(base_url, prefix):
- # type: (unicode, unicode) -> RoleFunction
+ # type: (str, str) -> RoleFunction
def role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
- # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA
+ # type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
text = utils.unescape(text)
has_explicit_title, title, part = split_explicit_title(text)
try:
@@ -64,12 +62,12 @@ def make_link_role(base_url, prefix):
def setup_link_roles(app):
# type: (Sphinx) -> None
- for name, (base_url, prefix) in iteritems(app.config.extlinks):
+ for name, (base_url, prefix) in app.config.extlinks.items():
app.add_role(name, make_link_role(base_url, prefix))
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_config_value('extlinks', {}, 'env')
app.connect('builder-inited', setup_link_roles)
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
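For reference, each extlinks entry is the (base_url, prefix) pair consumed by make_link_role() above; a conf.py sketch (the GitHub URL is illustrative):

    # conf.py
    extensions = ['sphinx.ext.extlinks']
    extlinks = {
        'issue': ('https://github.com/sphinx-doc/sphinx/issues/%s', 'issue '),
    }
    # :issue:`123` then renders as an external link titled "issue 123"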
diff --git a/sphinx/ext/githubpages.py b/sphinx/ext/githubpages.py
index 4a6488792..f39d1cb58 100644
--- a/sphinx/ext/githubpages.py
+++ b/sphinx/ext/githubpages.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.githubpages
~~~~~~~~~~~~~~~~~~~~~~
@@ -28,6 +27,6 @@ def create_nojekyll(app, env):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.connect('env-updated', create_nojekyll)
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
diff --git a/sphinx/ext/graphviz.py b/sphinx/ext/graphviz.py
index b62d80254..9827d8531 100644
--- a/sphinx/ext/graphviz.py
+++ b/sphinx/ext/graphviz.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.graphviz
~~~~~~~~~~~~~~~~~~~
@@ -10,17 +9,15 @@
:license: BSD, see LICENSE for details.
"""
-import codecs
import posixpath
import re
+import subprocess
from hashlib import sha1
from os import path
-from subprocess import Popen, PIPE
+from subprocess import CalledProcessError, PIPE
from docutils import nodes
from docutils.parsers.rst import directives
-from docutils.statemachine import ViewList
-from six import text_type
import sphinx
from sphinx.errors import SphinxError
@@ -29,13 +26,20 @@ from sphinx.util import logging
from sphinx.util.docutils import SphinxDirective
from sphinx.util.fileutil import copy_asset
from sphinx.util.i18n import search_image_for_language
-from sphinx.util.osutil import ensuredir, ENOENT, EPIPE, EINVAL
+from sphinx.util.nodes import set_source_info
+from sphinx.util.osutil import ensuredir
if False:
# For type annotation
from docutils.parsers.rst import Directive # NOQA
from typing import Any, Dict, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
+ from sphinx.util.docutils import SphinxTranslator # NOQA
+ from sphinx.writers.html import HTMLTranslator # NOQA
+ from sphinx.writers.latex import LaTeXTranslator # NOQA
+ from sphinx.writers.manpage import ManualPageTranslator # NOQA
+ from sphinx.writers.texinfo import TexinfoTranslator # NOQA
+ from sphinx.writers.text import TextTranslator # NOQA
logger = logging.getLogger(__name__)
@@ -44,22 +48,22 @@ class GraphvizError(SphinxError):
category = 'Graphviz error'
-class ClickableMapDefinition(object):
+class ClickableMapDefinition:
"""A manipulator for clickable map file of graphviz."""
maptag_re = re.compile('<map id="(.*?)"')
href_re = re.compile('href=".*?"')
def __init__(self, filename, content, dot=''):
- # type: (unicode, unicode, unicode) -> None
- self.id = None # type: unicode
+ # type: (str, str, str) -> None
+ self.id = None # type: str
self.filename = filename
self.content = content.splitlines()
- self.clickable = [] # type: List[unicode]
+ self.clickable = [] # type: List[str]
self.parse(dot=dot)
def parse(self, dot=None):
- # type: (unicode) -> None
+ # type: (str) -> None
matched = self.maptag_re.match(self.content[0])
if not matched:
raise GraphvizError('Invalid clickable map file found: %s' % self.filename)
@@ -68,7 +72,7 @@ class ClickableMapDefinition(object):
if self.id == '%3':
# graphviz generates wrong ID if graph name not specified
# https://gitlab.com/graphviz/graphviz/issues/1327
- hashed = sha1(dot.encode('utf-8')).hexdigest()
+ hashed = sha1(dot.encode()).hexdigest()
self.id = 'grapviz%s' % hashed[-10:]
self.content[0] = self.content[0].replace('%3', self.id)
@@ -77,7 +81,7 @@ class ClickableMapDefinition(object):
self.clickable.append(line)
def generate_clickable_map(self):
- # type: () -> unicode
+ # type: () -> str
"""Generate clickable map tags if clickable item exists.
If not exists, this only returns empty string.
@@ -93,24 +97,21 @@ class graphviz(nodes.General, nodes.Inline, nodes.Element):
def figure_wrapper(directive, node, caption):
- # type: (Directive, nodes.Node, unicode) -> nodes.figure
+ # type: (Directive, graphviz, str) -> nodes.figure
figure_node = nodes.figure('', node)
if 'align' in node:
figure_node['align'] = node.attributes.pop('align')
- parsed = nodes.Element()
- directive.state.nested_parse(ViewList([caption], source=''),
- directive.content_offset, parsed)
- caption_node = nodes.caption(parsed[0].rawsource, '',
- *parsed[0].children)
- caption_node.source = parsed[0].source
- caption_node.line = parsed[0].line
+ inodes, messages = directive.state.inline_text(caption, directive.lineno)
+ caption_node = nodes.caption(caption, '', *inodes)
+ caption_node.extend(messages)
+ set_source_info(directive, caption_node)
figure_node += caption_node
return figure_node
def align_spec(argument):
- # type: (Any) -> bool
+ # type: (Any) -> str
return directives.choice(argument, ('left', 'center', 'right'))
@@ -142,9 +143,9 @@ class Graphviz(SphinxDirective):
rel_filename, filename = self.env.relfn2path(argument)
self.env.note_dependency(rel_filename)
try:
- with codecs.open(filename, 'r', 'utf-8') as fp: # type: ignore
+ with open(filename, encoding='utf-8') as fp:
dotcode = fp.read()
- except (IOError, OSError):
+ except OSError:
return [document.reporter.warning(
__('External Graphviz file %r not found or reading '
'it failed') % filename, line=self.lineno)]
@@ -165,12 +166,13 @@ class Graphviz(SphinxDirective):
if 'align' in self.options:
node['align'] = self.options['align']
- caption = self.options.get('caption')
- if caption:
- node = figure_wrapper(self, node, caption)
-
- self.add_name(node)
- return [node]
+ if 'caption' not in self.options:
+ self.add_name(node)
+ return [node]
+ else:
+ figure = figure_wrapper(self, node, self.options['caption'])
+ self.add_name(figure)
+ return [figure]
class GraphvizSimple(SphinxDirective):
@@ -204,20 +206,21 @@ class GraphvizSimple(SphinxDirective):
if 'align' in self.options:
node['align'] = self.options['align']
- caption = self.options.get('caption')
- if caption:
- node = figure_wrapper(self, node, caption)
-
- self.add_name(node)
- return [node]
+ if 'caption' not in self.options:
+ self.add_name(node)
+ return [node]
+ else:
+ figure = figure_wrapper(self, node, self.options['caption'])
+ self.add_name(figure)
+ return [figure]
def render_dot(self, code, options, format, prefix='graphviz'):
- # type: (nodes.NodeVisitor, unicode, Dict, unicode, unicode) -> Tuple[unicode, unicode]
+ # type: (SphinxTranslator, str, Dict, str, str) -> Tuple[str, str]
"""Render graphviz code into a PNG or PDF output file."""
graphviz_dot = options.get('graphviz_dot', self.builder.config.graphviz_dot)
hashkey = (code + str(options) + str(graphviz_dot) +
- str(self.builder.config.graphviz_dot_args)).encode('utf-8')
+ str(self.builder.config.graphviz_dot_args)).encode()
fname = '%s-%s.%s' % (prefix, sha1(hashkey).hexdigest(), format)
relfn = posixpath.join(self.builder.imgpath, fname)
@@ -227,15 +230,11 @@ def render_dot(self, code, options, format, prefix='graphviz'):
return relfn, outfn
if (hasattr(self.builder, '_graphviz_warned_dot') and
- self.builder._graphviz_warned_dot.get(graphviz_dot)):
+ self.builder._graphviz_warned_dot.get(graphviz_dot)): # type: ignore # NOQA
return None, None
ensuredir(path.dirname(outfn))
- # graphviz expects UTF-8 by default
- if isinstance(code, text_type):
- code = code.encode('utf-8')
-
dot_args = [graphviz_dot]
dot_args.extend(self.builder.config.graphviz_dot_args)
dot_args.extend(['-T' + format, '-o' + outfn])
@@ -245,40 +244,29 @@ def render_dot(self, code, options, format, prefix='graphviz'):
if format == 'png':
dot_args.extend(['-Tcmapx', '-o%s.map' % outfn])
+
try:
- p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE, cwd=cwd)
- except OSError as err:
- if err.errno != ENOENT: # No such file or directory
- raise
+ ret = subprocess.run(dot_args, input=code.encode(), stdout=PIPE, stderr=PIPE,
+ cwd=cwd, check=True)
+ if not path.isfile(outfn):
+ raise GraphvizError(__('dot did not produce an output file:\n[stderr]\n%r\n'
+ '[stdout]\n%r') % (ret.stderr, ret.stdout))
+ return relfn, outfn
+ except OSError:
logger.warning(__('dot command %r cannot be run (needed for graphviz '
'output), check the graphviz_dot setting'), graphviz_dot)
if not hasattr(self.builder, '_graphviz_warned_dot'):
- self.builder._graphviz_warned_dot = {}
- self.builder._graphviz_warned_dot[graphviz_dot] = True
+ self.builder._graphviz_warned_dot = {} # type: ignore
+ self.builder._graphviz_warned_dot[graphviz_dot] = True # type: ignore
return None, None
- try:
- # Graphviz may close standard input when an error occurs,
- # resulting in a broken pipe on communicate()
- stdout, stderr = p.communicate(code)
- except (OSError, IOError) as err:
- if err.errno not in (EPIPE, EINVAL):
- raise
- # in this case, read the standard output and standard error streams
- # directly, to get the error message(s)
- stdout, stderr = p.stdout.read(), p.stderr.read()
- p.wait()
- if p.returncode != 0:
- raise GraphvizError(__('dot exited with error:\n[stderr]\n%s\n'
- '[stdout]\n%s') % (stderr, stdout))
- if not path.isfile(outfn):
- raise GraphvizError(__('dot did not produce an output file:\n[stderr]\n%s\n'
- '[stdout]\n%s') % (stderr, stdout))
- return relfn, outfn
+ except CalledProcessError as exc:
+ raise GraphvizError(__('dot exited with error:\n[stderr]\n%r\n'
+ '[stdout]\n%r') % (exc.stderr, exc.stdout))
def render_dot_html(self, node, code, options, prefix='graphviz',
imgcls=None, alt=None):
- # type: (nodes.NodeVisitor, graphviz, unicode, Dict, unicode, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
+ # type: (HTMLTranslator, graphviz, str, Dict, str, str, str) -> Tuple[str, str]
format = self.builder.config.graphviz_output_format
try:
if format not in ('png', 'svg'):
@@ -286,7 +274,7 @@ def render_dot_html(self, node, code, options, prefix='graphviz',
"'svg', but is %r") % format)
fname, outfn = render_dot(self, code, options, format, prefix)
except GraphvizError as exc:
- logger.warning(__('dot code %r: %s'), code, text_type(exc))
+ logger.warning(__('dot code %r: %s'), code, exc)
raise nodes.SkipNode
if imgcls:
@@ -309,7 +297,7 @@ def render_dot_html(self, node, code, options, prefix='graphviz',
self.body.append('<p class="warning">%s</p>' % alt)
self.body.append('</object></div>\n')
else:
- with codecs.open(outfn + '.map', 'r', encoding='utf-8') as mapfile: # type: ignore
+ with open(outfn + '.map', encoding='utf-8') as mapfile:
imgmap = ClickableMapDefinition(outfn + '.map', mapfile.read(), dot=code)
if imgmap.clickable:
# has a map
@@ -331,16 +319,16 @@ def render_dot_html(self, node, code, options, prefix='graphviz',
def html_visit_graphviz(self, node):
- # type: (nodes.NodeVisitor, graphviz) -> None
+ # type: (HTMLTranslator, graphviz) -> None
render_dot_html(self, node, node['code'], node['options'])
def render_dot_latex(self, node, code, options, prefix='graphviz'):
- # type: (nodes.NodeVisitor, graphviz, unicode, Dict, unicode) -> None
+ # type: (LaTeXTranslator, graphviz, str, Dict, str) -> None
try:
fname, outfn = render_dot(self, code, options, 'pdf', prefix)
except GraphvizError as exc:
- logger.warning(__('dot code %r: %s'), code, text_type(exc))
+ logger.warning(__('dot code %r: %s'), code, exc)
raise nodes.SkipNode
is_inline = self.is_inline(node)
@@ -369,16 +357,16 @@ def render_dot_latex(self, node, code, options, prefix='graphviz'):
def latex_visit_graphviz(self, node):
- # type: (nodes.NodeVisitor, graphviz) -> None
+ # type: (LaTeXTranslator, graphviz) -> None
render_dot_latex(self, node, node['code'], node['options'])
def render_dot_texinfo(self, node, code, options, prefix='graphviz'):
- # type: (nodes.NodeVisitor, graphviz, unicode, Dict, unicode) -> None
+ # type: (TexinfoTranslator, graphviz, str, Dict, str) -> None
try:
fname, outfn = render_dot(self, code, options, 'png', prefix)
except GraphvizError as exc:
- logger.warning(__('dot code %r: %s'), code, text_type(exc))
+ logger.warning(__('dot code %r: %s'), code, exc)
raise nodes.SkipNode
if fname is not None:
self.body.append('@image{%s,,,[graphviz],png}\n' % fname[:-4])
@@ -386,12 +374,12 @@ def render_dot_texinfo(self, node, code, options, prefix='graphviz'):
def texinfo_visit_graphviz(self, node):
- # type: (nodes.NodeVisitor, graphviz) -> None
+ # type: (TexinfoTranslator, graphviz) -> None
render_dot_texinfo(self, node, node['code'], node['options'])
def text_visit_graphviz(self, node):
- # type: (nodes.NodeVisitor, graphviz) -> None
+ # type: (TextTranslator, graphviz) -> None
if 'alt' in node.attributes:
self.add_text(_('[graph: %s]') % node['alt'])
else:
@@ -400,7 +388,7 @@ def text_visit_graphviz(self, node):
def man_visit_graphviz(self, node):
- # type: (nodes.NodeVisitor, graphviz) -> None
+ # type: (ManualPageTranslator, graphviz) -> None
if 'alt' in node.attributes:
self.body.append(_('[graph: %s]') % node['alt'])
else:
@@ -417,7 +405,7 @@ def on_build_finished(app, exc):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_node(graphviz,
html=(html_visit_graphviz, None),
latex=(latex_visit_graphviz, None),
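
The render_dot() hunks above replace the manual Popen/communicate/EPIPE handling with subprocess.run(..., check=True), so the two failure modes become two except clauses: OSError when the dot binary itself cannot be started, and CalledProcessError when it runs but exits non-zero. A minimal standalone sketch of that pattern, not part of the patch, using a hypothetical run_tool() helper instead of Sphinx's render_dot():

import subprocess
from subprocess import CalledProcessError, PIPE

def run_tool(args, input_text=None, cwd=None):
    """Run *args*, feeding *input_text* on stdin; return (stdout, stderr) as bytes."""
    try:
        ret = subprocess.run(args, input=input_text.encode() if input_text else None,
                             stdout=PIPE, stderr=PIPE, cwd=cwd, check=True)
        return ret.stdout, ret.stderr
    except OSError:
        # the executable is missing or cannot be started at all
        raise RuntimeError('%r cannot be run' % args[0])
    except CalledProcessError as exc:
        # the tool started but failed; its output is attached to the exception
        raise RuntimeError('%r exited with error:\n[stderr]\n%r\n[stdout]\n%r'
                           % (args[0], exc.stderr, exc.stdout))

The design point is that check=True folds the old returncode test and the broken-pipe workaround into exception handling, which is why the rewritten hunks are shorter than the code they replace.
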
diff --git a/sphinx/ext/ifconfig.py b/sphinx/ext/ifconfig.py
index 112cc763f..4fd5fa391 100644
--- a/sphinx/ext/ifconfig.py
+++ b/sphinx/ext/ifconfig.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.ifconfig
~~~~~~~~~~~~~~~~~~~
@@ -51,12 +50,12 @@ class IfConfig(SphinxDirective):
set_source_info(self, node)
node['expr'] = self.arguments[0]
self.state.nested_parse(self.content, self.content_offset,
- node, match_titles=1)
+ node, match_titles=True)
return [node]
def process_ifconfig_nodes(app, doctree, docname):
- # type: (Sphinx, nodes.Node, unicode) -> None
+ # type: (Sphinx, nodes.document, str) -> None
ns = dict((confval.name, confval.value) for confval in app.config)
ns.update(app.config.__dict__.copy())
ns['builder'] = app.builder.name
@@ -79,7 +78,7 @@ def process_ifconfig_nodes(app, doctree, docname):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_node(ifconfig)
app.add_directive('ifconfig', IfConfig)
app.connect('doctree-resolved', process_ifconfig_nodes)
diff --git a/sphinx/ext/imgconverter.py b/sphinx/ext/imgconverter.py
index 79755d351..f4ba5d001 100644
--- a/sphinx/ext/imgconverter.py
+++ b/sphinx/ext/imgconverter.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.imgconverter
~~~~~~~~~~~~~~~~~~~~~~~
@@ -8,14 +7,13 @@
:copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-import locale
import subprocess
+from subprocess import CalledProcessError, PIPE
from sphinx.errors import ExtensionError
from sphinx.locale import __
from sphinx.transforms.post_transforms.images import ImageConverter
from sphinx.util import logging
-from sphinx.util.osutil import ENOENT, EPIPE, EINVAL
if False:
# For type annotation
@@ -39,31 +37,21 @@ class ImagemagickConverter(ImageConverter):
try:
args = [self.config.image_converter, '-version']
logger.debug('Invoking %r ...', args)
- p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- except (OSError, IOError):
+ subprocess.run(args, stdout=PIPE, stderr=PIPE, check=True)
+ return True
+ except OSError:
logger.warning(__('convert command %r cannot be run.'
'check the image_converter setting'),
self.config.image_converter)
return False
-
- try:
- stdout, stderr = p.communicate()
- except (OSError, IOError) as err:
- if err.errno not in (EPIPE, EINVAL):
- raise
- stdout, stderr = p.stdout.read(), p.stderr.read()
- p.wait()
- if p.returncode != 0:
- encoding = locale.getpreferredencoding()
+ except CalledProcessError as exc:
logger.warning(__('convert exited with error:\n'
- '[stderr]\n%s\n[stdout]\n%s'),
- stderr.decode(encoding), stdout.decode(encoding))
+ '[stderr]\n%r\n[stdout]\n%r'),
+ exc.stderr, exc.stdout)
return False
- return True
-
def convert(self, _from, _to):
- # type: (unicode, unicode) -> bool
+ # type: (str, str) -> bool
"""Converts the image to expected one."""
try:
if _from.lower().endswith('.gif'):
@@ -74,32 +62,21 @@ class ImagemagickConverter(ImageConverter):
self.config.image_converter_args +
[_from, _to])
logger.debug('Invoking %r ...', args)
- p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- except OSError as err:
- if err.errno != ENOENT: # No such file or directory
- raise
+ subprocess.run(args, stdout=PIPE, stderr=PIPE, check=True)
+ return True
+ except OSError:
logger.warning(__('convert command %r cannot be run.'
'check the image_converter setting'),
self.config.image_converter)
return False
-
- try:
- stdout, stderr = p.communicate()
- except (OSError, IOError) as err:
- if err.errno not in (EPIPE, EINVAL):
- raise
- stdout, stderr = p.stdout.read(), p.stderr.read()
- p.wait()
- if p.returncode != 0:
+ except CalledProcessError as exc:
raise ExtensionError(__('convert exited with error:\n'
- '[stderr]\n%s\n[stdout]\n%s') %
- (stderr, stdout))
-
- return True
+ '[stderr]\n%r\n[stdout]\n%r') %
+ (exc.stderr, exc.stdout))
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_post_transform(ImagemagickConverter)
app.add_config_value('image_converter', 'convert', 'env')
app.add_config_value('image_converter_args', [], 'env')
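
The setup() above registers the converter command and its extra arguments as config values, so the ImageMagick binary and flags can be changed per project. An illustrative conf.py snippet, not part of the patch; the argument values are examples, not defaults:

# conf.py
extensions = ['sphinx.ext.imgconverter']

# which binary to invoke; 'convert' is the registered default
image_converter = 'convert'
# extra flags inserted before the input/output paths (example values only)
image_converter_args = ['-background', 'white', '-flatten']
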
diff --git a/sphinx/ext/imgmath.py b/sphinx/ext/imgmath.py
index 631a3fd42..00aae13f2 100644
--- a/sphinx/ext/imgmath.py
+++ b/sphinx/ext/imgmath.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.imgmath
~~~~~~~~~~~~~~~~~~
@@ -9,34 +8,34 @@
:license: BSD, see LICENSE for details.
"""
-import codecs
import posixpath
import re
import shutil
+import subprocess
import tempfile
from hashlib import sha1
from os import path
-from subprocess import Popen, PIPE
+from subprocess import CalledProcessError, PIPE
from docutils import nodes
-from six import text_type
import sphinx
from sphinx.errors import SphinxError
from sphinx.locale import _, __
from sphinx.util import logging
from sphinx.util.math import get_node_equation_number, wrap_displaymath
-from sphinx.util.osutil import ensuredir, ENOENT, cd
+from sphinx.util.osutil import ensuredir
from sphinx.util.png import read_png_depth, write_png_depth
from sphinx.util.pycompat import sys_encoding
if False:
# For type annotation
- from typing import Any, Dict, List, Tuple # NOQA
+ from typing import Any, Dict, List, Tuple, Union # NOQA
from sphinx.addnodes import displaymath # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.config import Config # NOQA
+ from sphinx.writers.html import HTMLTranslator # NOQA
logger = logging.getLogger(__name__)
@@ -45,12 +44,12 @@ class MathExtError(SphinxError):
category = 'Math extension error'
def __init__(self, msg, stderr=None, stdout=None):
- # type: (unicode, unicode, unicode) -> None
+ # type: (str, bytes, bytes) -> None
if stderr:
msg += '\n[stderr]\n' + stderr.decode(sys_encoding, 'replace')
if stdout:
msg += '\n[stdout]\n' + stdout.decode(sys_encoding, 'replace')
- SphinxError.__init__(self, msg)
+ super().__init__(msg)
class InvokeError(SphinxError):
@@ -90,7 +89,7 @@ depth_re = re.compile(br'\[\d+ depth=(-?\d+)\]')
def generate_latex_macro(math, config):
- # type: (unicode, Config) -> unicode
+ # type: (str, Config) -> str
"""Generate LaTeX macro."""
fontsize = config.imgmath_font_size
baselineskip = int(round(fontsize * 1.2))
@@ -105,7 +104,7 @@ def generate_latex_macro(math, config):
def ensure_tempdir(builder):
- # type: (Builder) -> unicode
+ # type: (Builder) -> str
"""Create temporary directory.
use only one tempdir per build -- the use of a directory is cleaner
@@ -119,11 +118,11 @@ def ensure_tempdir(builder):
def compile_math(latex, builder):
- # type: (unicode, Builder) -> unicode
+ # type: (str, Builder) -> str
"""Compile LaTeX macros for math to DVI."""
tempdir = ensure_tempdir(builder)
filename = path.join(tempdir, 'math.tex')
- with codecs.open(filename, 'w', 'utf-8') as f: # type: ignore
+ with open(filename, 'w', encoding='utf-8') as f:
f.write(latex)
# build latex command; old versions of latex don't have the
@@ -134,46 +133,35 @@ def compile_math(latex, builder):
command.extend(builder.config.imgmath_latex_args)
command.append('math.tex')
- with cd(tempdir):
- try:
- p = Popen(command, stdout=PIPE, stderr=PIPE)
- except OSError as err:
- if err.errno != ENOENT: # No such file or directory
- raise
- logger.warning(__('LaTeX command %r cannot be run (needed for math '
- 'display), check the imgmath_latex setting'),
- builder.config.imgmath_latex)
- raise InvokeError
-
- stdout, stderr = p.communicate()
- if p.returncode != 0:
- raise MathExtError('latex exited with error', stderr, stdout)
-
- return path.join(tempdir, 'math.dvi')
+ try:
+ subprocess.run(command, stdout=PIPE, stderr=PIPE, cwd=tempdir, check=True)
+ return path.join(tempdir, 'math.dvi')
+ except OSError:
+ logger.warning(__('LaTeX command %r cannot be run (needed for math '
+ 'display), check the imgmath_latex setting'),
+ builder.config.imgmath_latex)
+ raise InvokeError
+ except CalledProcessError as exc:
+ raise MathExtError('latex exited with error', exc.stderr, exc.stdout)
def convert_dvi_to_image(command, name):
- # type: (List[unicode], unicode) -> Tuple[unicode, unicode]
+ # type: (List[str], str) -> Tuple[bytes, bytes]
"""Convert DVI file to specific image format."""
try:
- p = Popen(command, stdout=PIPE, stderr=PIPE)
- except OSError as err:
- if err.errno != ENOENT: # No such file or directory
- raise
+ ret = subprocess.run(command, stdout=PIPE, stderr=PIPE, check=True)
+ return ret.stdout, ret.stderr
+ except OSError:
logger.warning(__('%s command %r cannot be run (needed for math '
'display), check the imgmath_%s setting'),
name, command[0], name)
raise InvokeError
-
- stdout, stderr = p.communicate()
- if p.returncode != 0:
- raise MathExtError('%s exited with error' % name, stderr, stdout)
-
- return stdout, stderr
+ except CalledProcessError as exc:
+ raise MathExtError('%s exited with error' % name, exc.stderr, exc.stdout)
def convert_dvi_to_png(dvipath, builder):
- # type: (unicode, Builder) -> Tuple[unicode, int]
+ # type: (str, Builder) -> Tuple[str, int]
"""Convert DVI file to PNG image."""
tempdir = ensure_tempdir(builder)
filename = path.join(tempdir, 'math.png')
@@ -200,7 +188,7 @@ def convert_dvi_to_png(dvipath, builder):
def convert_dvi_to_svg(dvipath, builder):
- # type: (unicode, Builder) -> Tuple[unicode, int]
+ # type: (str, Builder) -> Tuple[str, int]
"""Convert DVI file to SVG image."""
tempdir = ensure_tempdir(builder)
filename = path.join(tempdir, 'math.svg')
@@ -215,7 +203,7 @@ def convert_dvi_to_svg(dvipath, builder):
def render_math(self, math):
- # type: (nodes.NodeVisitor, unicode) -> Tuple[unicode, int]
+ # type: (HTMLTranslator, str) -> Tuple[str, int]
"""Render the LaTeX math expression *math* using latex and dvipng or
dvisvgm.
@@ -235,7 +223,7 @@ def render_math(self, math):
latex = generate_latex_macro(math, self.builder.config)
- filename = "%s.%s" % (sha1(latex.encode('utf-8')).hexdigest(), image_format)
+ filename = "%s.%s" % (sha1(latex.encode()).hexdigest(), image_format)
relfn = posixpath.join(self.builder.imgpath, 'math', filename)
outfn = path.join(self.builder.outdir, self.builder.imagedir, 'math', filename)
if path.isfile(outfn):
@@ -251,7 +239,7 @@ def render_math(self, math):
try:
dvipath = compile_math(latex, self.builder)
except InvokeError:
- self.builder._imgmath_warned_latex = True
+ self.builder._imgmath_warned_latex = True # type: ignore
return None, None
# .dvi -> .png/.svg
@@ -261,7 +249,7 @@ def render_math(self, math):
elif image_format == 'svg':
imgpath, depth = convert_dvi_to_svg(dvipath, self.builder)
except InvokeError:
- self.builder._imgmath_warned_image_translator = True
+ self.builder._imgmath_warned_image_translator = True # type: ignore
return None, None
# Move generated image on tempdir to build dir
@@ -284,18 +272,18 @@ def cleanup_tempdir(app, exc):
def get_tooltip(self, node):
- # type: (nodes.NodeVisitor, nodes.math) -> unicode
+ # type: (HTMLTranslator, Union[nodes.math, nodes.math_block]) -> str
if self.builder.config.imgmath_add_tooltips:
return ' alt="%s"' % self.encode(node.astext()).strip()
return ''
def html_visit_math(self, node):
- # type: (nodes.NodeVisitor, nodes.math) -> None
+ # type: (HTMLTranslator, nodes.math) -> None
try:
fname, depth = render_math(self, '$' + node.astext() + '$')
except MathExtError as exc:
- msg = text_type(exc)
+ msg = str(exc)
sm = nodes.system_message(msg, type='WARNING', level=2,
backrefs=[], source=node.astext())
sm.walkabout(self)
@@ -314,7 +302,7 @@ def html_visit_math(self, node):
def html_visit_displaymath(self, node):
- # type: (nodes.NodeVisitor, nodes.math_block) -> None
+ # type: (HTMLTranslator, nodes.math_block) -> None
if node['nowrap']:
latex = node.astext()
else:
@@ -322,7 +310,7 @@ def html_visit_displaymath(self, node):
try:
fname, depth = render_math(self, latex)
except MathExtError as exc:
- msg = text_type(exc)
+ msg = str(exc)
sm = nodes.system_message(msg, type='WARNING', level=2,
backrefs=[], source=node.astext())
sm.walkabout(self)
@@ -346,7 +334,7 @@ def html_visit_displaymath(self, node):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_html_math_renderer('imgmath',
(html_visit_math, None),
(html_visit_displaymath, None))
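
render_math() above caches each formula under a filename derived from a hash of the generated LaTeX, so identical math is only rendered once per build. The naming scheme in isolation, as a simplified sketch (the real code additionally joins it onto the builder's image path):

from hashlib import sha1

def math_image_name(latex, image_format='png'):
    # hex digest of the LaTeX source plus the configured output format
    # ('png' or 'svg'), same scheme as render_math() above
    return '%s.%s' % (sha1(latex.encode()).hexdigest(), image_format)

print(math_image_name(r'\int_0^1 x^2\,dx'))   # -> '<40-hex-char digest>.png'
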
diff --git a/sphinx/ext/inheritance_diagram.py b/sphinx/ext/inheritance_diagram.py
index cf81251ee..ed583b694 100644
--- a/sphinx/ext/inheritance_diagram.py
+++ b/sphinx/ext/inheritance_diagram.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
r"""
sphinx.ext.inheritance_diagram
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -36,21 +35,22 @@ r"""
:license: BSD, see LICENSE for details.
"""
+import builtins
import inspect
import re
import sys
from hashlib import md5
+from typing import Iterable, cast
from docutils import nodes
from docutils.parsers.rst import directives
-from six import text_type
-from six.moves import builtins
import sphinx
-from sphinx.ext.graphviz import render_dot_html, render_dot_latex, \
- render_dot_texinfo, figure_wrapper
-from sphinx.pycode import ModuleAnalyzer
-from sphinx.util import force_decode
+from sphinx import addnodes
+from sphinx.ext.graphviz import (
+ graphviz, figure_wrapper,
+ render_dot_html, render_dot_latex, render_dot_texinfo
+)
from sphinx.util.docutils import SphinxDirective
if False:
@@ -58,6 +58,9 @@ if False:
from typing import Any, Dict, List, Tuple, Dict, Optional # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
+ from sphinx.writers.html import HTMLTranslator # NOQA
+ from sphinx.writers.latex import LaTeXTranslator # NOQA
+ from sphinx.writers.texinfo import TexinfoTranslator # NOQA
module_sig_re = re.compile(r'''^(?:([\w.]*)\.)? # module names
@@ -66,7 +69,7 @@ module_sig_re = re.compile(r'''^(?:([\w.]*)\.)? # module names
def try_import(objname):
- # type: (unicode) -> Any
+ # type: (str) -> Any
"""Import a object or module using *name* and *currentmodule*.
*name* should be a relative name from *currentmodule* or
a fully-qualified name.
@@ -75,8 +78,8 @@ def try_import(objname):
"""
try:
__import__(objname)
- return sys.modules.get(objname) # type: ignore
- except (ImportError, ValueError): # ValueError,py27 -> ImportError,py3
+ return sys.modules.get(objname)
+ except ImportError:
matched = module_sig_re.match(objname)
if not matched:
@@ -88,13 +91,13 @@ def try_import(objname):
return None
try:
__import__(modname)
- return getattr(sys.modules.get(modname), attrname, None) # type: ignore
- except (ImportError, ValueError): # ValueError,py27 -> ImportError,py3
+ return getattr(sys.modules.get(modname), attrname, None)
+ except ImportError:
return None
def import_classes(name, currmodule):
- # type: (unicode, unicode) -> Any
+ # type: (str, str) -> Any
"""Import a class using its fully-qualified *name*."""
target = None
@@ -129,7 +132,7 @@ class InheritanceException(Exception):
pass
-class InheritanceGraph(object):
+class InheritanceGraph:
"""
Given a list of classes, determines the set of classes that they inherit
from all the way to the root "object", and then is able to generate a
@@ -137,7 +140,7 @@ class InheritanceGraph(object):
"""
def __init__(self, class_names, currmodule, show_builtins=False,
private_bases=False, parts=0, aliases=None, top_classes=[]):
- # type: (unicode, str, bool, bool, int, Optional[Dict[unicode, unicode]], List[Any]) -> None # NOQA
+ # type: (List[str], str, bool, bool, int, Optional[Dict[str, str]], List[Any]) -> None
"""*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
@@ -152,7 +155,7 @@ class InheritanceGraph(object):
'inheritance diagram')
def _import_classes(self, class_names, currmodule):
- # type: (unicode, str) -> List[Any]
+ # type: (List[str], str) -> List[Any]
"""Import a list of classes."""
classes = [] # type: List[Any]
for name in class_names:
@@ -160,7 +163,7 @@ class InheritanceGraph(object):
return classes
def _class_info(self, classes, show_builtins, private_bases, parts, aliases, top_classes):
- # type: (List[Any], bool, bool, int, Optional[Dict[unicode, unicode]], List[Any]) -> List[Tuple[unicode, unicode, List[unicode], unicode]] # NOQA
+ # type: (List[Any], bool, bool, int, Optional[Dict[str, str]], List[Any]) -> List[Tuple[str, str, List[str], str]] # NOQA
"""Return name and bases for all classes that are ancestors of
*classes*.
@@ -187,16 +190,13 @@ class InheritanceGraph(object):
tooltip = None
try:
if cls.__doc__:
- enc = ModuleAnalyzer.for_module(cls.__module__).encoding
doc = cls.__doc__.strip().split("\n")[0]
- if not isinstance(doc, text_type):
- doc = force_decode(doc, enc)
if doc:
tooltip = '"%s"' % doc.replace('"', '\\"')
except Exception: # might raise AttributeError for strange classes
pass
- baselist = [] # type: List[unicode]
+ baselist = [] # type: List[str]
all_classes[cls] = (nodename, fullname, baselist, tooltip)
if fullname in top_classes:
@@ -217,7 +217,7 @@ class InheritanceGraph(object):
return list(all_classes.values())
def class_name(self, cls, parts=0, aliases=None):
- # type: (Any, int, Optional[Dict[unicode, unicode]]) -> unicode
+ # type: (Any, int, Optional[Dict[str, str]]) -> str
"""Given a class object, return a fully-qualified name.
This works for things I've tested in matplotlib so far, but may not be
@@ -238,7 +238,7 @@ class InheritanceGraph(object):
return result
def get_all_class_names(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
"""Get all of the class names involved in the graph."""
return [fullname for (_, fullname, _, _) in self.class_info]
@@ -261,16 +261,16 @@ class InheritanceGraph(object):
}
def _format_node_attrs(self, attrs):
- # type: (Dict) -> unicode
+ # type: (Dict) -> str
return ','.join(['%s=%s' % x for x in sorted(attrs.items())])
def _format_graph_attrs(self, attrs):
- # type: (Dict) -> unicode
+ # type: (Dict) -> str
return ''.join(['%s=%s;\n' % x for x in sorted(attrs.items())])
def generate_dot(self, name, urls={}, env=None,
graph_attrs={}, node_attrs={}, edge_attrs={}):
- # type: (unicode, Dict, BuildEnvironment, Dict, Dict, Dict) -> unicode
+ # type: (str, Dict, BuildEnvironment, Dict, Dict, Dict) -> str
"""Generate a graphviz dot graph from the classes that were passed in
to __init__.
@@ -292,7 +292,7 @@ class InheritanceGraph(object):
n_attrs.update(env.config.inheritance_node_attrs)
e_attrs.update(env.config.inheritance_edge_attrs)
- res = [] # type: List[unicode]
+ res = [] # type: List[str]
res.append('digraph %s {\n' % name)
res.append(self._format_graph_attrs(g_attrs))
@@ -316,7 +316,7 @@ class InheritanceGraph(object):
return ''.join(res)
-class inheritance_diagram(nodes.General, nodes.Element):
+class inheritance_diagram(graphviz):
"""
A docutils node to use as a placeholder for the inheritance diagram.
"""
@@ -362,36 +362,37 @@ class InheritanceDiagram(SphinxDirective):
aliases=self.config.inheritance_alias,
top_classes=node['top-classes'])
except InheritanceException as err:
- return [node.document.reporter.warning(err.args[0],
- line=self.lineno)]
+ return [node.document.reporter.warning(err, line=self.lineno)]
# Create xref nodes for each target of the graph's image map and
# add them to the doc tree so that Sphinx can resolve the
# references to real URLs later. These nodes will eventually be
# removed from the doctree after we're done with them.
for name in graph.get_all_class_names():
- refnodes, x = class_role(
+ refnodes, x = class_role( # type: ignore
'class', ':class:`%s`' % name, name, 0, self.state)
node.extend(refnodes)
# Store the graph object so we can use it to generate the
# dot file later
node['graph'] = graph
- # wrap the result in figure node
- caption = self.options.get('caption')
- if caption:
- node = figure_wrapper(self, node, caption)
- return [node]
+ if 'caption' not in self.options:
+ self.add_name(node)
+ return [node]
+ else:
+ figure = figure_wrapper(self, node, self.options['caption'])
+ self.add_name(figure)
+ return [figure]
def get_graph_hash(node):
- # type: (inheritance_diagram) -> unicode
- encoded = (node['content'] + str(node['parts'])).encode('utf-8')
+ # type: (inheritance_diagram) -> str
+ encoded = (node['content'] + str(node['parts'])).encode()
return md5(encoded).hexdigest()[-10:]
def html_visit_inheritance_diagram(self, node):
- # type: (nodes.NodeVisitor, inheritance_diagram) -> None
+ # type: (HTMLTranslator, inheritance_diagram) -> None
"""
Output the graph for HTML. This will insert a PNG with clickable
image map.
@@ -405,7 +406,8 @@ def html_visit_inheritance_diagram(self, node):
graphviz_output_format = self.builder.env.config.graphviz_output_format.upper()
current_filename = self.builder.current_docname + self.builder.out_suffix
urls = {}
- for child in node:
+ pending_xrefs = cast(Iterable[addnodes.pending_xref], node)
+ for child in pending_xrefs:
if child.get('refuri') is not None:
if graphviz_output_format == 'SVG':
urls[child['reftitle']] = "../" + child.get('refuri')
@@ -424,7 +426,7 @@ def html_visit_inheritance_diagram(self, node):
def latex_visit_inheritance_diagram(self, node):
- # type: (nodes.NodeVisitor, inheritance_diagram) -> None
+ # type: (LaTeXTranslator, inheritance_diagram) -> None
"""
Output the graph for LaTeX. This will insert a PDF.
"""
@@ -440,7 +442,7 @@ def latex_visit_inheritance_diagram(self, node):
def texinfo_visit_inheritance_diagram(self, node):
- # type: (nodes.NodeVisitor, inheritance_diagram) -> None
+ # type: (TexinfoTranslator, inheritance_diagram) -> None
"""
Output the graph for Texinfo. This will insert a PNG.
"""
@@ -461,7 +463,7 @@ def skip(self, node):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.setup_extension('sphinx.ext.graphviz')
app.add_node(
inheritance_diagram,
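
generate_dot() above builds the graph source from plain dicts of graphviz attributes, using the two small formatters shown in the hunk. The formatting in isolation, as a standalone sketch rather than the class methods themselves:

def format_node_attrs(attrs):
    # 'key=value' pairs joined with commas, as used inside node declarations
    return ','.join('%s=%s' % x for x in sorted(attrs.items()))

def format_graph_attrs(attrs):
    # one 'key=value;' statement per line, as used at graph level
    return ''.join('%s=%s;\n' % x for x in sorted(attrs.items()))

print(format_node_attrs({'shape': 'box', 'fontsize': 10}))
# -> 'fontsize=10,shape=box'
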
diff --git a/sphinx/ext/intersphinx.py b/sphinx/ext/intersphinx.py
index 33e218531..1258cd84e 100644
--- a/sphinx/ext/intersphinx.py
+++ b/sphinx/ext/intersphinx.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.intersphinx
~~~~~~~~~~~~~~~~~~~~~~
@@ -24,23 +23,18 @@
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
-
import functools
import posixpath
import sys
import time
-import warnings
from os import path
+from urllib.parse import urlsplit, urlunsplit
from docutils import nodes
from docutils.utils import relative_path
-from six import PY3, iteritems, string_types
-from six.moves.urllib.parse import urlsplit, urlunsplit
import sphinx
from sphinx.builders.html import INVENTORY_FILENAME
-from sphinx.deprecation import RemovedInSphinx20Warning
from sphinx.locale import _, __
from sphinx.util import requests, logging
from sphinx.util.inventory import InventoryFile
@@ -51,16 +45,12 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
from sphinx.environment import BuildEnvironment # NOQA
-
- if PY3:
- unicode = str
-
- Inventory = Dict[unicode, Dict[unicode, Tuple[unicode, unicode, unicode, unicode]]]
+ from sphinx.util.typing import Inventory # NOQA
logger = logging.getLogger(__name__)
-class InventoryAdapter(object):
+class InventoryAdapter:
"""Inventory adapter for environment"""
def __init__(self, env):
@@ -74,7 +64,7 @@ class InventoryAdapter(object):
@property
def cache(self):
- # type: () -> Dict[unicode, Tuple[unicode, int, Inventory]]
+ # type: () -> Dict[str, Tuple[str, int, Inventory]]
return self.env.intersphinx_cache # type: ignore
@property
@@ -84,7 +74,7 @@ class InventoryAdapter(object):
@property
def named_inventory(self):
- # type: () -> Dict[unicode, Inventory]
+ # type: () -> Dict[str, Inventory]
return self.env.intersphinx_named_inventory # type: ignore
def clear(self):
@@ -94,7 +84,7 @@ class InventoryAdapter(object):
def _strip_basic_auth(url):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Returns *url* with basic auth credentials removed. Also returns the
basic auth username and password if they're present in *url*.
@@ -116,7 +106,7 @@ def _strip_basic_auth(url):
def _read_from_url(url, config=None):
- # type: (unicode, Config) -> IO
+ # type: (str, Config) -> IO
"""Reads data from *url* with an HTTP *GET*.
This function supports fetching from resources which use basic HTTP auth as
@@ -142,7 +132,7 @@ def _read_from_url(url, config=None):
def _get_safe_url(url):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Gets version of *url* with basic auth passwords obscured. This function
returns results suitable for printing and logging.
@@ -168,7 +158,7 @@ def _get_safe_url(url):
def fetch_inventory(app, uri, inv):
- # type: (Sphinx, unicode, Any) -> Any
+ # type: (Sphinx, str, Any) -> Any
"""Fetch, parse and return an intersphinx inventory file."""
# both *uri* (base URI of the links to generate) and *inv* (actual
# location of the inventory file) can be local or remote URIs
@@ -189,7 +179,7 @@ def fetch_inventory(app, uri, inv):
if hasattr(f, 'url'):
newinv = f.url # type: ignore
if inv != newinv:
- logger.info('intersphinx inventory has moved: %s -> %s', inv, newinv)
+ logger.info(__('intersphinx inventory has moved: %s -> %s'), inv, newinv)
if uri in (inv, path.dirname(inv), path.dirname(inv) + '/'):
uri = path.dirname(newinv)
@@ -214,28 +204,7 @@ def load_mappings(app):
cache_time = now - app.config.intersphinx_cache_limit * 86400
inventories = InventoryAdapter(app.builder.env)
update = False
- for key, value in iteritems(app.config.intersphinx_mapping):
- name = None # type: unicode
- uri = None # type: unicode
- inv = None # type: Union[unicode, Tuple[unicode, ...]]
-
- if isinstance(value, (list, tuple)):
- # new format
- name, (uri, inv) = key, value
- if not isinstance(name, string_types):
- logger.warning(__('intersphinx identifier %r is not string. Ignored'), name)
- continue
- else:
- # old format, no name
- name, uri, inv = None, key, value
- # we can safely assume that the uri<->inv mapping is not changed
- # during partial rebuilds since a changed intersphinx_mapping
- # setting will cause a full environment reread
- if not isinstance(inv, tuple):
- invs = (inv, )
- else:
- invs = inv # type: ignore
-
+ for key, (name, (uri, invs)) in app.config.intersphinx_mapping.items():
failures = []
for inv in invs:
if not inv:
@@ -245,7 +214,7 @@ def load_mappings(app):
if '://' not in inv or uri not in inventories.cache \
or inventories.cache[uri][1] < cache_time:
safe_inv_url = _get_safe_url(inv)
- logger.info('loading intersphinx inventory from %s...', safe_inv_url)
+ logger.info(__('loading intersphinx inventory from %s...'), safe_inv_url)
try:
invdata = fetch_inventory(app, uri, inv)
except Exception as err:
@@ -260,8 +229,8 @@ def load_mappings(app):
if failures == []:
pass
elif len(failures) < len(invs):
- logger.info("encountered some issues with some of the inventories,"
- " but they had working alternatives:")
+ logger.info(__("encountered some issues with some of the inventories,"
+ " but they had working alternatives:"))
for fail in failures:
logger.info(*fail)
else:
@@ -286,16 +255,16 @@ def load_mappings(app):
for name, _x, invdata in named_vals + unnamed_vals:
if name:
inventories.named_inventory[name] = invdata
- for type, objects in iteritems(invdata):
+ for type, objects in invdata.items():
inventories.main_inventory.setdefault(type, {}).update(objects)
def missing_reference(app, env, node, contnode):
- # type: (Sphinx, BuildEnvironment, nodes.Node, nodes.Node) -> None
+ # type: (Sphinx, BuildEnvironment, nodes.Element, nodes.TextElement) -> nodes.reference
"""Attempt to resolve a missing reference via intersphinx references."""
target = node['reftarget']
inventories = InventoryAdapter(env)
- objtypes = None # type: List[unicode]
+ objtypes = None # type: List[str]
if node['reftype'] == 'any':
# we search anything!
objtypes = ['%s:%s' % (domain.name, objtype)
@@ -306,10 +275,10 @@ def missing_reference(app, env, node, contnode):
domain = node.get('refdomain')
if not domain:
# only objects in domains are in the inventory
- return
+ return None
objtypes = env.get_domain(domain).objtypes_for_role(node['reftype'])
if not objtypes:
- return
+ return None
objtypes = ['%s:%s' % (domain, objtype) for objtype in objtypes]
if 'std:cmdoption' in objtypes:
# until Sphinx-1.6, cmdoptions are stored as std:option
@@ -365,14 +334,42 @@ def missing_reference(app, env, node, contnode):
if len(contnode) and isinstance(contnode[0], nodes.Text):
contnode[0] = nodes.Text(newtarget, contnode[0].rawsource)
+ return None
+
+
+def normalize_intersphinx_mapping(app, config):
+ # type: (Sphinx, Config) -> None
+ for key, value in config.intersphinx_mapping.copy().items():
+ try:
+ if isinstance(value, (list, tuple)):
+ # new format
+ name, (uri, inv) = key, value
+ if not isinstance(name, str):
+                    logger.warning(__('intersphinx identifier %r is not a string. Ignored'),
+ name)
+ config.intersphinx_mapping.pop(key)
+ continue
+ else:
+ # old format, no name
+ name, uri, inv = None, key, value
+
+ if not isinstance(inv, tuple):
+ config.intersphinx_mapping[key] = (name, (uri, (inv,)))
+ else:
+ config.intersphinx_mapping[key] = (name, (uri, inv))
+ except Exception as exc:
+            logger.warning(__('Failed to read intersphinx_mapping[%s]. Ignored: %r'), key, exc)
+ config.intersphinx_mapping.pop(key)
+
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_config_value('intersphinx_mapping', {}, True)
app.add_config_value('intersphinx_cache_limit', 5, False)
app.add_config_value('intersphinx_timeout', None, False)
- app.connect('missing-reference', missing_reference)
+ app.connect('config-inited', normalize_intersphinx_mapping)
app.connect('builder-inited', load_mappings)
+ app.connect('missing-reference', missing_reference)
return {
'version': sphinx.__display_version__,
'env_version': 1,
@@ -380,17 +377,8 @@ def setup(app):
}
-def debug(argv):
- # type: (List[unicode]) -> None
- """Debug functionality to print out an inventory"""
- warnings.warn('sphinx.ext.intersphinx.debug() is deprecated. '
- 'Please use inspect_main() instead',
- RemovedInSphinx20Warning, stacklevel=2)
- inspect_main(argv[1:])
-
-
def inspect_main(argv):
- # type: (List[unicode]) -> None
+ # type: (List[str]) -> None
"""Debug functionality to print out an inventory"""
if len(argv) < 1:
print("Print out an inventory file.\n"
@@ -398,16 +386,16 @@ def inspect_main(argv):
file=sys.stderr)
sys.exit(1)
- class MockConfig(object):
+ class MockConfig:
intersphinx_timeout = None # type: int
tls_verify = False
- class MockApp(object):
+ class MockApp:
srcdir = ''
config = MockConfig()
def warn(self, msg):
- # type: (unicode) -> None
+ # type: (str) -> None
print(msg, file=sys.stderr)
try:
@@ -429,4 +417,4 @@ if __name__ == '__main__':
import logging # type: ignore
logging.basicConfig() # type: ignore
- inspect_main(argv=sys.argv[1:]) # type: ignore
+ inspect_main(argv=sys.argv[1:])
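
The new normalize_intersphinx_mapping() above rewrites both the old unnamed format and the new named format of intersphinx_mapping into one shape, (name, (uri, (inv, ...))), at config-inited time, so load_mappings() can iterate over a single layout. The same normalization as a standalone sketch, with the identifier-type check and error handling trimmed:

def normalize(mapping):
    normalized = {}
    for key, value in mapping.items():
        if isinstance(value, (list, tuple)):
            # new format: {'python': ('https://docs.python.org/3', None)}
            name, (uri, inv) = key, value
        else:
            # old format, no name: {'https://docs.python.org/3': None}
            name, uri, inv = None, key, value
        # always store the inventory locations as a tuple
        invs = inv if isinstance(inv, tuple) else (inv,)
        normalized[key] = (name, (uri, invs))
    return normalized
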
diff --git a/sphinx/ext/jsmath.py b/sphinx/ext/jsmath.py
index 63bcf248e..47e1b1836 100644
--- a/sphinx/ext/jsmath.py
+++ b/sphinx/ext/jsmath.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.jsmath
~~~~~~~~~~~~~~~~~
@@ -10,74 +9,32 @@
:license: BSD, see LICENSE for details.
"""
-from docutils import nodes
+import warnings
+
+from sphinxcontrib.jsmath import ( # NOQA
+ html_visit_math,
+ html_visit_displaymath,
+ install_jsmath,
+)
import sphinx
-from sphinx.errors import ExtensionError
-from sphinx.locale import _
-from sphinx.util.math import get_node_equation_number
+from sphinx.deprecation import RemovedInSphinx40Warning
if False:
# For type annotation
from typing import Any, Dict # NOQA
from sphinx.application import Sphinx # NOQA
- from sphinx.environment import BuildEnvironment # NOQA
-
-
-def html_visit_math(self, node):
- # type: (nodes.NodeVisitor, nodes.Node) -> None
- self.body.append(self.starttag(node, 'span', '', CLASS='math notranslate nohighlight'))
- self.body.append(self.encode(node.astext()) + '</span>')
- raise nodes.SkipNode
-
-
-def html_visit_displaymath(self, node):
- # type: (nodes.NodeVisitor, nodes.Node) -> None
- if node['nowrap']:
- self.body.append(self.starttag(node, 'div', CLASS='math notranslate nohighlight'))
- self.body.append(self.encode(node.astext()))
- self.body.append('</div>')
- raise nodes.SkipNode
- for i, part in enumerate(node.astext().split('\n\n')):
- part = self.encode(part)
- if i == 0:
- # necessary to e.g. set the id property correctly
- if node['number']:
- number = get_node_equation_number(self, node)
- self.body.append('<span class="eqno">(%s)' % number)
- self.add_permalink_ref(node, _('Permalink to this equation'))
- self.body.append('</span>')
- self.body.append(self.starttag(node, 'div', CLASS='math notranslate nohighlight'))
- else:
- # but only once!
- self.body.append('<div class="math">')
- if '&' in part or '\\\\' in part:
- self.body.append('\\begin{split}' + part + '\\end{split}')
- else:
- self.body.append(part)
- self.body.append('</div>\n')
- raise nodes.SkipNode
-
-
-def install_jsmath(app, env):
- # type: (Sphinx, BuildEnvironment) -> None
- if app.builder.format != 'html' or app.builder.math_renderer_name != 'jsmath': # type: ignore # NOQA
- return
- if not app.config.jsmath_path:
- raise ExtensionError('jsmath_path config value must be set for the '
- 'jsmath extension to work')
-
- if env.get_domain('math').has_equations(): # type: ignore
- # Enable jsmath only if equations exists
- app.builder.add_js_file(app.config.jsmath_path) # type: ignore
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
- app.add_html_math_renderer('jsmath',
- (html_visit_math, None),
- (html_visit_displaymath, None))
+ # type: (Sphinx) -> Dict[str, Any]
+ warnings.warn('sphinx.ext.jsmath has been moved to sphinxcontrib-jsmath.',
+ RemovedInSphinx40Warning)
+
+ app.setup_extension('sphinxcontrib.jsmath')
- app.add_config_value('jsmath_path', '', False)
- app.connect('env-check-consistency', install_jsmath)
- return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
+ return {
+ 'version': sphinx.__display_version__,
+ 'parallel_read_safe': True,
+ 'parallel_write_safe': True,
+ }
diff --git a/sphinx/ext/linkcode.py b/sphinx/ext/linkcode.py
index 843b64efb..3ebdf7bc9 100644
--- a/sphinx/ext/linkcode.py
+++ b/sphinx/ext/linkcode.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.linkcode
~~~~~~~~~~~~~~~~~~~
@@ -35,16 +34,16 @@ def doctree_read(app, doctree):
raise LinkcodeError(
"Function `linkcode_resolve` is not given in conf.py")
- domain_keys = dict(
- py=['module', 'fullname'],
- c=['names'],
- cpp=['names'],
- js=['object', 'fullname'],
- )
+ domain_keys = {
+ 'py': ['module', 'fullname'],
+ 'c': ['names'],
+ 'cpp': ['names'],
+ 'js': ['object', 'fullname'],
+ }
for objnode in doctree.traverse(addnodes.desc):
domain = objnode.get('domain')
- uris = set() # type: Set[unicode]
+ uris = set() # type: Set[str]
for signode in objnode:
if not isinstance(signode, addnodes.desc_signature):
continue
@@ -70,15 +69,14 @@ def doctree_read(app, doctree):
continue
uris.add(uri)
+ inline = nodes.inline('', _('[source]'), classes=['viewcode-link'])
onlynode = addnodes.only(expr='html')
- onlynode += nodes.reference('', '', internal=False, refuri=uri)
- onlynode[0] += nodes.inline('', _('[source]'),
- classes=['viewcode-link'])
+ onlynode += nodes.reference('', '', inline, internal=False, refuri=uri)
signode += onlynode
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.connect('doctree-read', doctree_read)
app.add_config_value('linkcode_resolve', None, '')
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
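
doctree_read() above pulls the keys listed in domain_keys out of each signature and hands them to the project's linkcode_resolve() callback. An illustrative conf.py resolver for the 'py' domain; the URL layout is invented:

# conf.py
def linkcode_resolve(domain, info):
    # info carries the keys listed in domain_keys above ('module' and
    # 'fullname' for the Python domain); return None to emit no link
    if domain != 'py' or not info.get('module'):
        return None
    return 'https://example.org/src/%s.py' % info['module'].replace('.', '/')
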
diff --git a/sphinx/ext/mathbase.py b/sphinx/ext/mathbase.py
index 70b6d5577..90ad4563d 100644
--- a/sphinx/ext/mathbase.py
+++ b/sphinx/ext/mathbase.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.mathbase
~~~~~~~~~~~~~~~~~~~
@@ -24,8 +23,8 @@ from sphinx.domains.math import MathReferenceRole as EqXRefRole # NOQA # to ke
if False:
# For type annotation
from typing import Any, Callable, List, Tuple # NOQA
- from docutils.writers.html4css1 import Writer # NOQA
from sphinx.application import Sphinx # NOQA
+ from sphinx.writers.html import HTMLTranslator # NOQA
class MathDirective(MathDirectiveBase):
@@ -33,7 +32,7 @@ class MathDirective(MathDirectiveBase):
warnings.warn('sphinx.ext.mathbase.MathDirective is moved to '
'sphinx.directives.patches package.',
RemovedInSphinx30Warning, stacklevel=2)
- return super(MathDirective, self).run()
+ return super().run()
def math_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
@@ -44,7 +43,7 @@ def math_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
def get_node_equation_number(writer, node):
- # type: (Writer, nodes.Node) -> unicode
+ # type: (HTMLTranslator, nodes.math_block) -> str
warnings.warn('sphinx.ext.mathbase.get_node_equation_number() is moved to '
'sphinx.util.math package.',
RemovedInSphinx30Warning, stacklevel=2)
@@ -53,7 +52,7 @@ def get_node_equation_number(writer, node):
def wrap_displaymath(text, label, numbering):
- # type: (unicode, unicode, bool) -> unicode
+ # type: (str, str, bool) -> str
warnings.warn('sphinx.ext.mathbase.wrap_displaymath() is moved to '
'sphinx.util.math package.',
RemovedInSphinx30Warning, stacklevel=2)
@@ -62,7 +61,7 @@ def wrap_displaymath(text, label, numbering):
def is_in_section_title(node):
- # type: (nodes.Node) -> bool
+ # type: (nodes.Element) -> bool
"""Determine whether the node is in a section title"""
from sphinx.util.nodes import traverse_parent
diff --git a/sphinx/ext/mathjax.py b/sphinx/ext/mathjax.py
index 82b2d7eac..5a84e738f 100644
--- a/sphinx/ext/mathjax.py
+++ b/sphinx/ext/mathjax.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.mathjax
~~~~~~~~~~~~~~~~~~
@@ -12,10 +11,13 @@
"""
import json
+from typing import cast
from docutils import nodes
import sphinx
+from sphinx.builders.html import StandaloneHTMLBuilder
+from sphinx.domains.math import MathDomain
from sphinx.errors import ExtensionError
from sphinx.locale import _
from sphinx.util.math import get_node_equation_number
@@ -25,10 +27,11 @@ if False:
from typing import Any, Dict # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
+ from sphinx.writers.html import HTMLTranslator # NOQA
def html_visit_math(self, node):
- # type: (nodes.NodeVisitor, nodes.Node) -> None
+ # type: (HTMLTranslator, nodes.math) -> None
self.body.append(self.starttag(node, 'span', '', CLASS='math notranslate nohighlight'))
self.body.append(self.builder.config.mathjax_inline[0] +
self.encode(node.astext()) +
@@ -37,7 +40,7 @@ def html_visit_math(self, node):
def html_visit_displaymath(self, node):
- # type: (nodes.NodeVisitor, nodes.Node) -> None
+ # type: (HTMLTranslator, nodes.math_block) -> None
self.body.append(self.starttag(node, 'div', CLASS='math notranslate nohighlight'))
if node['nowrap']:
self.body.append(self.encode(node.astext()))
@@ -77,20 +80,22 @@ def install_mathjax(app, env):
raise ExtensionError('mathjax_path config value must be set for the '
'mathjax extension to work')
- if env.get_domain('math').has_equations(): # type: ignore
+ builder = cast(StandaloneHTMLBuilder, app.builder)
+ domain = cast(MathDomain, env.get_domain('math'))
+ if domain.has_equations():
# Enable mathjax only if equations exists
options = {'async': 'async'}
if app.config.mathjax_options:
options.update(app.config.mathjax_options)
- app.builder.add_js_file(app.config.mathjax_path, **options) # type: ignore
+ builder.add_js_file(app.config.mathjax_path, **options)
if app.config.mathjax_config:
body = "MathJax.Hub.Config(%s)" % json.dumps(app.config.mathjax_config)
- app.builder.add_js_file(None, type="text/x-mathjax-config", body=body) # type: ignore # NOQA
+ builder.add_js_file(None, type="text/x-mathjax-config", body=body)
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_html_math_renderer('mathjax',
(html_visit_math, None),
(html_visit_displaymath, None))
@@ -104,6 +109,6 @@ def setup(app):
app.add_config_value('mathjax_inline', [r'\(', r'\)'], 'html')
app.add_config_value('mathjax_display', [r'\[', r'\]'], 'html')
app.add_config_value('mathjax_config', None, 'html')
- app.connect('env-check-consistency', install_mathjax)
+ app.connect('env-updated', install_mathjax)
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
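
install_mathjax() above forwards mathjax_options as extra attributes of the script tag and serializes mathjax_config into an inline text/x-mathjax-config block, but only when the math domain reports equations. An illustrative conf.py, not part of the patch; the values are examples, not defaults:

# conf.py
extensions = ['sphinx.ext.mathjax']

# merged into {'async': 'async'} and forwarded to add_js_file() above
mathjax_options = {'crossorigin': 'anonymous'}
# dumped with json.dumps() into MathJax.Hub.Config(...) above
mathjax_config = {'tex2jax': {'inlineMath': [['$', '$'], [r'\(', r'\)']]}}
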
diff --git a/sphinx/ext/napoleon/__init__.py b/sphinx/ext/napoleon/__init__.py
index 3edec8397..b475dadef 100644
--- a/sphinx/ext/napoleon/__init__.py
+++ b/sphinx/ext/napoleon/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.napoleon
~~~~~~~~~~~~~~~~~~~
@@ -9,11 +8,7 @@
:license: BSD, see LICENSE for details.
"""
-import sys
-
-from six import PY2, iteritems
-
-import sphinx
+from sphinx import __display_version__ as __version__
from sphinx.application import Sphinx
from sphinx.ext.napoleon.docstring import GoogleDocstring, NumpyDocstring
@@ -22,7 +17,7 @@ if False:
from typing import Any, Dict, List # NOQA
-class Config(object):
+class Config:
"""Sphinx napoleon extension settings in `conf.py`.
Listed below are all the settings used by napoleon and their default
@@ -176,10 +171,10 @@ class Config(object):
.. attribute:: attr1
- *int*
-
Description of `attr1`
+ :type: int
+
napoleon_use_param : :obj:`bool` (Defaults to True)
True to use a ``:param:`` role for each function parameter. False to
use a single ``:parameters:`` role for all the parameters.
@@ -274,14 +269,14 @@ class Config(object):
def __init__(self, **settings):
# type: (Any) -> None
- for name, (default, rebuild) in iteritems(self._config_values):
+ for name, (default, rebuild) in self._config_values.items():
setattr(self, name, default)
- for name, value in iteritems(settings):
+ for name, value in settings.items():
setattr(self, name, value)
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
"""Sphinx extension setup function.
When the extension is loaded, Sphinx imports this module and executes
@@ -304,7 +299,8 @@ def setup(app):
"""
if not isinstance(app, Sphinx):
- return # probably called by tests
+ # probably called by tests
+ return {'version': __version__, 'parallel_read_safe': True}
_patch_python_domain()
@@ -312,9 +308,9 @@ def setup(app):
app.connect('autodoc-process-docstring', _process_docstring)
app.connect('autodoc-skip-member', _skip_member)
- for name, (default, rebuild) in iteritems(Config._config_values):
+ for name, (default, rebuild) in Config._config_values.items():
app.add_config_value(name, default, rebuild)
- return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
+ return {'version': __version__, 'parallel_read_safe': True}
def _patch_python_domain():
@@ -338,7 +334,7 @@ def _patch_python_domain():
def _process_docstring(app, what, name, obj, options, lines):
- # type: (Sphinx, unicode, unicode, Any, Any, List[unicode]) -> None
+ # type: (Sphinx, str, str, Any, Any, List[str]) -> None
"""Process the docstring for a given python object.
Called when autodoc has read and processed a docstring. `lines` is a list
@@ -388,7 +384,7 @@ def _process_docstring(app, what, name, obj, options, lines):
def _skip_member(app, what, name, obj, skip, options):
- # type: (Sphinx, unicode, unicode, Any, bool, Any) -> bool
+ # type: (Sphinx, str, str, Any, bool, Any) -> bool
"""Determine if private and special class members are included in docs.
The following settings in conf.py determine if private and special class
@@ -435,34 +431,26 @@ def _skip_member(app, what, name, obj, skip, options):
if name != '__weakref__' and has_doc and is_member:
cls_is_owner = False
if what == 'class' or what == 'exception':
- if PY2:
- cls = getattr(obj, 'im_class', getattr(obj, '__objclass__',
- None))
- cls_is_owner = (cls and hasattr(cls, name) and
- name in cls.__dict__)
- elif sys.version_info >= (3, 3):
- qualname = getattr(obj, '__qualname__', '')
- cls_path, _, _ = qualname.rpartition('.')
- if cls_path:
- try:
- if '.' in cls_path:
- import importlib
- import functools
-
- mod = importlib.import_module(obj.__module__)
- mod_path = cls_path.split('.')
- cls = functools.reduce(getattr, mod_path, mod)
- else:
- cls = obj.__globals__[cls_path]
- except Exception:
- cls_is_owner = False
+ qualname = getattr(obj, '__qualname__', '')
+ cls_path, _, _ = qualname.rpartition('.')
+ if cls_path:
+ try:
+ if '.' in cls_path:
+ import importlib
+ import functools
+
+ mod = importlib.import_module(obj.__module__)
+ mod_path = cls_path.split('.')
+ cls = functools.reduce(getattr, mod_path, mod)
else:
- cls_is_owner = (cls and hasattr(cls, name) and
- name in cls.__dict__)
- else:
+ cls = obj.__globals__[cls_path]
+ except Exception:
cls_is_owner = False
+ else:
+ cls_is_owner = (cls and hasattr(cls, name) and # type: ignore
+ name in cls.__dict__)
else:
- cls_is_owner = True
+ cls_is_owner = False
if what == 'module' or cls_is_owner:
is_init = (name == '__init__')
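
The _skip_member() rewrite above drops the Python 2 branches and resolves the owning class from __qualname__ alone, importing the module and walking nested class names with functools.reduce. That resolution step as a standalone sketch, without the surrounding error handling:

import functools
import importlib

def owning_class(obj):
    # climb from a function to the class that defines it via __qualname__,
    # mirroring the branch in _skip_member() above
    qualname = getattr(obj, '__qualname__', '')
    cls_path, _, _ = qualname.rpartition('.')
    if not cls_path:
        return None
    if '.' in cls_path:
        # nested classes: resolve each attribute in turn from the module
        mod = importlib.import_module(obj.__module__)
        return functools.reduce(getattr, cls_path.split('.'), mod)
    # top-level class: look it up in the function's globals
    return obj.__globals__[cls_path]
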
diff --git a/sphinx/ext/napoleon/docstring.py b/sphinx/ext/napoleon/docstring.py
index f8141278f..00a41afe8 100644
--- a/sphinx/ext/napoleon/docstring.py
+++ b/sphinx/ext/napoleon/docstring.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.napoleon.docstring
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -11,21 +10,16 @@
:license: BSD, see LICENSE for details.
"""
-import collections
import inspect
import re
from functools import partial
-from six import string_types, u
-from six.moves import range
-
from sphinx.ext.napoleon.iterators import modify_iter
from sphinx.locale import _
-from sphinx.util.pycompat import UnicodeMixin
if False:
# For type annotation
- from typing import Any, Callable, Dict, List, Tuple, Union # NOQA
+ from typing import Any, Callable, Dict, List, Tuple, Type, Union # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config as SphinxConfig # NOQA
@@ -43,7 +37,7 @@ _enumerated_list_regex = re.compile(
r'(?(paren)\)|\.)(\s+\S|\s*$)')
-class GoogleDocstring(UnicodeMixin):
+class GoogleDocstring:
"""Convert Google style docstrings to reStructuredText.
Parameters
@@ -105,9 +99,13 @@ class GoogleDocstring(UnicodeMixin):
<BLANKLINE>
"""
+
+ _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
+ r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
+
def __init__(self, docstring, config=None, app=None, what='', name='',
obj=None, options=None):
- # type: (Union[unicode, List[unicode]], SphinxConfig, Sphinx, unicode, unicode, Any, Any) -> None # NOQA
+ # type: (Union[str, List[str]], SphinxConfig, Sphinx, str, str, Any, Any) -> None
self._config = config
self._app = app
@@ -120,7 +118,7 @@ class GoogleDocstring(UnicodeMixin):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
- elif isinstance(obj, collections.Callable): # type: ignore
+ elif callable(obj):
what = 'function'
else:
what = 'object'
@@ -129,15 +127,16 @@ class GoogleDocstring(UnicodeMixin):
self._name = name
self._obj = obj
self._opt = options
- if isinstance(docstring, string_types):
- docstring = docstring.splitlines()
- self._lines = docstring
- self._line_iter = modify_iter(docstring, modifier=lambda s: s.rstrip())
- self._parsed_lines = [] # type: List[unicode]
+ if isinstance(docstring, str):
+ lines = docstring.splitlines()
+ else:
+ lines = docstring
+ self._line_iter = modify_iter(lines, modifier=lambda s: s.rstrip())
+ self._parsed_lines = [] # type: List[str]
self._is_in_section = False
self._section_indent = 0
if not hasattr(self, '_directive_sections'):
- self._directive_sections = [] # type: List[unicode]
+ self._directive_sections = [] # type: List[str]
if not hasattr(self, '_sections'):
self._sections = {
'args': self._parse_parameters_section,
@@ -170,14 +169,14 @@ class GoogleDocstring(UnicodeMixin):
'warns': self._parse_warns_section,
'yield': self._parse_yields_section,
'yields': self._parse_yields_section,
- } # type: Dict[unicode, Callable]
+ } # type: Dict[str, Callable]
self._load_custom_sections()
self._parse()
- def __unicode__(self):
- # type: () -> unicode
+ def __str__(self):
+ # type: () -> str
"""Return the parsed docstring in reStructuredText format.
Returns
@@ -186,10 +185,10 @@ class GoogleDocstring(UnicodeMixin):
Unicode version of the docstring.
"""
- return u('\n').join(self.lines())
+ return '\n'.join(self.lines())
def lines(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
"""Return the parsed lines of the docstring in reStructuredText format.
Returns
@@ -201,7 +200,7 @@ class GoogleDocstring(UnicodeMixin):
return self._parsed_lines
def _consume_indented_block(self, indent=1):
- # type: (int) -> List[unicode]
+ # type: (int) -> List[str]
lines = []
line = self._line_iter.peek()
while(not self._is_section_break() and
@@ -211,7 +210,7 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _consume_contiguous(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
lines = []
while (self._line_iter.has_next() and
self._line_iter.peek() and
@@ -220,7 +219,7 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _consume_empty(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
lines = []
line = self._line_iter.peek()
while self._line_iter.has_next() and not line:
@@ -229,11 +228,11 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _consume_field(self, parse_type=True, prefer_type=False):
- # type: (bool, bool) -> Tuple[unicode, unicode, List[unicode]]
+ # type: (bool, bool) -> Tuple[str, str, List[str]]
line = next(self._line_iter)
before, colon, after = self._partition_field_on_colon(line)
- _name, _type, _desc = before, '', after # type: unicode, unicode, unicode
+ _name, _type, _desc = before, '', after
if parse_type:
match = _google_typed_arg_regex.match(before)
@@ -251,7 +250,7 @@ class GoogleDocstring(UnicodeMixin):
return _name, _type, _descs
def _consume_fields(self, parse_type=True, prefer_type=False):
- # type: (bool, bool) -> List[Tuple[unicode, unicode, List[unicode]]]
+ # type: (bool, bool) -> List[Tuple[str, str, List[str]]]
self._consume_empty()
fields = []
while not self._is_section_break():
@@ -261,21 +260,22 @@ class GoogleDocstring(UnicodeMixin):
return fields
def _consume_inline_attribute(self):
- # type: () -> Tuple[unicode, List[unicode]]
+ # type: () -> Tuple[str, List[str]]
line = next(self._line_iter)
_type, colon, _desc = self._partition_field_on_colon(line)
- if not colon:
+ if not colon or not _desc:
_type, _desc = _desc, _type
+ _desc += colon
_descs = [_desc] + self._dedent(self._consume_to_end())
_descs = self.__class__(_descs, self._config).lines()
return _type, _descs
def _consume_returns_section(self):
- # type: () -> List[Tuple[unicode, unicode, List[unicode]]]
+ # type: () -> List[Tuple[str, str, List[str]]]
lines = self._dedent(self._consume_to_next_section())
if lines:
before, colon, after = self._partition_field_on_colon(lines[0])
- _name, _type, _desc = '', '', lines # type: unicode, unicode, List[unicode]
+ _name, _type, _desc = '', '', lines
if colon:
if after:
@@ -291,12 +291,12 @@ class GoogleDocstring(UnicodeMixin):
return []
def _consume_usage_section(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
lines = self._dedent(self._consume_to_next_section())
return lines
def _consume_section_header(self):
- # type: () -> unicode
+ # type: () -> str
section = next(self._line_iter)
stripped_section = section.strip(':')
if stripped_section.lower() in self._sections:
@@ -304,14 +304,14 @@ class GoogleDocstring(UnicodeMixin):
return section
def _consume_to_end(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
lines = []
while self._line_iter.has_next():
lines.append(next(self._line_iter))
return lines
def _consume_to_next_section(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
self._consume_empty()
lines = []
while not self._is_section_break():
@@ -319,7 +319,7 @@ class GoogleDocstring(UnicodeMixin):
return lines + self._consume_empty()
def _dedent(self, lines, full=False):
- # type: (List[unicode], bool) -> List[unicode]
+ # type: (List[str], bool) -> List[str]
if full:
return [line.lstrip() for line in lines]
else:
@@ -327,7 +327,7 @@ class GoogleDocstring(UnicodeMixin):
return [line[min_indent:] for line in lines]
def _escape_args_and_kwargs(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
if name[:2] == '**':
return r'\*\*' + name[2:]
elif name[:1] == '*':
@@ -336,32 +336,32 @@ class GoogleDocstring(UnicodeMixin):
return name
def _fix_field_desc(self, desc):
- # type: (List[unicode]) -> List[unicode]
+ # type: (List[str]) -> List[str]
if self._is_list(desc):
- desc = [u''] + desc
+ desc = [''] + desc
elif desc[0].endswith('::'):
desc_block = desc[1:]
indent = self._get_indent(desc[0])
block_indent = self._get_initial_indent(desc_block)
if block_indent > indent:
- desc = [u''] + desc
+ desc = [''] + desc
else:
desc = ['', desc[0]] + self._indent(desc_block, 4)
return desc
def _format_admonition(self, admonition, lines):
- # type: (unicode, List[unicode]) -> List[unicode]
+ # type: (str, List[str]) -> List[str]
lines = self._strip_empty(lines)
if len(lines) == 1:
return ['.. %s:: %s' % (admonition, lines[0].strip()), '']
elif lines:
lines = self._indent(self._dedent(lines), 3)
- return [u'.. %s::' % admonition, u''] + lines + [u'']
+ return ['.. %s::' % admonition, ''] + lines + ['']
else:
- return [u'.. %s::' % admonition, u'']
+ return ['.. %s::' % admonition, '']
def _format_block(self, prefix, lines, padding=None):
- # type: (unicode, List[unicode], unicode) -> List[unicode]
+ # type: (str, List[str], str) -> List[str]
if lines:
if padding is None:
padding = ' ' * len(prefix)
@@ -379,7 +379,7 @@ class GoogleDocstring(UnicodeMixin):
def _format_docutils_params(self, fields, field_role='param',
type_role='type'):
- # type: (List[Tuple[unicode, unicode, List[unicode]]], unicode, unicode) -> List[unicode] # NOQA
+ # type: (List[Tuple[str, str, List[str]]], str, str) -> List[str]
lines = []
for _name, _type, _desc in fields:
_desc = self._strip_empty(_desc)
@@ -395,14 +395,14 @@ class GoogleDocstring(UnicodeMixin):
return lines + ['']
def _format_field(self, _name, _type, _desc):
- # type: (unicode, unicode, List[unicode]) -> List[unicode]
+ # type: (str, str, List[str]) -> List[str]
_desc = self._strip_empty(_desc)
has_desc = any(_desc)
separator = has_desc and ' -- ' or ''
if _name:
if _type:
if '`' in _type:
- field = '**%s** (%s)%s' % (_name, _type, separator) # type: unicode
+ field = '**%s** (%s)%s' % (_name, _type, separator)
else:
field = '**%s** (*%s*)%s' % (_name, _type, separator)
else:
@@ -425,11 +425,11 @@ class GoogleDocstring(UnicodeMixin):
return [field]
def _format_fields(self, field_type, fields):
- # type: (unicode, List[Tuple[unicode, unicode, List[unicode]]]) -> List[unicode]
+ # type: (str, List[Tuple[str, str, List[str]]]) -> List[str]
field_type = ':%s:' % field_type.strip()
padding = ' ' * len(field_type)
multi = len(fields) > 1
- lines = [] # type: List[unicode]
+ lines = [] # type: List[str]
for _name, _type, _desc in fields:
field = self._format_field(_name, _type, _desc)
if multi:
@@ -454,21 +454,21 @@ class GoogleDocstring(UnicodeMixin):
return 0
def _get_indent(self, line):
- # type: (unicode) -> int
+ # type: (str) -> int
for i, s in enumerate(line):
if not s.isspace():
return i
return len(line)
def _get_initial_indent(self, lines):
- # type: (List[unicode]) -> int
+ # type: (List[str]) -> int
for line in lines:
if line:
return self._get_indent(line)
return 0
def _get_min_indent(self, lines):
- # type: (List[unicode]) -> int
+ # type: (List[str]) -> int
min_indent = None
for line in lines:
if line:
@@ -480,11 +480,11 @@ class GoogleDocstring(UnicodeMixin):
return min_indent or 0
def _indent(self, lines, n=4):
- # type: (List[unicode], int) -> List[unicode]
+ # type: (List[str], int) -> List[str]
return [(' ' * n) + line for line in lines]
def _is_indented(self, line, indent=1):
- # type: (unicode, int) -> bool
+ # type: (str, int) -> bool
for i, s in enumerate(line):
if i >= indent:
return True
@@ -493,7 +493,7 @@ class GoogleDocstring(UnicodeMixin):
return False
def _is_list(self, lines):
- # type: (List[unicode]) -> bool
+ # type: (List[str]) -> bool
if not lines:
return False
if _bullet_list_regex.match(lines[0]):
@@ -539,7 +539,7 @@ class GoogleDocstring(UnicodeMixin):
if self._config.napoleon_custom_sections is not None:
for entry in self._config.napoleon_custom_sections:
- if isinstance(entry, string_types):
+ if isinstance(entry, str):
# if entry is just a label, add to sections list,
# using generic section logic.
self._sections[entry.lower()] = self._parse_custom_generic_section
@@ -558,7 +558,7 @@ class GoogleDocstring(UnicodeMixin):
if self._name and (self._what == 'attribute' or self._what == 'data'):
# Implicit stop using StopIteration no longer allowed in
# Python 3.7; see PEP 479
- res = [] # type: List[unicode]
+ res = [] # type: List[str]
try:
res = self._parse_attribute_docstring()
except StopIteration:
@@ -587,12 +587,12 @@ class GoogleDocstring(UnicodeMixin):
self._parsed_lines.extend(lines)
def _parse_admonition(self, admonition, section):
- # type (unicode, unicode) -> List[unicode]
+ # type: (str, str) -> List[str]
lines = self._consume_to_next_section()
return self._format_admonition(admonition, lines)
def _parse_attribute_docstring(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
_type, _desc = self._consume_inline_attribute()
lines = self._format_field('', '', _desc)
if _type:
@@ -600,11 +600,12 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _parse_attributes_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
lines = []
for _name, _type, _desc in self._consume_fields():
if self._config.napoleon_use_ivar:
- field = ':ivar %s: ' % _name # type: unicode
+ _name = self._qualify_name(_name, self._obj)
+ field = ':ivar %s: ' % _name
lines.extend(self._format_block(field, _desc))
if _type:
lines.append(':vartype %s: %s' % (_name, _type))
@@ -621,11 +622,11 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _parse_examples_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
labels = {
'example': _('Example'),
'examples': _('Examples'),
- } # type: Dict[unicode, unicode]
+ }
use_admonition = self._config.napoleon_use_admonition_for_examples
label = labels.get(section.lower(), section)
return self._parse_generic_section(label, use_admonition)
@@ -635,19 +636,19 @@ class GoogleDocstring(UnicodeMixin):
return self._parse_generic_section(section, False)
def _parse_usage_section(self, section):
- # type: (unicode) -> List[unicode]
- header = ['.. rubric:: Usage:', ''] # type: List[unicode]
- block = ['.. code-block:: python', ''] # type: List[unicode]
+ # type: (str) -> List[str]
+ header = ['.. rubric:: Usage:', '']
+ block = ['.. code-block:: python', '']
lines = self._consume_usage_section()
lines = self._indent(lines, 3)
return header + block + lines + ['']
def _parse_generic_section(self, section, use_admonition):
- # type: (unicode, bool) -> List[unicode]
+ # type: (str, bool) -> List[str]
lines = self._strip_empty(self._consume_to_next_section())
lines = self._dedent(lines)
if use_admonition:
- header = '.. admonition:: %s' % section # type: unicode
+ header = '.. admonition:: %s' % section
lines = self._indent(lines, 3)
else:
header = '.. rubric:: %s' % section
@@ -657,7 +658,7 @@ class GoogleDocstring(UnicodeMixin):
return [header, '']
def _parse_keyword_arguments_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
fields = self._consume_fields()
if self._config.napoleon_use_keyword:
return self._format_docutils_params(
@@ -668,26 +669,26 @@ class GoogleDocstring(UnicodeMixin):
return self._format_fields(_('Keyword Arguments'), fields)
def _parse_methods_section(self, section):
- # type: (unicode) -> List[unicode]
- lines = [] # type: List[unicode]
+ # type: (str) -> List[str]
+ lines = [] # type: List[str]
for _name, _type, _desc in self._consume_fields(parse_type=False):
lines.append('.. method:: %s' % _name)
if _desc:
- lines.extend([u''] + self._indent(_desc, 3))
+ lines.extend([''] + self._indent(_desc, 3))
lines.append('')
return lines
def _parse_notes_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
use_admonition = self._config.napoleon_use_admonition_for_notes
return self._parse_generic_section(_('Notes'), use_admonition)
def _parse_other_parameters_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
return self._format_fields(_('Other Parameters'), self._consume_fields())
def _parse_parameters_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
fields = self._consume_fields()
if self._config.napoleon_use_param:
return self._format_docutils_params(fields)
@@ -695,51 +696,28 @@ class GoogleDocstring(UnicodeMixin):
return self._format_fields(_('Parameters'), fields)
def _parse_raises_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
fields = self._consume_fields(parse_type=False, prefer_type=True)
- field_type = ':raises:'
- padding = ' ' * len(field_type)
- multi = len(fields) > 1
- lines = [] # type: List[unicode]
+ lines = [] # type: List[str]
for _name, _type, _desc in fields:
+ m = self._name_rgx.match(_type).groupdict()
+ if m['role']:
+ _type = m['name']
+ _type = ' ' + _type if _type else ''
_desc = self._strip_empty(_desc)
- has_desc = any(_desc)
- separator = has_desc and ' -- ' or ''
- if _type:
- has_refs = '`' in _type or ':' in _type
- has_space = any(c in ' \t\n\v\f ' for c in _type)
-
- if not has_refs and not has_space:
- _type = ':exc:`%s`%s' % (_type, separator)
- elif has_desc and has_space:
- _type = '*%s*%s' % (_type, separator)
- else:
- _type = '%s%s' % (_type, separator)
-
- if has_desc:
- field = [_type + _desc[0]] + _desc[1:]
- else:
- field = [_type]
- else:
- field = _desc
- if multi:
- if lines:
- lines.extend(self._format_block(padding + ' * ', field))
- else:
- lines.extend(self._format_block(field_type + ' * ', field))
- else:
- lines.extend(self._format_block(field_type + ' ', field))
- if lines and lines[-1]:
+ _descs = ' ' + '\n '.join(_desc) if any(_desc) else ''
+ lines.append(':raises%s:%s' % (_type, _descs))
+ if lines:
lines.append('')
return lines
def _parse_references_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
use_admonition = self._config.napoleon_use_admonition_for_references
return self._parse_generic_section(_('References'), use_admonition)
def _parse_returns_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
fields = self._consume_returns_section()
multi = len(fields) > 1
if multi:
@@ -747,7 +725,7 @@ class GoogleDocstring(UnicodeMixin):
else:
use_rtype = self._config.napoleon_use_rtype
- lines = [] # type: List[unicode]
+ lines = [] # type: List[str]
for _name, _type, _desc in fields:
if use_rtype:
field = self._format_field(_name, '', _desc)
@@ -768,23 +746,23 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _parse_see_also_section(self, section):
- # type (unicode) -> List[unicode]
+ # type: (str) -> List[str]
return self._parse_admonition('seealso', section)
def _parse_warns_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
return self._format_fields(_('Warns'), self._consume_fields())
def _parse_yields_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
fields = self._consume_returns_section()
return self._format_fields(_('Yields'), fields)
def _partition_field_on_colon(self, line):
- # type: (unicode) -> Tuple[unicode, unicode, unicode]
+ # type: (str) -> Tuple[str, str, str]
before_colon = []
after_colon = []
- colon = '' # type: unicode
+ colon = ''
found_colon = False
for i, source in enumerate(_xref_regex.split(line)):
if found_colon:
@@ -803,8 +781,20 @@ class GoogleDocstring(UnicodeMixin):
colon,
"".join(after_colon).strip())
+ def _qualify_name(self, attr_name, klass):
+ # type: (str, Type) -> str
+ if klass and '.' not in attr_name:
+ if attr_name.startswith('~'):
+ attr_name = attr_name[1:]
+ try:
+ q = klass.__qualname__
+ except AttributeError:
+ q = klass.__name__
+ return '~%s.%s' % (q, attr_name)
+ return attr_name
+
def _strip_empty(self, lines):
- # type: (List[unicode]) -> List[unicode]
+ # type: (List[str]) -> List[str]
if lines:
start = -1
for i, line in enumerate(lines):
@@ -919,13 +909,12 @@ class NumpyDocstring(GoogleDocstring):
"""
def __init__(self, docstring, config=None, app=None, what='', name='',
obj=None, options=None):
- # type: (Union[unicode, List[unicode]], SphinxConfig, Sphinx, unicode, unicode, Any, Any) -> None # NOQA
+ # type: (Union[str, List[str]], SphinxConfig, Sphinx, str, str, Any, Any) -> None
self._directive_sections = ['.. index::']
- super(NumpyDocstring, self).__init__(docstring, config, app, what,
- name, obj, options)
+ super().__init__(docstring, config, app, what, name, obj, options)
def _consume_field(self, parse_type=True, prefer_type=False):
- # type: (bool, bool) -> Tuple[unicode, unicode, List[unicode]]
+ # type: (bool, bool) -> Tuple[str, str, List[str]]
line = next(self._line_iter)
if parse_type:
_name, _, _type = self._partition_field_on_colon(line)
@@ -942,11 +931,11 @@ class NumpyDocstring(GoogleDocstring):
return _name, _type, _desc
def _consume_returns_section(self):
- # type: () -> List[Tuple[unicode, unicode, List[unicode]]]
+ # type: () -> List[Tuple[str, str, List[str]]]
return self._consume_fields(prefer_type=True)
def _consume_section_header(self):
- # type: () -> unicode
+ # type: () -> str
section = next(self._line_iter)
if not _directive_regex.match(section):
# Consume the header underline
@@ -967,7 +956,7 @@ class NumpyDocstring(GoogleDocstring):
# type: () -> bool
section, underline = self._line_iter.peek(2)
section = section.lower()
- if section in self._sections and isinstance(underline, string_types):
+ if section in self._sections and isinstance(underline, str):
return bool(_numpy_section_regex.match(underline))
elif self._directive_sections:
if _directive_regex.match(section):
@@ -976,11 +965,8 @@ class NumpyDocstring(GoogleDocstring):
return True
return False
- _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
- r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
-
def _parse_see_also_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
lines = self._consume_to_next_section()
try:
return self._parse_numpydoc_see_also_section(lines)
@@ -988,7 +974,7 @@ class NumpyDocstring(GoogleDocstring):
return self._format_admonition('seealso', lines)
def _parse_numpydoc_see_also_section(self, content):
- # type: (List[unicode]) -> List[unicode]
+ # type: (List[str]) -> List[str]
"""
Derived from the NumpyDoc implementation of _parse_see_also.
@@ -1003,7 +989,7 @@ class NumpyDocstring(GoogleDocstring):
items = []
def parse_item_name(text):
- # type: (unicode) -> Tuple[unicode, unicode]
+ # type: (str) -> Tuple[str, str]
"""Match ':role:`name`' or 'name'"""
m = self._name_rgx.match(text)
if m:
@@ -1015,7 +1001,7 @@ class NumpyDocstring(GoogleDocstring):
raise ValueError("%s is not a item name" % text)
def push_item(name, rest):
- # type: (unicode, List[unicode]) -> None
+ # type: (str, List[str]) -> None
if not name:
return
name, role = parse_item_name(name)
@@ -1023,7 +1009,7 @@ class NumpyDocstring(GoogleDocstring):
del rest[:]
current_func = None
- rest = [] # type: List[unicode]
+ rest = [] # type: List[str]
for line in content:
if not line.strip():
@@ -1069,12 +1055,12 @@ class NumpyDocstring(GoogleDocstring):
'const': 'const',
'attribute': 'attr',
'attr': 'attr'
- } # type: Dict[unicode, unicode]
+ }
if self._what is None:
- func_role = 'obj' # type: unicode
+ func_role = 'obj'
else:
func_role = roles.get(self._what, '')
- lines = [] # type: List[unicode]
+ lines = [] # type: List[str]
last_had_desc = True
for func, desc, role in items:
if role:
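
For context, a minimal sketch of what the reworked raises handling above now emits: one ':raises <type>: <description>' field line per exception, with an explicit :exc:`...` role reduced to the bare exception name. The regex and the format_raises helper are simplified, illustrative stand-ins for GoogleDocstring._name_rgx and _parse_raises_section, not code taken from this diff:

    import re

    # Simplified stand-in for GoogleDocstring._name_rgx (illustrative only).
    _name_rgx = re.compile(r":(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`"
                           r"|(?P<name2>[a-zA-Z0-9_.-]+)")

    def format_raises(fields):
        # fields: (name, type, description-lines) tuples, as _consume_fields() yields.
        lines = []
        for _name, _type, _desc in fields:
            m = _name_rgx.match(_type) if _type else None
            if m and m.group('role'):
                _type = m.group('name')      # ":exc:`OSError`" -> "OSError"
            _type = ' ' + _type if _type else ''
            _descs = ' ' + '\n    '.join(_desc) if any(_desc) else ''
            lines.append(':raises%s:%s' % (_type, _descs))
        if lines:
            lines.append('')
        return lines

    print('\n'.join(format_raises([
        ('', 'ValueError', ['If the host name is empty.']),
        ('', ':exc:`OSError`', ['If the socket cannot be opened.']),
    ])))

The old implementation built a bulleted list under a single ':raises:' field; the new one keeps each exception on its own field line, which is what the sketch reproduces.
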
diff --git a/sphinx/ext/napoleon/iterators.py b/sphinx/ext/napoleon/iterators.py
index 478def60b..d9de4fb80 100644
--- a/sphinx/ext/napoleon/iterators.py
+++ b/sphinx/ext/napoleon/iterators.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.napoleon.iterators
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -18,7 +17,7 @@ if False:
from typing import Any, Iterable # NOQA
-class peek_iter(object):
+class peek_iter:
"""An iterator object that supports peeking ahead.
Parameters
@@ -232,7 +231,7 @@ class modify_iter(peek_iter):
if not callable(self.modifier):
raise TypeError('modify_iter(o, modifier): '
'modifier must be callable')
- super(modify_iter, self).__init__(*args)
+ super().__init__(*args)
def _fillcache(self, n):
# type: (int) -> None
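
The iterators.py hunk above is a pure Python 3 modernisation: the explicit object base class disappears and super() is called without arguments. A minimal, self-contained illustration of both idioms (Base and Child are invented names for the example, not Sphinx classes):

    class Base:
        def __init__(self, *args):
            self.args = args

    class Child(Base):                  # no explicit "(object)" needed in Python 3
        def __init__(self, *args):
            super().__init__(*args)     # zero-argument super(), as in modify_iter above

    assert Child(1, 2).args == (1, 2)
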
diff --git a/sphinx/ext/todo.py b/sphinx/ext/todo.py
index 65e4408cf..492578f93 100644
--- a/sphinx/ext/todo.py
+++ b/sphinx/ext/todo.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.todo
~~~~~~~~~~~~~~~
@@ -12,6 +11,8 @@
:license: BSD, see LICENSE for details.
"""
+from typing import cast
+
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
@@ -26,9 +27,11 @@ from sphinx.util.texescape import tex_escape_map
if False:
# For type annotation
- from typing import Any, Dict, Iterable, List # NOQA
+ from typing import Any, Dict, Iterable, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
+ from sphinx.writers.html import HTMLTranslator # NOQA
+ from sphinx.writers.latex import LaTeXTranslator # NOQA
logger = logging.getLogger(__name__)
@@ -60,22 +63,24 @@ class Todo(BaseAdmonition, SphinxDirective):
if not self.options.get('class'):
self.options['class'] = ['admonition-todo']
- (todo,) = super(Todo, self).run()
+ (todo,) = super().run() # type: Tuple[nodes.Node]
if isinstance(todo, nodes.system_message):
return [todo]
-
- todo.insert(0, nodes.title(text=_('Todo')))
- set_source_info(self, todo)
-
- targetid = 'index-%s' % self.env.new_serialno('index')
- # Stash the target to be retrieved later in latex_visit_todo_node.
- todo['targetref'] = '%s:%s' % (self.env.docname, targetid)
- targetnode = nodes.target('', '', ids=[targetid])
- return [targetnode, todo]
+ elif isinstance(todo, todo_node):
+ todo.insert(0, nodes.title(text=_('Todo')))
+ set_source_info(self, todo)
+
+ targetid = 'index-%s' % self.env.new_serialno('index')
+ # Stash the target to be retrieved later in latex_visit_todo_node.
+ todo['targetref'] = '%s:%s' % (self.env.docname, targetid)
+ targetnode = nodes.target('', '', ids=[targetid])
+ return [targetnode, todo]
+ else:
+ raise RuntimeError # never reached here
def process_todos(app, doctree):
- # type: (Sphinx, nodes.Node) -> None
+ # type: (Sphinx, nodes.document) -> None
# collect all todos in the environment
# this is not done in the directive itself because some transformations
# must have already been run, e.g. substitutions
@@ -102,7 +107,8 @@ def process_todos(app, doctree):
})
if env.config.todo_emit_warnings:
- logger.warning(__("TODO entry found: %s"), node[1].astext(),
+ label = cast(nodes.Element, node[1])
+ logger.warning(__("TODO entry found: %s"), label.astext(),
location=node)
@@ -118,14 +124,15 @@ class TodoList(SphinxDirective):
option_spec = {} # type: Dict
def run(self):
- # type: () -> List[todolist]
+ # type: () -> List[nodes.Node]
# Simply insert an empty todolist node which will be replaced later
# when process_todo_nodes is called
return [todolist('')]
def process_todo_nodes(app, doctree, fromdocname):
- # type: (Sphinx, nodes.Node, unicode) -> None
+ # type: (Sphinx, nodes.document, str) -> None
+ node = None # type: nodes.Element
if not app.config['todo_include_todos']:
for node in doctree.traverse(todo_node):
node.parent.remove(node)
@@ -139,7 +146,7 @@ def process_todo_nodes(app, doctree, fromdocname):
for node in doctree.traverse(todolist):
if node.get('ids'):
- content = [nodes.target()]
+ content = [nodes.target()] # type: List[nodes.Element]
else:
content = []
@@ -195,7 +202,7 @@ def process_todo_nodes(app, doctree, fromdocname):
def purge_todos(app, env, docname):
- # type: (Sphinx, BuildEnvironment, unicode) -> None
+ # type: (Sphinx, BuildEnvironment, str) -> None
if not hasattr(env, 'todo_all_todos'):
return
env.todo_all_todos = [todo for todo in env.todo_all_todos # type: ignore
@@ -203,7 +210,7 @@ def purge_todos(app, env, docname):
def merge_info(app, env, docnames, other):
- # type: (Sphinx, BuildEnvironment, Iterable[unicode], BuildEnvironment) -> None
+ # type: (Sphinx, BuildEnvironment, Iterable[str], BuildEnvironment) -> None
if not hasattr(other, 'todo_all_todos'):
return
if not hasattr(env, 'todo_all_todos'):
@@ -212,35 +219,36 @@ def merge_info(app, env, docnames, other):
def visit_todo_node(self, node):
- # type: (nodes.NodeVisitor, todo_node) -> None
+ # type: (HTMLTranslator, todo_node) -> None
self.visit_admonition(node)
- # self.visit_admonition(node, 'todo')
def depart_todo_node(self, node):
- # type: (nodes.NodeVisitor, todo_node) -> None
+ # type: (HTMLTranslator, todo_node) -> None
self.depart_admonition(node)
def latex_visit_todo_node(self, node):
- # type: (nodes.NodeVisitor, todo_node) -> None
- title = node.pop(0).astext().translate(tex_escape_map)
- self.body.append(u'\n\\begin{sphinxadmonition}{note}{')
+ # type: (LaTeXTranslator, todo_node) -> None
+ self.body.append('\n\\begin{sphinxadmonition}{note}{')
# If this is the original todo node, emit a label that will be referenced by
# a hyperref in the todolist.
target = node.get('targetref')
if target is not None:
- self.body.append(u'\\label{%s}' % target)
- self.body.append('%s:}' % title)
+ self.body.append('\\label{%s}' % target)
+
+ title_node = cast(nodes.title, node[0])
+ self.body.append('%s:}' % title_node.astext().translate(tex_escape_map))
+ node.pop(0)
def latex_depart_todo_node(self, node):
- # type: (nodes.NodeVisitor, todo_node) -> None
+ # type: (LaTeXTranslator, todo_node) -> None
self.body.append('\\end{sphinxadmonition}\n')
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_event('todo-defined')
app.add_config_value('todo_include_todos', False, 'html')
app.add_config_value('todo_link_only', False, 'html')
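
The todo.py changes above lean on typing.cast() to narrow docutils node types for the type checker, for example cast(nodes.title, node[0]). A hedged, generic sketch of that pattern (first_upper is an invented helper, not part of the extension):

    from typing import List, cast

    def first_upper(items):
        # type: (List[object]) -> str
        head = cast(str, items[0])   # no runtime check; only informs the type checker
        return head.upper()

    print(first_upper(['todo entry']))

cast() returns its argument unchanged, so the directive's behaviour is unaffected; only the static types reported to mypy become more precise.
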
diff --git a/sphinx/ext/viewcode.py b/sphinx/ext/viewcode.py
index 3ba5deb01..2997dc216 100644
--- a/sphinx/ext/viewcode.py
+++ b/sphinx/ext/viewcode.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.viewcode
~~~~~~~~~~~~~~~~~~~
@@ -13,12 +12,11 @@ import traceback
import warnings
from docutils import nodes
-from six import iteritems, text_type
import sphinx
from sphinx import addnodes
from sphinx.deprecation import RemovedInSphinx30Warning
-from sphinx.locale import _
+from sphinx.locale import _, __
from sphinx.pycode import ModuleAnalyzer
from sphinx.util import get_full_modname, logging, status_iterator
from sphinx.util.nodes import make_refnode
@@ -34,7 +32,7 @@ logger = logging.getLogger(__name__)
def _get_full_modname(app, modname, attribute):
- # type: (Sphinx, str, unicode) -> unicode
+ # type: (Sphinx, str, str) -> str
try:
return get_full_modname(modname, attribute)
except AttributeError:
@@ -75,12 +73,8 @@ def doctree_read(app, doctree):
env._viewcode_modules[modname] = False # type: ignore
return
- if not isinstance(analyzer.code, text_type):
- code = analyzer.code.decode(analyzer.encoding)
- else:
- code = analyzer.code
-
analyzer.find_tags()
+ code = analyzer.code
tags = analyzer.tags
else:
code, tags = code_tags
@@ -96,7 +90,7 @@ def doctree_read(app, doctree):
for objnode in doctree.traverse(addnodes.desc):
if objnode.get('domain') != 'py':
continue
- names = set() # type: Set[unicode]
+ names = set() # type: Set[str]
for signode in objnode:
if not isinstance(signode, addnodes.desc_signature):
continue
@@ -120,18 +114,16 @@ def doctree_read(app, doctree):
continue
names.add(fullname)
pagename = '_modules/' + modname.replace('.', '/')
+ inline = nodes.inline('', _('[source]'), classes=['viewcode-link'])
onlynode = addnodes.only(expr='html')
- onlynode += addnodes.pending_xref(
- '', reftype='viewcode', refdomain='std', refexplicit=False,
- reftarget=pagename, refid=fullname,
- refdoc=env.docname)
- onlynode[0] += nodes.inline('', _('[source]'),
- classes=['viewcode-link'])
+ onlynode += addnodes.pending_xref('', inline, reftype='viewcode', refdomain='std',
+ refexplicit=False, reftarget=pagename,
+ refid=fullname, refdoc=env.docname)
signode += onlynode
def env_merge_info(app, env, docnames, other):
- # type: (Sphinx, BuildEnvironment, Iterable[unicode], BuildEnvironment) -> None
+ # type: (Sphinx, BuildEnvironment, Iterable[str], BuildEnvironment) -> None
if not hasattr(other, '_viewcode_modules'):
return
# create a _viewcode_modules dict on the main environment
@@ -142,15 +134,17 @@ def env_merge_info(app, env, docnames, other):
def missing_reference(app, env, node, contnode):
- # type: (Sphinx, BuildEnvironment, nodes.Node, nodes.Node) -> nodes.Node
+ # type: (Sphinx, BuildEnvironment, nodes.Element, nodes.Node) -> nodes.Node
# resolve our "viewcode" reference nodes -- they need special treatment
if node['reftype'] == 'viewcode':
return make_refnode(app.builder, node['refdoc'], node['reftarget'],
node['refid'], contnode)
+ return None
+
def collect_pages(app):
- # type: (Sphinx) -> Iterator[Tuple[unicode, Dict[unicode, Any], unicode]]
+ # type: (Sphinx) -> Iterator[Tuple[str, Dict[str, Any], str]]
env = app.builder.env
if not hasattr(env, '_viewcode_modules'):
return
@@ -159,12 +153,9 @@ def collect_pages(app):
modnames = set(env._viewcode_modules) # type: ignore
-# app.builder.info(' (%d module code pages)' %
-# len(env._viewcode_modules), nonl=1)
-
for modname, entry in status_iterator(
- sorted(iteritems(env._viewcode_modules)), # type: ignore
- 'highlighting module code... ', "blue",
+ sorted(env._viewcode_modules.items()), # type: ignore
+ __('highlighting module code... '), "blue",
len(env._viewcode_modules), # type: ignore
app.verbosity, lambda x: x[0]):
if not entry:
@@ -188,7 +179,7 @@ def collect_pages(app):
# the collected tags (HACK: this only works if the tag boundaries are
# properly nested!)
maxindex = len(lines) - 1
- for name, docname in iteritems(used):
+ for name, docname in used.items():
type, start, end = tags[name]
backlink = urito(pagename, docname) + '#' + refname + '.' + name
lines[start] = (
@@ -215,7 +206,7 @@ def collect_pages(app):
'title': modname,
'body': (_('<h1>Source code for %s</h1>') % modname +
'\n'.join(lines)),
- } # type: Dict[unicode, Any]
+ }
yield (pagename, context, 'page.html')
if not modnames:
@@ -256,7 +247,7 @@ def migrate_viewcode_import(app, config):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_config_value('viewcode_import', None, False)
app.add_config_value('viewcode_enable_epub', False, False)
app.add_config_value('viewcode_follow_imported_members', True, False)
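
Finally, the viewcode hunk replaces the two-step pending_xref construction with the docutils idiom of handing child nodes straight to the element constructor. A minimal sketch of that idiom using plain docutils nodes (the refuri is a placeholder, and nodes.reference stands in for the pending_xref node used by the extension):

    from docutils import nodes

    inline = nodes.inline('', '[source]', classes=['viewcode-link'])
    # Children can be passed to the constructor instead of being appended with "+=".
    # nodes.reference is a TextElement, so the empty "text" argument comes first.
    ref = nodes.reference('', '', inline, refuri='https://example.invalid/_modules/foo')
    print(ref.astext())   # -> "[source]"
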