summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTakeshi KOMIYA <i.tkomiya@gmail.com>2018-12-16 00:32:10 +0900
committerGitHub <noreply@github.com>2018-12-16 00:32:10 +0900
commitc70e65fc6cd04d02df4f7911025f534dbd27cc20 (patch)
tree1e8614ac5516dace99ef1df4d203081662c7c2d6
parentd6d4406ce987cc8823d1b3a33be3a418bcd2a59d (diff)
parent79eec90f36f5a74e24cfd6740126396fd6567e07 (diff)
downloadsphinx-git-c70e65fc6cd04d02df4f7911025f534dbd27cc20.tar.gz
Merge branch 'master' into 5770_doctest_refers_highlight_language
-rw-r--r--AUTHORS1
-rw-r--r--CHANGES11
-rw-r--r--doc/extdev/index.rst10
-rw-r--r--doc/usage/configuration.rst2
-rw-r--r--sphinx/__init__.py1
-rw-r--r--sphinx/addnodes.py32
-rw-r--r--sphinx/application.py95
-rw-r--r--sphinx/builders/__init__.py67
-rw-r--r--sphinx/builders/_epub_base.py59
-rw-r--r--sphinx/builders/applehelp.py4
-rw-r--r--sphinx/builders/changes.py17
-rw-r--r--sphinx/builders/devhelp.py7
-rw-r--r--sphinx/builders/dummy.py11
-rw-r--r--sphinx/builders/epub3.py11
-rw-r--r--sphinx/builders/gettext.py32
-rw-r--r--sphinx/builders/html.py142
-rw-r--r--sphinx/builders/htmlhelp.py18
-rw-r--r--sphinx/builders/latex/__init__.py31
-rw-r--r--sphinx/builders/latex/transforms.py9
-rw-r--r--sphinx/builders/linkcheck.py29
-rw-r--r--sphinx/builders/manpage.py13
-rw-r--r--sphinx/builders/qthelp.py21
-rw-r--r--sphinx/builders/texinfo.py21
-rw-r--r--sphinx/builders/text.py15
-rw-r--r--sphinx/builders/websupport.py3
-rw-r--r--sphinx/builders/xml.py11
-rw-r--r--sphinx/cmd/build.py13
-rw-r--r--sphinx/cmd/make_mode.py10
-rw-r--r--sphinx/cmd/quickstart.py36
-rw-r--r--sphinx/cmdline.py5
-rw-r--r--sphinx/config.py41
-rw-r--r--sphinx/deprecation.py10
-rw-r--r--sphinx/directives/__init__.py16
-rw-r--r--sphinx/directives/code.py31
-rw-r--r--sphinx/directives/other.py5
-rw-r--r--sphinx/directives/patches.py1
-rw-r--r--sphinx/domains/__init__.py62
-rw-r--r--sphinx/domains/c.py27
-rw-r--r--sphinx/domains/changeset.py13
-rw-r--r--sphinx/domains/cpp.py678
-rw-r--r--sphinx/domains/javascript.py29
-rw-r--r--sphinx/domains/math.py19
-rw-r--r--sphinx/domains/python.py76
-rw-r--r--sphinx/domains/rst.py27
-rw-r--r--sphinx/domains/std.py90
-rw-r--r--sphinx/environment/__init__.py123
-rw-r--r--sphinx/environment/adapters/asset.py3
-rw-r--r--sphinx/environment/adapters/indexentries.py15
-rw-r--r--sphinx/environment/adapters/toctree.py15
-rw-r--r--sphinx/environment/collectors/__init__.py11
-rw-r--r--sphinx/environment/collectors/asset.py15
-rw-r--r--sphinx/environment/collectors/dependencies.py5
-rw-r--r--sphinx/environment/collectors/indexentries.py5
-rw-r--r--sphinx/environment/collectors/metadata.py5
-rw-r--r--sphinx/environment/collectors/title.py5
-rw-r--r--sphinx/environment/collectors/toctree.py31
-rw-r--r--sphinx/errors.py3
-rw-r--r--sphinx/events.py14
-rw-r--r--sphinx/ext/apidoc.py29
-rw-r--r--sphinx/ext/autodoc/__init__.py122
-rw-r--r--sphinx/ext/autodoc/directive.py5
-rw-r--r--sphinx/ext/autodoc/importer.py5
-rw-r--r--sphinx/ext/autosectionlabel.py3
-rw-r--r--sphinx/ext/autosummary/__init__.py39
-rw-r--r--sphinx/ext/autosummary/generate.py32
-rw-r--r--sphinx/ext/coverage.py25
-rw-r--r--sphinx/ext/doctest.py39
-rw-r--r--sphinx/ext/extlinks.py7
-rw-r--r--sphinx/ext/githubpages.py3
-rw-r--r--sphinx/ext/graphviz.py23
-rw-r--r--sphinx/ext/ifconfig.py5
-rw-r--r--sphinx/ext/imgconverter.py5
-rw-r--r--sphinx/ext/imgmath.py21
-rw-r--r--sphinx/ext/inheritance_diagram.py29
-rw-r--r--sphinx/ext/intersphinx.py29
-rw-r--r--sphinx/ext/jsmath.py3
-rw-r--r--sphinx/ext/linkcode.py5
-rw-r--r--sphinx/ext/mathbase.py5
-rw-r--r--sphinx/ext/mathjax.py3
-rw-r--r--sphinx/ext/napoleon/__init__.py7
-rw-r--r--sphinx/ext/napoleon/docstring.py155
-rw-r--r--sphinx/ext/todo.py9
-rw-r--r--sphinx/ext/viewcode.py13
-rw-r--r--sphinx/extension.py5
-rw-r--r--sphinx/highlighting.py13
-rw-r--r--sphinx/io.py13
-rw-r--r--sphinx/jinja2glue.py21
-rw-r--r--sphinx/locale/__init__.py56
-rw-r--r--sphinx/parsers.py3
-rw-r--r--sphinx/project.py13
-rw-r--r--sphinx/pycode/__init__.py26
-rw-r--r--sphinx/pycode/parser.py51
-rw-r--r--sphinx/registry.py93
-rw-r--r--sphinx/roles.py30
-rw-r--r--sphinx/search/__init__.py73
-rw-r--r--sphinx/search/da.py3
-rw-r--r--sphinx/search/de.py3
-rw-r--r--sphinx/search/en.py3
-rw-r--r--sphinx/search/es.py3
-rw-r--r--sphinx/search/fi.py3
-rw-r--r--sphinx/search/fr.py3
-rw-r--r--sphinx/search/hu.py3
-rw-r--r--sphinx/search/it.py3
-rw-r--r--sphinx/search/ja.py19
-rw-r--r--sphinx/search/nl.py3
-rw-r--r--sphinx/search/no.py3
-rw-r--r--sphinx/search/pt.py3
-rw-r--r--sphinx/search/ro.py5
-rw-r--r--sphinx/search/ru.py3
-rw-r--r--sphinx/search/sv.py3
-rw-r--r--sphinx/search/tr.py5
-rw-r--r--sphinx/search/zh.py9
-rw-r--r--sphinx/setup_command.py17
-rw-r--r--sphinx/testing/fixtures.py3
-rw-r--r--sphinx/testing/path.py15
-rw-r--r--sphinx/testing/util.py19
-rw-r--r--sphinx/theming.py25
-rw-r--r--sphinx/transforms/__init__.py3
-rw-r--r--sphinx/transforms/i18n.py15
-rw-r--r--sphinx/transforms/post_transforms/__init__.py9
-rw-r--r--sphinx/transforms/post_transforms/code.py5
-rw-r--r--sphinx/transforms/post_transforms/compat.py3
-rw-r--r--sphinx/transforms/post_transforms/images.py15
-rw-r--r--sphinx/util/__init__.py88
-rw-r--r--sphinx/util/compat.py3
-rw-r--r--sphinx/util/console.py5
-rw-r--r--sphinx/util/docfields.py44
-rw-r--r--sphinx/util/docstrings.py5
-rw-r--r--sphinx/util/docutils.py34
-rw-r--r--sphinx/util/fileutil.py5
-rw-r--r--sphinx/util/i18n.py25
-rw-r--r--sphinx/util/images.py15
-rw-r--r--sphinx/util/inspect.py32
-rw-r--r--sphinx/util/inventory.py17
-rw-r--r--sphinx/util/jsdump.py7
-rw-r--r--sphinx/util/jsonimpl.py7
-rw-r--r--sphinx/util/logging.py29
-rw-r--r--sphinx/util/matching.py19
-rw-r--r--sphinx/util/math.py7
-rw-r--r--sphinx/util/nodes.py23
-rw-r--r--sphinx/util/osutil.py44
-rw-r--r--sphinx/util/parallel.py3
-rw-r--r--sphinx/util/png.py8
-rw-r--r--sphinx/util/pycompat.py7
-rw-r--r--sphinx/util/requests.py21
-rw-r--r--sphinx/util/rst.py9
-rw-r--r--sphinx/util/smartypants.py5
-rw-r--r--sphinx/util/stemmer/__init__.py10
-rw-r--r--sphinx/util/stemmer/porter.py15
-rw-r--r--sphinx/util/tags.py13
-rw-r--r--sphinx/util/template.py13
-rw-r--r--sphinx/util/texescape.py5
-rw-r--r--sphinx/util/typing.py9
-rw-r--r--sphinx/versioning.py9
-rw-r--r--sphinx/writers/html.py21
-rw-r--r--sphinx/writers/html5.py21
-rw-r--r--sphinx/writers/latex.py124
-rw-r--r--sphinx/writers/manpage.py5
-rw-r--r--sphinx/writers/texinfo.py80
-rw-r--r--sphinx/writers/text.py46
-rw-r--r--sphinx/writers/xml.py5
-rw-r--r--tests/roots/test-ext-autodoc/target/__init__.py5
-rw-r--r--tests/roots/test-root/autodoc_target.py5
-rw-r--r--tests/test_autodoc.py14
-rw-r--r--tests/test_build_gettext.py1
-rw-r--r--tests/test_build_latex.py1
-rw-r--r--tests/test_build_linkcheck.py1
-rw-r--r--tests/test_build_manpage.py1
-rw-r--r--tests/test_build_texinfo.py1
-rw-r--r--tests/test_ext_apidoc.py2
-rw-r--r--tests/test_ext_autosummary.py3
-rw-r--r--tests/test_intl.py1
-rw-r--r--tests/test_io.py3
-rw-r--r--tests/test_quickstart.py3
-rw-r--r--tests/test_util.py7
-rw-r--r--tests/test_util_i18n.py1
-rw-r--r--tests/test_util_images.py1
-rw-r--r--tests/test_util_logging.py1
-rw-r--r--tests/test_writer_latex.py1
179 files changed, 2014 insertions, 2212 deletions
diff --git a/AUTHORS b/AUTHORS
index d773b8f17..aef4410be 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -81,6 +81,7 @@ Other contributors, listed alphabetically, are:
* Hong Xu -- svg support in imgmath extension and various bug fixes
* Stephen Finucane -- setup command improvements and documentation
* Daniel Pizetta -- inheritance diagram improvements
+* KINEBUCHI Tomohiko -- typing Sphinx as well as docutils
Many thanks for all contributions!
diff --git a/CHANGES b/CHANGES
index 664b6e38f..851ac61ad 100644
--- a/CHANGES
+++ b/CHANGES
@@ -26,11 +26,14 @@ Incompatible changes
* LaTeX: Move message resources to ``sphinxmessage.sty``
* LaTeX: Stop using ``\captions<lang>`` macro for some labels
* LaTeX: for ``'xelatex'`` and ``'lualatex'``, use the ``FreeFont`` OpenType
- fonts as default choice (refs #5645)
+ fonts as default choice (refs: #5645)
* LaTeX: Greek letters in text are not escaped to math mode mark-up, and they
will use the text font not the math font. The ``LGR`` font encoding must be
added to the ``'fontenc'`` key of :confval:`latex_elements` for this to work
(only if it is needed by the document, of course).
+* LaTeX: setting the :confval:`language` to ``'en'`` triggered ``Sonny`` option
+ of ``fncychap``, now it is ``Bjarne`` to match case of no language specified.
+ (refs: #5772)
* #5770: doctest: Follow :confval:`highlight_language` on highlighting doctest
block. As a result, they are highlighted as python3 by default.
@@ -48,6 +51,7 @@ Deprecated
is_meta_keywords()``
* The ``suffix`` argument of ``env.doc2path()`` is deprecated.
* The string style ``base`` argument of ``env.doc2path()`` is deprecated.
+* ``sphinx.addnodes.abbreviation``
* ``sphinx.application.Sphinx._setting_up_extension``
* ``sphinx.config.check_unicode()``
* ``sphinx.ext.autodoc.importer._MockImporter``
@@ -60,7 +64,8 @@ Deprecated
* ``sphinx.testing.util.remove_unicode_literal()``
* ``sphinx.util.attrdict``
* ``sphinx.util.force_decode()``
-* ``sphinx.util.get_matching_docs()`` is deprecated
+* ``sphinx.util.get_matching_docs()``
+* ``sphinx.util.inspect.Parameter``
* ``sphinx.util.osutil.walk()``
* ``sphinx.util.PeekableIterator``
* ``sphinx.util.pycompat.u``
@@ -103,6 +108,8 @@ Bugs fixed
language and ``'xelatex'`` or ``'lualatex'`` as :confval:`latex_engine`
(refs: #5251)
* #5248: LaTeX: Greek letters in section titles disappear from PDF bookmarks
+* #5772: LaTeX: should the Bjarne style of fncychap be used for English also
+ if passed as language option?
Testing
--------
diff --git a/doc/extdev/index.rst b/doc/extdev/index.rst
index f15f92790..003666217 100644
--- a/doc/extdev/index.rst
+++ b/doc/extdev/index.rst
@@ -147,6 +147,11 @@ The following is a list of deprecated interfaces.
- 4.0
- ``os.path.join()``
+ * - ``sphinx.addnodes.abbreviation``
+ - 2.0
+ - 4.0
+ - ``docutils.nodes.abbreviation``
+
* - ``sphinx.config.check_unicode()``
- 2.0
- 4.0
@@ -197,6 +202,11 @@ The following is a list of deprecated interfaces.
- 4.0
- ``sphinx.util.get_matching_files()``
+ * - ``sphinx.util.inspect.Parameter``
+ - 2.0
+ - 3.0
+ - N/A
+
* - ``sphinx.util.osutil.walk()``
- 2.0
- 4.0
diff --git a/doc/usage/configuration.rst b/doc/usage/configuration.rst
index e175c6f44..b20219fcd 100644
--- a/doc/usage/configuration.rst
+++ b/doc/usage/configuration.rst
@@ -2094,6 +2094,8 @@ information.
"fncychap" styles you can try are "Lenny", "Glenn", "Conny", "Rejne" and
"Bjornstrup". You can also set this to ``''`` to disable fncychap.
+ The default is ``''`` for Japanese documents.
+
``'preamble'``
Additional preamble content, default empty. See :doc:`/latex`.
diff --git a/sphinx/__init__.py b/sphinx/__init__.py
index eb05bcd83..ac5a91a97 100644
--- a/sphinx/__init__.py
+++ b/sphinx/__init__.py
@@ -22,7 +22,6 @@ from .deprecation import RemovedInNextVersionWarning
if False:
# For type annotation
- # note: Don't use typing.TYPE_CHECK here (for py27 and py34).
from typing import Any # NOQA
diff --git a/sphinx/addnodes.py b/sphinx/addnodes.py
index d907f617a..7c8f2c9d0 100644
--- a/sphinx/addnodes.py
+++ b/sphinx/addnodes.py
@@ -13,13 +13,12 @@ import warnings
from docutils import nodes
-from sphinx.deprecation import RemovedInSphinx30Warning
+from sphinx.deprecation import RemovedInSphinx30Warning, RemovedInSphinx40Warning
if False:
# For type annotation
from typing import Any, Dict, List, Sequence # NOQA
from sphinx.application import Sphinx # NOQA
- from sphinx.util.typing import unicode # NOQA
class translatable(nodes.Node):
@@ -42,12 +41,12 @@ class translatable(nodes.Node):
raise NotImplementedError
def apply_translated_message(self, original_message, translated_message):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Apply translated message."""
raise NotImplementedError
def extract_original_messages(self):
- # type: () -> Sequence[unicode]
+ # type: () -> Sequence[str]
"""Extract translation messages.
:returns: list of extracted messages or messages generator
@@ -69,12 +68,12 @@ class toctree(nodes.General, nodes.Element, translatable):
self['rawcaption'] = self['caption']
def apply_translated_message(self, original_message, translated_message):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
if self.get('rawcaption') == original_message:
self['caption'] = translated_message
def extract_original_messages(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
if 'rawcaption' in self:
return [self['rawcaption']]
else:
@@ -128,7 +127,7 @@ class desc_type(nodes.Part, nodes.Inline, nodes.FixedTextElement):
class desc_returns(desc_type):
"""Node for a "returns" annotation (a la -> in Python)."""
def astext(self):
- # type: () -> unicode
+ # type: () -> str
return ' -> ' + super(desc_returns, self).astext()
@@ -150,7 +149,7 @@ class desc_optional(nodes.Part, nodes.Inline, nodes.FixedTextElement):
child_text_separator = ', '
def astext(self):
- # type: () -> unicode
+ # type: () -> str
return '[' + super(desc_optional, self).astext() + ']'
@@ -344,8 +343,18 @@ class literal_strong(nodes.strong, not_smartquotable):
"""
-class abbreviation(nodes.Inline, nodes.TextElement):
- """Node for abbreviations with explanations."""
+class abbreviation(nodes.abbreviation):
+ """Node for abbreviations with explanations.
+
+ .. deprecated:: 2.0
+ """
+
+ def __init__(self, rawsource='', text='', *children, **attributes):
+ # type: (str, str, *nodes.Node, **Any) -> None
+ warnings.warn("abbrevition node for Sphinx was replaced by docutils'.",
+ RemovedInSphinx40Warning, stacklevel=2)
+
+ super(abbreviation, self).__init__(rawsource, text, *children, **attributes)
class manpage(nodes.Inline, nodes.FixedTextElement):
@@ -353,7 +362,7 @@ class manpage(nodes.Inline, nodes.FixedTextElement):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_node(toctree)
app.add_node(desc)
app.add_node(desc_signature)
@@ -389,7 +398,6 @@ def setup(app):
app.add_node(download_reference)
app.add_node(literal_emphasis)
app.add_node(literal_strong)
- app.add_node(abbreviation, override=True)
app.add_node(manpage)
return {
diff --git a/sphinx/application.py b/sphinx/application.py
index 984b2b6af..5f06bfc70 100644
--- a/sphinx/application.py
+++ b/sphinx/application.py
@@ -10,7 +10,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import os
import pickle
@@ -60,7 +59,7 @@ if False:
from sphinx.extension import Extension # NOQA
from sphinx.roles import XRefRole # NOQA
from sphinx.theming import Theme # NOQA
- from sphinx.util.typing import RoleFunction, TitleGetter, unicode # NOQA
+ from sphinx.util.typing import RoleFunction, TitleGetter # NOQA
builtin_extensions = (
'sphinx.addnodes',
@@ -113,7 +112,7 @@ builtin_extensions = (
# Strictly, alabaster theme is not a builtin extension,
# but it is loaded automatically to use it as default theme.
'alabaster',
-) # type: Tuple[unicode, ...]
+)
ENV_PICKLE_FILENAME = 'environment.pickle'
@@ -133,20 +132,20 @@ class Sphinx:
confoverrides=None, status=sys.stdout, warning=sys.stderr,
freshenv=False, warningiserror=False, tags=None, verbosity=0,
parallel=0, keep_going=False):
- # type: (unicode, unicode, unicode, unicode, unicode, Dict, IO, IO, bool, bool, List[unicode], int, int, bool) -> None # NOQA
+ # type: (str, str, str, str, str, Dict, IO, IO, bool, bool, List[str], int, int, bool) -> None # NOQA
self.phase = BuildPhase.INITIALIZATION
self.verbosity = verbosity
- self.extensions = {} # type: Dict[unicode, Extension]
+ self.extensions = {} # type: Dict[str, Extension]
self.builder = None # type: Builder
self.env = None # type: BuildEnvironment
self.project = None # type: Project
self.registry = SphinxComponentRegistry()
- self.html_themes = {} # type: Dict[unicode, unicode]
+ self.html_themes = {} # type: Dict[str, str]
# validate provided directories
- self.srcdir = abspath(srcdir) # type: unicode
- self.outdir = abspath(outdir) # type: unicode
- self.doctreedir = abspath(doctreedir) # type: unicode
+ self.srcdir = abspath(srcdir)
+ self.outdir = abspath(outdir)
+ self.doctreedir = abspath(doctreedir)
self.confdir = confdir
if self.confdir: # confdir is optional
self.confdir = abspath(self.confdir)
@@ -306,11 +305,11 @@ class Sphinx:
self._init_env(freshenv=True)
def preload_builder(self, name):
- # type: (unicode) -> None
+ # type: (str) -> None
self.registry.preload_builder(self, name)
def create_builder(self, name):
- # type: (unicode) -> Builder
+ # type: (str) -> Builder
if name is None:
logger.info(__('No builder selected, using default: html'))
name = 'html'
@@ -326,7 +325,7 @@ class Sphinx:
# ---- main "build" method -------------------------------------------------
def build(self, force_all=False, filenames=None):
- # type: (bool, List[unicode]) -> None
+ # type: (bool, List[str]) -> None
self.phase = BuildPhase.READING
try:
if force_all:
@@ -371,7 +370,7 @@ class Sphinx:
# ---- general extensibility interface -------------------------------------
def setup_extension(self, extname):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Import and setup a Sphinx extension module.
Load the extension given by the module *name*. Use this if your
@@ -382,7 +381,7 @@ class Sphinx:
self.registry.load_extension(self, extname)
def require_sphinx(self, version):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Check the Sphinx version if requested.
Compare *version* (which must be a ``major.minor`` version string, e.g.
@@ -395,7 +394,7 @@ class Sphinx:
raise VersionRequirementError(version)
def import_object(self, objname, source=None):
- # type: (str, unicode) -> Any
+ # type: (str, str) -> Any
"""Import an object from a ``module.name`` string.
.. deprecated:: 1.8
@@ -408,7 +407,7 @@ class Sphinx:
# event interface
def connect(self, event, callback):
- # type: (unicode, Callable) -> int
+ # type: (str, Callable) -> int
"""Register *callback* to be called when *event* is emitted.
For details on available core events and the arguments of callback
@@ -428,7 +427,7 @@ class Sphinx:
self.events.disconnect(listener_id)
def emit(self, event, *args):
- # type: (unicode, Any) -> List
+ # type: (str, Any) -> List
"""Emit *event* and pass *arguments* to the callback functions.
Return the return values of all callbacks as a list. Do not emit core
@@ -443,7 +442,7 @@ class Sphinx:
return self.events.emit(event, self, *args)
def emit_firstresult(self, event, *args):
- # type: (unicode, Any) -> Any
+ # type: (str, Any) -> Any
"""Emit *event* and pass *arguments* to the callback functions.
Return the result of the first callback that doesn't return ``None``.
@@ -468,7 +467,7 @@ class Sphinx:
# TODO(stephenfin): Describe 'types' parameter
def add_config_value(self, name, default, rebuild, types=()):
- # type: (unicode, Any, Union[bool, unicode], Any) -> None
+ # type: (str, Any, Union[bool, str], Any) -> None
"""Register a configuration value.
This is necessary for Sphinx to recognize new values and set default
@@ -501,7 +500,7 @@ class Sphinx:
self.config.add(name, default, rebuild, types)
def add_event(self, name):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Register an event called *name*.
This is needed to be able to emit it.
@@ -510,7 +509,7 @@ class Sphinx:
self.events.add(name)
def set_translator(self, name, translator_class, override=False):
- # type: (unicode, Type[nodes.NodeVisitor], bool) -> None
+ # type: (str, Type[nodes.NodeVisitor], bool) -> None
"""Register or override a Docutils translator class.
This is used to register a custom output translator or to replace a
@@ -563,7 +562,7 @@ class Sphinx:
self.registry.add_translation_handlers(node, **kwds)
def add_enumerable_node(self, node, figtype, title_getter=None, override=False, **kwds):
- # type: (Type[nodes.Element], unicode, TitleGetter, bool, Any) -> None
+ # type: (Type[nodes.Element], str, TitleGetter, bool, Any) -> None
"""Register a Docutils node class as a numfig target.
Sphinx numbers the node automatically. And then the users can refer it
@@ -592,14 +591,14 @@ class Sphinx:
@property
def enumerable_nodes(self):
- # type: () -> Dict[Type[nodes.Node], Tuple[unicode, TitleGetter]]
+ # type: () -> Dict[Type[nodes.Node], Tuple[str, TitleGetter]]
warnings.warn('app.enumerable_nodes() is deprecated. '
'Use app.get_domain("std").enumerable_nodes instead.',
RemovedInSphinx30Warning, stacklevel=2)
return self.registry.enumerable_nodes
def add_directive(self, name, obj, content=None, arguments=None, override=False, **options): # NOQA
- # type: (unicode, Any, bool, Tuple[int, int, bool], bool, Any) -> None
+ # type: (str, Any, bool, Tuple[int, int, bool], bool, Any) -> None
"""Register a Docutils directive.
*name* must be the prospective directive name. There are two possible
@@ -663,7 +662,7 @@ class Sphinx:
docutils.register_directive(name, obj)
def add_role(self, name, role, override=False):
- # type: (unicode, Any, bool) -> None
+ # type: (str, Any, bool) -> None
"""Register a Docutils role.
*name* must be the role name that occurs in the source, *role* the role
@@ -681,7 +680,7 @@ class Sphinx:
docutils.register_role(name, role)
def add_generic_role(self, name, nodeclass, override=False):
- # type: (unicode, Any, bool) -> None
+ # type: (str, Any, bool) -> None
"""Register a generic Docutils role.
Register a Docutils role that does nothing but wrap its contents in the
@@ -732,7 +731,7 @@ class Sphinx:
def add_directive_to_domain(self, domain, name, obj, has_content=None, argument_spec=None,
override=False, **option_spec):
- # type: (unicode, unicode, Any, bool, Any, bool, Any) -> None
+ # type: (str, str, Any, bool, Any, bool, Any) -> None
"""Register a Docutils directive in a domain.
Like :meth:`add_directive`, but the directive is added to the domain
@@ -747,7 +746,7 @@ class Sphinx:
**option_spec)
def add_role_to_domain(self, domain, name, role, override=False):
- # type: (unicode, unicode, Union[RoleFunction, XRefRole], bool) -> None
+ # type: (str, str, Union[RoleFunction, XRefRole], bool) -> None
"""Register a Docutils role in a domain.
Like :meth:`add_role`, but the role is added to the domain named
@@ -760,7 +759,7 @@ class Sphinx:
self.registry.add_role_to_domain(domain, name, role, override=override)
def add_index_to_domain(self, domain, index, override=False):
- # type: (unicode, Type[Index], bool) -> None
+ # type: (str, Type[Index], bool) -> None
"""Register a custom index for a domain.
Add a custom *index* class to the domain named *domain*. *index* must
@@ -775,7 +774,7 @@ class Sphinx:
def add_object_type(self, directivename, rolename, indextemplate='',
parse_node=None, ref_nodeclass=None, objname='',
doc_field_types=[], override=False):
- # type: (unicode, unicode, unicode, Callable, Type[nodes.TextElement], unicode, List, bool) -> None # NOQA
+ # type: (str, str, str, Callable, Type[nodes.TextElement], str, List, bool) -> None
"""Register a new object type.
This method is a very convenient way to add a new :term:`object` type
@@ -841,7 +840,7 @@ class Sphinx:
def add_crossref_type(self, directivename, rolename, indextemplate='',
ref_nodeclass=None, objname='', override=False):
- # type: (unicode, unicode, unicode, Type[nodes.TextElement], unicode, bool) -> None
+ # type: (str, str, str, Type[nodes.TextElement], str, bool) -> None
"""Register a new crossref object type.
This method is very similar to :meth:`add_object_type` except that the
@@ -920,7 +919,7 @@ class Sphinx:
self.registry.add_post_transform(transform)
def add_javascript(self, filename, **kwargs):
- # type: (unicode, **unicode) -> None
+ # type: (str, **str) -> None
"""An alias of :meth:`add_js_file`."""
warnings.warn('The app.add_javascript() is deprecated. '
'Please use app.add_js_file() instead.',
@@ -928,7 +927,7 @@ class Sphinx:
self.add_js_file(filename, **kwargs)
def add_js_file(self, filename, **kwargs):
- # type: (unicode, **unicode) -> None
+ # type: (str, **str) -> None
"""Register a JavaScript file to include in the HTML output.
Add *filename* to the list of JavaScript files that the default HTML
@@ -955,7 +954,7 @@ class Sphinx:
self.builder.add_js_file(filename, **kwargs) # type: ignore
def add_css_file(self, filename, **kwargs):
- # type: (unicode, **unicode) -> None
+ # type: (str, **str) -> None
"""Register a stylesheet to include in the HTML output.
Add *filename* to the list of CSS files that the default HTML template
@@ -995,13 +994,13 @@ class Sphinx:
self.builder.add_css_file(filename, **kwargs) # type: ignore
def add_stylesheet(self, filename, alternate=False, title=None):
- # type: (unicode, bool, unicode) -> None
+ # type: (str, bool, str) -> None
"""An alias of :meth:`add_css_file`."""
warnings.warn('The app.add_stylesheet() is deprecated. '
'Please use app.add_css_file() instead.',
RemovedInSphinx40Warning, stacklevel=2)
- attributes = {} # type: Dict[unicode, unicode]
+ attributes = {} # type: Dict[str, str]
if alternate:
attributes['rel'] = 'alternate stylesheet'
else:
@@ -1013,7 +1012,7 @@ class Sphinx:
self.add_css_file(filename, **attributes)
def add_latex_package(self, packagename, options=None):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
r"""Register a package to include in the LaTeX source code.
Add *packagename* to the list of packages that LaTeX source code will
@@ -1032,7 +1031,7 @@ class Sphinx:
self.registry.add_latex_package(packagename, options)
def add_lexer(self, alias, lexer):
- # type: (unicode, Any) -> None
+ # type: (str, Any) -> None
"""Register a new lexer for source code.
Use *lexer*, which must be an instance of a Pygments lexer class, to
@@ -1066,7 +1065,7 @@ class Sphinx:
self.add_directive('auto' + cls.objtype, AutodocDirective)
def add_autodoc_attrgetter(self, typ, getter):
- # type: (Type, Callable[[Any, unicode, Any], Any]) -> None
+ # type: (Type, Callable[[Any, str, Any], Any]) -> None
"""Register a new ``getattr``-like function for the autodoc extension.
Add *getter*, which must be a function with an interface compatible to
@@ -1098,7 +1097,7 @@ class Sphinx:
languages[cls.lang] = cls
def add_source_suffix(self, suffix, filetype, override=False):
- # type: (unicode, unicode, bool) -> None
+ # type: (str, str, bool) -> None
"""Register a suffix of source files.
Same as :confval:`source_suffix`. The users can override this
@@ -1133,7 +1132,7 @@ class Sphinx:
collector().enable(self)
def add_html_theme(self, name, theme_path):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Register a HTML Theme.
The *name* is a name of theme, and *path* is a full path to the theme
@@ -1145,7 +1144,7 @@ class Sphinx:
self.html_themes[name] = theme_path
def add_html_math_renderer(self, name, inline_renderers=None, block_renderers=None):
- # type: (unicode, Tuple[Callable, Callable], Tuple[Callable, Callable]) -> None
+ # type: (str, Tuple[Callable, Callable], Tuple[Callable, Callable]) -> None
"""Register a math renderer for HTML.
The *name* is a name of the math renderer. Both *inline_renderers* and
@@ -1159,7 +1158,7 @@ class Sphinx:
self.registry.add_html_math_renderer(name, inline_renderers, block_renderers)
def add_message_catalog(self, catalog, locale_dir):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Register a message catalog.
The *catalog* is a name of catalog, and *locale_dir* is a base path
@@ -1173,7 +1172,7 @@ class Sphinx:
# ---- other methods -------------------------------------------------
def is_parallel_allowed(self, typ):
- # type: (unicode) -> bool
+ # type: (str) -> bool
"""Check parallel processing is allowed or not.
``typ`` is a type of processing; ``'read'`` or ``'write'``.
@@ -1206,7 +1205,7 @@ class Sphinx:
@property
def _setting_up_extension(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
warnings.warn('app._setting_up_extension is deprecated.',
RemovedInSphinx30Warning)
return ['?']
@@ -1219,7 +1218,7 @@ class TemplateBridge:
"""
def init(self, builder, theme=None, dirs=None):
- # type: (Builder, Theme, List[unicode]) -> None
+ # type: (Builder, Theme, List[str]) -> None
"""Called by the builder to initialize the template system.
*builder* is the builder object; you'll probably want to look at the
@@ -1239,14 +1238,14 @@ class TemplateBridge:
return 0
def render(self, template, context):
- # type: (unicode, Dict) -> None
+ # type: (str, Dict) -> None
"""Called by the builder to render a template given as a filename with
a specified context (a Python dictionary).
"""
raise NotImplementedError('must be implemented in subclasses')
def render_string(self, template, context):
- # type: (unicode, Dict) -> unicode
+ # type: (str, Dict) -> str
"""Called by the builder to render a template given as a string with a
specified context (a Python dictionary).
"""
diff --git a/sphinx/builders/__init__.py b/sphinx/builders/__init__.py
index 94ec3b4ee..40944fcf1 100644
--- a/sphinx/builders/__init__.py
+++ b/sphinx/builders/__init__.py
@@ -47,7 +47,6 @@ if False:
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.i18n import CatalogInfo # NOQA
from sphinx.util.tags import Tags # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -59,19 +58,19 @@ class Builder:
"""
#: The builder's name, for the -b command line option.
- name = '' # type: unicode
+ name = ''
#: The builder's output format, or '' if no document output is produced.
- format = '' # type: unicode
+ format = ''
#: The message emitted upon successful build completion. This can be a
#: printf-style template string with the following keys: ``outdir``,
#: ``project``
- epilog = '' # type: unicode
+ epilog = ''
#: default translator class for the builder. This can be overrided by
#: :py:meth:`app.set_translator()`.
default_translator_class = None # type: Type[nodes.NodeVisitor]
# doctree versioning method
- versioning_method = 'none' # type: unicode
+ versioning_method = 'none'
versioning_compare = False
# allow parallel write_doc() calls
allow_parallel = False
@@ -80,7 +79,7 @@ class Builder:
#: The list of MIME types of image formats supported by the builder.
#: Image files are searched in the order in which they appear here.
- supported_image_types = [] # type: List[unicode]
+ supported_image_types = [] # type: List[str]
#: The builder supports remote images or not.
supported_remote_images = False
#: The builder supports data URIs or not.
@@ -104,11 +103,11 @@ class Builder:
self.tags.add("builder_%s" % self.name)
# images that need to be copied over (source -> dest)
- self.images = {} # type: Dict[unicode, unicode]
+ self.images = {} # type: Dict[str, str]
# basename of images directory
self.imagedir = ""
# relative path to image directory from current docname (used at writing docs)
- self.imgpath = "" # type: unicode
+ self.imgpath = ""
# these get set later
self.parallel_ok = False
@@ -154,7 +153,7 @@ class Builder:
self.templates = BuiltinTemplateLoader()
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
"""Return the target URI for a document name.
*typ* can be used to qualify the link characteristic for individual
@@ -163,7 +162,7 @@ class Builder:
raise NotImplementedError
def get_relative_uri(self, from_, to, typ=None):
- # type: (unicode, unicode, unicode) -> unicode
+ # type: (str, str, str) -> str
"""Return a relative URI between two source filenames.
May raise environment.NoUri if there's no way to return a sensible URI.
@@ -172,7 +171,7 @@ class Builder:
self.get_target_uri(to, typ))
def get_outdated_docs(self):
- # type: () -> Union[unicode, Iterable[unicode]]
+ # type: () -> Union[str, Iterable[str]]
"""Return an iterable of output files that are outdated, or a string
describing what an update build will build.
@@ -183,7 +182,7 @@ class Builder:
raise NotImplementedError
def get_asset_paths(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
"""Return list of paths for assets (ex. templates, CSS, etc.)."""
return []
@@ -222,12 +221,12 @@ class Builder:
# compile po methods
def compile_catalogs(self, catalogs, message):
- # type: (Set[CatalogInfo], unicode) -> None
+ # type: (Set[CatalogInfo], str) -> None
if not self.config.gettext_auto_build:
return
def cat2relpath(cat):
- # type: (CatalogInfo) -> unicode
+ # type: (CatalogInfo) -> str
return relpath(cat.mo_path, self.env.srcdir).replace(path.sep, SEP)
logger.info(bold(__('building [mo]: ')) + message)
@@ -248,9 +247,9 @@ class Builder:
self.compile_catalogs(catalogs, message)
def compile_specific_catalogs(self, specified_files):
- # type: (List[unicode]) -> None
+ # type: (List[str]) -> None
def to_domain(fpath):
- # type: (unicode) -> unicode
+ # type: (str) -> str
docname = self.env.path2doc(path.abspath(fpath))
if docname:
return find_catalog(docname, self.config.gettext_compact)
@@ -286,13 +285,13 @@ class Builder:
self.build(None, summary=__('all source files'), method='all')
def build_specific(self, filenames):
- # type: (List[unicode]) -> None
+ # type: (List[str]) -> None
"""Only rebuild as much as needed for changes in the *filenames*."""
# bring the filenames to the canonical format, that is,
# relative to the source directory and without source_suffix.
dirlen = len(self.srcdir) + 1
to_write = []
- suffixes = None # type: Tuple[unicode]
+ suffixes = None # type: Tuple[str]
suffixes = tuple(self.config.source_suffix) # type: ignore
for filename in filenames:
filename = path.normpath(path.abspath(filename))
@@ -328,7 +327,7 @@ class Builder:
len(to_build))
def build(self, docnames, summary=None, method='update'):
- # type: (Iterable[unicode], unicode, unicode) -> None
+ # type: (Iterable[str], str, str) -> None
"""Main build method.
First updates the environment, and then calls :meth:`write`.
@@ -399,7 +398,7 @@ class Builder:
self.finish_tasks.join()
def read(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
"""(Re-)read all files new or changed since last update.
Store all environment docnames in the canonical format (ie using SEP as
@@ -462,7 +461,7 @@ class Builder:
return sorted(docnames)
def _read_serial(self, docnames):
- # type: (List[unicode]) -> None
+ # type: (List[str]) -> None
for docname in status_iterator(docnames, 'reading sources... ', "purple",
len(docnames), self.app.verbosity):
# remove all inventory entries for that file
@@ -471,14 +470,14 @@ class Builder:
self.read_doc(docname)
def _read_parallel(self, docnames, nproc):
- # type: (List[unicode], int) -> None
+ # type: (List[str], int) -> None
# clear all outdated docs at once
for docname in docnames:
self.app.emit('env-purge-doc', self.env, docname)
self.env.clear_doc(docname)
def read_process(docs):
- # type: (List[unicode]) -> bytes
+ # type: (List[str]) -> bytes
self.env.app = self.app
for docname in docs:
self.read_doc(docname)
@@ -486,7 +485,7 @@ class Builder:
return pickle.dumps(self.env, pickle.HIGHEST_PROTOCOL)
def merge(docs, otherenv):
- # type: (List[unicode], bytes) -> None
+ # type: (List[str], bytes) -> None
env = pickle.loads(otherenv)
self.env.merge_info_from(docs, env, self.app)
@@ -502,7 +501,7 @@ class Builder:
tasks.join()
def read_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Parse a file and add/update inventory entries for the doctree."""
self.env.prepare_settings(docname)
@@ -528,7 +527,7 @@ class Builder:
self.write_doctree(docname, doctree)
def write_doctree(self, docname, doctree):
- # type: (unicode, nodes.document) -> None
+ # type: (str, nodes.document) -> None
"""Write the doctree to a file."""
# make it picklable
doctree.reporter = None
@@ -543,7 +542,7 @@ class Builder:
pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
def write(self, build_docnames, updated_docnames, method='update'):
- # type: (Iterable[unicode], Sequence[unicode], unicode) -> None
+ # type: (Iterable[str], Sequence[str], str) -> None
if build_docnames is None or build_docnames == ['__all__']:
# build_all
build_docnames = self.env.found_docs
@@ -574,7 +573,7 @@ class Builder:
self._write_serial(sorted(docnames))
def _write_serial(self, docnames):
- # type: (Sequence[unicode]) -> None
+ # type: (Sequence[str]) -> None
with logging.pending_warnings():
for docname in status_iterator(docnames, __('writing output... '), "darkgreen",
len(docnames), self.app.verbosity):
@@ -585,9 +584,9 @@ class Builder:
self.write_doc(docname, doctree)
def _write_parallel(self, docnames, nproc):
- # type: (Sequence[unicode], int) -> None
+ # type: (Sequence[str], int) -> None
def write_process(docs):
- # type: (List[Tuple[unicode, nodes.document]]) -> None
+ # type: (List[Tuple[str, nodes.document]]) -> None
self.app.phase = BuildPhase.WRITING
for docname, doctree in docs:
self.write_doc(docname, doctree)
@@ -618,17 +617,17 @@ class Builder:
tasks.join()
def prepare_writing(self, docnames):
- # type: (Set[unicode]) -> None
+ # type: (Set[str]) -> None
"""A place where you can add logic before :meth:`write_doc` is run"""
raise NotImplementedError
def write_doc(self, docname, doctree):
- # type: (unicode, nodes.document) -> None
+ # type: (str, nodes.document) -> None
"""Where you actually write something to the filesystem."""
raise NotImplementedError
def write_doc_serialized(self, docname, doctree):
- # type: (unicode, nodes.document) -> None
+ # type: (str, nodes.document) -> None
"""Handle parts of write_doc that must be called in the main process
if parallel build is active.
"""
@@ -651,7 +650,7 @@ class Builder:
pass
def get_builder_config(self, option, default):
- # type: (unicode, unicode) -> Any
+ # type: (str, str) -> Any
"""Return a builder specific option.
This method allows customization of common builder settings by
diff --git a/sphinx/builders/_epub_base.py b/sphinx/builders/_epub_base.py
index 80277431b..4e1794179 100644
--- a/sphinx/builders/_epub_base.py
+++ b/sphinx/builders/_epub_base.py
@@ -39,7 +39,6 @@ if False:
# For type annotation
from typing import Any, Dict, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -80,7 +79,7 @@ MEDIA_TYPES = {
'.otf': 'application/x-font-otf',
'.ttf': 'application/x-font-ttf',
'.woff': 'application/font-woff',
-} # type: Dict[unicode, unicode]
+}
VECTOR_GRAPHICS_EXTENSIONS = ('.svg',)
@@ -97,7 +96,7 @@ NavPoint = namedtuple('NavPoint', ['navpoint', 'playorder', 'text', 'refuri', 'c
def sphinx_smarty_pants(t, language='en'):
- # type: (unicode, str) -> unicode
+ # type: (str, str) -> str
t = t.replace('&quot;', '"')
t = smartquotes.educateDashesOldSchool(t)
t = smartquotes.educateQuotes(t, language)
@@ -158,21 +157,21 @@ class EpubBuilder(StandaloneHTMLBuilder):
self.link_suffix = '.xhtml'
self.playorder = 0
self.tocid = 0
- self.id_cache = {} # type: Dict[unicode, unicode]
+ self.id_cache = {} # type: Dict[str, str]
self.use_index = self.get_builder_config('use_index', 'epub')
- self.refnodes = [] # type: List[Dict[unicode, Any]]
+ self.refnodes = [] # type: List[Dict[str, Any]]
def create_build_info(self):
# type: () -> BuildInfo
return BuildInfo(self.config, self.tags, ['html', 'epub'])
def get_theme_config(self):
- # type: () -> Tuple[unicode, Dict]
+ # type: () -> Tuple[str, Dict]
return self.config.epub_theme, self.config.epub_theme_options
# generic support functions
def make_id(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
# id_cache is intentionally mutable
"""Return a unique id for name."""
id = self.id_cache.get(name)
@@ -182,7 +181,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
return id
def esc(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Replace all characters not allowed in text an attribute values."""
# Like cgi.escape, but also replace apostrophe
name = name.replace('&', '&amp;')
@@ -193,7 +192,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
return name
def get_refnodes(self, doctree, result):
- # type: (nodes.Node, List[Dict[unicode, Any]]) -> List[Dict[unicode, Any]]
+ # type: (nodes.Node, List[Dict[str, Any]]) -> List[Dict[str, Any]]
"""Collect section titles, their depth in the toc and the refuri."""
# XXX: is there a better way than checking the attribute
# toctree-l[1-8] on the parent node?
@@ -233,7 +232,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
self.toc_add_files(self.refnodes)
def toc_add_files(self, refnodes):
- # type: (List[Dict[unicode, Any]]) -> None
+ # type: (List[Dict[str, Any]]) -> None
"""Add the master_doc, pre and post files to a list of refnodes.
"""
refnodes.insert(0, {
@@ -256,7 +255,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
})
def fix_fragment(self, prefix, fragment):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
"""Return a href/id attribute with colons replaced by hyphens."""
return prefix + fragment.replace(':', '-')
@@ -294,11 +293,11 @@ class EpubBuilder(StandaloneHTMLBuilder):
desc_signature.attributes['ids'] = newids
def add_visible_links(self, tree, show_urls='inline'):
- # type: (nodes.document, unicode) -> None
+ # type: (nodes.document, str) -> None
"""Add visible link targets for external links"""
def make_footnote_ref(doc, label):
- # type: (nodes.document, unicode) -> nodes.footnote_reference
+ # type: (nodes.document, str) -> nodes.footnote_reference
"""Create a footnote_reference node with children"""
footnote_ref = nodes.footnote_reference('[#]_')
footnote_ref.append(nodes.Text(label))
@@ -306,7 +305,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
return footnote_ref
def make_footnote(doc, label, uri):
- # type: (nodes.document, unicode, unicode) -> nodes.footnote
+ # type: (nodes.document, str, str) -> nodes.footnote
"""Create a footnote node with children"""
footnote = nodes.footnote(uri)
para = nodes.paragraph()
@@ -366,7 +365,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
fn_idx += 1
def write_doc(self, docname, doctree):
- # type: (unicode, nodes.document) -> None
+ # type: (str, nodes.document) -> None
"""Write one document file.
This method is overwritten in order to fix fragment identifiers
@@ -377,7 +376,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
super(EpubBuilder, self).write_doc(docname, doctree)
def fix_genindex(self, tree):
- # type: (List[Tuple[unicode, List[Tuple[unicode, Any]]]]) -> None
+ # type: (List[Tuple[str, List[Tuple[str, Any]]]]) -> None
"""Fix href attributes for genindex pages."""
# XXX: modifies tree inline
# Logic modeled from themes/basic/genindex.html
@@ -396,7 +395,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
self.fix_fragment(m.group(1), m.group(2)))
def is_vector_graphics(self, filename):
- # type: (unicode) -> bool
+ # type: (str) -> bool
"""Does the filename extension indicate a vector graphic format?"""
ext = path.splitext(filename)[-1]
return ext in VECTOR_GRAPHICS_EXTENSIONS
@@ -461,7 +460,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
def handle_page(self, pagename, addctx, templatename='page.html',
outfilename=None, event_arg=None):
- # type: (unicode, Dict, unicode, unicode, Any) -> None
+ # type: (str, Dict, str, str, Any) -> None
"""Create a rendered page.
This method is overwritten for genindex pages in order to fix href link
@@ -476,14 +475,14 @@ class EpubBuilder(StandaloneHTMLBuilder):
outfilename, event_arg)
def build_mimetype(self, outdir, outname):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Write the metainfo file mimetype."""
logger.info(__('writing %s file...'), outname)
copy_asset_file(path.join(self.template_dir, 'mimetype'),
path.join(outdir, outname))
def build_container(self, outdir, outname):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Write the metainfo file META-INF/container.xml."""
logger.info(__('writing %s file...'), outname)
filename = path.join(outdir, outname)
@@ -491,11 +490,11 @@ class EpubBuilder(StandaloneHTMLBuilder):
copy_asset_file(path.join(self.template_dir, 'container.xml'), filename)
def content_metadata(self):
- # type: () -> Dict[unicode, Any]
+ # type: () -> Dict[str, Any]
"""Create a dictionary with all metadata for the content.opf
file properly escaped.
"""
- metadata = {} # type: Dict[unicode, Any]
+ metadata = {} # type: Dict[str, Any]
metadata['title'] = self.esc(self.config.epub_title)
metadata['author'] = self.esc(self.config.epub_author)
metadata['uid'] = self.esc(self.config.epub_uid)
@@ -511,7 +510,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
return metadata
def build_content(self, outdir, outname):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Write the metainfo file content.opf It contains bibliographic data,
a file list and the spine (the reading order).
"""
@@ -522,7 +521,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
if not outdir.endswith(os.sep):
outdir += os.sep
olen = len(outdir)
- self.files = [] # type: List[unicode]
+ self.files = [] # type: List[str]
self.ignored_files = ['.buildinfo', 'mimetype', 'content.opf',
'toc.ncx', 'META-INF/container.xml',
'Thumbs.db', 'ehthumbs.db', '.DS_Store',
@@ -625,7 +624,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
metadata)
def new_navpoint(self, node, level, incr=True):
- # type: (Dict[unicode, Any], int, bool) -> NavPoint
+ # type: (Dict[str, Any], int, bool) -> NavPoint
"""Create a new entry in the toc from the node at given level."""
# XXX Modifies the node
if incr:
@@ -635,7 +634,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
node['text'], node['refuri'], [])
def build_navpoints(self, nodes):
- # type: (List[Dict[unicode, Any]]) -> List[NavPoint]
+ # type: (List[Dict[str, Any]]) -> List[NavPoint]
"""Create the toc navigation structure.
Subelements of a node are nested inside the navpoint. For nested nodes
@@ -680,11 +679,11 @@ class EpubBuilder(StandaloneHTMLBuilder):
return navstack[0].children
def toc_metadata(self, level, navpoints):
- # type: (int, List[NavPoint]) -> Dict[unicode, Any]
+ # type: (int, List[NavPoint]) -> Dict[str, Any]
"""Create a dictionary with all metadata for the toc.ncx file
properly escaped.
"""
- metadata = {} # type: Dict[unicode, Any]
+ metadata = {} # type: Dict[str, Any]
metadata['uid'] = self.config.epub_uid
metadata['title'] = self.esc(self.config.epub_title)
metadata['level'] = level
@@ -692,7 +691,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
return metadata
def build_toc(self, outdir, outname):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Write the metainfo file toc.ncx."""
logger.info(__('writing %s file...'), outname)
@@ -713,7 +712,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
self.toc_metadata(level, navpoints))
def build_epub(self, outdir, outname):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Write the epub file.
It is a zip file with the mimetype file stored uncompressed as the first
diff --git a/sphinx/builders/applehelp.py b/sphinx/builders/applehelp.py
index 74f8fcfc6..f7379e1ba 100644
--- a/sphinx/builders/applehelp.py
+++ b/sphinx/builders/applehelp.py
@@ -8,7 +8,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import pipes
import plistlib
@@ -31,7 +30,6 @@ if False:
# For type annotation
from typing import Any, Dict # NOQA
from sphinx.application import Sphinx # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -270,7 +268,7 @@ class AppleHelpBuilder(StandaloneHTMLBuilder):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.setup_extension('sphinx.builders.html')
app.add_builder(AppleHelpBuilder)
diff --git a/sphinx/builders/changes.py b/sphinx/builders/changes.py
index 5f1999003..5b934fb3e 100644
--- a/sphinx/builders/changes.py
+++ b/sphinx/builders/changes.py
@@ -27,7 +27,6 @@ if False:
# For type annotation
from typing import Any, Dict, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -48,22 +47,22 @@ class ChangesBuilder(Builder):
self.templates.init(self, self.theme)
def get_outdated_docs(self):
- # type: () -> unicode
+ # type: () -> str
return self.outdir
typemap = {
'versionadded': 'added',
'versionchanged': 'changed',
'deprecated': 'deprecated',
- } # type: Dict[unicode, unicode]
+ }
def write(self, *ignored):
# type: (Any) -> None
version = self.config.version
domain = cast(ChangeSetDomain, self.env.get_domain('changeset'))
- libchanges = {} # type: Dict[unicode, List[Tuple[unicode, unicode, int]]]
- apichanges = [] # type: List[Tuple[unicode, unicode, int]]
- otherchanges = {} # type: Dict[Tuple[unicode, unicode], List[Tuple[unicode, unicode, int]]] # NOQA
+ libchanges = {} # type: Dict[str, List[Tuple[str, str, int]]]
+ apichanges = [] # type: List[Tuple[str, str, int]]
+ otherchanges = {} # type: Dict[Tuple[str, str], List[Tuple[str, str, int]]]
if version not in self.env.versionchanges:
logger.info(bold(__('no changes in version %s.') % version))
return
@@ -123,7 +122,7 @@ class ChangesBuilder(Builder):
'.. deprecated:: %s' % version]
def hl(no, line):
- # type: (int, unicode) -> unicode
+ # type: (int, str) -> str
line = '<a name="L%s"> </a>' % no + htmlescape(line)
for x in hltext:
if x in line:
@@ -157,7 +156,7 @@ class ChangesBuilder(Builder):
self.outdir)
def hl(self, text, version):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
text = htmlescape(text)
for directive in ['versionchanged', 'versionadded', 'deprecated']:
text = text.replace('.. %s:: %s' % (directive, version),
@@ -170,7 +169,7 @@ class ChangesBuilder(Builder):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_builder(ChangesBuilder)
return {
diff --git a/sphinx/builders/devhelp.py b/sphinx/builders/devhelp.py
index 901fb5560..cb01667b9 100644
--- a/sphinx/builders/devhelp.py
+++ b/sphinx/builders/devhelp.py
@@ -36,7 +36,6 @@ if False:
# For type annotation
from typing import Dict, List # NOQA
from sphinx.application import Sphinx # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -72,7 +71,7 @@ class DevhelpBuilder(StandaloneHTMLBuilder):
self.build_devhelp(self.outdir, self.config.devhelp_basename)
def build_devhelp(self, outdir, outname):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
logger.info(__('dumping devhelp index...'))
# Basic info
@@ -112,7 +111,7 @@ class DevhelpBuilder(StandaloneHTMLBuilder):
index = IndexEntries(self.env).create_index(self)
def write_index(title, refs, subitems):
- # type: (unicode, List[Any], Any) -> None
+ # type: (str, List[Any], Any) -> None
if len(refs) == 0:
pass
elif len(refs) == 1:
@@ -141,7 +140,7 @@ class DevhelpBuilder(StandaloneHTMLBuilder):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.setup_extension('sphinx.builders.html')
app.add_builder(DevhelpBuilder)
diff --git a/sphinx/builders/dummy.py b/sphinx/builders/dummy.py
index 9ab1ad155..d20a5ab09 100644
--- a/sphinx/builders/dummy.py
+++ b/sphinx/builders/dummy.py
@@ -18,7 +18,6 @@ if False:
from typing import Any, Dict, Set # NOQA
from docutils import nodes # NOQA
from sphinx.application import Sphinx # NOQA
- from sphinx.util.typing import unicode # NOQA
class DummyBuilder(Builder):
@@ -32,19 +31,19 @@ class DummyBuilder(Builder):
pass
def get_outdated_docs(self):
- # type: () -> Set[unicode]
+ # type: () -> Set[str]
return self.env.found_docs
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
return ''
def prepare_writing(self, docnames):
- # type: (Set[unicode]) -> None
+ # type: (Set[str]) -> None
pass
def write_doc(self, docname, doctree):
- # type: (unicode, nodes.Node) -> None
+ # type: (str, nodes.Node) -> None
pass
def finish(self):
@@ -53,7 +52,7 @@ class DummyBuilder(Builder):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_builder(DummyBuilder)
return {
diff --git a/sphinx/builders/epub3.py b/sphinx/builders/epub3.py
index 312098098..a378bd981 100644
--- a/sphinx/builders/epub3.py
+++ b/sphinx/builders/epub3.py
@@ -28,7 +28,6 @@ if False:
from docutils import nodes # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -141,7 +140,7 @@ class Epub3Builder(_epub_base.EpubBuilder):
return metadata
def prepare_writing(self, docnames):
- # type: (Set[unicode]) -> None
+ # type: (Set[str]) -> None
super(Epub3Builder, self).prepare_writing(docnames)
writing_mode = self.config.epub_writing_mode
@@ -151,7 +150,7 @@ class Epub3Builder(_epub_base.EpubBuilder):
self.globalcontext['skip_ua_compatible'] = True
def build_navlist(self, navnodes):
- # type: (List[Dict[unicode, Any]]) -> List[NavPoint]
+ # type: (List[Dict[str, Any]]) -> List[NavPoint]
"""Create the toc navigation structure.
This method is almost same as build_navpoints method in epub.py.
@@ -205,7 +204,7 @@ class Epub3Builder(_epub_base.EpubBuilder):
return metadata
def build_navigation_doc(self, outdir, outname):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Write the metainfo file nav.xhtml."""
logger.info(__('writing %s file...'), outname)
@@ -231,7 +230,7 @@ class Epub3Builder(_epub_base.EpubBuilder):
def convert_epub_css_files(app, config):
# type: (Sphinx, Config) -> None
"""This converts string styled epub_css_files to tuple styled one."""
- epub_css_files = [] # type: List[Tuple[unicode, Dict]]
+ epub_css_files = [] # type: List[Tuple[str, Dict]]
for entry in config.epub_css_files:
if isinstance(entry, str):
epub_css_files.append((entry, {}))
@@ -247,7 +246,7 @@ def convert_epub_css_files(app, config):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_builder(Epub3Builder)
# config values
diff --git a/sphinx/builders/gettext.py b/sphinx/builders/gettext.py
index 684013d11..7ff8d0d53 100644
--- a/sphinx/builders/gettext.py
+++ b/sphinx/builders/gettext.py
@@ -14,12 +14,11 @@ from __future__ import unicode_literals
from codecs import open
from collections import defaultdict, OrderedDict
from datetime import datetime, tzinfo, timedelta
+from io import StringIO
from os import path, walk, getenv
from time import time
from uuid import uuid4
-from six import StringIO
-
from sphinx.builders import Builder
from sphinx.domains.python import pairindextypes
from sphinx.errors import ThemeError
@@ -37,7 +36,6 @@ if False:
from docutils import nodes # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.i18n import CatalogInfo # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -69,13 +67,13 @@ class Catalog:
def __init__(self):
# type: () -> None
- self.messages = [] # type: List[unicode]
+ self.messages = [] # type: List[str]
# retain insertion order, a la OrderedDict
- self.metadata = OrderedDict() # type: Dict[unicode, List[Tuple[unicode, int, unicode]]] # NOQA
+ self.metadata = OrderedDict() # type: Dict[str, List[Tuple[str, int, str]]]
# msgid -> file, line, uid
def add(self, msg, origin):
- # type: (unicode, Union[nodes.Element, MsgOrigin]) -> None
+ # type: (str, Union[nodes.Element, MsgOrigin]) -> None
if not hasattr(origin, 'uid'):
# Nodes that are replicated like todo don't have a uid,
# however i18n is also unnecessary.
@@ -92,7 +90,7 @@ class MsgOrigin:
"""
def __init__(self, source, line):
- # type: (unicode, int) -> None
+ # type: (str, int) -> None
self.source = source
self.line = line
self.uid = uuid4().hex
@@ -125,26 +123,26 @@ class I18nBuilder(Builder):
self.env.set_versioning_method(self.versioning_method,
self.env.config.gettext_uuid)
self.tags = I18nTags()
- self.catalogs = defaultdict(Catalog) # type: DefaultDict[unicode, Catalog]
+ self.catalogs = defaultdict(Catalog) # type: DefaultDict[str, Catalog]
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
return ''
def get_outdated_docs(self):
- # type: () -> Set[unicode]
+ # type: () -> Set[str]
return self.env.found_docs
def prepare_writing(self, docnames):
- # type: (Set[unicode]) -> None
+ # type: (Set[str]) -> None
return
def compile_catalogs(self, catalogs, message):
- # type: (Set[CatalogInfo], unicode) -> None
+ # type: (Set[CatalogInfo], str) -> None
return
def write_doc(self, docname, doctree):
- # type: (unicode, nodes.document) -> None
+ # type: (str, nodes.document) -> None
catalog = self.catalogs[find_catalog(docname, self.config.gettext_compact)]
for node, msg in extract_messages(doctree):
@@ -194,7 +192,7 @@ ltz = LocalTimeZone()
def should_write(filepath, new_content):
- # type: (unicode, unicode) -> bool
+ # type: (str, str) -> bool
if not path.exists(filepath):
return True
try:
@@ -226,7 +224,7 @@ class MessageCatalogBuilder(I18nBuilder):
self.templates.init(self)
def _collect_templates(self):
- # type: () -> Set[unicode]
+ # type: () -> Set[str]
template_files = set()
for template_path in self.config.templates_path:
tmpl_abs_path = path.join(self.app.srcdir, template_path)
@@ -258,7 +256,7 @@ class MessageCatalogBuilder(I18nBuilder):
raise ThemeError('%s: %r' % (template, exc))
def build(self, docnames, summary=None, method='update'):
- # type: (Iterable[unicode], unicode, unicode) -> None
+ # type: (Iterable[str], str, str) -> None
self._extract_from_template()
super(MessageCatalogBuilder, self).build(docnames, summary, method)
@@ -310,7 +308,7 @@ class MessageCatalogBuilder(I18nBuilder):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_builder(MessageCatalogBuilder)
app.add_config_value('gettext_compact', True, 'gettext')
diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py
index f9a28933d..10a6498e2 100644
--- a/sphinx/builders/html.py
+++ b/sphinx/builders/html.py
@@ -60,7 +60,6 @@ if False:
from sphinx.config import Config # NOQA
from sphinx.domains import Domain, Index, IndexEntry # NOQA
from sphinx.util.tags import Tags # NOQA
- from sphinx.util.typing import unicode # NOQA
# Experimental HTML5 Writer
if is_html5_writer_available():
@@ -79,7 +78,7 @@ return_codes_re = re.compile('[\r\n]+')
def get_stable_hash(obj):
- # type: (Any) -> unicode
+ # type: (Any) -> str
"""
Return a stable hash for a Python data structure. We can't just use
the md5 of str(obj) since for example dictionary items are enumerated
@@ -99,11 +98,11 @@ class Stylesheet(text_type):
its filename (str).
"""
- attributes = None # type: Dict[unicode, unicode]
- filename = None # type: unicode
+ attributes = None # type: Dict[str, str]
+ filename = None # type: str
def __new__(cls, filename, *args, **attributes):
- # type: (unicode, unicode, unicode) -> None
+ # type: (str, str, str) -> None
self = text_type.__new__(cls, filename) # type: ignore
self.filename = filename
self.attributes = attributes
@@ -119,14 +118,14 @@ class Stylesheet(text_type):
class JSContainer(list):
"""The container for JavaScript scripts."""
def insert(self, index, obj):
- # type: (int, unicode) -> None
+ # type: (int, str) -> None
warnings.warn('builder.script_files is deprecated. '
'Please use app.add_js_file() instead.',
RemovedInSphinx30Warning, stacklevel=2)
super(JSContainer, self).insert(index, obj)
def extend(self, other): # type: ignore
- # type: (List[unicode]) -> None
+ # type: (List[str]) -> None
warnings.warn('builder.script_files is deprecated. '
'Please use app.add_js_file() instead.',
RemovedInSphinx30Warning, stacklevel=2)
@@ -134,7 +133,7 @@ class JSContainer(list):
self.append(item)
def __iadd__(self, other): # type: ignore
- # type: (List[unicode]) -> JSContainer
+ # type: (List[str]) -> JSContainer
warnings.warn('builder.script_files is deprecated. '
'Please use app.add_js_file() instead.',
RemovedInSphinx30Warning, stacklevel=2)
@@ -143,7 +142,7 @@ class JSContainer(list):
return self
def __add__(self, other):
- # type: (List[unicode]) -> JSContainer
+ # type: (List[str]) -> JSContainer
ret = JSContainer(self)
ret += other
return ret
@@ -156,11 +155,11 @@ class JavaScript(text_type):
its filename (str).
"""
- attributes = None # type: Dict[unicode, unicode]
- filename = None # type: unicode
+ attributes = None # type: Dict[str, str]
+ filename = None # type: str
def __new__(cls, filename, **attributes):
- # type: (unicode, **unicode) -> None
+ # type: (str, **str) -> None
self = text_type.__new__(cls, filename) # type: ignore
self.filename = filename
self.attributes = attributes
@@ -193,7 +192,7 @@ class BuildInfo:
raise ValueError(__('build info file is broken: %r') % exc)
def __init__(self, config=None, tags=None, config_categories=[]):
- # type: (Config, Tags, List[unicode]) -> None
+ # type: (Config, Tags, List[str]) -> None
self.config_hash = u''
self.tags_hash = u''
@@ -249,8 +248,8 @@ class StandaloneHTMLBuilder(Builder):
# use html5 translator by default
default_html5_translator = False
- imgpath = None # type: unicode
- domain_indices = [] # type: List[Tuple[unicode, Type[Index], List[Tuple[unicode, List[IndexEntry]]], bool]] # NOQA
+ imgpath = None # type: str
+ domain_indices = [] # type: List[Tuple[str, Type[Index], List[Tuple[str, List[IndexEntry]]], bool]] # NOQA
# cached publisher object for snippets
_publisher = None
@@ -260,7 +259,7 @@ class StandaloneHTMLBuilder(Builder):
super(StandaloneHTMLBuilder, self).__init__(app)
# CSS files
- self.css_files = [] # type: List[Dict[unicode, unicode]]
+ self.css_files = [] # type: List[Dict[str, str]]
# JS files
self.script_files = JSContainer() # type: List[JavaScript]
@@ -271,9 +270,9 @@ class StandaloneHTMLBuilder(Builder):
# basename of images directory
self.imagedir = '_images'
# section numbers for headings in the currently visited document
- self.secnumbers = {} # type: Dict[unicode, Tuple[int, ...]]
+ self.secnumbers = {} # type: Dict[str, Tuple[int, ...]]
# currently written docname
- self.current_docname = None # type: unicode
+ self.current_docname = None # type: str
self.init_templates()
self.init_highlighter()
@@ -302,7 +301,7 @@ class StandaloneHTMLBuilder(Builder):
return BuildInfo(self.config, self.tags, ['html'])
def _get_translations_js(self):
- # type: () -> unicode
+ # type: () -> str
candidates = [path.join(dir, self.config.language,
'LC_MESSAGES', 'sphinx.js')
for dir in self.config.locale_dirs] + \
@@ -317,7 +316,7 @@ class StandaloneHTMLBuilder(Builder):
return None
def get_theme_config(self):
- # type: () -> Tuple[unicode, Dict]
+ # type: () -> Tuple[str, Dict]
return self.config.html_theme, self.config.html_theme_options
def init_templates(self):
@@ -349,7 +348,7 @@ class StandaloneHTMLBuilder(Builder):
self.add_css_file(filename, **attrs)
def add_css_file(self, filename, **kwargs):
- # type: (unicode, **unicode) -> None
+ # type: (str, **str) -> None
if '://' not in filename:
filename = posixpath.join('_static', filename)
@@ -372,7 +371,7 @@ class StandaloneHTMLBuilder(Builder):
self.add_js_file('translations.js')
def add_js_file(self, filename, **kwargs):
- # type: (unicode, **unicode) -> None
+ # type: (str, **str) -> None
if filename and '://' not in filename:
filename = posixpath.join('_static', filename)
@@ -392,7 +391,7 @@ class StandaloneHTMLBuilder(Builder):
@property
def math_renderer_name(self):
- # type: () -> unicode
+ # type: () -> str
name = self.get_builder_config('math_renderer', 'html')
if name is not None:
# use given name
@@ -412,14 +411,13 @@ class StandaloneHTMLBuilder(Builder):
return None
def get_outdated_docs(self):
- # type: () -> Iterator[unicode]
+ # type: () -> Iterator[str]
try:
with open(path.join(self.outdir, '.buildinfo')) as fp:
buildinfo = BuildInfo.load(fp)
if self.build_info != buildinfo:
- for docname in self.env.found_docs:
- yield docname
+ yield from self.env.found_docs
return
except ValueError as exc:
logger.warning(__('Failed to read build info file: %r'), exc)
@@ -450,11 +448,11 @@ class StandaloneHTMLBuilder(Builder):
pass
def get_asset_paths(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
return self.config.html_extra_path + self.config.html_static_path
def render_partial(self, node):
- # type: (nodes.Node) -> Dict[unicode, unicode]
+ # type: (nodes.Node) -> Dict[str, str]
"""Utility: Render a lone doctree node."""
if node is None:
return {'fragment': ''}
@@ -480,7 +478,7 @@ class StandaloneHTMLBuilder(Builder):
return pub.writer.parts
def prepare_writing(self, docnames):
- # type: (Set[unicode]) -> None
+ # type: (Set[str]) -> None
# create the search indexer
self.indexer = None
if self.search:
@@ -509,7 +507,7 @@ class StandaloneHTMLBuilder(Builder):
domain = None # type: Domain
domain = self.env.domains[domain_name]
for indexcls in domain.indices:
- indexname = '%s-%s' % (domain.name, indexcls.name) # type: unicode
+ indexname = '%s-%s' % (domain.name, indexcls.name)
if isinstance(indices_config, list):
if indexname not in indices_config:
continue
@@ -538,7 +536,7 @@ class StandaloneHTMLBuilder(Builder):
self.relations = self.env.collect_relations()
- rellinks = [] # type: List[Tuple[unicode, unicode, unicode, unicode]]
+ rellinks = [] # type: List[Tuple[str, str, str, str]]
if self.use_index:
rellinks.append(('genindex', _('General Index'), 'I', _('index')))
for indexname, indexcls, content, collapse in self.domain_indices:
@@ -582,7 +580,7 @@ class StandaloneHTMLBuilder(Builder):
'logo': logo,
'favicon': favicon,
'html5_doctype': self.config.html_experimental_html5_writer and html5_ready,
- } # type: Dict[unicode, Any]
+ }
if self.theme:
self.globalcontext.update(
('theme_' + key, val) for (key, val) in
@@ -590,7 +588,7 @@ class StandaloneHTMLBuilder(Builder):
self.globalcontext.update(self.config.html_context)
def get_doc_context(self, docname, body, metatags):
- # type: (unicode, unicode, unicode) -> Dict[unicode, Any]
+ # type: (str, str, str) -> Dict[str, Any]
"""Collect items for the template context of a page."""
# find out relations
prev = next = None
@@ -671,14 +669,14 @@ class StandaloneHTMLBuilder(Builder):
}
def write_doc(self, docname, doctree):
- # type: (unicode, nodes.document) -> None
+ # type: (str, nodes.document) -> None
destination = StringOutput(encoding='utf-8')
doctree.settings = self.docsettings
self.secnumbers = self.env.toc_secnumbers.get(docname, {})
self.fignumbers = self.env.toc_fignumbers.get(docname, {})
self.imgpath = relative_uri(self.get_target_uri(docname), '_images')
- self.dlpath = relative_uri(self.get_target_uri(docname), '_downloads') # type: unicode
+ self.dlpath = relative_uri(self.get_target_uri(docname), '_downloads')
self.current_docname = docname
self.docwriter.write(doctree, destination)
self.docwriter.assemble_parts()
@@ -689,7 +687,7 @@ class StandaloneHTMLBuilder(Builder):
self.handle_page(docname, ctx, event_arg=doctree)
def write_doc_serialized(self, docname, doctree):
- # type: (unicode, nodes.document) -> None
+ # type: (str, nodes.document) -> None
self.imgpath = relative_uri(self.get_target_uri(docname), self.imagedir)
self.post_process_images(doctree)
title_node = self.env.longtitles.get(docname)
@@ -809,7 +807,7 @@ class StandaloneHTMLBuilder(Builder):
def copy_download_files(self):
# type: () -> None
def to_relpath(f):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return relative_path(self.srcdir, f)
# copy downloadable files
if self.env.dlfiles:
@@ -951,7 +949,7 @@ class StandaloneHTMLBuilder(Builder):
reference.append(node)
def load_indexer(self, docnames):
- # type: (Iterable[unicode]) -> None
+ # type: (Iterable[str]) -> None
keep = set(self.env.all_docs) - set(docnames)
try:
searchindexfn = path.join(self.outdir, self.searchindex_filename)
@@ -970,7 +968,7 @@ class StandaloneHTMLBuilder(Builder):
self.indexer.prune(keep)
def index_page(self, pagename, doctree, title):
- # type: (unicode, nodes.document, unicode) -> None
+ # type: (str, nodes.document, str) -> None
# only index pages with title
if self.indexer is not None and title:
filename = self.env.doc2path(pagename, base=None)
@@ -981,20 +979,20 @@ class StandaloneHTMLBuilder(Builder):
self.indexer.feed(pagename, title, doctree) # type: ignore
def _get_local_toctree(self, docname, collapse=True, **kwds):
- # type: (unicode, bool, Any) -> unicode
+ # type: (str, bool, Any) -> str
if 'includehidden' not in kwds:
kwds['includehidden'] = False
return self.render_partial(TocTree(self.env).get_toctree_for(
docname, self, collapse, **kwds))['fragment']
def get_outfilename(self, pagename):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return path.join(self.outdir, os_path(pagename) + self.out_suffix)
def add_sidebars(self, pagename, ctx):
- # type: (unicode, Dict) -> None
+ # type: (str, Dict) -> None
def has_wildcard(pattern):
- # type: (unicode) -> bool
+ # type: (str) -> bool
return any(char in pattern for char in '*?[')
sidebars = None
matched = None
@@ -1045,12 +1043,12 @@ class StandaloneHTMLBuilder(Builder):
# --------- these are overwritten by the serialization builder
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
return docname + self.link_suffix
def handle_page(self, pagename, addctx, templatename='page.html',
outfilename=None, event_arg=None):
- # type: (unicode, Dict, unicode, unicode, Any) -> None
+ # type: (str, Dict, str, str, Any) -> None
ctx = self.globalcontext.copy()
# current_page_name is backwards compatibility
ctx['pagename'] = ctx['current_page_name'] = pagename
@@ -1067,7 +1065,7 @@ class StandaloneHTMLBuilder(Builder):
ctx['pageurl'] = None
def pathto(otheruri, resource=False, baseuri=default_baseuri):
- # type: (unicode, bool, unicode) -> unicode
+ # type: (str, bool, str) -> str
if resource and '://' in otheruri:
# allow non-local resources given by scheme
return otheruri
@@ -1080,7 +1078,7 @@ class StandaloneHTMLBuilder(Builder):
ctx['pathto'] = pathto
def css_tag(css):
- # type: (Stylesheet) -> unicode
+ # type: (Stylesheet) -> str
attrs = []
for key in sorted(css.attributes):
value = css.attributes[key]
@@ -1091,7 +1089,7 @@ class StandaloneHTMLBuilder(Builder):
ctx['css_tag'] = css_tag
def hasdoc(name):
- # type: (unicode) -> bool
+ # type: (str) -> bool
if name in self.env.all_docs:
return True
elif name == 'search' and self.search:
@@ -1102,7 +1100,7 @@ class StandaloneHTMLBuilder(Builder):
ctx['hasdoc'] = hasdoc
def warn(*args, **kwargs):
- # type: (Any, Any) -> unicode
+ # type: (Any, Any) -> str
"""Simple warn() wrapper for themes."""
warnings.warn('The template function warn() was deprecated. '
'Use warning() instead.',
@@ -1150,7 +1148,7 @@ class StandaloneHTMLBuilder(Builder):
copyfile(self.env.doc2path(pagename), source_name)
def update_page_context(self, pagename, templatename, ctx, event_arg):
- # type: (unicode, unicode, Dict, Any) -> None
+ # type: (str, str, Dict, Any) -> None
pass
def handle_finish(self):
@@ -1193,7 +1191,7 @@ class DirectoryHTMLBuilder(StandaloneHTMLBuilder):
name = 'dirhtml'
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
if docname == 'index':
return ''
if docname.endswith(SEP + 'index'):
@@ -1201,7 +1199,7 @@ class DirectoryHTMLBuilder(StandaloneHTMLBuilder):
return docname + SEP
def get_outfilename(self, pagename):
- # type: (unicode) -> unicode
+ # type: (str) -> str
if pagename == 'index' or pagename.endswith(SEP + 'index'):
outfilename = path.join(self.outdir, os_path(pagename) +
self.out_suffix)
@@ -1212,7 +1210,7 @@ class DirectoryHTMLBuilder(StandaloneHTMLBuilder):
return outfilename
def prepare_writing(self, docnames):
- # type: (Set[unicode]) -> None
+ # type: (Set[str]) -> None
super(DirectoryHTMLBuilder, self).prepare_writing(docnames)
self.globalcontext['no_search_suffix'] = True
@@ -1228,11 +1226,11 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
copysource = False
def get_outdated_docs(self): # type: ignore
- # type: () -> Union[unicode, List[unicode]]
+ # type: () -> Union[str, List[str]]
return 'all documents'
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
if docname in self.env.all_docs:
# all references are on the same page...
return self.config.master_doc + self.out_suffix + \
@@ -1242,7 +1240,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
return docname + self.out_suffix
def get_relative_uri(self, from_, to, typ=None):
- # type: (unicode, unicode, unicode) -> unicode
+ # type: (str, str, str) -> str
# ignore source
return self.get_target_uri(to, typ)
@@ -1262,7 +1260,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
refnode['refuri'] = fname + refuri[hashindex:]
def _get_local_toctree(self, docname, collapse=True, **kwds):
- # type: (unicode, bool, Any) -> unicode
+ # type: (str, bool, Any) -> str
if 'includehidden' not in kwds:
kwds['includehidden'] = False
toctree = TocTree(self.env).get_toctree_for(docname, self, collapse, **kwds)
@@ -1281,7 +1279,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
return tree
def assemble_toc_secnumbers(self):
- # type: () -> Dict[unicode, Dict[unicode, Tuple[int, ...]]]
+ # type: () -> Dict[str, Dict[str, Tuple[int, ...]]]
# Assemble toc_secnumbers to resolve section numbers on SingleHTML.
# Merge all secnumbers to single secnumber.
#
@@ -1291,7 +1289,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
#
# There are related codes in inline_all_toctres() and
# HTMLTranslter#add_secnumber().
- new_secnumbers = {} # type: Dict[unicode, Tuple[int, ...]]
+ new_secnumbers = {} # type: Dict[str, Tuple[int, ...]]
for docname, secnums in self.env.toc_secnumbers.items():
for id, secnum in secnums.items():
alias = "%s/%s" % (docname, id)
@@ -1300,7 +1298,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
return {self.config.master_doc: new_secnumbers}
def assemble_toc_fignumbers(self):
- # type: () -> Dict[unicode, Dict[unicode, Dict[unicode, Tuple[int, ...]]]] # NOQA
+ # type: () -> Dict[str, Dict[str, Dict[str, Tuple[int, ...]]]]
# Assemble toc_fignumbers to resolve figure numbers on SingleHTML.
# Merge all fignumbers to single fignumber.
#
@@ -1310,7 +1308,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
#
# There are related codes in inline_all_toctres() and
# HTMLTranslter#add_fignumber().
- new_fignumbers = {} # type: Dict[unicode, Dict[unicode, Tuple[int, ...]]]
+ new_fignumbers = {} # type: Dict[str, Dict[str, Tuple[int, ...]]]
# {u'foo': {'figure': {'id2': (2,), 'id1': (1,)}}, u'bar': {'figure': {'id1': (3,)}}}
for docname, fignumlist in self.env.toc_fignumbers.items():
for figtype, fignums in fignumlist.items():
@@ -1322,7 +1320,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
return {self.config.master_doc: new_fignumbers}
def get_doc_context(self, docname, body, metatags):
- # type: (unicode, unicode, unicode) -> Dict
+ # type: (str, str, str) -> Dict
# no relation links...
toctree = TocTree(self.env).get_toctree_for(self.config.master_doc, self, False)
# if there is no toctree, toc is None
@@ -1404,7 +1402,7 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
additional_dump_args = () # type: Tuple
#: the filename for the global context file
- globalcontext_filename = None # type: unicode
+ globalcontext_filename = None # type: str
supported_image_types = ['image/svg+xml', 'image/png',
'image/gif', 'image/jpeg']
@@ -1423,7 +1421,7 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
self.use_index = self.get_builder_config('use_index', 'html')
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
if docname == 'index':
return ''
if docname.endswith(SEP + 'index'):
@@ -1431,7 +1429,7 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
return docname + SEP
def dump_context(self, context, filename):
- # type: (Dict, unicode) -> None
+ # type: (Dict, str) -> None
if self.implementation_dumps_unicode:
with open(filename, 'w', encoding='utf-8') as ft:
self.implementation.dump(context, ft, *self.additional_dump_args)
@@ -1441,7 +1439,7 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
def handle_page(self, pagename, ctx, templatename='page.html',
outfilename=None, event_arg=None):
- # type: (unicode, Dict, unicode, unicode, Any) -> None
+ # type: (str, Dict, str, str, Any) -> None
ctx['current_page_name'] = pagename
self.add_sidebars(pagename, ctx)
@@ -1528,7 +1526,7 @@ class JSONHTMLBuilder(SerializingHTMLBuilder):
def convert_html_css_files(app, config):
# type: (Sphinx, Config) -> None
"""This converts string styled html_css_files to tuple styled one."""
- html_css_files = [] # type: List[Tuple[unicode, Dict]]
+ html_css_files = [] # type: List[Tuple[str, Dict]]
for entry in config.html_css_files:
if isinstance(entry, str):
html_css_files.append((entry, {}))
@@ -1546,7 +1544,7 @@ def convert_html_css_files(app, config):
def convert_html_js_files(app, config):
# type: (Sphinx, Config) -> None
"""This converts string styled html_js_files to tuple styled one."""
- html_js_files = [] # type: List[Tuple[unicode, Dict]]
+ html_js_files = [] # type: List[Tuple[str, Dict]]
for entry in config.html_js_files:
if isinstance(entry, str):
html_js_files.append((entry, {}))
@@ -1562,7 +1560,7 @@ def convert_html_js_files(app, config):
def setup_js_tag_helper(app, pagename, templatexname, context, doctree):
- # type: (Sphinx, unicode, unicode, Dict, nodes.Node) -> None
+ # type: (Sphinx, str, str, Dict, nodes.Node) -> None
"""Set up js_tag() template helper.
.. note:: This set up function is added to keep compatibility with webhelper.
@@ -1570,9 +1568,9 @@ def setup_js_tag_helper(app, pagename, templatexname, context, doctree):
pathto = context.get('pathto')
def js_tag(js):
- # type: (JavaScript) -> unicode
+ # type: (JavaScript) -> str
attrs = []
- body = '' # type: unicode
+ body = ''
if isinstance(js, JavaScript):
for key in sorted(js.attributes):
value = js.attributes[key]
@@ -1606,7 +1604,7 @@ def validate_math_renderer(app):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
# builders
app.add_builder(StandaloneHTMLBuilder)
app.add_builder(DirectoryHTMLBuilder)
diff --git a/sphinx/builders/htmlhelp.py b/sphinx/builders/htmlhelp.py
index 55bff842b..24f3ab2c4 100644
--- a/sphinx/builders/htmlhelp.py
+++ b/sphinx/builders/htmlhelp.py
@@ -9,7 +9,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import os
from os import path
@@ -31,7 +30,6 @@ if False:
from typing import Any, Dict, IO, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -209,13 +207,13 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
self.lcid, self.encoding = locale
def open_file(self, outdir, basename, mode='w'):
- # type: (unicode, unicode, unicode) -> IO
+ # type: (str, str, str) -> IO
# open a file with the correct encoding for the selected language
return open(path.join(outdir, basename), mode, encoding=self.encoding,
errors='xmlcharrefreplace')
def update_page_context(self, pagename, templatename, ctx, event_arg):
- # type: (unicode, unicode, Dict, unicode) -> None
+ # type: (str, str, Dict, str) -> None
ctx['encoding'] = self.encoding
def handle_finish(self):
@@ -223,7 +221,7 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
self.build_hhx(self.outdir, self.config.htmlhelp_basename)
def write_doc(self, docname, doctree):
- # type: (unicode, nodes.document) -> None
+ # type: (str, nodes.document) -> None
for node in doctree.traverse(nodes.reference):
# add ``target=_blank`` attributes to external links
if node.get('internal') is None and 'refuri' in node:
@@ -232,7 +230,7 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
super(HTMLHelpBuilder, self).write_doc(docname, doctree)
def build_hhx(self, outdir, outname):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
logger.info(__('dumping stopword list...'))
with self.open_file(outdir, outname + '.stp') as f:
for word in sorted(stopwords):
@@ -306,9 +304,9 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
f.write('<UL>\n')
def write_index(title, refs, subitems):
- # type: (unicode, List[Tuple[unicode, unicode]], List[Tuple[unicode, List[Tuple[unicode, unicode]]]]) -> None # NOQA
+ # type: (str, List[Tuple[str, str]], List[Tuple[str, List[Tuple[str, str]]]]) -> None # NOQA
def write_param(name, value):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
item = ' <param name="%s" value="%s">\n' % \
(name, value)
f.write(item)
@@ -337,13 +335,13 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
def default_htmlhelp_basename(config):
- # type: (Config) -> unicode
+ # type: (Config) -> str
"""Better default htmlhelp_basename setting."""
return make_filename_from_project(config.project) + 'doc'
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.setup_extension('sphinx.builders.html')
app.add_builder(HTMLHelpBuilder)
diff --git a/sphinx/builders/latex/__init__.py b/sphinx/builders/latex/__init__.py
index a06ae590e..2240b281b 100644
--- a/sphinx/builders/latex/__init__.py
+++ b/sphinx/builders/latex/__init__.py
@@ -45,7 +45,6 @@ if False:
from typing import Any, Dict, Iterable, List, Tuple, Union # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
- from sphinx.util.typing import unicode # NOQA
XINDY_LANG_OPTIONS = {
@@ -103,11 +102,11 @@ XINDY_LANG_OPTIONS = {
'el': '-L greek -C utf8 ',
# FIXME, not compatible with [:2] slice but does Sphinx support Greek ?
'el-polyton': '-L greek -C polytonic-utf8 ',
-} # type: Dict[unicode, unicode]
+}
XINDY_CYRILLIC_SCRIPTS = [
'be', 'bg', 'mk', 'mn', 'ru', 'sr', 'sh', 'uk',
-] # type: List[unicode]
+]
logger = logging.getLogger(__name__)
@@ -130,27 +129,27 @@ class LaTeXBuilder(Builder):
def init(self):
# type: () -> None
- self.context = {} # type: Dict[unicode, Any]
- self.docnames = [] # type: Iterable[unicode]
- self.document_data = [] # type: List[Tuple[unicode, unicode, unicode, unicode, unicode, bool]] # NOQA
+ self.context = {} # type: Dict[str, Any]
+ self.docnames = [] # type: Iterable[str]
+ self.document_data = [] # type: List[Tuple[str, str, str, str, str, bool]]
self.usepackages = self.app.registry.latex_packages
texescape.init()
self.init_context()
def get_outdated_docs(self):
- # type: () -> Union[unicode, List[unicode]]
+ # type: () -> Union[str, List[str]]
return 'all documents' # for now
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
if docname not in self.docnames:
raise NoUri
else:
return '%' + docname
def get_relative_uri(self, from_, to, typ=None):
- # type: (unicode, unicode, unicode) -> unicode
+ # type: (str, str, str) -> str
# ignore source path
return self.get_target_uri(to, typ)
@@ -162,7 +161,7 @@ class LaTeXBuilder(Builder):
'will be written'))
return
# assign subdirs to titles
- self.titles = [] # type: List[Tuple[unicode, unicode]]
+ self.titles = [] # type: List[Tuple[str, str]]
for entry in preliminary_document_data:
docname = entry[0]
if docname not in self.env.all_docs:
@@ -256,7 +255,7 @@ class LaTeXBuilder(Builder):
logger.info("done")
def get_contentsname(self, indexfile):
- # type: (unicode) -> unicode
+ # type: (str) -> str
tree = self.env.get_doctree(indexfile)
contentsname = None
for toctree in tree.traverse(addnodes.toctree):
@@ -267,12 +266,12 @@ class LaTeXBuilder(Builder):
return contentsname
def update_doc_context(self, title, author):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
self.context['title'] = title
self.context['author'] = author
def assemble_doctree(self, indexfile, toctree_only, appendices):
- # type: (unicode, bool, List[unicode]) -> nodes.document
+ # type: (str, bool, List[str]) -> nodes.document
from docutils import nodes # NOQA
self.docnames = set([indexfile] + appendices)
logger.info(darkgreen(indexfile) + " ", nonl=1)
@@ -427,7 +426,7 @@ def validate_config_values(app, config):
def default_latex_engine(config):
- # type: (Config) -> unicode
+ # type: (Config) -> str
""" Better default latex_engine settings for specific languages. """
if config.language == 'ja':
return 'platex'
@@ -436,7 +435,7 @@ def default_latex_engine(config):
def default_latex_docclass(config):
- # type: (Config) -> Dict[unicode, unicode]
+ # type: (Config) -> Dict[str, str]
""" Better default latex_docclass settings for specific languages. """
if config.language == 'ja':
return {'manual': 'jsbook',
@@ -452,7 +451,7 @@ def default_latex_use_xindy(config):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_builder(LaTeXBuilder)
app.add_post_transform(CitationReferenceTransform)
app.add_post_transform(MathReferenceTransform)
diff --git a/sphinx/builders/latex/transforms.py b/sphinx/builders/latex/transforms.py
index 7b76b7545..c3bc50fe3 100644
--- a/sphinx/builders/latex/transforms.py
+++ b/sphinx/builders/latex/transforms.py
@@ -23,7 +23,6 @@ from sphinx.util.nodes import NodeMatcher
if False:
# For type annotation
from typing import Any, Dict, List, Set, Tuple, Union # NOQA
- from sphinx.util.typing import unicode # NOQA
URI_SCHEMES = ('mailto:', 'http:', 'https:', 'ftp:')
@@ -92,7 +91,7 @@ class ShowUrlsTransform(SphinxTransform):
node.parent.insert(index + 1, textnode)
def get_docname_for_node(self, node):
- # type: (nodes.Node) -> unicode
+ # type: (nodes.Node) -> str
while node:
if isinstance(node, nodes.document):
return self.env.path2doc(node['source'])
@@ -104,7 +103,7 @@ class ShowUrlsTransform(SphinxTransform):
return None # never reached here. only for type hinting
def create_footnote(self, uri, docname):
- # type: (unicode, unicode) -> Tuple[nodes.footnote, nodes.footnote_reference]
+ # type: (str, str) -> Tuple[nodes.footnote, nodes.footnote_reference]
reference = nodes.reference('', nodes.Text(uri), refuri=uri, nolinkurl=True)
footnote = nodes.footnote(uri, auto=1, docname=docname)
footnote['names'].append('#')
@@ -154,7 +153,7 @@ class FootnoteCollector(nodes.NodeVisitor):
def __init__(self, document):
# type: (nodes.document) -> None
self.auto_footnotes = [] # type: List[nodes.footnote]
- self.used_footnote_numbers = set() # type: Set[unicode]
+ self.used_footnote_numbers = set() # type: Set[str]
self.footnote_refs = [] # type: List[nodes.footnote_reference]
super(FootnoteCollector, self).__init__(document)
@@ -361,7 +360,7 @@ class LaTeXFootnoteTransform(SphinxTransform):
class LaTeXFootnoteVisitor(nodes.NodeVisitor):
def __init__(self, document, footnotes):
# type: (nodes.document, List[nodes.footnote]) -> None
- self.appeared = set() # type: Set[Tuple[unicode, unicode]]
+ self.appeared = set() # type: Set[Tuple[str, str]]
self.footnotes = footnotes # type: List[nodes.footnote]
self.pendings = [] # type: List[nodes.footnote]
self.table_footnotes = [] # type: List[nodes.footnote]
diff --git a/sphinx/builders/linkcheck.py b/sphinx/builders/linkcheck.py
index 9544d5b35..911c58c12 100644
--- a/sphinx/builders/linkcheck.py
+++ b/sphinx/builders/linkcheck.py
@@ -34,7 +34,6 @@ if False:
from typing import Any, Dict, List, Set, Tuple, Union # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.requests.requests import Response # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -44,7 +43,7 @@ class AnchorCheckParser(HTMLParser):
"""Specialized HTML parser that looks for a specific anchor."""
def __init__(self, search_anchor):
- # type: (unicode) -> None
+ # type: (str) -> None
super(AnchorCheckParser, self).__init__()
self.search_anchor = search_anchor
@@ -59,7 +58,7 @@ class AnchorCheckParser(HTMLParser):
def check_anchor(response, anchor):
- # type: (Response, unicode) -> bool
+ # type: (Response, str) -> bool
"""Reads HTML data from a response object `response` searching for `anchor`.
Returns True if anchor was found, False otherwise.
"""
@@ -87,9 +86,9 @@ class CheckExternalLinksBuilder(Builder):
self.to_ignore = [re.compile(x) for x in self.app.config.linkcheck_ignore]
self.anchors_ignore = [re.compile(x)
for x in self.app.config.linkcheck_anchors_ignore]
- self.good = set() # type: Set[unicode]
- self.broken = {} # type: Dict[unicode, unicode]
- self.redirected = {} # type: Dict[unicode, Tuple[unicode, int]]
+ self.good = set() # type: Set[str]
+ self.broken = {} # type: Dict[str, str]
+ self.redirected = {} # type: Dict[str, Tuple[str, int]]
# set a timeout for non-responding servers
socket.setdefaulttimeout(5.0)
# create output file
@@ -117,7 +116,7 @@ class CheckExternalLinksBuilder(Builder):
kwargs['timeout'] = self.app.config.linkcheck_timeout
def check_uri():
- # type: () -> Tuple[unicode, unicode, int]
+ # type: () -> Tuple[str, str, int]
# split off anchor
if '#' in uri:
req_url, anchor = uri.split('#', 1)
@@ -181,7 +180,7 @@ class CheckExternalLinksBuilder(Builder):
return 'redirected', new_url, 0
def check():
- # type: () -> Tuple[unicode, unicode, int]
+ # type: () -> Tuple[str, str, int]
# check for various conditions without bothering the network
if len(uri) == 0 or uri.startswith(('#', 'mailto:', 'ftp:')):
return 'unchecked', '', 0
@@ -220,7 +219,7 @@ class CheckExternalLinksBuilder(Builder):
self.rqueue.put((uri, docname, lineno, status, info, code))
def process_result(self, result):
- # type: (Tuple[unicode, unicode, int, unicode, unicode, int]) -> None
+ # type: (Tuple[str, str, int, str, str, int]) -> None
uri, docname, lineno, status, info, code = result
if status == 'unchecked':
return
@@ -258,19 +257,19 @@ class CheckExternalLinksBuilder(Builder):
logger.info(color('redirect ') + uri + color(' - ' + text + ' to ' + info))
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
return ''
def get_outdated_docs(self):
- # type: () -> Set[unicode]
+ # type: () -> Set[str]
return self.env.found_docs
def prepare_writing(self, docnames):
- # type: (Set[unicode]) -> None
+ # type: (Set[str]) -> None
return
def write_doc(self, docname, doctree):
- # type: (unicode, nodes.Node) -> None
+ # type: (str, nodes.Node) -> None
logger.info('')
n = 0
for node in doctree.traverse(nodes.reference):
@@ -293,7 +292,7 @@ class CheckExternalLinksBuilder(Builder):
self.app.statuscode = 1
def write_entry(self, what, docname, line, uri):
- # type: (unicode, unicode, int, unicode) -> None
+ # type: (str, str, int, str) -> None
with open(path.join(self.outdir, 'output.txt'), 'a', encoding='utf-8') as output:
output.write("%s:%s: [%s] %s\n" % (self.env.doc2path(docname, None),
line, what, uri))
@@ -305,7 +304,7 @@ class CheckExternalLinksBuilder(Builder):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_builder(CheckExternalLinksBuilder)
app.add_config_value('linkcheck_ignore', [], None)
diff --git a/sphinx/builders/manpage.py b/sphinx/builders/manpage.py
index e5e9e187d..c4947b76b 100644
--- a/sphinx/builders/manpage.py
+++ b/sphinx/builders/manpage.py
@@ -29,7 +29,6 @@ if False:
from typing import Any, Dict, List, Set, Tuple, Union # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -44,7 +43,7 @@ class ManualPageBuilder(Builder):
epilog = __('The manual pages are in %(outdir)s.')
default_translator_class = ManualPageTranslator
- supported_image_types = [] # type: List[unicode]
+ supported_image_types = [] # type: List[str]
def init(self):
# type: () -> None
@@ -53,11 +52,11 @@ class ManualPageBuilder(Builder):
'will be written'))
def get_outdated_docs(self):
- # type: () -> Union[unicode, List[unicode]]
+ # type: () -> Union[str, List[str]]
return 'all manpages' # for now
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
if typ == 'token':
return ''
raise NoUri
@@ -96,7 +95,7 @@ class ManualPageBuilder(Builder):
encoding='utf-8')
tree = self.env.get_doctree(docname)
- docnames = set() # type: Set[unicode]
+ docnames = set() # type: Set[str]
largetree = inline_all_toctrees(self, docnames, docname, tree,
darkgreen, [docname])
largetree.settings = docsettings
@@ -115,7 +114,7 @@ class ManualPageBuilder(Builder):
def default_man_pages(config):
- # type: (Config) -> List[Tuple[unicode, unicode, unicode, List[unicode], int]]
+ # type: (Config) -> List[Tuple[str, str, str, List[str], int]]
""" Better default man_pages settings. """
filename = make_filename_from_project(config.project)
return [(config.master_doc, filename, '%s %s' % (config.project, config.release),
@@ -123,7 +122,7 @@ def default_man_pages(config):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_builder(ManualPageBuilder)
app.add_config_value('man_pages', default_man_pages, None)
diff --git a/sphinx/builders/qthelp.py b/sphinx/builders/qthelp.py
index 789a8113f..08042fd87 100644
--- a/sphinx/builders/qthelp.py
+++ b/sphinx/builders/qthelp.py
@@ -33,7 +33,6 @@ if False:
# For type annotation
from typing import Any, Dict, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -47,7 +46,7 @@ section_template = '<section title="%(title)s" ref="%(ref)s"/>'
def render_file(filename, **kwargs):
- # type: (unicode, Any) -> unicode
+ # type: (str, Any) -> str
pathname = os.path.join(package_dir, 'templates', 'qthelp', filename)
return SphinxRenderer.render_from_file(pathname, kwargs)
@@ -88,7 +87,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
# self.config.html_style = 'traditional.css'
def get_theme_config(self):
- # type: () -> Tuple[unicode, Dict]
+ # type: () -> Tuple[str, Dict]
return self.config.qthelp_theme, self.config.qthelp_theme_options
def handle_finish(self):
@@ -96,7 +95,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
self.build_qhp(self.outdir, self.config.qthelp_basename)
def build_qhp(self, outdir, outname):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
logger.info(__('writing project file...'))
# sections
@@ -170,8 +169,8 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
return True
def write_toc(self, node, indentlevel=4):
- # type: (nodes.Node, int) -> List[unicode]
- parts = [] # type: List[unicode]
+ # type: (nodes.Node, int) -> List[str]
+ parts = [] # type: List[str]
if isinstance(node, nodes.list_item) and self.isdocnode(node):
compact_paragraph = cast(addnodes.compact_paragraph, node[0])
reference = cast(nodes.reference, compact_paragraph[0])
@@ -205,7 +204,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
return parts
def keyword_item(self, name, ref):
- # type: (unicode, Any) -> unicode
+ # type: (str, Any) -> str
matchobj = _idpattern.match(name)
if matchobj:
groupdict = matchobj.groupdict()
@@ -228,8 +227,8 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
return item
def build_keywords(self, title, refs, subitems):
- # type: (unicode, List[Any], Any) -> List[unicode]
- keywords = [] # type: List[unicode]
+ # type: (str, List[Any], Any) -> List[str]
+ keywords = [] # type: List[str]
# if len(refs) == 0: # XXX
# write_param('See Also', title)
@@ -251,7 +250,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
return keywords
def get_project_files(self, outdir):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
if not outdir.endswith(os.sep):
outdir += os.sep
olen = len(outdir)
@@ -269,7 +268,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.setup_extension('sphinx.builders.html')
app.add_builder(QtHelpBuilder)
diff --git a/sphinx/builders/texinfo.py b/sphinx/builders/texinfo.py
index 82a1b8f3d..3413810c9 100644
--- a/sphinx/builders/texinfo.py
+++ b/sphinx/builders/texinfo.py
@@ -36,7 +36,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
from typing import Any, Dict, Iterable, List, Tuple, Union # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -61,22 +60,22 @@ class TexinfoBuilder(Builder):
def init(self):
# type: () -> None
- self.docnames = [] # type: Iterable[unicode]
- self.document_data = [] # type: List[Tuple[unicode, unicode, unicode, unicode, unicode, unicode, unicode, bool]] # NOQA
+ self.docnames = [] # type: Iterable[str]
+ self.document_data = [] # type: List[Tuple[str, str, str, str, str, str, str, bool]]
def get_outdated_docs(self):
- # type: () -> Union[unicode, List[unicode]]
+ # type: () -> Union[str, List[str]]
return 'all documents' # for now
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
if docname not in self.docnames:
raise NoUri
else:
return '%' + docname
def get_relative_uri(self, from_, to, typ=None):
- # type: (unicode, unicode, unicode) -> unicode
+ # type: (str, str, str) -> str
# ignore source path
return self.get_target_uri(to, typ)
@@ -88,7 +87,7 @@ class TexinfoBuilder(Builder):
'will be written'))
return
# assign subdirs to titles
- self.titles = [] # type: List[Tuple[unicode, unicode]]
+ self.titles = [] # type: List[Tuple[str, str]]
for entry in preliminary_document_data:
docname = entry[0]
if docname not in self.env.all_docs:
@@ -106,7 +105,7 @@ class TexinfoBuilder(Builder):
for entry in self.document_data:
docname, targetname, title, author = entry[:4]
targetname += '.texi'
- direntry = description = category = '' # type: unicode
+ direntry = description = category = ''
if len(entry) > 6:
direntry, description, category = entry[4:7]
toctree_only = False
@@ -139,7 +138,7 @@ class TexinfoBuilder(Builder):
logger.info(__("done"))
def assemble_doctree(self, indexfile, toctree_only, appendices):
- # type: (unicode, bool, List[unicode]) -> nodes.document
+ # type: (str, bool, List[str]) -> nodes.document
self.docnames = set([indexfile] + appendices)
logger.info(darkgreen(indexfile) + " ", nonl=1)
tree = self.env.get_doctree(indexfile)
@@ -212,7 +211,7 @@ class TexinfoBuilder(Builder):
def default_texinfo_documents(config):
- # type: (Config) -> List[Tuple[unicode, unicode, unicode, unicode, unicode, unicode, unicode]] # NOQA
+ # type: (Config) -> List[Tuple[str, str, str, str, str, str, str]]
""" Better default texinfo_documents settings. """
filename = make_filename_from_project(config.project)
return [(config.master_doc, filename, config.project, config.author, filename,
@@ -220,7 +219,7 @@ def default_texinfo_documents(config):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_builder(TexinfoBuilder)
app.add_config_value('texinfo_documents', default_texinfo_documents, None)
diff --git a/sphinx/builders/text.py b/sphinx/builders/text.py
index c79caafbf..8a10be1a6 100644
--- a/sphinx/builders/text.py
+++ b/sphinx/builders/text.py
@@ -24,7 +24,6 @@ if False:
from typing import Any, Dict, Iterator, Set, Tuple # NOQA
from docutils import nodes # NOQA
from sphinx.application import Sphinx # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -38,15 +37,15 @@ class TextBuilder(Builder):
allow_parallel = True
default_translator_class = TextTranslator
- current_docname = None # type: unicode
+ current_docname = None # type: str
def init(self):
# type: () -> None
# section numbers for headings in the currently visited document
- self.secnumbers = {} # type: Dict[unicode, Tuple[int, ...]]
+ self.secnumbers = {} # type: Dict[str, Tuple[int, ...]]
def get_outdated_docs(self):
- # type: () -> Iterator[unicode]
+ # type: () -> Iterator[str]
for docname in self.env.found_docs:
if docname not in self.env.all_docs:
yield docname
@@ -65,15 +64,15 @@ class TextBuilder(Builder):
pass
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
return ''
def prepare_writing(self, docnames):
- # type: (Set[unicode]) -> None
+ # type: (Set[str]) -> None
self.writer = TextWriter(self)
def write_doc(self, docname, doctree):
- # type: (unicode, nodes.Node) -> None
+ # type: (str, nodes.Node) -> None
self.current_docname = docname
self.secnumbers = self.env.toc_secnumbers.get(docname, {})
destination = StringOutput(encoding='utf-8')
@@ -92,7 +91,7 @@ class TextBuilder(Builder):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_builder(TextBuilder)
app.add_config_value('text_sectionchars', '*=-~"+`', 'env')
diff --git a/sphinx/builders/websupport.py b/sphinx/builders/websupport.py
index 077c1bdc2..ab2bf9eaa 100644
--- a/sphinx/builders/websupport.py
+++ b/sphinx/builders/websupport.py
@@ -13,11 +13,10 @@ if False:
# For type annotation
from typing import Any, Dict # NOQA
from sphinx.application import Sphinx # NOQA
- from sphinx.util.typing import unicode # NOQA
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
try:
from sphinxcontrib.websupport.builder import WebSupportBuilder
app.add_builder(WebSupportBuilder)
diff --git a/sphinx/builders/xml.py b/sphinx/builders/xml.py
index 839f3ea49..b36aede1c 100644
--- a/sphinx/builders/xml.py
+++ b/sphinx/builders/xml.py
@@ -26,7 +26,6 @@ if False:
from typing import Any, Dict, Iterator, Set, Type # NOQA
from docutils.writers.xml import BaseXMLWriter # NOQA
from sphinx.application import Sphinx # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -50,7 +49,7 @@ class XMLBuilder(Builder):
pass
def get_outdated_docs(self):
- # type: () -> Iterator[unicode]
+ # type: () -> Iterator[str]
for docname in self.env.found_docs:
if docname not in self.env.all_docs:
yield docname
@@ -69,15 +68,15 @@ class XMLBuilder(Builder):
pass
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
return docname
def prepare_writing(self, docnames):
- # type: (Set[unicode]) -> None
+ # type: (Set[str]) -> None
self.writer = self._writer_class(self)
def write_doc(self, docname, doctree):
- # type: (unicode, nodes.Node) -> None
+ # type: (str, nodes.Node) -> None
# work around multiple string % tuple issues in docutils;
# replace tuples in attribute values with lists
doctree = doctree.deepcopy()
@@ -119,7 +118,7 @@ class PseudoXMLBuilder(XMLBuilder):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_builder(XMLBuilder)
app.add_builder(PseudoXMLBuilder)
diff --git a/sphinx/cmd/build.py b/sphinx/cmd/build.py
index 8b815ef28..7127dbe37 100644
--- a/sphinx/cmd/build.py
+++ b/sphinx/cmd/build.py
@@ -8,7 +8,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import argparse
import locale
@@ -199,15 +198,15 @@ files can be built by specifying individual filenames.
return parser
-def make_main(argv=sys.argv[1:]): # type: ignore
- # type: (List[unicode]) -> int
+def make_main(argv=sys.argv[1:]):
+ # type: (List[str]) -> int
"""Sphinx build "make mode" entry."""
from sphinx.cmd import make_mode
return make_mode.run_make_mode(argv[1:])
-def build_main(argv=sys.argv[1:]): # type: ignore
- # type: (List[unicode]) -> int
+def build_main(argv=sys.argv[1:]):
+ # type: (List[str]) -> int
"""Sphinx build "main" command-line entry."""
parser = get_parser()
@@ -292,8 +291,8 @@ def build_main(argv=sys.argv[1:]): # type: ignore
return 2
-def main(argv=sys.argv[1:]): # type: ignore
- # type: (List[unicode]) -> int
+def main(argv=sys.argv[1:]):
+ # type: (List[str]) -> int
locale.setlocale(locale.LC_ALL, '')
sphinx.locale.init_console(os.path.join(package_dir, 'locale'), 'sphinx')
diff --git a/sphinx/cmd/make_mode.py b/sphinx/cmd/make_mode.py
index 27c4165ab..a4a61e5cb 100644
--- a/sphinx/cmd/make_mode.py
+++ b/sphinx/cmd/make_mode.py
@@ -14,7 +14,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import os
import subprocess
@@ -29,7 +28,6 @@ from sphinx.util.osutil import cd, rmtree
if False:
# For type annotation
from typing import List # NOQA
- from sphinx.util.typing import unicode # NOQA
BUILDERS = [
@@ -63,14 +61,14 @@ BUILDERS = [
class Make:
def __init__(self, srcdir, builddir, opts):
- # type: (unicode, unicode, List[unicode]) -> None
+ # type: (str, str, List[str]) -> None
self.srcdir = srcdir
self.builddir = builddir
self.opts = opts
self.makecmd = os.environ.get('MAKE', 'make') # refer $MAKE to determine make command
def builddir_join(self, *comps):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return path.join(self.builddir, *comps)
def build_clean(self):
@@ -147,7 +145,7 @@ class Make:
return 0
def run_generic_build(self, builder, doctreedir=None):
- # type: (unicode, unicode) -> int
+ # type: (str, str) -> int
# compatibility with old Makefile
papersize = os.getenv('PAPER', '')
opts = self.opts
@@ -164,7 +162,7 @@ class Make:
def run_make_mode(args):
- # type: (List[unicode]) -> int
+ # type: (List[str]) -> int
if len(args) < 3:
print('Error: at least 3 arguments (builder, source '
'dir, build dir) are required.', file=sys.stderr)
diff --git a/sphinx/cmd/quickstart.py b/sphinx/cmd/quickstart.py
index ace0ecdfc..ce66f7352 100644
--- a/sphinx/cmd/quickstart.py
+++ b/sphinx/cmd/quickstart.py
@@ -9,7 +9,6 @@
:license: BSD, see LICENSE for details.
"""
from __future__ import absolute_import
-from __future__ import print_function
import argparse
import locale
@@ -51,7 +50,6 @@ from sphinx.util.template import SphinxRenderer
if False:
# For type annotation
from typing import Any, Callable, Dict, List, Pattern, Union # NOQA
- from sphinx.util.typing import unicode # NOQA
TERM_ENCODING = getattr(sys.stdin, 'encoding', None)
@@ -92,7 +90,7 @@ else:
# function to get input from terminal -- overridden by the test suite
def term_input(prompt):
- # type: (unicode) -> unicode
+ # type: (str) -> str
if sys.platform == 'win32':
# Important: On windows, readline is not enabled by default. In these
# environment, escape sequences have been broken. To avoid the
@@ -108,7 +106,7 @@ class ValidationError(Exception):
def is_path(x):
- # type: (unicode) -> unicode
+ # type: (str) -> str
x = path.expanduser(x)
if not path.isdir(x):
raise ValidationError(__("Please enter a valid path name."))
@@ -116,21 +114,21 @@ def is_path(x):
def allow_empty(x):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return x
def nonempty(x):
- # type: (unicode) -> unicode
+ # type: (str) -> str
if not x:
raise ValidationError(__("Please enter some text."))
return x
def choice(*l):
- # type: (unicode) -> Callable[[unicode], unicode]
+ # type: (str) -> Callable[[str], str]
def val(x):
- # type: (unicode) -> unicode
+ # type: (str) -> str
if x not in l:
raise ValidationError(__('Please enter one of %s.') % ', '.join(l))
return x
@@ -138,14 +136,14 @@ def choice(*l):
def boolean(x):
- # type: (unicode) -> bool
+ # type: (str) -> bool
if x.upper() not in ('Y', 'YES', 'N', 'NO'):
raise ValidationError(__("Please enter either 'y' or 'n'."))
return x.upper() in ('Y', 'YES')
def suffix(x):
- # type: (unicode) -> unicode
+ # type: (str) -> str
if not (x[0:1] == '.' and len(x) > 1):
raise ValidationError(__("Please enter a file suffix, "
"e.g. '.rst' or '.txt'."))
@@ -153,12 +151,12 @@ def suffix(x):
def ok(x):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return x
def term_decode(text):
- # type: (Union[bytes,unicode]) -> unicode
+ # type: (Union[bytes,str]) -> str
if isinstance(text, text_type):
return text
@@ -180,10 +178,10 @@ def term_decode(text):
def do_prompt(text, default=None, validator=nonempty):
- # type: (unicode, unicode, Callable[[unicode], Any]) -> Union[unicode, bool]
+ # type: (str, str, Callable[[str], Any]) -> Union[str, bool]
while True:
if default is not None:
- prompt = PROMPT_PREFIX + '%s [%s]: ' % (text, default) # type: unicode
+ prompt = PROMPT_PREFIX + '%s [%s]: ' % (text, default)
else:
prompt = PROMPT_PREFIX + text + ': '
if USE_LIBEDIT:
@@ -207,7 +205,7 @@ def do_prompt(text, default=None, validator=nonempty):
def convert_python_source(source, rex=re.compile(r"[uU]('.*?')")):
- # type: (unicode, Pattern) -> unicode
+ # type: (str, Pattern) -> str
# remove Unicode literal prefixes
warnings.warn('convert_python_source() is deprecated.',
RemovedInSphinx40Warning)
@@ -216,12 +214,12 @@ def convert_python_source(source, rex=re.compile(r"[uU]('.*?')")):
class QuickstartRenderer(SphinxRenderer):
def __init__(self, templatedir):
- # type: (unicode) -> None
+ # type: (str) -> None
self.templatedir = templatedir or ''
super(QuickstartRenderer, self).__init__()
def render(self, template_name, context):
- # type: (unicode, Dict) -> unicode
+ # type: (str, Dict) -> str
user_template = path.join(self.templatedir, path.basename(template_name))
if self.templatedir and path.exists(user_template):
return self.render_from_file(user_template, context)
@@ -374,7 +372,7 @@ directly.'''))
def generate(d, overwrite=True, silent=False, templatedir=None):
- # type: (Dict, bool, bool, unicode) -> None
+ # type: (Dict, bool, bool, str) -> None
"""Generate project based on values in *d*."""
template = QuickstartRenderer(templatedir=templatedir)
@@ -426,7 +424,7 @@ def generate(d, overwrite=True, silent=False, templatedir=None):
ensuredir(path.join(srcdir, d['dot'] + 'static'))
def write_file(fpath, content, newline=None):
- # type: (unicode, unicode, unicode) -> None
+ # type: (str, str, str) -> None
if overwrite or not path.isfile(fpath):
if 'quiet' not in d:
print(__('Creating file %s.') % fpath)
diff --git a/sphinx/cmdline.py b/sphinx/cmdline.py
index 252f95bbc..545ae4e47 100644
--- a/sphinx/cmdline.py
+++ b/sphinx/cmdline.py
@@ -9,7 +9,6 @@
:license: BSD, see LICENSE for details.
"""
from __future__ import absolute_import
-from __future__ import print_function
import sys
import warnings
@@ -45,8 +44,8 @@ def get_parser():
return build.get_parser()
-def main(argv=sys.argv[1:]): # type: ignore
- # type: (List[unicode]) -> int
+def main(argv=sys.argv[1:]):
+ # type: (List[str]) -> int
warnings.warn('sphinx.cmdline module is deprecated. Use sphinx.cmd.build instead.',
RemovedInSphinx30Warning, stacklevel=2)
return build.main(argv)
diff --git a/sphinx/config.py b/sphinx/config.py
index 017307cb3..ad3b2c493 100644
--- a/sphinx/config.py
+++ b/sphinx/config.py
@@ -17,7 +17,7 @@ from collections import OrderedDict
from os import path, getenv
from typing import Any, NamedTuple, Union
-from six import text_type, integer_types
+from six import text_type
from sphinx.deprecation import RemovedInSphinx30Warning, RemovedInSphinx40Warning
from sphinx.errors import ConfigError, ExtensionError
@@ -33,7 +33,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.tags import Tags # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -68,11 +67,11 @@ class ENUM:
app.add_config_value('latex_show_urls', 'no', None, ENUM('no', 'footnote', 'inline'))
"""
def __init__(self, *candidates):
- # type: (unicode) -> None
+ # type: (str) -> None
self.candidates = candidates
def match(self, value):
- # type: (Union[unicode,List,Tuple]) -> bool
+ # type: (Union[str, List, Tuple]) -> bool
if isinstance(value, (list, tuple)):
return all(item in self.candidates for item in value)
else:
@@ -155,7 +154,7 @@ class Config:
'smartquotes_excludes': ({'languages': ['ja'],
'builders': ['man', 'text']},
'env', []),
- } # type: Dict[unicode, Tuple]
+ } # type: Dict[str, Tuple]
def __init__(self, *args):
# type: (Any) -> None
@@ -166,7 +165,7 @@ class Config:
RemovedInSphinx30Warning, stacklevel=2)
dirname, filename, overrides, tags = args
if dirname is None:
- config = {} # type: Dict[unicode, Any]
+ config = {} # type: Dict[str, Any]
else:
config = eval_config_file(path.join(dirname, filename), tags)
else:
@@ -188,11 +187,11 @@ class Config:
config['extensions'] = overrides.pop('extensions').split(',')
else:
config['extensions'] = overrides.pop('extensions')
- self.extensions = config.get('extensions', []) # type: List[unicode]
+ self.extensions = config.get('extensions', []) # type: List[str]
@classmethod
def read(cls, confdir, overrides=None, tags=None):
- # type: (unicode, Dict, Tags) -> Config
+ # type: (str, Dict, Tags) -> Config
"""Create a Config object from configuration file."""
filename = path.join(confdir, CONFIG_FILENAME)
namespace = eval_config_file(filename, tags)
@@ -211,7 +210,7 @@ class Config:
check_unicode(self)
def convert_overrides(self, name, value):
- # type: (unicode, Any) -> Any
+ # type: (str, Any) -> Any
if not isinstance(value, str):
return value
else:
@@ -224,7 +223,7 @@ class Config:
(name, name + '.key=value'))
elif isinstance(defvalue, list):
return value.split(',')
- elif isinstance(defvalue, integer_types):
+ elif isinstance(defvalue, int):
try:
return int(value)
except ValueError:
@@ -277,7 +276,7 @@ class Config:
self.__dict__[name] = config[name]
def __getattr__(self, name):
- # type: (unicode) -> Any
+ # type: (str) -> Any
if name.startswith('_'):
raise AttributeError(name)
if name not in self.values:
@@ -288,19 +287,19 @@ class Config:
return default
def __getitem__(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return getattr(self, name)
def __setitem__(self, name, value):
- # type: (unicode, Any) -> None
+ # type: (str, Any) -> None
setattr(self, name, value)
def __delitem__(self, name):
- # type: (unicode) -> None
+ # type: (str) -> None
delattr(self, name)
def __contains__(self, name):
- # type: (unicode) -> bool
+ # type: (str) -> bool
return name in self.values
def __iter__(self):
@@ -309,14 +308,14 @@ class Config:
yield ConfigValue(name, getattr(self, name), value[1])
def add(self, name, default, rebuild, types):
- # type: (unicode, Any, Union[bool, unicode], Any) -> None
+ # type: (str, Any, Union[bool, str], Any) -> None
if name in self.values:
raise ExtensionError(__('Config value %r already present') % name)
else:
self.values[name] = (default, rebuild, types)
def filter(self, rebuild):
- # type: (Union[unicode, List[unicode]]) -> Iterator[ConfigValue]
+ # type: (Union[str, List[str]]) -> Iterator[ConfigValue]
if isinstance(rebuild, str):
rebuild = [rebuild]
return (value for value in self if value.rebuild in rebuild)
@@ -351,9 +350,9 @@ class Config:
def eval_config_file(filename, tags):
- # type: (unicode, Tags) -> Dict[unicode, Any]
+ # type: (str, Tags) -> Dict[str, Any]
"""Evaluate a config file."""
- namespace = {} # type: Dict[unicode, Any]
+ namespace = {} # type: Dict[str, Any]
namespace['__file__'] = filename
namespace['tags'] = tags
@@ -510,7 +509,7 @@ def check_primary_domain(app, config):
def check_master_doc(app, env, added, changed, removed):
- # type: (Sphinx, BuildEnvironment, Set[unicode], Set[unicode], Set[unicode]) -> Set[unicode] # NOQA
+ # type: (Sphinx, BuildEnvironment, Set[str], Set[str], Set[str]) -> Set[str]
"""Adjust master_doc to 'contents' to support an old project which does not have
no master_doc setting.
"""
@@ -525,7 +524,7 @@ def check_master_doc(app, env, added, changed, removed):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.connect('config-inited', convert_source_suffix)
app.connect('config-inited', init_numfig_format)
app.connect('config-inited', correct_copyright_year)
diff --git a/sphinx/deprecation.py b/sphinx/deprecation.py
index ebf053e7b..ce84f56f5 100644
--- a/sphinx/deprecation.py
+++ b/sphinx/deprecation.py
@@ -13,9 +13,7 @@ import warnings
if False:
# For type annotation
- # note: Don't use typing.TYPE_CHECK here (for py27 and py34).
from typing import Any, Dict, Type # NOQA
- from sphinx.util.typing import unicode # NOQA
class RemovedInSphinx30Warning(PendingDeprecationWarning):
@@ -39,22 +37,22 @@ class DeprecatedDict(dict):
super(DeprecatedDict, self).__init__(data)
def __setitem__(self, key, value):
- # type: (unicode, Any) -> None
+ # type: (str, Any) -> None
warnings.warn(self.message, self.warning, stacklevel=2)
super(DeprecatedDict, self).__setitem__(key, value)
def setdefault(self, key, default=None):
- # type: (unicode, Any) -> None
+ # type: (str, Any) -> None
warnings.warn(self.message, self.warning, stacklevel=2)
return super(DeprecatedDict, self).setdefault(key, default)
def __getitem__(self, key):
- # type: (unicode) -> None
+ # type: (str) -> None
warnings.warn(self.message, self.warning, stacklevel=2)
return super(DeprecatedDict, self).__getitem__(key)
def get(self, key, default=None):
- # type: (unicode, Any) -> None
+ # type: (str, Any) -> None
warnings.warn(self.message, self.warning, stacklevel=2)
return super(DeprecatedDict, self).get(key, default)
diff --git a/sphinx/directives/__init__.py b/sphinx/directives/__init__.py
index f42e2cdf9..ae3f06752 100644
--- a/sphinx/directives/__init__.py
+++ b/sphinx/directives/__init__.py
@@ -39,7 +39,7 @@ if False:
from sphinx.config import Config # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.docfields import Field # NOQA
- from sphinx.util.typing import DirectiveOption, unicode # NOQA
+ from sphinx.util.typing import DirectiveOption # NOQA
# RE to strip backslash escapes
@@ -64,12 +64,12 @@ class ObjectDescription(SphinxDirective):
# types of doc fields that this directive handles, see sphinx.util.docfields
doc_field_types = [] # type: List[Field]
- domain = None # type: unicode
- objtype = None # type: unicode
+ domain = None # type: str
+ objtype = None # type: str
indexnode = None # type: addnodes.index
def get_signatures(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
"""
Retrieve the signatures to document from the directive arguments. By
default, signatures are given as arguments, one per line.
@@ -81,7 +81,7 @@ class ObjectDescription(SphinxDirective):
return [strip_backslash_re.sub(r'\1', line.strip()) for line in lines]
def handle_signature(self, sig, signode):
- # type: (unicode, addnodes.desc_signature) -> Any
+ # type: (str, addnodes.desc_signature) -> Any
"""
Parse the signature *sig* into individual nodes and append them to
*signode*. If ValueError is raised, parsing is aborted and the whole
@@ -94,7 +94,7 @@ class ObjectDescription(SphinxDirective):
raise ValueError
def add_target_and_index(self, name, sig, signode):
- # type: (Any, unicode, addnodes.desc_signature) -> None
+ # type: (Any, str, addnodes.desc_signature) -> None
"""
Add cross-reference IDs and entries to self.indexnode, if applicable.
@@ -150,7 +150,7 @@ class ObjectDescription(SphinxDirective):
node['objtype'] = node['desctype'] = self.objtype
node['noindex'] = noindex = ('noindex' in self.options)
- self.names = [] # type: List[unicode]
+ self.names = [] # type: List[str]
signatures = self.get_signatures()
for i, sig in enumerate(signatures):
# add a signature node for each signature in the current unit
@@ -246,7 +246,7 @@ class DefaultDomain(SphinxDirective):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
directives.register_directive('default-role', DefaultRole)
directives.register_directive('default-domain', DefaultDomain)
directives.register_directive('describe', ObjectDescription)
diff --git a/sphinx/directives/code.py b/sphinx/directives/code.py
index 914d5d4b0..05dafb623 100644
--- a/sphinx/directives/code.py
+++ b/sphinx/directives/code.py
@@ -29,7 +29,6 @@ if False:
from typing import Any, Dict, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -67,7 +66,7 @@ class HighlightLang(Highlight):
def dedent_lines(lines, dedent, location=None):
- # type: (List[unicode], int, Any) -> List[unicode]
+ # type: (List[str], int, Any) -> List[str]
if not dedent:
return lines
@@ -85,7 +84,7 @@ def dedent_lines(lines, dedent, location=None):
def container_wrapper(directive, literal_node, caption):
- # type: (SphinxDirective, nodes.Node, unicode) -> nodes.container
+ # type: (SphinxDirective, nodes.Node, str) -> nodes.container
container_node = nodes.container('', literal_block=True,
classes=['literal-block-wrapper'])
parsed = nodes.Element()
@@ -198,7 +197,7 @@ class LiteralIncludeReader:
]
def __init__(self, filename, options, config):
- # type: (unicode, Dict, Config) -> None
+ # type: (str, Dict, Config) -> None
self.filename = filename
self.options = options
self.encoding = options.get('encoding', config.source_encoding)
@@ -214,10 +213,10 @@ class LiteralIncludeReader:
(option1, option2))
def read_file(self, filename, location=None):
- # type: (unicode, Any) -> List[unicode]
+ # type: (str, Any) -> List[str]
try:
with open(filename, encoding=self.encoding, errors='strict') as f:
- text = f.read() # type: unicode
+ text = f.read()
if 'tab-width' in self.options:
text = text.expandtabs(self.options['tab-width'])
@@ -230,7 +229,7 @@ class LiteralIncludeReader:
(self.encoding, filename))
def read(self, location=None):
- # type: (Any) -> Tuple[unicode, int]
+ # type: (Any) -> Tuple[str, int]
if 'diff' in self.options:
lines = self.show_diff()
else:
@@ -248,7 +247,7 @@ class LiteralIncludeReader:
return ''.join(lines), len(lines)
def show_diff(self, location=None):
- # type: (Any) -> List[unicode]
+ # type: (Any) -> List[str]
new_lines = self.read_file(self.filename)
old_filename = self.options.get('diff')
old_lines = self.read_file(old_filename)
@@ -256,7 +255,7 @@ class LiteralIncludeReader:
return list(diff)
def pyobject_filter(self, lines, location=None):
- # type: (List[unicode], Any) -> List[unicode]
+ # type: (List[str], Any) -> List[str]
pyobject = self.options.get('pyobject')
if pyobject:
from sphinx.pycode import ModuleAnalyzer
@@ -275,7 +274,7 @@ class LiteralIncludeReader:
return lines
def lines_filter(self, lines, location=None):
- # type: (List[unicode], Any) -> List[unicode]
+ # type: (List[str], Any) -> List[str]
linespec = self.options.get('lines')
if linespec:
linelist = parselinenos(linespec, len(lines))
@@ -300,7 +299,7 @@ class LiteralIncludeReader:
return lines
def start_filter(self, lines, location=None):
- # type: (List[unicode], Any) -> List[unicode]
+ # type: (List[str], Any) -> List[str]
if 'start-at' in self.options:
start = self.options.get('start-at')
inclusive = False
@@ -332,7 +331,7 @@ class LiteralIncludeReader:
return lines
def end_filter(self, lines, location=None):
- # type: (List[unicode], Any) -> List[unicode]
+ # type: (List[str], Any) -> List[str]
if 'end-at' in self.options:
end = self.options.get('end-at')
inclusive = True
@@ -360,7 +359,7 @@ class LiteralIncludeReader:
return lines
def prepend_filter(self, lines, location=None):
- # type: (List[unicode], Any) -> List[unicode]
+ # type: (List[str], Any) -> List[str]
prepend = self.options.get('prepend')
if prepend:
lines.insert(0, prepend + '\n')
@@ -368,7 +367,7 @@ class LiteralIncludeReader:
return lines
def append_filter(self, lines, location=None):
- # type: (List[unicode], Any) -> List[unicode]
+ # type: (List[str], Any) -> List[str]
append = self.options.get('append')
if append:
lines.append(append + '\n')
@@ -376,7 +375,7 @@ class LiteralIncludeReader:
return lines
def dedent_filter(self, lines, location=None):
- # type: (List[unicode], Any) -> List[unicode]
+ # type: (List[str], Any) -> List[str]
if 'dedent' in self.options:
return dedent_lines(lines, self.options.get('dedent'), location=location)
else:
@@ -470,7 +469,7 @@ class LiteralInclude(SphinxDirective):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
directives.register_directive('highlight', Highlight)
directives.register_directive('highlightlang', HighlightLang)
directives.register_directive('code-block', CodeBlock)
diff --git a/sphinx/directives/other.py b/sphinx/directives/other.py
index f5c0cccda..89fdd4df5 100644
--- a/sphinx/directives/other.py
+++ b/sphinx/directives/other.py
@@ -29,14 +29,13 @@ if False:
# For type annotation
from typing import Any, Dict, Generator, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
- from sphinx.util.typing import unicode # NOQA
glob_re = re.compile(r'.*[*?\[].*')
def int_or_nothing(argument):
- # type: (unicode) -> int
+ # type: (str) -> int
if not argument:
return 999
return int(argument)
@@ -396,7 +395,7 @@ class Include(BaseInclude, SphinxDirective):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
directives.register_directive('toctree', TocTree)
directives.register_directive('sectionauthor', Author)
directives.register_directive('moduleauthor', Author)
diff --git a/sphinx/directives/patches.py b/sphinx/directives/patches.py
index 1b9d52ac7..7992d30d9 100644
--- a/sphinx/directives/patches.py
+++ b/sphinx/directives/patches.py
@@ -22,7 +22,6 @@ if False:
# For type annotation
from typing import Dict, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
- from sphinx.util.typing import unicode # NOQA
class Figure(images.Figure):
diff --git a/sphinx/domains/__init__.py b/sphinx/domains/__init__.py
index 0b7f9c2cb..5c7965d40 100644
--- a/sphinx/domains/__init__.py
+++ b/sphinx/domains/__init__.py
@@ -25,7 +25,7 @@ if False:
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.roles import XRefRole # NOQA
- from sphinx.util.typing import RoleFunction, unicode # NOQA
+ from sphinx.util.typing import RoleFunction # NOQA
class ObjType:
@@ -48,8 +48,8 @@ class ObjType:
}
def __init__(self, lname, *roles, **attrs):
- # type: (unicode, Any, Any) -> None
- self.lname = lname # type: unicode
+ # type: (str, Any, Any) -> None
+ self.lname = lname
self.roles = roles # type: Tuple
self.attrs = self.known_attrs.copy() # type: Dict
self.attrs.update(attrs)
@@ -79,9 +79,9 @@ class Index:
domains using :meth:`~sphinx.application.Sphinx.add_index_to_domain()`.
"""
- name = None # type: unicode
- localname = None # type: unicode
- shortname = None # type: unicode
+ name = None # type: str
+ localname = None # type: str
+ shortname = None # type: str
def __init__(self, domain):
# type: (Domain) -> None
@@ -91,7 +91,7 @@ class Index:
self.domain = domain
def generate(self, docnames=None):
- # type: (Iterable[unicode]) -> Tuple[List[Tuple[unicode, List[IndexEntry]]], bool]
+ # type: (Iterable[str]) -> Tuple[List[Tuple[str, List[IndexEntry]]], bool]
"""Return entries for the index given by *name*. If *docnames* is
given, restrict to entries referring to these docnames.
@@ -150,17 +150,17 @@ class Domain:
#: domain label: longer, more descriptive (used in messages)
label = ''
#: type (usually directive) name -> ObjType instance
- object_types = {} # type: Dict[unicode, ObjType]
+ object_types = {} # type: Dict[str, ObjType]
#: directive name -> directive class
- directives = {} # type: Dict[unicode, Any]
+ directives = {} # type: Dict[str, Any]
#: role name -> role callable
- roles = {} # type: Dict[unicode, Union[RoleFunction, XRefRole]]
+ roles = {} # type: Dict[str, Union[RoleFunction, XRefRole]]
#: a list of Index subclasses
indices = [] # type: List[Type[Index]]
#: role name -> a warning message if reference is missing
- dangling_warnings = {} # type: Dict[unicode, unicode]
+ dangling_warnings = {} # type: Dict[str, str]
#: node_class -> (enum_node_type, title_getter)
- enumerable_nodes = {} # type: Dict[Type[nodes.Node], Tuple[unicode, Callable]]
+ enumerable_nodes = {} # type: Dict[Type[nodes.Node], Tuple[str, Callable]]
#: data value for a fresh environment
initial_data = {} # type: Dict
@@ -172,10 +172,10 @@ class Domain:
def __init__(self, env):
# type: (BuildEnvironment) -> None
self.env = env # type: BuildEnvironment
- self._role_cache = {} # type: Dict[unicode, Callable]
- self._directive_cache = {} # type: Dict[unicode, Callable]
- self._role2type = {} # type: Dict[unicode, List[unicode]]
- self._type2role = {} # type: Dict[unicode, unicode]
+ self._role_cache = {} # type: Dict[str, Callable]
+ self._directive_cache = {} # type: Dict[str, Callable]
+ self._role2type = {} # type: Dict[str, List[str]]
+ self._type2role = {} # type: Dict[str, str]
# convert class variables to instance one (to enhance through API)
self.object_types = dict(self.object_types)
@@ -196,11 +196,11 @@ class Domain:
for rolename in obj.roles:
self._role2type.setdefault(rolename, []).append(name)
self._type2role[name] = obj.roles[0] if obj.roles else ''
- self.objtypes_for_role = self._role2type.get # type: Callable[[unicode], List[unicode]] # NOQA
- self.role_for_objtype = self._type2role.get # type: Callable[[unicode], unicode]
+ self.objtypes_for_role = self._role2type.get # type: Callable[[str], List[str]]
+ self.role_for_objtype = self._type2role.get # type: Callable[[str], str]
def add_object_type(self, name, objtype):
- # type: (unicode, ObjType) -> None
+ # type: (str, ObjType) -> None
"""Add an object type."""
self.object_types[name] = objtype
if objtype.roles:
@@ -212,7 +212,7 @@ class Domain:
self._role2type.setdefault(role, []).append(name)
def role(self, name):
- # type: (unicode) -> RoleFunction
+ # type: (str) -> RoleFunction
"""Return a role adapter function that always gives the registered
role its full name ('domain:name') as the first argument.
"""
@@ -223,14 +223,14 @@ class Domain:
fullname = '%s:%s' % (self.name, name)
def role_adapter(typ, rawtext, text, lineno, inliner, options={}, content=[]):
- # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
+ # type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
return self.roles[name](fullname, rawtext, text, lineno,
inliner, options, content)
self._role_cache[name] = role_adapter
return role_adapter
def directive(self, name):
- # type: (unicode) -> Callable
+ # type: (str) -> Callable
"""Return a directive adapter class that always gives the registered
directive its full name ('domain:name') as ``self.name``.
"""
@@ -252,12 +252,12 @@ class Domain:
# methods that should be overwritten
def clear_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Remove traces of a document in the domain-specific inventories."""
pass
def merge_domaindata(self, docnames, otherdata):
- # type: (List[unicode], Dict) -> None
+ # type: (List[str], Dict) -> None
"""Merge in data regarding *docnames* from a different domaindata
inventory (coming from a subprocess in parallel builds).
"""
@@ -266,7 +266,7 @@ class Domain:
self.__class__)
def process_doc(self, env, docname, document):
- # type: (BuildEnvironment, unicode, nodes.document) -> None
+ # type: (BuildEnvironment, str, nodes.document) -> None
"""Process a document after it is read by the environment."""
pass
@@ -284,7 +284,7 @@ class Domain:
def resolve_xref(self, env, fromdocname, builder,
typ, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
"""Resolve the pending_xref *node* with the given *typ* and *target*.
This method should return a new node, to replace the xref node,
@@ -301,7 +301,7 @@ class Domain:
pass
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, addnodes.pending_xref, nodes.Element) -> List[Tuple[unicode, nodes.Element]] # NOQA
+ # type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
"""Resolve the pending_xref *node* with the given *target*.
The reference comes from an "any" or similar role, which means that we
@@ -318,7 +318,7 @@ class Domain:
raise NotImplementedError
def get_objects(self):
- # type: () -> Iterable[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
+ # type: () -> Iterable[Tuple[str, str, str, str, str, int]]
"""Return an iterable of "object descriptions", which are tuples with
five items:
@@ -338,19 +338,19 @@ class Domain:
return []
def get_type_name(self, type, primary=False):
- # type: (ObjType, bool) -> unicode
+ # type: (ObjType, bool) -> str
"""Return full name for given ObjType."""
if primary:
return type.lname
return _('%s %s') % (self.label, type.lname)
def get_enumerable_node_type(self, node):
- # type: (nodes.Node) -> unicode
+ # type: (nodes.Node) -> str
"""Get type of enumerable nodes (experimental)."""
enum_node_type, _ = self.enumerable_nodes.get(node.__class__, (None, None))
return enum_node_type
def get_full_qualified_name(self, node):
- # type: (nodes.Element) -> unicode
+ # type: (nodes.Element) -> str
"""Return full qualified name for given node."""
return None
diff --git a/sphinx/domains/c.py b/sphinx/domains/c.py
index 68b5c2065..fe5ef6f12 100644
--- a/sphinx/domains/c.py
+++ b/sphinx/domains/c.py
@@ -28,7 +28,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
# RE to split at word boundaries
@@ -82,7 +81,7 @@ class CObject(ObjectDescription):
))
def _parse_type(self, node, ctype):
- # type: (nodes.Element, unicode) -> None
+ # type: (nodes.Element, str) -> None
# add cross-ref nodes for all words
for part in [_f for _f in wsplit_re.split(ctype) if _f]:
tnode = nodes.Text(part, part)
@@ -97,7 +96,7 @@ class CObject(ObjectDescription):
node += tnode
def _parse_arglist(self, arglist):
- # type: (unicode) -> Iterator[unicode]
+ # type: (str) -> Iterator[str]
while True:
m = c_funcptr_arg_sig_re.match(arglist)
if m:
@@ -116,7 +115,7 @@ class CObject(ObjectDescription):
break
def handle_signature(self, sig, signode):
- # type: (unicode, addnodes.desc_signature) -> unicode
+ # type: (str, addnodes.desc_signature) -> str
"""Transform a C signature into RST nodes."""
# first try the function pointer signature regex, it's more specific
m = c_funcptr_sig_re.match(sig)
@@ -186,7 +185,7 @@ class CObject(ObjectDescription):
return fullname
def get_index_text(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
if self.objtype == 'function':
return _('%s (C function)') % name
elif self.objtype == 'member':
@@ -201,7 +200,7 @@ class CObject(ObjectDescription):
return ''
def add_target_and_index(self, name, sig, signode):
- # type: (unicode, unicode, addnodes.desc_signature) -> None
+ # type: (str, str, addnodes.desc_signature) -> None
# for C API items we add a prefix since names are usually not qualified
# by a module name and so easily clash with e.g. section titles
targetname = 'c.' + name
@@ -239,7 +238,7 @@ class CObject(ObjectDescription):
class CXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
- # type: (BuildEnvironment, nodes.Element, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
+ # type: (BuildEnvironment, nodes.Element, bool, str, str) -> Tuple[str, str]
if not has_explicit_title:
target = target.lstrip('~') # only has a meaning for the title
# if the first character is a tilde, don't display the module/class
@@ -280,16 +279,16 @@ class CDomain(Domain):
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
- } # type: Dict[unicode, Dict[unicode, Tuple[unicode, Any]]]
+ } # type: Dict[str, Dict[str, Tuple[str, Any]]]
def clear_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
for fullname, (fn, _l) in list(self.data['objects'].items()):
if fn == docname:
del self.data['objects'][fullname]
def merge_domaindata(self, docnames, otherdata):
- # type: (List[unicode], Dict) -> None
+ # type: (List[str], Dict) -> None
# XXX check duplicates
for fullname, (fn, objtype) in otherdata['objects'].items():
if fn in docnames:
@@ -297,7 +296,7 @@ class CDomain(Domain):
def resolve_xref(self, env, fromdocname, builder,
typ, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# strip pointer asterisk
target = target.rstrip(' *')
        # because TypedField can generate xrefs
@@ -311,7 +310,7 @@ class CDomain(Domain):
def resolve_any_xref(self, env, fromdocname, builder, target,
node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, addnodes.pending_xref, nodes.Element) -> List[Tuple[unicode, nodes.Element]] # NOQA
+ # type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
# strip pointer asterisk
target = target.rstrip(' *')
if target not in self.data['objects']:
@@ -322,13 +321,13 @@ class CDomain(Domain):
contnode, target))]
def get_objects(self):
- # type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
+ # type: () -> Iterator[Tuple[str, str, str, str, str, int]]
for refname, (docname, type) in list(self.data['objects'].items()):
yield (refname, refname, type, docname, 'c.' + refname, 1)
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_domain(CDomain)
return {
diff --git a/sphinx/domains/changeset.py b/sphinx/domains/changeset.py
index 8d1bb26cf..1c38ee2af 100644
--- a/sphinx/domains/changeset.py
+++ b/sphinx/domains/changeset.py
@@ -27,14 +27,13 @@ if False:
from typing import Any, Dict, List # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
versionlabels = {
'versionadded': _('New in version %s'),
'versionchanged': _('Changed in version %s'),
'deprecated': _('Deprecated since version %s'),
-} # type: Dict[unicode, unicode]
+}
locale.versionlabels = DeprecatedDict(
versionlabels,
@@ -116,14 +115,14 @@ class ChangeSetDomain(Domain):
} # type: Dict
def clear_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
for version, changes in self.data['changes'].items():
for changeset in changes[:]:
if changeset.docname == docname:
changes.remove(changeset)
def merge_domaindata(self, docnames, otherdata):
- # type: (List[unicode], Dict) -> None
+ # type: (List[str], Dict) -> None
# XXX duplicates?
for version, otherchanges in otherdata['changes'].items():
changes = self.data['changes'].setdefault(version, [])
@@ -132,7 +131,7 @@ class ChangeSetDomain(Domain):
changes.append(changeset)
def process_doc(self, env, docname, document):
- # type: (BuildEnvironment, unicode, nodes.document) -> None
+ # type: (BuildEnvironment, str, nodes.document) -> None
pass # nothing to do here. All changesets are registered on calling directive.
def note_changeset(self, node):
@@ -145,12 +144,12 @@ class ChangeSetDomain(Domain):
self.data['changes'].setdefault(version, []).append(changeset)
def get_changesets_for(self, version):
- # type: (unicode) -> List[ChangeSet]
+ # type: (str) -> List[ChangeSet]
return self.data['changes'].get(version, [])
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_domain(ChangeSetDomain)
app.add_directive('deprecated', VersionChange)
app.add_directive('versionadded', VersionChange)
diff --git a/sphinx/domains/cpp.py b/sphinx/domains/cpp.py
index 262d3f6d7..8269258b9 100644
--- a/sphinx/domains/cpp.py
+++ b/sphinx/domains/cpp.py
@@ -36,7 +36,6 @@ if False:
from sphinx.builders import Builder # NOQA
from sphinx.config import Config # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -372,7 +371,7 @@ _id_fundamental_v1 = {
'signed long': 'l',
'unsigned long': 'L',
'bool': 'b'
-} # type: Dict[unicode, unicode]
+}
_id_shorthands_v1 = {
'std::string': 'ss',
'std::ostream': 'os',
@@ -380,7 +379,7 @@ _id_shorthands_v1 = {
'std::iostream': 'ios',
'std::vector': 'v',
'std::map': 'm'
-} # type: Dict[unicode, unicode]
+}
_id_operator_v1 = {
'new': 'new-operator',
'new[]': 'new-array-operator',
@@ -429,7 +428,7 @@ _id_operator_v1 = {
'->': 'pointer-operator',
'()': 'call-operator',
'[]': 'subscript-operator'
-} # type: Dict[unicode, unicode]
+}
# ------------------------------------------------------------------------------
# Id v > 1 constants
@@ -474,7 +473,7 @@ _id_fundamental_v2 = {
'auto': 'Da',
'decltype(auto)': 'Dc',
'std::nullptr_t': 'Dn'
-} # type: Dict[unicode, unicode]
+}
_id_operator_v2 = {
'new': 'nw',
'new[]': 'na',
@@ -525,7 +524,7 @@ _id_operator_v2 = {
'()': 'cl',
'[]': 'ix',
'.*': 'ds' # this one is not overloadable, but we need it for expressions
-} # type: Dict[unicode, unicode]
+}
_id_operator_unary_v2 = {
'++': 'pp_',
'--': 'mm_',
@@ -539,7 +538,7 @@ _id_operator_unary_v2 = {
_id_char_from_prefix = {
None: 'c', 'u8': 'c',
'u': 'Ds', 'U': 'Di', 'L': 'w'
-} # type: Dict[unicode, unicode]
+} # type: Dict[Any, str]
# these are ordered by precedence
_expression_bin_ops = [
['||'],
@@ -568,21 +567,21 @@ _id_explicit_cast = {
class NoOldIdError(UnicodeMixin, Exception):
    # Used to avoid implementing unneeded id generation for old id schemes.
def __init__(self, description=""):
- # type: (unicode) -> None
+ # type: (str) -> None
self.description = description
def __unicode__(self):
- # type: () -> unicode
+ # type: () -> str
return self.description
class DefinitionError(UnicodeMixin, Exception):
def __init__(self, description):
- # type: (unicode) -> None
+ # type: (str) -> None
self.description = description
def __unicode__(self):
- # type: () -> unicode
+ # type: () -> str
return self.description
@@ -595,7 +594,7 @@ class _DuplicateSymbolError(UnicodeMixin, Exception):
self.declaration = declaration
def __unicode__(self):
- # type: () -> unicode
+ # type: () -> str
return "Internal C++ duplicate symbol error:\n%s" % self.symbol.dump(0)
@@ -620,15 +619,15 @@ class ASTBase(UnicodeMixin):
return deepcopy(self)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
raise NotImplementedError(repr(self))
def __unicode__(self):
- # type: () -> unicode
+ # type: () -> str
return self._stringify(lambda ast: text_type(ast))
def get_display_string(self):
- # type: () -> unicode
+ # type: () -> str
return self._stringify(lambda ast: ast.get_display_string())
def __repr__(self):
@@ -637,7 +636,7 @@ class ASTBase(UnicodeMixin):
def _verify_description_mode(mode):
- # type: (unicode) -> None
+ # type: (str) -> None
if mode not in ('lastIsName', 'noneIsName', 'markType', 'param'):
raise Exception("Description mode '%s' is invalid." % mode)
@@ -648,7 +647,7 @@ def _verify_description_mode(mode):
class ASTCPPAttribute(ASTBase):
def __init__(self, arg):
- # type: (unicode) -> None
+ # type: (str) -> None
self.arg = arg
def _stringify(self, transform):
@@ -662,13 +661,13 @@ class ASTCPPAttribute(ASTBase):
class ASTGnuAttribute(ASTBase):
def __init__(self, name, args):
- # type: (unicode, Any) -> None
+ # type: (str, Any) -> None
self.name = name
self.args = args
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
- res = [self.name] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = [self.name]
if self.args:
res.append('(')
res.append(transform(self.args))
@@ -682,8 +681,8 @@ class ASTGnuAttributeList(ASTBase):
self.attrs = attrs
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
- res = ['__attribute__(('] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = ['__attribute__((']
first = True
for attr in self.attrs:
if not first:
@@ -703,11 +702,11 @@ class ASTIdAttribute(ASTBase):
"""For simple attributes defined by the user."""
def __init__(self, id):
- # type: (unicode) -> None
+ # type: (str) -> None
self.id = id
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return self.id
def describe_signature(self, signode):
@@ -719,12 +718,12 @@ class ASTParenAttribute(ASTBase):
"""For paren attributes defined by the user."""
def __init__(self, id, arg):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
self.id = id
self.arg = arg
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return self.id + '(' + self.arg + ')'
def describe_signature(self, signode):
@@ -739,11 +738,11 @@ class ASTParenAttribute(ASTBase):
class ASTPointerLiteral(ASTBase):
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return u'nullptr'
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return 'LDnE'
def describe_signature(self, signode, mode, env, symbol):
@@ -755,14 +754,14 @@ class ASTBooleanLiteral(ASTBase):
self.value = value
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
if self.value:
return u'true'
else:
return u'false'
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if self.value:
return 'L1E'
else:
@@ -774,15 +773,15 @@ class ASTBooleanLiteral(ASTBase):
class ASTNumberLiteral(ASTBase):
def __init__(self, data):
- # type: (unicode) -> None
+ # type: (str) -> None
self.data = data
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return self.data
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return "L%sE" % self.data
def describe_signature(self, signode, mode, env, symbol):
@@ -797,7 +796,7 @@ class UnsupportedMultiCharacterCharLiteral(UnicodeMixin, Exception):
class ASTCharLiteral(ASTBase):
def __init__(self, prefix, data):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
self.prefix = prefix # may be None when no prefix
self.data = data
assert prefix in _id_char_from_prefix
@@ -809,14 +808,14 @@ class ASTCharLiteral(ASTBase):
raise UnsupportedMultiCharacterCharLiteral(decoded)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
if self.prefix is None:
return "'" + self.data + "'"
else:
return self.prefix + "'" + self.data + "'"
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return self.type + str(self.value)
def describe_signature(self, signode, mode, env, symbol):
@@ -826,15 +825,15 @@ class ASTCharLiteral(ASTBase):
class ASTStringLiteral(ASTBase):
def __init__(self, data):
- # type: (unicode) -> None
+ # type: (str) -> None
self.data = data
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return self.data
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
# note: the length is not really correct with escaping
return "LA%d_KcE" % (len(self.data) - 2)
@@ -845,11 +844,11 @@ class ASTStringLiteral(ASTBase):
class ASTThisLiteral(ASTBase):
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return "this"
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return "fpT"
def describe_signature(self, signode, mode, env, symbol):
@@ -861,11 +860,11 @@ class ASTParenExpr(ASTBase):
self.expr = expr
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return '(' + transform(self.expr) + ')'
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return self.expr.get_id(version)
def describe_signature(self, signode, mode, env, symbol):
@@ -882,7 +881,7 @@ class ASTFoldExpr(ASTBase):
self.rightExpr = rightExpr
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
res = [u'(']
if self.leftExpr:
res.append(transform(self.leftExpr))
@@ -899,7 +898,7 @@ class ASTFoldExpr(ASTBase):
return u''.join(res)
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
assert version >= 3
if version == 3:
return text_type(self)
@@ -930,7 +929,7 @@ class ASTBinOpExpr(ASTBase):
self.ops = ops
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
res = []
res.append(transform(self.exprs[0]))
for i in range(1, len(self.exprs)):
@@ -941,7 +940,7 @@ class ASTBinOpExpr(ASTBase):
return u''.join(res)
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
assert version >= 2
res = []
for i in range(len(self.ops)):
@@ -967,7 +966,7 @@ class ASTAssignmentExpr(ASTBase):
self.ops = ops
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
res = []
res.append(transform(self.exprs[0]))
for i in range(1, len(self.exprs)):
@@ -978,7 +977,7 @@ class ASTAssignmentExpr(ASTBase):
return u''.join(res)
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
res = []
for i in range(len(self.ops)):
res.append(_id_operator_v2[self.ops[i]])
@@ -1001,7 +1000,7 @@ class ASTCastExpr(ASTBase):
self.expr = expr
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
res = [u'(']
res.append(transform(self.typ))
res.append(u')')
@@ -1009,7 +1008,7 @@ class ASTCastExpr(ASTBase):
return u''.join(res)
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return 'cv' + self.typ.get_id(version) + self.expr.get_id(version)
def describe_signature(self, signode, mode, env, symbol):
@@ -1025,11 +1024,11 @@ class ASTUnaryOpExpr(ASTBase):
self.expr = expr
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return transform(self.op) + transform(self.expr)
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return _id_operator_unary_v2[self.op] + self.expr.get_id(version)
def describe_signature(self, signode, mode, env, symbol):
@@ -1042,11 +1041,11 @@ class ASTSizeofParamPack(ASTBase):
self.identifier = identifier
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return "sizeof...(" + transform(self.identifier) + ")"
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return 'sZ' + self.identifier.get_id(version)
def describe_signature(self, signode, mode, env, symbol):
@@ -1061,11 +1060,11 @@ class ASTSizeofType(ASTBase):
self.typ = typ
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return "sizeof(" + transform(self.typ) + ")"
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return 'st' + self.typ.get_id(version)
def describe_signature(self, signode, mode, env, symbol):
@@ -1079,11 +1078,11 @@ class ASTSizeofExpr(ASTBase):
self.expr = expr
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return "sizeof " + transform(self.expr)
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return 'sz' + self.expr.get_id(version)
def describe_signature(self, signode, mode, env, symbol):
@@ -1096,11 +1095,11 @@ class ASTAlignofExpr(ASTBase):
self.typ = typ
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return "alignof(" + transform(self.typ) + ")"
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return 'at' + self.typ.get_id(version)
def describe_signature(self, signode, mode, env, symbol):
@@ -1114,11 +1113,11 @@ class ASTNoexceptExpr(ASTBase):
self.expr = expr
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return "noexcept(" + transform(self.expr) + ")"
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return 'nx' + self.expr.get_id(version)
def describe_signature(self, signode, mode, env, symbol):
@@ -1129,7 +1128,7 @@ class ASTNoexceptExpr(ASTBase):
class ASTNewExpr(ASTBase):
def __init__(self, rooted, isNewTypeId, typ, initList, initType):
- # type: (bool, bool, ASTType, List[Any], unicode) -> None
+ # type: (bool, bool, ASTType, List[Any], str) -> None
self.rooted = rooted
self.isNewTypeId = isNewTypeId
self.typ = typ
@@ -1139,8 +1138,8 @@ class ASTNewExpr(ASTBase):
assert self.initType in ')}'
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
- res = [] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = []
if self.rooted:
res.append('::')
res.append('new ')
@@ -1162,9 +1161,9 @@ class ASTNewExpr(ASTBase):
return u''.join(res)
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
# the array part will be in the type mangling, so na is not used
- res = ['nw'] # type: List[unicode]
+ res = ['nw']
# TODO: placement
res.append('_')
res.append(self.typ.get_id(version))
@@ -1210,8 +1209,8 @@ class ASTDeleteExpr(ASTBase):
self.expr = expr
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
- res = [] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = []
if self.rooted:
res.append('::')
res.append('delete ')
@@ -1221,7 +1220,7 @@ class ASTDeleteExpr(ASTBase):
return u''.join(res)
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if self.array:
id = "da"
else:
@@ -1245,7 +1244,7 @@ class ASTExplicitCast(ASTBase):
self.expr = expr
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
res = [self.cast]
res.append('<')
res.append(transform(self.typ))
@@ -1255,7 +1254,7 @@ class ASTExplicitCast(ASTBase):
return u''.join(res)
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return (_id_explicit_cast[self.cast] +
self.typ.get_id(version) +
self.expr.get_id(version))
@@ -1276,11 +1275,11 @@ class ASTTypeId(ASTBase):
self.isType = isType
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return 'typeid(' + transform(self.typeOrExpr) + ')'
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
prefix = 'ti' if self.isType else 'te'
return prefix + self.typeOrExpr.get_id(version)
@@ -1296,7 +1295,7 @@ class ASTPostfixCallExpr(ASTBase):
self.exprs = exprs
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
res = [u'(']
first = True
for e in self.exprs:
@@ -1308,7 +1307,7 @@ class ASTPostfixCallExpr(ASTBase):
return u''.join(res)
def get_id(self, idPrefix, version):
- # type: (unicode, int) -> unicode
+ # type: (str, int) -> str
res = ['cl', idPrefix]
for e in self.exprs:
res.append(e.get_id(version))
@@ -1331,11 +1330,11 @@ class ASTPostfixArray(ASTBase):
self.expr = expr
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return u'[' + transform(self.expr) + ']'
def get_id(self, idPrefix, version):
- # type: (unicode, int) -> unicode
+ # type: (str, int) -> str
return 'ix' + idPrefix + self.expr.get_id(version)
def describe_signature(self, signode, mode, env, symbol):
@@ -1346,11 +1345,11 @@ class ASTPostfixArray(ASTBase):
class ASTPostfixInc(ASTBase):
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return u'++'
def get_id(self, idPrefix, version):
- # type: (unicode, int) -> unicode
+ # type: (str, int) -> str
return 'pp' + idPrefix
def describe_signature(self, signode, mode, env, symbol):
@@ -1359,11 +1358,11 @@ class ASTPostfixInc(ASTBase):
class ASTPostfixDec(ASTBase):
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return u'--'
def get_id(self, idPrefix, version):
- # type: (unicode, int) -> unicode
+ # type: (str, int) -> str
return 'mm' + idPrefix
def describe_signature(self, signode, mode, env, symbol):
@@ -1375,11 +1374,11 @@ class ASTPostfixMember(ASTBase):
self.name = name
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return u'.' + transform(self.name)
def get_id(self, idPrefix, version):
- # type: (unicode, int) -> unicode
+ # type: (str, int) -> str
return 'dt' + idPrefix + self.name.get_id(version)
def describe_signature(self, signode, mode, env, symbol):
@@ -1392,11 +1391,11 @@ class ASTPostfixMemberOfPointer(ASTBase):
self.name = name
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return u'->' + transform(self.name)
def get_id(self, idPrefix, version):
- # type: (unicode, int) -> unicode
+ # type: (str, int) -> str
return 'pt' + idPrefix + self.name.get_id(version)
def describe_signature(self, signode, mode, env, symbol):
@@ -1411,14 +1410,14 @@ class ASTPostfixExpr(ASTBase):
self.postFixes = postFixes
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
res = [transform(self.prefix)]
for p in self.postFixes:
res.append(transform(p))
return u''.join(res)
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
id = self.prefix.get_id(version)
for p in self.postFixes:
id = p.get_id(id, version)
@@ -1435,11 +1434,11 @@ class ASTPackExpansionExpr(ASTBase):
self.expr = expr
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return transform(self.expr) + '...'
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
id = self.expr.get_id(version)
return 'sp' + id
@@ -1453,11 +1452,11 @@ class ASTFallbackExpr(ASTBase):
self.expr = expr
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return self.expr
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return text_type(self.expr)
def describe_signature(self, signode, mode, env, symbol):
@@ -1470,7 +1469,7 @@ class ASTFallbackExpr(ASTBase):
class ASTIdentifier(ASTBase):
def __init__(self, identifier):
- # type: (unicode) -> None
+ # type: (str) -> None
assert identifier is not None
assert len(identifier) != 0
self.identifier = identifier
@@ -1479,7 +1478,7 @@ class ASTIdentifier(ASTBase):
return self.identifier[0] == '@'
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if self.is_anon() and version < 3:
raise NoOldIdError()
if version == 1:
@@ -1501,15 +1500,15 @@ class ASTIdentifier(ASTBase):
# and this is where we finally make a difference between __unicode__ and the display string
def __unicode__(self):
- # type: () -> unicode
+ # type: () -> str
return self.identifier
def get_display_string(self):
- # type: () -> unicode
+ # type: () -> str
return u"[anonymous]" if self.is_anon() else self.identifier
def describe_signature(self, signode, mode, env, prefix, templateArgs, symbol):
- # type: (Any, unicode, BuildEnvironment, unicode, unicode, Symbol) -> None
+ # type: (Any, str, BuildEnvironment, str, str, Symbol) -> None
_verify_description_mode(mode)
if mode == 'markType':
targetText = prefix + self.identifier + templateArgs
@@ -1540,7 +1539,7 @@ class ASTIdentifier(ASTBase):
class ASTTemplateKeyParamPackIdDefault(ASTBase):
def __init__(self, key, identifier, parameterPack, default):
- # type: (unicode, ASTIdentifier, bool, ASTType) -> None
+ # type: (str, ASTIdentifier, bool, ASTType) -> None
assert key
if parameterPack:
assert default is None
@@ -1554,7 +1553,7 @@ class ASTTemplateKeyParamPackIdDefault(ASTBase):
return self.identifier
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
assert version >= 2
# this is not part of the normal name mangling in C++
res = []
@@ -1565,8 +1564,8 @@ class ASTTemplateKeyParamPackIdDefault(ASTBase):
return ''.join(res)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
- res = [self.key] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = [self.key]
if self.parameterPack:
if self.identifier:
res.append(' ')
@@ -1581,7 +1580,7 @@ class ASTTemplateKeyParamPackIdDefault(ASTBase):
return ''.join(res)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
signode += nodes.Text(self.key)
if self.parameterPack:
if self.identifier:
@@ -1618,7 +1617,7 @@ class ASTTemplateParamType(ASTBase):
return self.data.get_identifier()
def get_id(self, version, objectType=None, symbol=None):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
# this is not part of the normal name mangling in C++
assert version >= 2
if symbol:
@@ -1628,11 +1627,11 @@ class ASTTemplateParamType(ASTBase):
return self.data.get_id(version)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return transform(self.data)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
self.data.describe_signature(signode, mode, env, symbol)
@@ -1654,7 +1653,7 @@ class ASTTemplateParamConstrainedTypeWithInit(ASTBase):
return self.type.isPack
def get_id(self, version, objectType=None, symbol=None):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
# this is not part of the normal name mangling in C++
assert version >= 2
if symbol:
@@ -1664,7 +1663,7 @@ class ASTTemplateParamConstrainedTypeWithInit(ASTBase):
return self.type.get_id(version)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
res = transform(self.type)
if self.init:
res += " = "
@@ -1672,7 +1671,7 @@ class ASTTemplateParamConstrainedTypeWithInit(ASTBase):
return res
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
self.type.describe_signature(signode, mode, env, symbol)
if self.init:
signode += nodes.Text(" = ")
@@ -1703,7 +1702,7 @@ class ASTTemplateParamTemplateType(ASTBase):
return self.data.get_identifier()
def get_id(self, version, objectType=None, symbol=None):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
assert version >= 2
# this is not part of the normal name mangling in C++
if symbol:
@@ -1713,11 +1712,11 @@ class ASTTemplateParamTemplateType(ASTBase):
return self.nestedParams.get_id(version) + self.data.get_id(version)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return transform(self.nestedParams) + transform(self.data)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
self.nestedParams.describe_signature(signode, 'noneIsName', env, symbol)
signode += nodes.Text(' ')
self.data.describe_signature(signode, mode, env, symbol)
@@ -1752,7 +1751,7 @@ class ASTTemplateParamNonType(ASTBase):
return None
def get_id(self, version, objectType=None, symbol=None):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
assert version >= 2
# this is not part of the normal name mangling in C++
if symbol:
@@ -1762,11 +1761,11 @@ class ASTTemplateParamNonType(ASTBase):
return '_' + self.param.get_id(version)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return transform(self.param)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
self.param.describe_signature(signode, mode, env, symbol)
@@ -1778,7 +1777,7 @@ class ASTTemplateParams(ASTBase):
self.isNested = False # whether it's a template template param
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
assert version >= 2
res = []
res.append("I")
@@ -1788,7 +1787,7 @@ class ASTTemplateParams(ASTBase):
return ''.join(res)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
res = []
res.append(u"template<")
res.append(u", ".join(transform(a) for a in self.params))
@@ -1796,7 +1795,7 @@ class ASTTemplateParams(ASTBase):
return ''.join(res)
def describe_signature(self, parentNode, mode, env, symbol, lineSpec=None):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol, bool) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol, bool) -> None
# 'lineSpec' is defaulted becuase of template template parameters
def makeLine(parentNode=parentNode):
signode = addnodes.desc_signature_line()
@@ -1843,7 +1842,7 @@ class ASTTemplateIntroductionParameter(ASTBase):
return self.identifier
def get_id(self, version, objectType=None, symbol=None):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
assert version >= 2
# this is not part of the normal name mangling in C++
if symbol:
@@ -1856,7 +1855,7 @@ class ASTTemplateIntroductionParameter(ASTBase):
return '0' # we need to put something
def get_id_as_arg(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
assert version >= 2
# used for the implicit requires clause
res = self.identifier.get_id(version)
@@ -1866,15 +1865,15 @@ class ASTTemplateIntroductionParameter(ASTBase):
return res
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
- res = [] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = []
if self.parameterPack:
res.append('...')
res.append(transform(self.identifier))
return ''.join(res)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
if self.parameterPack:
signode += nodes.Text('...')
self.identifier.describe_signature(signode, mode, env, '', '', symbol)
@@ -1888,7 +1887,7 @@ class ASTTemplateIntroduction(ASTBase):
self.params = params
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
assert version >= 2
# first do the same as a normal template parameter list
res = []
@@ -1907,7 +1906,7 @@ class ASTTemplateIntroduction(ASTBase):
return ''.join(res)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
res = []
res.append(transform(self.concept))
res.append('{')
@@ -1916,7 +1915,7 @@ class ASTTemplateIntroduction(ASTBase):
return ''.join(res)
def describe_signature(self, parentNode, mode, env, symbol, lineSpec):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol, bool) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol, bool) -> None
# Note: 'lineSpec' has no effect on template introductions.
signode = addnodes.desc_signature_line()
parentNode += signode
@@ -1939,7 +1938,7 @@ class ASTTemplateDeclarationPrefix(ASTBase):
self.templates = templates
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
assert version >= 2
# this is not part of a normal name mangling system
res = []
@@ -1948,14 +1947,14 @@ class ASTTemplateDeclarationPrefix(ASTBase):
return u''.join(res)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
res = []
for t in self.templates:
res.append(transform(t))
return u''.join(res)
def describe_signature(self, signode, mode, env, symbol, lineSpec):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol, bool) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol, bool) -> None
_verify_description_mode(mode)
for t in self.templates:
t.describe_signature(signode, 'lastIsName', env, symbol, lineSpec)
@@ -1973,11 +1972,11 @@ class ASTOperator(ASTBase):
return True
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
raise NotImplementedError()
def describe_signature(self, signode, mode, env, prefix, templateArgs, symbol):
- # type: (addnodes.desc_signature, unicode, Any, unicode, unicode, Symbol) -> None
+ # type: (addnodes.desc_signature, str, Any, str, str, Symbol) -> None
_verify_description_mode(mode)
identifier = text_type(self)
if mode == 'lastIsName':
@@ -1988,11 +1987,11 @@ class ASTOperator(ASTBase):
class ASTOperatorBuildIn(ASTOperator):
def __init__(self, op):
- # type: (unicode) -> None
+ # type: (str) -> None
self.op = op
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
ids = _id_operator_v1
else:
@@ -2003,7 +2002,7 @@ class ASTOperatorBuildIn(ASTOperator):
return ids[self.op]
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
if self.op in ('new', 'new[]', 'delete', 'delete[]'):
return u'operator ' + self.op
else:
@@ -2016,18 +2015,18 @@ class ASTOperatorType(ASTOperator):
self.type = type
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
return u'castto-%s-operator' % self.type.get_id(version)
else:
return u'cv' + self.type.get_id(version)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return u''.join(['operator ', transform(self.type)])
def get_name_no_template(self):
- # type: () -> unicode
+ # type: () -> str
return text_type(self)
@@ -2037,14 +2036,14 @@ class ASTOperatorLiteral(ASTOperator):
self.identifier = identifier
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
raise NoOldIdError()
else:
return u'li' + self.identifier.get_id(version)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return u'operator""' + transform(self.identifier)
@@ -2057,11 +2056,11 @@ class ASTTemplateArgConstant(ASTBase):
self.value = value
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return transform(self.value)
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
return text_type(self).replace(u' ', u'-')
if version == 2:
@@ -2069,7 +2068,7 @@ class ASTTemplateArgConstant(ASTBase):
return u'X' + self.value.get_id(version) + u'E'
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
self.value.describe_signature(signode, mode, env, symbol)
@@ -2081,9 +2080,9 @@ class ASTTemplateArgs(ASTBase):
self.args = args
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
- res = [] # type: List[unicode]
+ res = []
res.append(':')
res.append(u'.'.join(a.get_id(version) for a in self.args))
res.append(':')
@@ -2097,12 +2096,12 @@ class ASTTemplateArgs(ASTBase):
return u''.join(res)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
res = ', '.join(transform(a) for a in self.args)
return '<' + res + '>'
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
signode += nodes.Text('<')
first = True
@@ -2125,21 +2124,21 @@ class ASTNestedNameElement(ASTBase):
return False
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
res = self.identOrOp.get_id(version)
if self.templateArgs:
res += self.templateArgs.get_id(version)
return res
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
res = transform(self.identOrOp)
if self.templateArgs:
res += transform(self.templateArgs)
return res
def describe_signature(self, signode, mode, env, prefix, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, unicode, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, str, Symbol) -> None
tArgs = text_type(self.templateArgs) if self.templateArgs is not None else ''
self.identOrOp.describe_signature(signode, mode, env, prefix, tArgs, symbol)
if self.templateArgs is not None:
@@ -2171,14 +2170,14 @@ class ASTNestedName(ASTBase):
return count
def get_id(self, version, modifiers=''):
- # type: (int, unicode) -> unicode
+ # type: (int, str) -> str
if version == 1:
tt = text_type(self)
if tt in _id_shorthands_v1:
return _id_shorthands_v1[tt]
else:
return u'::'.join(n.get_id(version) for n in self.names)
- res = [] # type: List[unicode]
+ res = []
if len(self.names) > 1 or len(modifiers) > 0:
res.append('N')
res.append(modifiers)
@@ -2189,8 +2188,8 @@ class ASTNestedName(ASTBase):
return u''.join(res)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
- res = [] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = []
if self.rooted:
res.append('')
for i in range(len(self.names)):
@@ -2203,7 +2202,7 @@ class ASTNestedName(ASTBase):
return '::'.join(res)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
# just print the name part, with template args, not template params
if mode == 'noneIsName':
@@ -2223,7 +2222,7 @@ class ASTNestedName(ASTBase):
templateParams = symbol.declaration.templatePrefix.templates
iTemplateParams = 0
templateParamsPrefix = u''
- prefix = '' # type: unicode
+ prefix = ''
first = True
names = self.names[:-1] if mode == 'lastIsName' else self.names
# If lastIsName, then wrap all of the prefix in a desc_addname,
@@ -2263,15 +2262,15 @@ class ASTNestedName(ASTBase):
class ASTTrailingTypeSpecFundamental(ASTBase):
def __init__(self, name):
- # type: (unicode) -> None
+ # type: (str) -> None
self.name = name
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return self.name
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
res = []
for a in self.name.split(' '):
@@ -2289,13 +2288,13 @@ class ASTTrailingTypeSpecFundamental(ASTBase):
return _id_fundamental_v2[self.name]
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
signode += nodes.Text(text_type(self.name))
class ASTTrailingTypeSpecName(ASTBase):
def __init__(self, prefix, nestedName):
- # type: (unicode, Any) -> None
+ # type: (str, Any) -> None
self.prefix = prefix
self.nestedName = nestedName
@@ -2305,12 +2304,12 @@ class ASTTrailingTypeSpecName(ASTBase):
return self.nestedName
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return self.nestedName.get_id(version)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
- res = [] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = []
if self.prefix:
res.append(self.prefix)
res.append(' ')
@@ -2318,7 +2317,7 @@ class ASTTrailingTypeSpecName(ASTBase):
return u''.join(res)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
if self.prefix:
signode += addnodes.desc_annotation(self.prefix, self.prefix)
signode += nodes.Text(' ')
@@ -2327,17 +2326,17 @@ class ASTTrailingTypeSpecName(ASTBase):
class ASTTrailingTypeSpecDecltypeAuto(ASTBase):
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return u'decltype(auto)'
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
raise NoOldIdError()
return 'Dc'
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
signode.append(nodes.Text(text_type(self)))
@@ -2346,17 +2345,17 @@ class ASTTrailingTypeSpecDecltype(ASTBase):
self.expr = expr
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return u'decltype(' + transform(self.expr) + ')'
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
raise NoOldIdError()
return 'DT' + self.expr.get_id(version) + "E"
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
signode.append(nodes.Text('decltype('))
self.expr.describe_signature(signode, mode, env, symbol)
signode.append(nodes.Text(')'))
@@ -2369,7 +2368,7 @@ class ASTFunctionParameter(ASTBase):
self.ellipsis = ellipsis
def get_id(self, version, objectType=None, symbol=None):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
# this is not part of the normal name mangling in C++
if symbol:
# the anchor will be our parent
@@ -2381,14 +2380,14 @@ class ASTFunctionParameter(ASTBase):
return self.arg.get_id(version)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
if self.ellipsis:
return '...'
else:
return transform(self.arg)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
if self.ellipsis:
signode += nodes.Text('...')
@@ -2399,7 +2398,7 @@ class ASTFunctionParameter(ASTBase):
class ASTParametersQualifiers(ASTBase):
def __init__(self, args, volatile, const, refQual, exceptionSpec, override,
final, initializer):
- # type: (List[Any], bool, bool, unicode, unicode, bool, bool, unicode) -> None
+ # type: (List[Any], bool, bool, str, str, bool, bool, str) -> None
self.args = args
self.volatile = volatile
self.const = const
@@ -2415,7 +2414,7 @@ class ASTParametersQualifiers(ASTBase):
return self.args
def get_modifiers_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
res = []
if self.volatile:
res.append('V')
@@ -2431,7 +2430,7 @@ class ASTParametersQualifiers(ASTBase):
return u''.join(res)
def get_param_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
if len(self.args) == 0:
return ''
@@ -2443,8 +2442,8 @@ class ASTParametersQualifiers(ASTBase):
return u''.join(a.get_id(version) for a in self.args)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
- res = [] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = []
res.append('(')
first = True
for a in self.args:
@@ -2473,7 +2472,7 @@ class ASTParametersQualifiers(ASTBase):
return u''.join(res)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
paramlist = addnodes.desc_parameterlist()
for arg in self.args:
@@ -2511,7 +2510,7 @@ class ASTParametersQualifiers(ASTBase):
class ASTDeclSpecsSimple(ASTBase):
def __init__(self, storage, threadLocal, inline, virtual, explicit,
constexpr, volatile, const, friend, attrs):
- # type: (unicode, bool, bool, bool, bool, bool, bool, bool, bool, List[Any]) -> None
+ # type: (str, bool, bool, bool, bool, bool, bool, bool, bool, List[Any]) -> None
self.storage = storage
self.threadLocal = threadLocal
self.inline = inline
@@ -2539,8 +2538,8 @@ class ASTDeclSpecsSimple(ASTBase):
self.attrs + other.attrs)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
- res = [] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = [] # type: List[str]
res.extend(transform(attr) for attr in self.attrs)
if self.storage:
res.append(self.storage)
@@ -2608,7 +2607,7 @@ class ASTDeclSpecs(ASTBase):
return self.trailingTypeSpec.name
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
res = []
res.append(self.trailingTypeSpec.get_id(version))
@@ -2626,8 +2625,8 @@ class ASTDeclSpecs(ASTBase):
return u''.join(res)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
- res = [] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = [] # type: List[str]
l = transform(self.leftSpecs)
if len(l) > 0:
if len(res) > 0:
@@ -2645,7 +2644,7 @@ class ASTDeclSpecs(ASTBase):
return "".join(res)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
modifiers = [] # type: List[nodes.Node]
@@ -2676,14 +2675,14 @@ class ASTArray(ASTBase):
self.size = size
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
if self.size:
return u'[' + transform(self.size) + ']'
else:
return u'[]'
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
return u'A'
if version == 2:
@@ -2729,8 +2728,8 @@ class ASTDeclaratorPtr(ASTBase):
return True
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
- res = ['*'] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = ['*']
for a in self.attrs:
res.append(transform(a))
if len(self.attrs) > 0 and (self.volatile or self.const):
@@ -2748,15 +2747,15 @@ class ASTDeclaratorPtr(ASTBase):
return u''.join(res)
def get_modifiers_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return self.next.get_modifiers_id(version)
def get_param_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return self.next.get_param_id(version)
def get_ptr_suffix_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
res = ['P']
if self.volatile:
@@ -2775,9 +2774,9 @@ class ASTDeclaratorPtr(ASTBase):
return u''.join(res)
def get_type_id(self, version, returnTypeId):
- # type: (int, unicode) -> unicode
+ # type: (int, str) -> str
# ReturnType *next, so we are part of the return type of 'next
- res = ['P'] # type: List[unicode]
+ res = ['P']
if self.volatile:
res.append('V')
if self.const:
@@ -2790,7 +2789,7 @@ class ASTDeclaratorPtr(ASTBase):
return self.next.is_function_type()
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
signode += nodes.Text("*")
for a in self.attrs:
@@ -2839,8 +2838,8 @@ class ASTDeclaratorRef(ASTBase):
return self.next.require_space_after_declSpecs()
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
- res = ['&'] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = ['&']
for a in self.attrs:
res.append(transform(a))
if len(self.attrs) > 0 and self.next.require_space_after_declSpecs:
@@ -2849,22 +2848,22 @@ class ASTDeclaratorRef(ASTBase):
return u''.join(res)
def get_modifiers_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return self.next.get_modifiers_id(version)
def get_param_id(self, version): # only the parameters (if any)
- # type: (int) -> unicode
+ # type: (int) -> str
return self.next.get_param_id(version)
def get_ptr_suffix_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
return u'R' + self.next.get_ptr_suffix_id(version)
else:
return self.next.get_ptr_suffix_id(version) + u'R'
def get_type_id(self, version, returnTypeId):
- # type: (int, unicode) -> unicode
+ # type: (int, str) -> str
assert version >= 2
# ReturnType &next, so we are part of the return type of 'next
return self.next.get_type_id(version, returnTypeId=u'R' + returnTypeId)
@@ -2874,7 +2873,7 @@ class ASTDeclaratorRef(ASTBase):
return self.next.is_function_type()
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
signode += nodes.Text("&")
for a in self.attrs:
@@ -2905,29 +2904,29 @@ class ASTDeclaratorParamPack(ASTBase):
return False
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
res = transform(self.next)
if self.next.name:
res = ' ' + res
return '...' + res
def get_modifiers_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return self.next.get_modifiers_id(version)
def get_param_id(self, version): # only the parameters (if any)
- # type: (int) -> unicode
+ # type: (int) -> str
return self.next.get_param_id(version)
def get_ptr_suffix_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
return 'Dp' + self.next.get_ptr_suffix_id(version)
else:
return self.next.get_ptr_suffix_id(version) + u'Dp'
def get_type_id(self, version, returnTypeId):
- # type: (int, unicode) -> unicode
+ # type: (int, str) -> str
assert version >= 2
# ReturnType... next, so we are part of the return type of 'next
return self.next.get_type_id(version, returnTypeId=u'Dp' + returnTypeId)
@@ -2937,7 +2936,7 @@ class ASTDeclaratorParamPack(ASTBase):
return self.next.is_function_type()
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
signode += nodes.Text("...")
if self.next.name:
@@ -2970,7 +2969,7 @@ class ASTDeclaratorMemPtr(ASTBase):
return True
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
res = []
res.append(transform(self.className))
res.append('::*')
@@ -2984,21 +2983,21 @@ class ASTDeclaratorMemPtr(ASTBase):
return ''.join(res)
def get_modifiers_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
raise NoOldIdError()
else:
return self.next.get_modifiers_id(version)
def get_param_id(self, version): # only the parameters (if any)
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
raise NoOldIdError()
else:
return self.next.get_param_id(version)
def get_ptr_suffix_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
raise NoOldIdError()
else:
@@ -3006,10 +3005,10 @@ class ASTDeclaratorMemPtr(ASTBase):
return self.next.get_ptr_suffix_id(version) + u'Dp'
def get_type_id(self, version, returnTypeId):
- # type: (int, unicode) -> unicode
+ # type: (int, str) -> str
assert version >= 2
# ReturnType name::* next, so we are part of the return type of next
- nextReturnTypeId = '' # type: unicode
+ nextReturnTypeId = ''
if self.volatile:
nextReturnTypeId += 'V'
if self.const:
@@ -3024,7 +3023,7 @@ class ASTDeclaratorMemPtr(ASTBase):
return self.next.is_function_type()
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
self.className.describe_signature(signode, mode, env, symbol)
signode += nodes.Text('::*')
@@ -3067,23 +3066,23 @@ class ASTDeclaratorParen(ASTBase):
return True
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
- res = ['('] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = ['(']
res.append(transform(self.inner))
res.append(')')
res.append(transform(self.next))
return ''.join(res)
def get_modifiers_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return self.inner.get_modifiers_id(version)
def get_param_id(self, version): # only the parameters (if any)
- # type: (int) -> unicode
+ # type: (int) -> str
return self.inner.get_param_id(version)
def get_ptr_suffix_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
raise NoOldIdError() # TODO: was this implemented before?
return self.next.get_ptr_suffix_id(version) + \
@@ -3093,7 +3092,7 @@ class ASTDeclaratorParen(ASTBase):
self.next.get_ptr_suffix_id(version)
def get_type_id(self, version, returnTypeId):
- # type: (int, unicode) -> unicode
+ # type: (int, str) -> str
assert version >= 2
# ReturnType (inner)next, so 'inner' returns everything outside
nextId = self.next.get_type_id(version, returnTypeId)
@@ -3104,7 +3103,7 @@ class ASTDeclaratorParen(ASTBase):
return self.inner.is_function_type()
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
signode += nodes.Text('(')
self.inner.describe_signature(signode, mode, env, symbol)
@@ -3135,7 +3134,7 @@ class ASTDeclaratorNameParamQual(ASTBase):
return self.paramQual.function_params
def get_modifiers_id(self, version): # only the modifiers for a function, e.g.,
- # type: (int) -> unicode
+ # type: (int) -> str
# cv-qualifiers
if self.paramQual:
return self.paramQual.get_modifiers_id(version)
@@ -3143,18 +3142,18 @@ class ASTDeclaratorNameParamQual(ASTBase):
"This should only be called on a function: %s" % text_type(self))
def get_param_id(self, version): # only the parameters (if any)
- # type: (int) -> unicode
+ # type: (int) -> str
if self.paramQual:
return self.paramQual.get_param_id(version)
else:
return ''
def get_ptr_suffix_id(self, version): # only the array specifiers
- # type: (int) -> unicode
+ # type: (int) -> str
return u''.join(a.get_id(version) for a in self.arrayOps)
def get_type_id(self, version, returnTypeId):
- # type: (int, unicode) -> unicode
+ # type: (int, str) -> str
assert version >= 2
res = []
# TOOD: can we actually have both array ops and paramQual?
@@ -3180,7 +3179,7 @@ class ASTDeclaratorNameParamQual(ASTBase):
return self.paramQual is not None
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
res = []
if self.declId:
res.append(transform(self.declId))
@@ -3191,7 +3190,7 @@ class ASTDeclaratorNameParamQual(ASTBase):
return u''.join(res)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
if self.declId:
self.declId.describe_signature(signode, mode, env, symbol)
@@ -3206,11 +3205,11 @@ class ASTInitializer(ASTBase):
self.value = value
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return u' = ' + transform(self.value)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
signode.append(nodes.Text(' = '))
self.value.describe_signature(signode, 'markType', env, symbol)
@@ -3240,7 +3239,7 @@ class ASTType(ASTBase):
return self.decl.function_params
def get_id(self, version, objectType=None, symbol=None):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
if version == 1:
res = []
if objectType: # needs the name
@@ -3285,7 +3284,7 @@ class ASTType(ASTBase):
return u''.join(res)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
res = []
declSpecs = transform(self.declSpecs)
res.append(declSpecs)
@@ -3295,14 +3294,14 @@ class ASTType(ASTBase):
return u''.join(res)
def get_type_declaration_prefix(self):
- # type: () -> unicode
+ # type: () -> str
if self.declSpecs.trailingTypeSpec:
return 'typedef'
else:
return 'type'
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
self.declSpecs.describe_signature(signode, 'markType', env, symbol)
if (self.decl.require_space_after_declSpecs() and
@@ -3332,7 +3331,7 @@ class ASTTypeWithInit(ASTBase):
return self.type.isPack
def get_id(self, version, objectType=None, symbol=None):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
if objectType != 'member':
return self.type.get_id(version, objectType)
if version == 1:
@@ -3341,7 +3340,7 @@ class ASTTypeWithInit(ASTBase):
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
res = []
res.append(transform(self.type))
if self.init:
@@ -3349,7 +3348,7 @@ class ASTTypeWithInit(ASTBase):
return u''.join(res)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
self.type.describe_signature(signode, mode, env, symbol)
if self.init:
@@ -3363,13 +3362,13 @@ class ASTTypeUsing(ASTBase):
self.type = type
def get_id(self, version, objectType=None, symbol=None):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
if version == 1:
raise NoOldIdError()
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
res = []
res.append(transform(self.name))
if self.type:
@@ -3378,11 +3377,11 @@ class ASTTypeUsing(ASTBase):
return u''.join(res)
def get_type_declaration_prefix(self):
- # type: () -> unicode
+ # type: () -> str
return 'using'
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol=symbol)
if self.type:
@@ -3402,20 +3401,20 @@ class ASTConcept(ASTBase):
return self.nestedName
def get_id(self, version, objectType=None, symbol=None):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
if version == 1:
raise NoOldIdError()
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
res = transform(self.nestedName)
if self.initializer:
res += transform(self.initializer)
return res
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
self.nestedName.describe_signature(signode, mode, env, symbol)
if self.initializer:
self.initializer.describe_signature(signode, mode, env, symbol)
@@ -3423,15 +3422,15 @@ class ASTConcept(ASTBase):
class ASTBaseClass(ASTBase):
def __init__(self, name, visibility, virtual, pack):
- # type: (Any, unicode, bool, bool) -> None
+ # type: (Any, str, bool, bool) -> None
self.name = name
self.visibility = visibility
self.virtual = virtual
self.pack = pack
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
- res = [] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = []
if self.visibility != 'private':
res.append(self.visibility)
res.append(' ')
@@ -3443,7 +3442,7 @@ class ASTBaseClass(ASTBase):
return u''.join(res)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
if self.visibility != 'private':
signode += addnodes.desc_annotation(self.visibility,
@@ -3465,11 +3464,11 @@ class ASTClass(ASTBase):
self.bases = bases
def get_id(self, version, objectType, symbol):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
res = []
res.append(transform(self.name))
if self.final:
@@ -3485,7 +3484,7 @@ class ASTClass(ASTBase):
return u''.join(res)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol=symbol)
if self.final:
@@ -3505,37 +3504,37 @@ class ASTUnion(ASTBase):
self.name = name
def get_id(self, version, objectType, symbol):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
if version == 1:
raise NoOldIdError()
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
return transform(self.name)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol=symbol)
class ASTEnum(ASTBase):
def __init__(self, name, scoped, underlyingType):
- # type: (Any, unicode, Any) -> None
+ # type: (Any, str, Any) -> None
self.name = name
self.scoped = scoped
self.underlyingType = underlyingType
def get_id(self, version, objectType, symbol):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
if version == 1:
raise NoOldIdError()
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
- res = [] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = []
if self.scoped:
res.append(self.scoped)
res.append(' ')
@@ -3546,7 +3545,7 @@ class ASTEnum(ASTBase):
return u''.join(res)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
# self.scoped has been done by the CPPEnumObject
self.name.describe_signature(signode, mode, env, symbol=symbol)
@@ -3563,13 +3562,13 @@ class ASTEnumerator(ASTBase):
self.init = init
def get_id(self, version, objectType, symbol):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
if version == 1:
raise NoOldIdError()
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
res = []
res.append(transform(self.name))
if self.init:
@@ -3577,7 +3576,7 @@ class ASTEnumerator(ASTBase):
return u''.join(res)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol)
if self.init:
@@ -3586,7 +3585,7 @@ class ASTEnumerator(ASTBase):
class ASTDeclaration(ASTBase):
def __init__(self, objectType, visibility, templatePrefix, declaration):
- # type: (unicode, unicode, Any, Any) -> None
+ # type: (str, str, Any, Any) -> None
self.objectType = objectType
self.visibility = visibility
self.templatePrefix = templatePrefix
@@ -3619,7 +3618,7 @@ class ASTDeclaration(ASTBase):
return self.declaration.function_params
def get_id(self, version, prefixed=True):
- # type: (int, bool) -> unicode
+ # type: (int, bool) -> str
if version == 1:
if self.templatePrefix:
raise NoOldIdError()
@@ -3639,12 +3638,12 @@ class ASTDeclaration(ASTBase):
return u''.join(res)
def get_newest_id(self):
- # type: () -> unicode
+ # type: () -> str
return self.get_id(_max_id, True)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
- res = [] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = []
if self.visibility and self.visibility != "public":
res.append(self.visibility)
res.append(u' ')
@@ -3654,7 +3653,7 @@ class ASTDeclaration(ASTBase):
return u''.join(res)
def describe_signature(self, signode, mode, env, options):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Dict) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Dict) -> None
_verify_description_mode(mode)
assert self.symbol
# The caller of the domain added a desc_signature node.
@@ -3745,7 +3744,7 @@ class Symbol:
templateParams, # type: Any
templateArgs, # type: Any
declaration, # type: ASTDeclaration
- docname # type: unicode
+ docname # type: str
):
# type: (...) -> None
self.parent = parent
@@ -3770,7 +3769,7 @@ class Symbol:
self._add_template_and_function_params()
def _fill_empty(self, declaration, docname):
- # type: (ASTDeclaration, unicode) -> None
+ # type: (ASTDeclaration, str) -> None
self._assert_invariants()
assert not self.declaration
assert not self.docname
@@ -3822,7 +3821,7 @@ class Symbol:
self.parent = None
def clear_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
newChildren = []
for sChild in self._children:
sChild.clear_doc(docname)
@@ -3845,9 +3844,8 @@ class Symbol:
yield c
if not c.identOrOp.is_anon():
continue
- # TODO: change to 'yield from' when Python 2 support is dropped
- for nested in c.children_recurse_anon:
- yield nested
+
+ yield from c.children_recurse_anon
def get_lookup_key(self):
# type: () -> List[Tuple[ASTNestedNameElement, Any]]
@@ -3966,7 +3964,7 @@ class Symbol:
onMissingQualifiedSymbol,
# type: Callable[[Symbol, Union[ASTIdentifier, ASTOperator], Any, ASTTemplateArgs], Symbol] # NOQA
strictTemplateParamArgLists, # type: bool
- ancestorLookupType, # type: unicode
+ ancestorLookupType, # type: str
templateShorthand, # type: bool
matchSelf, # type: bool
recurseInAnon, # type: bool
@@ -4067,7 +4065,7 @@ class Symbol:
identOrOp, templateParams, templateArgs)
def _add_symbols(self, nestedName, templateDecls, declaration, docname):
- # type: (ASTNestedName, List[Any], ASTDeclaration, unicode) -> Symbol
+ # type: (ASTNestedName, List[Any], ASTDeclaration, str) -> Symbol
# Used for adding a whole path of symbols, where the last may or may not
# be an actual declaration.
@@ -4220,7 +4218,7 @@ class Symbol:
return symbol
def merge_with(self, other, docnames, env):
- # type: (Symbol, List[unicode], BuildEnvironment) -> None
+ # type: (Symbol, List[str], BuildEnvironment) -> None
assert other is not None
for otherChild in other._children:
ourChild = self._find_first_named_symbol(
@@ -4261,7 +4259,7 @@ class Symbol:
declaration=None, docname=None)
def add_declaration(self, declaration, docname):
- # type: (ASTDeclaration, unicode) -> Symbol
+ # type: (ASTDeclaration, str) -> Symbol
assert declaration
assert docname
nestedName = declaration.name
@@ -4299,7 +4297,7 @@ class Symbol:
def find_name(self, nestedName, templateDecls, typ, templateShorthand,
matchSelf, recurseInAnon):
- # type: (ASTNestedName, List[Any], unicode, bool, bool, bool) -> Symbol
+ # type: (ASTNestedName, List[Any], str, bool, bool, bool) -> Symbol
# templateShorthand: missing template parameter lists for templates is ok
def onMissingQualifiedSymbol(parentSymbol, identOrOp, templateParams, templateArgs):
@@ -4337,7 +4335,7 @@ class Symbol:
def find_declaration(self, declaration, typ, templateShorthand,
matchSelf, recurseInAnon):
- # type: (ASTDeclaration, unicode, bool, bool, bool) -> Symbol
+ # type: (ASTDeclaration, str, bool, bool, bool) -> Symbol
# templateShorthand: missing template parameter lists for templates is ok
nestedName = declaration.name
if declaration.templatePrefix:
@@ -4381,8 +4379,8 @@ class Symbol:
return None
def to_string(self, indent):
- # type: (int) -> unicode
- res = ['\t' * indent] # type: List[unicode]
+ # type: (int) -> str
+ res = ['\t' * indent]
if not self.parent:
res.append('::')
else:
@@ -4407,7 +4405,7 @@ class Symbol:
return ''.join(res)
def dump(self, indent):
- # type: (int) -> unicode
+ # type: (int) -> str
res = [self.to_string(indent)]
for c in self._children:
res.append(c.dump(indent + 1))
@@ -4439,7 +4437,7 @@ class DefinitionParser:
self.config = config
def _make_multi_error(self, errors, header):
- # type: (List[Any], unicode) -> DefinitionError
+ # type: (List[Any], str) -> DefinitionError
if len(errors) == 1:
if len(header) > 0:
return DefinitionError(header + '\n' + errors[0][0].description)
@@ -4462,13 +4460,13 @@ class DefinitionParser:
return DefinitionError(''.join(result))
def status(self, msg):
- # type: (unicode) -> None
+ # type: (str) -> None
# for debugging
indicator = '-' * self.pos + '^'
print("%s\n%s\n%s" % (msg, self.definition, indicator))
def fail(self, msg):
- # type: (unicode) -> None
+ # type: (str) -> None
errors = []
indicator = '-' * self.pos + '^'
exMain = DefinitionError(
@@ -4481,7 +4479,7 @@ class DefinitionParser:
raise self._make_multi_error(errors, '')
def warn(self, msg):
- # type: (unicode) -> None
+ # type: (str) -> None
if self.warnEnv:
self.warnEnv.warn(msg)
else:
@@ -4502,7 +4500,7 @@ class DefinitionParser:
self.pos, self.last_match = self._previous_state
def skip_string(self, string):
- # type: (unicode) -> bool
+ # type: (str) -> bool
strlen = len(string)
if self.definition[self.pos:self.pos + strlen] == string:
self.pos += strlen
@@ -4510,7 +4508,7 @@ class DefinitionParser:
return False
def skip_word(self, word):
- # type: (unicode) -> bool
+ # type: (str) -> bool
return self.match(re.compile(r'\b%s\b' % re.escape(word)))
def skip_ws(self):
@@ -4518,14 +4516,14 @@ class DefinitionParser:
return self.match(_whitespace_re)
def skip_word_and_ws(self, word):
- # type: (unicode) -> bool
+ # type: (str) -> bool
if self.skip_word(word):
self.skip_ws()
return True
return False
def skip_string_and_ws(self, string):
- # type: (unicode) -> bool
+ # type: (str) -> bool
if self.skip_string(string):
self.skip_ws()
return True
@@ -4538,7 +4536,7 @@ class DefinitionParser:
@property
def current_char(self):
- # type: () -> unicode
+ # type: () -> str
try:
return self.definition[self.pos]
except IndexError:
@@ -4546,14 +4544,14 @@ class DefinitionParser:
@property
def matched_text(self):
- # type: () -> unicode
+ # type: () -> str
if self.last_match is not None:
return self.last_match.group()
else:
return None
def read_rest(self):
- # type: () -> unicode
+ # type: () -> str
rv = self.definition[self.pos:]
self.pos = self.end
return rv
@@ -4584,11 +4582,11 @@ class DefinitionParser:
return self.definition[startPos:self.pos]
def _parse_balanced_token_seq(self, end):
- # type: (List[unicode]) -> unicode
+ # type: (List[str]) -> str
# TODO: add handling of string literals and similar
- brackets = {'(': ')', '[': ']', '{': '}'} # type: Dict[unicode, unicode]
+ brackets = {'(': ')', '[': ']', '{': '}'}
startPos = self.pos
- symbols = [] # type: List[unicode]
+ symbols = [] # type: List[str]
while not self.eof:
if len(symbols) == 0 and self.current_char in end:
break
@@ -4754,7 +4752,7 @@ class DefinitionParser:
return self._parse_nested_name()
def _parse_expression_list_or_braced_init_list(self):
- # type: () -> Tuple[List[Any], unicode]
+ # type: () -> Tuple[List[Any], str]
self.skip_ws()
if self.skip_string_and_ws('('):
close = ')'
@@ -5176,8 +5174,8 @@ class DefinitionParser:
value = self.matched_text
else:
# TODO: add handling of more bracket-like things, and quote handling
- brackets = {'(': ')', '[': ']', '<': '>'} # type: Dict[unicode, unicode]
- symbols = [] # type: List[unicode]
+ brackets = {'(': ')', '[': ']', '<': '>'}
+ symbols = [] # type: List[str]
while not self.eof:
if (len(symbols) == 0 and self.current_char in end):
break
@@ -5379,7 +5377,7 @@ class DefinitionParser:
return ASTTrailingTypeSpecName(prefix, nestedName)
def _parse_parameters_and_qualifiers(self, paramMode):
- # type: (unicode) -> ASTParametersQualifiers
+ # type: (str) -> ASTParametersQualifiers
if paramMode == 'new':
return None
self.skip_ws()
@@ -5471,7 +5469,7 @@ class DefinitionParser:
initializer)
def _parse_decl_specs_simple(self, outer, typed):
- # type: (unicode, bool) -> ASTDeclSpecsSimple
+ # type: (str, bool) -> ASTDeclSpecsSimple
"""Just parse the simple ones."""
storage = None
threadLocal = None
@@ -5546,7 +5544,7 @@ class DefinitionParser:
friend, attrs)
def _parse_decl_specs(self, outer, typed=True):
- # type: (unicode, bool) -> ASTDeclSpecs
+ # type: (str, bool) -> ASTDeclSpecs
if outer:
if outer not in ('type', 'member', 'function', 'templateParam'):
raise Exception('Internal error, unknown outer "%s".' % outer)
@@ -5574,7 +5572,7 @@ class DefinitionParser:
return ASTDeclSpecs(outer, leftSpecs, rightSpecs, trailing)
def _parse_declarator_name_param_qual(self, named, paramMode, typed):
- # type: (Union[bool, unicode], unicode, bool) -> ASTDeclaratorNameParamQual
+ # type: (Union[bool, str], str, bool) -> ASTDeclaratorNameParamQual
# now we should parse the name, and then suffixes
if named == 'maybe':
pos = self.pos
@@ -5621,7 +5619,7 @@ class DefinitionParser:
paramQual=paramQual)
def _parse_declarator(self, named, paramMode, typed=True):
- # type: (Union[bool, unicode], unicode, bool) -> Any
+ # type: (Union[bool, str], str, bool) -> Any
# 'typed' here means 'parse return type stuff'
if paramMode not in ('type', 'function', 'operatorCast', 'new'):
raise Exception(
@@ -5733,7 +5731,7 @@ class DefinitionParser:
raise self._make_multi_error(prevErrors, header)
def _parse_initializer(self, outer=None, allowFallback=True):
- # type: (unicode, bool) -> ASTInitializer
+ # type: (str, bool) -> ASTInitializer
self.skip_ws()
# TODO: support paren and brace initialization for memberObject
if not self.skip_string('='):
@@ -5760,7 +5758,7 @@ class DefinitionParser:
return ASTInitializer(value)
def _parse_type(self, named, outer=None):
- # type: (Union[bool, unicode], unicode) -> ASTType
+ # type: (Union[bool, str], str) -> ASTType
"""
named=False|'maybe'|True: 'maybe' is e.g., for function objects which
doesn't need to name the arguments
@@ -5843,7 +5841,7 @@ class DefinitionParser:
return ASTType(declSpecs, decl)
def _parse_type_with_init(self, named, outer):
- # type: (Union[bool, unicode], unicode) -> Any
+ # type: (Union[bool, str], str) -> Any
if outer:
assert outer in ('type', 'member', 'function', 'templateParam')
type = self._parse_type(outer=outer, named=named)
@@ -5912,7 +5910,7 @@ class DefinitionParser:
if self.skip_string(':'):
while 1:
self.skip_ws()
- visibility = 'private' # type: unicode
+ visibility = 'private'
virtual = False
pack = False
if self.skip_word_and_ws('virtual'):
@@ -5940,7 +5938,7 @@ class DefinitionParser:
def _parse_enum(self):
# type: () -> ASTEnum
- scoped = None # type: unicode # is set by CPPEnumObject
+ scoped = None # is set by CPPEnumObject
self.skip_ws()
name = self._parse_nested_name()
self.skip_ws()
@@ -6075,8 +6073,8 @@ class DefinitionParser:
return ASTTemplateIntroduction(concept, params)
def _parse_template_declaration_prefix(self, objectType):
- # type: (unicode) -> ASTTemplateDeclarationPrefix
- templates = [] # type: List
+ # type: (str) -> ASTTemplateDeclarationPrefix
+ templates = [] # type: List[str]
while 1:
self.skip_ws()
# the saved position is only used to provide a better error message
@@ -6131,7 +6129,7 @@ class DefinitionParser:
msg = "Too many template argument lists compared to parameter" \
" lists. Argument lists: %d, Parameter lists: %d," \
" Extra empty parameters lists prepended: %d." \
- % (numArgs, numParams, numExtra) # type: unicode
+ % (numArgs, numParams, numExtra)
msg += " Declaration:\n\t"
if templatePrefix:
msg += "%s\n\t" % text_type(templatePrefix)
@@ -6147,7 +6145,7 @@ class DefinitionParser:
return templatePrefix
def parse_declaration(self, objectType):
- # type: (unicode) -> ASTDeclaration
+ # type: (str) -> ASTDeclaration
if objectType not in ('type', 'concept', 'member',
'function', 'class', 'union', 'enum', 'enumerator'):
raise Exception('Internal error, unknown objectType "%s".' % objectType)
@@ -6290,7 +6288,7 @@ class CPPObject(ObjectDescription):
option_spec['tparam-line-spec'] = directives.flag
def warn(self, msg):
- # type: (unicode) -> None
+ # type: (str) -> None
self.state_machine.reporter.warning(msg, line=self.lineno)
def _add_enumerator_to_parent(self, ast):
@@ -6336,7 +6334,7 @@ class CPPObject(ObjectDescription):
docname=self.env.docname)
def add_target_and_index(self, ast, sig, signode):
- # type: (Any, unicode, addnodes.desc_signature) -> None
+ # type: (Any, str, addnodes.desc_signature) -> None
# general note: name must be lstrip(':')'ed, to remove "::"
ids = []
for i in range(1, _max_id + 1):
@@ -6396,7 +6394,7 @@ class CPPObject(ObjectDescription):
self.state.document.note_explicit_target(signode)
def get_index_text(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
raise NotImplementedError()
def parse_definition(self, parser):
@@ -6437,7 +6435,7 @@ class CPPObject(ObjectDescription):
return super(CPPObject, self).run()
def handle_signature(self, sig, signode):
- # type: (unicode, addnodes.desc_signature) -> Any
+ # type: (str, addnodes.desc_signature) -> Any
parentSymbol = self.env.temp_data['cpp:parent_symbol']
parser = DefinitionParser(sig, self, self.env.config)
@@ -6489,7 +6487,7 @@ class CPPObject(ObjectDescription):
class CPPTypeObject(CPPObject):
def get_index_text(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return _('%s (C++ type)') % name
def parse_definition(self, parser):
@@ -6499,7 +6497,7 @@ class CPPTypeObject(CPPObject):
class CPPConceptObject(CPPObject):
def get_index_text(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return _('%s (C++ concept)') % name
def parse_definition(self, parser):
@@ -6509,7 +6507,7 @@ class CPPConceptObject(CPPObject):
class CPPMemberObject(CPPObject):
def get_index_text(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return _('%s (C++ member)') % name
def parse_definition(self, parser):
@@ -6519,7 +6517,7 @@ class CPPMemberObject(CPPObject):
class CPPFunctionObject(CPPObject):
def get_index_text(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return _('%s (C++ function)') % name
def parse_definition(self, parser):
@@ -6529,7 +6527,7 @@ class CPPFunctionObject(CPPObject):
class CPPClassObject(CPPObject):
def get_index_text(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return _('%s (C++ class)') % name
def parse_definition(self, parser):
@@ -6539,7 +6537,7 @@ class CPPClassObject(CPPObject):
class CPPUnionObject(CPPObject):
def get_index_text(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return _('%s (C++ union)') % name
def parse_definition(self, parser):
@@ -6549,7 +6547,7 @@ class CPPUnionObject(CPPObject):
class CPPEnumObject(CPPObject):
def get_index_text(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return _('%s (C++ enum)') % name
def parse_definition(self, parser):
@@ -6569,7 +6567,7 @@ class CPPEnumObject(CPPObject):
class CPPEnumeratorObject(CPPObject):
def get_index_text(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return _('%s (C++ enumerator)') % name
def parse_definition(self, parser):
@@ -6590,7 +6588,7 @@ class CPPNamespaceObject(SphinxDirective):
option_spec = {} # type: Dict
def warn(self, msg):
- # type: (unicode) -> None
+ # type: (str) -> None
self.state_machine.reporter.warning(msg, line=self.lineno)
def run(self):
@@ -6624,7 +6622,7 @@ class CPPNamespacePushObject(SphinxDirective):
option_spec = {} # type: Dict
def warn(self, msg):
- # type: (unicode) -> None
+ # type: (str) -> None
self.state_machine.reporter.warning(msg, line=self.lineno)
def run(self):
@@ -6659,7 +6657,7 @@ class CPPNamespacePopObject(SphinxDirective):
option_spec = {} # type: Dict
def warn(self, msg):
- # type: (unicode) -> None
+ # type: (str) -> None
self.state_machine.reporter.warning(msg, line=self.lineno)
def run(self):
@@ -6682,7 +6680,7 @@ class CPPNamespacePopObject(SphinxDirective):
class CPPXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
- # type: (BuildEnvironment, nodes.Element, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
+ # type: (BuildEnvironment, nodes.Element, bool, str, str) -> Tuple[str, str]
refnode.attributes.update(env.ref_context)
if not has_explicit_title:
@@ -6798,7 +6796,7 @@ class CPPDomain(Domain):
}
def clear_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
rootSymbol = self.data['root_symbol']
rootSymbol.clear_doc(docname)
for name, nDocname in list(self.data['names'].items()):
@@ -6806,7 +6804,7 @@ class CPPDomain(Domain):
del self.data['names'][name]
def process_doc(self, env, docname, document):
- # type: (BuildEnvironment, unicode, nodes.document) -> None
+ # type: (BuildEnvironment, str, nodes.document) -> None
# just for debugging
# print("process_doc:", docname)
# print(self.data['root_symbol'].dump(0))
@@ -6817,7 +6815,7 @@ class CPPDomain(Domain):
pnode.attributes.update(self.env.ref_context)
def merge_domaindata(self, docnames, otherdata):
- # type: (List[unicode], Dict) -> None
+ # type: (List[str], Dict) -> None
# print("merge_domaindata:")
# print("self")
# print(self.data['root_symbol'].dump(0))
@@ -6839,7 +6837,7 @@ class CPPDomain(Domain):
def _resolve_xref_inner(self, env, fromdocname, builder, typ,
target, node, contnode, emitWarnings=True):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element, bool) -> Tuple[nodes.Element, unicode] # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element, bool) -> Tuple[nodes.Element, str] # NOQA
class Warner:
def warn(self, msg):
if emitWarnings:
@@ -6977,12 +6975,12 @@ class CPPDomain(Domain):
), declaration.objectType
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
return self._resolve_xref_inner(env, fromdocname, builder, typ,
target, node, contnode)[0]
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, addnodes.pending_xref, nodes.Element) -> List[Tuple[unicode, nodes.Element]] # NOQA
+ # type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
retnode, objtype = self._resolve_xref_inner(env, fromdocname, builder,
'any', target, node, contnode,
emitWarnings=False)
@@ -6994,7 +6992,7 @@ class CPPDomain(Domain):
return []
def get_objects(self):
- # type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
+ # type: () -> Iterator[Tuple[str, str, str, str, str, int]]
rootSymbol = self.data['root_symbol']
for symbol in rootSymbol.get_all_symbols():
if symbol.declaration is None:
@@ -7009,7 +7007,7 @@ class CPPDomain(Domain):
yield (name, dispname, objectType, docname, newestId, 1)
def get_full_qualified_name(self, node):
- # type: (nodes.Element) -> unicode
+ # type: (nodes.Element) -> str
target = node.get('reftarget', None)
if target is None:
return None
@@ -7024,7 +7022,7 @@ class CPPDomain(Domain):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_domain(CPPDomain)
app.add_config_value("cpp_index_common_prefix", [], 'env')
app.add_config_value("cpp_id_attributes", [], 'env')
diff --git a/sphinx/domains/javascript.py b/sphinx/domains/javascript.py
index ca9930ac4..68bd045ef 100644
--- a/sphinx/domains/javascript.py
+++ b/sphinx/domains/javascript.py
@@ -29,7 +29,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
class JSObject(ObjectDescription):
@@ -41,14 +40,14 @@ class JSObject(ObjectDescription):
has_arguments = False
#: what is displayed right before the documentation entry
- display_prefix = None # type: unicode
+ display_prefix = None # type: str
#: If ``allow_nesting`` is ``True``, the object prefixes will be accumulated
#: based on directive nesting
allow_nesting = False
def handle_signature(self, sig, signode):
- # type: (unicode, addnodes.desc_signature) -> Tuple[unicode, unicode]
+ # type: (str, addnodes.desc_signature) -> Tuple[str, str]
"""Breaks down construct signatures
Parses out prefix and argument list from construct definition. The
@@ -102,7 +101,7 @@ class JSObject(ObjectDescription):
return fullname, prefix
def add_target_and_index(self, name_obj, sig, signode):
- # type: (Tuple[unicode, unicode], unicode, addnodes.desc_signature) -> None
+ # type: (Tuple[str, str], str, addnodes.desc_signature) -> None
mod_name = self.env.ref_context.get('js:module')
fullname = (mod_name and mod_name + '.' or '') + name_obj[0]
if fullname not in self.state.document.ids:
@@ -126,7 +125,7 @@ class JSObject(ObjectDescription):
'', None))
def get_index_text(self, objectname, name_obj):
- # type: (unicode, Tuple[unicode, unicode]) -> unicode
+ # type: (str, Tuple[str, str]) -> str
name, obj = name_obj
if self.objtype == 'function':
if not obj:
@@ -273,7 +272,7 @@ class JSModule(SphinxDirective):
class JSXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
- # type: (BuildEnvironment, nodes.Element, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
+ # type: (BuildEnvironment, nodes.Element, bool, str, str) -> Tuple[str, str]
# basically what sphinx.domains.python.PyXRefRole does
refnode['js:object'] = env.ref_context.get('js:object')
refnode['js:module'] = env.ref_context.get('js:module')
@@ -323,10 +322,10 @@ class JavaScriptDomain(Domain):
initial_data = {
'objects': {}, # fullname -> docname, objtype
'modules': {}, # mod_name -> docname
- } # type: Dict[unicode, Dict[unicode, Tuple[unicode, unicode]]]
+ } # type: Dict[str, Dict[str, Tuple[str, str]]]
def clear_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
for fullname, (pkg_docname, _l) in list(self.data['objects'].items()):
if pkg_docname == docname:
del self.data['objects'][fullname]
@@ -335,7 +334,7 @@ class JavaScriptDomain(Domain):
del self.data['modules'][mod_name]
def merge_domaindata(self, docnames, otherdata):
- # type: (List[unicode], Dict) -> None
+ # type: (List[str], Dict) -> None
# XXX check duplicates
for fullname, (fn, objtype) in otherdata['objects'].items():
if fn in docnames:
@@ -345,7 +344,7 @@ class JavaScriptDomain(Domain):
self.data['modules'][mod_name] = pkg_docname
def find_obj(self, env, mod_name, prefix, name, typ, searchorder=0):
- # type: (BuildEnvironment, unicode, unicode, unicode, unicode, int) -> Tuple[unicode, Tuple[unicode, unicode]] # NOQA
+ # type: (BuildEnvironment, str, str, str, str, int) -> Tuple[str, Tuple[str, str]]
if name[-2:] == '()':
name = name[:-2]
objects = self.data['objects']
@@ -371,7 +370,7 @@ class JavaScriptDomain(Domain):
def resolve_xref(self, env, fromdocname, builder, typ, target, node,
contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
mod_name = node.get('js:module')
prefix = node.get('js:object')
searchorder = node.hasattr('refspecific') and 1 or 0
@@ -383,7 +382,7 @@ class JavaScriptDomain(Domain):
def resolve_any_xref(self, env, fromdocname, builder, target, node,
contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, addnodes.pending_xref, nodes.Element) -> List[Tuple[unicode, nodes.Element]] # NOQA
+ # type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
mod_name = node.get('js:module')
prefix = node.get('js:object')
name, obj = self.find_obj(env, mod_name, prefix, target, None, 1)
@@ -394,13 +393,13 @@ class JavaScriptDomain(Domain):
name.replace('$', '_S_'), contnode, name))]
def get_objects(self):
- # type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
+ # type: () -> Iterator[Tuple[str, str, str, str, str, int]]
for refname, (docname, type) in list(self.data['objects'].items()):
yield refname, refname, type, docname, \
refname.replace('$', '_S_'), 1
def get_full_qualified_name(self, node):
- # type: (nodes.Element) -> unicode
+ # type: (nodes.Element) -> str
modname = node.get('js:module')
prefix = node.get('js:object')
target = node.get('reftarget')
@@ -411,7 +410,7 @@ class JavaScriptDomain(Domain):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_domain(JavaScriptDomain)
return {
diff --git a/sphinx/domains/math.py b/sphinx/domains/math.py
index 89fc8c357..b1d23ec74 100644
--- a/sphinx/domains/math.py
+++ b/sphinx/domains/math.py
@@ -26,7 +26,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -46,7 +45,7 @@ class MathDomain(Domain):
initial_data = {
'objects': {}, # labelid -> (docname, eqno)
'has_equations': {}, # docname -> bool
- } # type: Dict[unicode, Dict[unicode, Tuple[unicode, int]]]
+ } # type: Dict[str, Dict[str, Tuple[str, int]]]
dangling_warnings = {
'eq': 'equation not found: %(target)s',
}
@@ -59,7 +58,7 @@ class MathDomain(Domain):
}
def process_doc(self, env, docname, document):
- # type: (BuildEnvironment, unicode, nodes.document) -> None
+ # type: (BuildEnvironment, str, nodes.document) -> None
def math_node(node):
# type: (nodes.Node) -> bool
return isinstance(node, (nodes.math, nodes.math_block))
@@ -67,7 +66,7 @@ class MathDomain(Domain):
self.data['has_equations'][docname] = any(document.traverse(math_node))
def clear_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
for equation_id, (doc, eqno) in list(self.data['objects'].items()):
if doc == docname:
del self.data['objects'][equation_id]
@@ -75,7 +74,7 @@ class MathDomain(Domain):
self.data['has_equations'].pop(docname, None)
def merge_domaindata(self, docnames, otherdata):
- # type: (Iterable[unicode], Dict) -> None
+ # type: (Iterable[str], Dict) -> None
for labelid, (doc, eqno) in otherdata['objects'].items():
if doc in docnames:
self.data['objects'][labelid] = (doc, eqno)
@@ -84,7 +83,7 @@ class MathDomain(Domain):
self.data['has_equations'][docname] = otherdata['has_equations'][docname]
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
assert typ in ('eq', 'numref')
docname, number = self.data['objects'].get(target, (None, None))
if docname:
@@ -109,7 +108,7 @@ class MathDomain(Domain):
return None
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, addnodes.pending_xref, nodes.Element) -> List[Tuple[unicode, nodes.Element]] # NOQA
+ # type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
refnode = self.resolve_xref(env, fromdocname, builder, 'eq', target, node, contnode)
if refnode is None:
return []
@@ -121,7 +120,7 @@ class MathDomain(Domain):
return []
def add_equation(self, env, docname, labelid):
- # type: (BuildEnvironment, unicode, unicode) -> int
+ # type: (BuildEnvironment, str, str) -> int
equations = self.data['objects']
if labelid in equations:
path = env.doc2path(equations[labelid][0])
@@ -133,7 +132,7 @@ class MathDomain(Domain):
return eqno
def get_next_equation_number(self, docname):
- # type: (unicode) -> int
+ # type: (str) -> int
targets = [eq for eq in self.data['objects'].values() if eq[0] == docname]
return len(targets) + 1
@@ -143,7 +142,7 @@ class MathDomain(Domain):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_domain(MathDomain)
app.add_role('eq', MathReferenceRole(warn_dangling=True))
diff --git a/sphinx/domains/python.py b/sphinx/domains/python.py
index 8c5c6f999..c0df50979 100644
--- a/sphinx/domains/python.py
+++ b/sphinx/domains/python.py
@@ -31,7 +31,7 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import TextlikeNode, unicode # NOQA
+ from sphinx.util.typing import TextlikeNode # NOQA
logger = logging.getLogger(__name__)
@@ -54,7 +54,7 @@ pairindextypes = {
'exception': _('exception'),
'statement': _('statement'),
'builtin': _('built-in function'),
-} # Dict[unicode, unicode]
+}
locale.pairindextypes = DeprecatedDict(
pairindextypes,
@@ -65,7 +65,7 @@ locale.pairindextypes = DeprecatedDict(
def _pseudo_parse_arglist(signode, arglist):
- # type: (addnodes.desc_signature, unicode) -> None
+ # type: (addnodes.desc_signature, str) -> None
""""Parse" a list of arguments separated by commas.
Arguments can have "optional" annotations given by enclosing them in
@@ -117,9 +117,9 @@ def _pseudo_parse_arglist(signode, arglist):
# when it comes to handling "." and "~" prefixes.
class PyXrefMixin:
def make_xref(self,
- rolename, # type: unicode
- domain, # type: unicode
- target, # type: unicode
+ rolename, # type: str
+ domain, # type: str
+ target, # type: str
innernode=nodes.emphasis, # type: Type[TextlikeNode]
contnode=None, # type: nodes.Node
env=None, # type: BuildEnvironment
@@ -140,9 +140,9 @@ class PyXrefMixin:
return result
def make_xrefs(self,
- rolename, # type: unicode
- domain, # type: unicode
- target, # type: unicode
+ rolename, # type: str
+ domain, # type: str
+ target, # type: str
innernode=nodes.emphasis, # type: Type[TextlikeNode]
contnode=None, # type: nodes.Node
env=None, # type: BuildEnvironment
@@ -171,7 +171,7 @@ class PyXrefMixin:
class PyField(PyXrefMixin, Field):
def make_xref(self, rolename, domain, target,
innernode=nodes.emphasis, contnode=None, env=None):
- # type: (unicode, unicode, unicode, Type[TextlikeNode], nodes.Node, BuildEnvironment) -> nodes.Node # NOQA
+ # type: (str, str, str, Type[TextlikeNode], nodes.Node, BuildEnvironment) -> nodes.Node # NOQA
if rolename == 'class' and target == 'None':
# None is not a type, so use obj role instead.
rolename = 'obj'
@@ -187,7 +187,7 @@ class PyGroupedField(PyXrefMixin, GroupedField):
class PyTypedField(PyXrefMixin, TypedField):
def make_xref(self, rolename, domain, target,
innernode=nodes.emphasis, contnode=None, env=None):
- # type: (unicode, unicode, unicode, Type[TextlikeNode], nodes.Node, BuildEnvironment) -> nodes.Node # NOQA
+ # type: (str, str, str, Type[TextlikeNode], nodes.Node, BuildEnvironment) -> nodes.Node # NOQA
if rolename == 'class' and target == 'None':
# None is not a type, so use obj role instead.
rolename = 'obj'
@@ -231,7 +231,7 @@ class PyObject(ObjectDescription):
allow_nesting = False
def get_signature_prefix(self, sig):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""May return a prefix to put before the object name in the
signature.
"""
@@ -245,7 +245,7 @@ class PyObject(ObjectDescription):
return False
def handle_signature(self, sig, signode):
- # type: (unicode, addnodes.desc_signature) -> Tuple[unicode, unicode]
+ # type: (str, addnodes.desc_signature) -> Tuple[str, str]
"""Transform a Python signature into RST nodes.
Return (fully qualified name of the thing, classname if any).
@@ -325,12 +325,12 @@ class PyObject(ObjectDescription):
return fullname, name_prefix
def get_index_text(self, modname, name):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
"""Return the text for the index entry of the object."""
raise NotImplementedError('must be implemented in subclasses')
def add_target_and_index(self, name_cls, sig, signode):
- # type: (unicode, unicode, addnodes.desc_signature) -> None
+ # type: (str, str, addnodes.desc_signature) -> None
modname = self.options.get(
'module', self.env.ref_context.get('py:module'))
fullname = (modname and modname + '.' or '') + name_cls[0]
@@ -426,7 +426,7 @@ class PyModulelevel(PyObject):
return self.objtype == 'function'
def get_index_text(self, modname, name_cls):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
if self.objtype == 'function':
if not modname:
return _('%s() (built-in function)') % name_cls[0]
@@ -447,11 +447,11 @@ class PyClasslike(PyObject):
allow_nesting = True
def get_signature_prefix(self, sig):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.objtype + ' '
def get_index_text(self, modname, name_cls):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
if self.objtype == 'class':
if not modname:
return _('%s (built-in class)') % name_cls[0]
@@ -472,7 +472,7 @@ class PyClassmember(PyObject):
return self.objtype.endswith('method')
def get_signature_prefix(self, sig):
- # type: (unicode) -> unicode
+ # type: (str) -> str
if self.objtype == 'staticmethod':
return 'static '
elif self.objtype == 'classmethod':
@@ -480,7 +480,7 @@ class PyClassmember(PyObject):
return ''
def get_index_text(self, modname, name_cls):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
name, cls = name_cls
add_modules = self.env.config.add_module_names
if self.objtype == 'method':
@@ -542,7 +542,7 @@ class PyDecoratorMixin:
Mixin for decorator directives.
"""
def handle_signature(self, sig, signode):
- # type: (unicode, addnodes.desc_signature) -> Tuple[unicode, unicode]
+ # type: (str, addnodes.desc_signature) -> Tuple[str, str]
ret = super(PyDecoratorMixin, self).handle_signature(sig, signode) # type: ignore
signode.insert(0, addnodes.desc_addname('@', '@'))
return ret
@@ -640,7 +640,7 @@ class PyCurrentModule(SphinxDirective):
class PyXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
- # type: (BuildEnvironment, nodes.Element, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
+ # type: (BuildEnvironment, nodes.Element, bool, str, str) -> Tuple[str, str]
refnode['py:module'] = env.ref_context.get('py:module')
refnode['py:class'] = env.ref_context.get('py:class')
if not has_explicit_title:
@@ -671,10 +671,10 @@ class PythonModuleIndex(Index):
shortname = _('modules')
def generate(self, docnames=None):
- # type: (Iterable[unicode]) -> Tuple[List[Tuple[unicode, List[IndexEntry]]], bool]
- content = {} # type: Dict[unicode, List[IndexEntry]]
+ # type: (Iterable[str]) -> Tuple[List[Tuple[str, List[IndexEntry]]], bool]
+ content = {} # type: Dict[str, List[IndexEntry]]
# list of prefixes to ignore
- ignores = None # type: List[unicode]
+ ignores = None # type: List[str]
ignores = self.domain.env.config['modindex_common_prefix'] # type: ignore
ignores = sorted(ignores, key=len, reverse=True)
# list of all modules, sorted by module name
@@ -750,7 +750,7 @@ class PythonDomain(Domain):
'staticmethod': ObjType(_('static method'), 'meth', 'obj'),
'attribute': ObjType(_('attribute'), 'attr', 'obj'),
'module': ObjType(_('module'), 'mod', 'obj'),
- } # type: Dict[unicode, ObjType]
+ } # type: Dict[str, ObjType]
directives = {
'function': PyModulelevel,
@@ -780,13 +780,13 @@ class PythonDomain(Domain):
initial_data = {
'objects': {}, # fullname -> docname, objtype
'modules': {}, # modname -> docname, synopsis, platform, deprecated
- } # type: Dict[unicode, Dict[unicode, Tuple[Any]]]
+ } # type: Dict[str, Dict[str, Tuple[Any]]]
indices = [
PythonModuleIndex,
]
def clear_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
for fullname, (fn, _l) in list(self.data['objects'].items()):
if fn == docname:
del self.data['objects'][fullname]
@@ -795,7 +795,7 @@ class PythonDomain(Domain):
del self.data['modules'][modname]
def merge_domaindata(self, docnames, otherdata):
- # type: (List[unicode], Dict) -> None
+ # type: (List[str], Dict) -> None
# XXX check duplicates?
for fullname, (fn, objtype) in otherdata['objects'].items():
if fn in docnames:
@@ -805,7 +805,7 @@ class PythonDomain(Domain):
self.data['modules'][modname] = data
def find_obj(self, env, modname, classname, name, type, searchmode=0):
- # type: (BuildEnvironment, unicode, unicode, unicode, unicode, int) -> List[Tuple[unicode, Any]] # NOQA
+ # type: (BuildEnvironment, str, str, str, str, int) -> List[Tuple[str, Any]]
"""Find a Python object for "name", perhaps using the given module
and/or classname. Returns a list of (name, object entry) tuples.
"""
@@ -817,7 +817,7 @@ class PythonDomain(Domain):
return []
objects = self.data['objects']
- matches = [] # type: List[Tuple[unicode, Any]]
+ matches = [] # type: List[Tuple[str, Any]]
newname = None
if searchmode == 1:
@@ -870,7 +870,7 @@ class PythonDomain(Domain):
def resolve_xref(self, env, fromdocname, builder,
type, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
modname = node.get('py:module')
clsname = node.get('py:class')
searchmode = node.hasattr('refspecific') and 1 or 0
@@ -891,10 +891,10 @@ class PythonDomain(Domain):
def resolve_any_xref(self, env, fromdocname, builder, target,
node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, addnodes.pending_xref, nodes.Element) -> List[Tuple[unicode, nodes.Element]] # NOQA
+ # type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
modname = node.get('py:module')
clsname = node.get('py:class')
- results = [] # type: List[Tuple[unicode, nodes.Element]]
+ results = [] # type: List[Tuple[str, nodes.Element]]
# always search in "refspecific" mode with the :any: role
matches = self.find_obj(env, modname, clsname, target, None, 1)
@@ -910,7 +910,7 @@ class PythonDomain(Domain):
return results
def _make_module_refnode(self, builder, fromdocname, name, contnode):
- # type: (Builder, unicode, unicode, nodes.Node) -> nodes.Element
+ # type: (Builder, str, str, nodes.Node) -> nodes.Element
# get additional info for modules
docname, synopsis, platform, deprecated = self.data['modules'][name]
title = name
@@ -924,7 +924,7 @@ class PythonDomain(Domain):
'module-' + name, contnode, title)
def get_objects(self):
- # type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
+ # type: () -> Iterator[Tuple[str, str, str, str, str, int]]
for modname, info in self.data['modules'].items():
yield (modname, modname, 'module', info[0], 'module-' + modname, 0)
for refname, (docname, type) in self.data['objects'].items():
@@ -932,7 +932,7 @@ class PythonDomain(Domain):
yield (refname, refname, type, docname, refname, 1)
def get_full_qualified_name(self, node):
- # type: (nodes.Element) -> unicode
+ # type: (nodes.Element) -> str
modname = node.get('py:module')
clsname = node.get('py:class')
target = node.get('reftarget')
@@ -943,7 +943,7 @@ class PythonDomain(Domain):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_domain(PythonDomain)
return {
diff --git a/sphinx/domains/rst.py b/sphinx/domains/rst.py
index 2f8156238..c7680dc1a 100644
--- a/sphinx/domains/rst.py
+++ b/sphinx/domains/rst.py
@@ -25,7 +25,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
dir_sig_re = re.compile(r'\.\. (.+?)::(.*)$')
@@ -37,7 +36,7 @@ class ReSTMarkup(ObjectDescription):
"""
def add_target_and_index(self, name, sig, signode):
- # type: (unicode, unicode, addnodes.desc_signature) -> None
+ # type: (str, str, addnodes.desc_signature) -> None
targetname = self.objtype + '-' + name
if targetname not in self.state.document.ids:
signode['names'].append(targetname)
@@ -59,7 +58,7 @@ class ReSTMarkup(ObjectDescription):
targetname, '', None))
def get_index_text(self, objectname, name):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
if self.objtype == 'directive':
return _('%s (directive)') % name
elif self.objtype == 'role':
@@ -68,7 +67,7 @@ class ReSTMarkup(ObjectDescription):
def parse_directive(d):
- # type: (unicode) -> Tuple[unicode, unicode]
+ # type: (str) -> Tuple[str, str]
"""Parse a directive signature.
Returns (directive, arguments) string tuple. If no arguments are given,
@@ -90,7 +89,7 @@ class ReSTDirective(ReSTMarkup):
Description of a reST directive.
"""
def handle_signature(self, sig, signode):
- # type: (unicode, addnodes.desc_signature) -> unicode
+ # type: (str, addnodes.desc_signature) -> str
name, args = parse_directive(sig)
desc_name = '.. %s::' % name
signode += addnodes.desc_name(desc_name, desc_name)
@@ -104,7 +103,7 @@ class ReSTRole(ReSTMarkup):
Description of a reST role.
"""
def handle_signature(self, sig, signode):
- # type: (unicode, addnodes.desc_signature) -> unicode
+ # type: (str, addnodes.desc_signature) -> str
signode += addnodes.desc_name(':%s:' % sig, ':%s:' % sig)
return sig
@@ -128,23 +127,23 @@ class ReSTDomain(Domain):
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
- } # type: Dict[unicode, Dict[unicode, Tuple[unicode, ObjType]]]
+ } # type: Dict[str, Dict[str, Tuple[str, ObjType]]]
def clear_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
for (typ, name), doc in list(self.data['objects'].items()):
if doc == docname:
del self.data['objects'][typ, name]
def merge_domaindata(self, docnames, otherdata):
- # type: (List[unicode], Dict) -> None
+ # type: (List[str], Dict) -> None
# XXX check duplicates
for (typ, name), doc in otherdata['objects'].items():
if doc in docnames:
self.data['objects'][typ, name] = doc
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
objects = self.data['objects']
objtypes = self.objtypes_for_role(typ)
for objtype in objtypes:
@@ -156,9 +155,9 @@ class ReSTDomain(Domain):
return None
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, addnodes.pending_xref, nodes.Element) -> List[Tuple[unicode, nodes.Element]] # NOQA
+ # type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
objects = self.data['objects']
- results = [] # type: List[Tuple[unicode, nodes.Element]]
+ results = [] # type: List[Tuple[str, nodes.Element]]
for objtype in self.object_types:
if (objtype, target) in self.data['objects']:
results.append(('rst:' + self.role_for_objtype(objtype),
@@ -169,13 +168,13 @@ class ReSTDomain(Domain):
return results
def get_objects(self):
- # type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
+ # type: () -> Iterator[Tuple[str, str, str, str, str, int]]
for (typ, name), docname in self.data['objects'].items():
yield name, name, typ, docname, typ + '-' + name, 1
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_domain(ReSTDomain)
return {
diff --git a/sphinx/domains/std.py b/sphinx/domains/std.py
index 917b5cd76..12d0dfa1b 100644
--- a/sphinx/domains/std.py
+++ b/sphinx/domains/std.py
@@ -36,7 +36,7 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import RoleFunction, unicode # NOQA
+ from sphinx.util.typing import RoleFunction # NOQA
logger = logging.getLogger(__name__)
@@ -51,11 +51,11 @@ class GenericObject(ObjectDescription):
"""
A generic x-ref directive registered with Sphinx.add_object_type().
"""
- indextemplate = '' # type: unicode
- parse_node = None # type: Callable[[GenericObject, BuildEnvironment, unicode, addnodes.desc_signature], unicode] # NOQA
+ indextemplate = ''
+ parse_node = None # type: Callable[[GenericObject, BuildEnvironment, str, addnodes.desc_signature], str] # NOQA
def handle_signature(self, sig, signode):
- # type: (unicode, addnodes.desc_signature) -> unicode
+ # type: (str, addnodes.desc_signature) -> str
if self.parse_node:
name = self.parse_node(self.env, sig, signode)
else:
@@ -66,7 +66,7 @@ class GenericObject(ObjectDescription):
return name
def add_target_and_index(self, name, sig, signode):
- # type: (unicode, unicode, addnodes.desc_signature) -> None
+ # type: (str, str, addnodes.desc_signature) -> None
targetname = '%s-%s' % (self.objtype, name)
signode['ids'].append(targetname)
self.state.document.note_explicit_target(signode)
@@ -156,10 +156,10 @@ class Cmdoption(ObjectDescription):
"""
def handle_signature(self, sig, signode):
- # type: (unicode, addnodes.desc_signature) -> unicode
+ # type: (str, addnodes.desc_signature) -> str
"""Transform an option description into RST nodes."""
count = 0
- firstname = '' # type: unicode
+ firstname = ''
for potential_option in sig.split(', '):
potential_option = potential_option.strip()
m = option_desc_re.match(potential_option)
@@ -185,7 +185,7 @@ class Cmdoption(ObjectDescription):
return firstname
def add_target_and_index(self, firstname, sig, signode):
- # type: (unicode, unicode, addnodes.desc_signature) -> None
+ # type: (str, str, addnodes.desc_signature) -> None
currprogram = self.env.ref_context.get('std:program')
for optname in signode.get('allnames', []):
targetname = optname.replace('/', '-')
@@ -233,20 +233,20 @@ class Program(SphinxDirective):
class OptionXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
- # type: (BuildEnvironment, nodes.Element, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
+ # type: (BuildEnvironment, nodes.Element, bool, str, str) -> Tuple[str, str]
refnode['std:program'] = env.ref_context.get('std:program')
return title, target
def split_term_classifiers(line):
- # type: (unicode) -> List[Union[unicode, None]]
+ # type: (str) -> List[Union[str, None]]
# split line into a term and classifiers. if no classifier, None is used..
parts = re.split(' +: +', line) + [None]
return parts
def make_glossary_term(env, textnodes, index_key, source, lineno, new_id=None):
- # type: (BuildEnvironment, Iterable[nodes.Node], unicode, unicode, int, unicode) -> nodes.term # NOQA
+ # type: (BuildEnvironment, Iterable[nodes.Node], str, str, int, str) -> nodes.term
# get a text-only representation of the term and register it
# as a cross-reference target
term = nodes.term('', '', *textnodes)
@@ -300,7 +300,7 @@ class Glossary(SphinxDirective):
# be* a definition list.
# first, collect single entries
- entries = [] # type: List[Tuple[List[Tuple[unicode, unicode, int]], StringList]]
+ entries = [] # type: List[Tuple[List[Tuple[str, str, int]], StringList]]
in_definition = True
was_empty = True
messages = [] # type: List[nodes.Node]
@@ -352,7 +352,7 @@ class Glossary(SphinxDirective):
# now, parse all the entries into a big definition list
items = []
for terms, definition in entries:
- termtexts = [] # type: List[unicode]
+ termtexts = [] # type: List[str]
termnodes = [] # type: List[nodes.Node]
system_messages = [] # type: List[nodes.Node]
for line, source, lineno in terms:
@@ -390,7 +390,7 @@ class Glossary(SphinxDirective):
def token_xrefs(text):
- # type: (unicode) -> List[nodes.Node]
+ # type: (str) -> List[nodes.Node]
retnodes = [] # type: List[nodes.Node]
pos = 0
for m in token_re.finditer(text):
@@ -463,7 +463,7 @@ class StandardDomain(Domain):
'envvar': ObjType(_('environment variable'), 'envvar'),
'cmdoption': ObjType(_('program option'), 'option'),
'doc': ObjType(_('document'), 'doc', searchprio=-1)
- } # type: Dict[unicode, ObjType]
+ } # type: Dict[str, ObjType]
directives = {
'program': Program,
@@ -472,7 +472,7 @@ class StandardDomain(Domain):
'envvar': EnvVar,
'glossary': Glossary,
'productionlist': ProductionList,
- } # type: Dict[unicode, Type[Directive]]
+ } # type: Dict[str, Type[Directive]]
roles = {
'option': OptionXRefRole(warn_dangling=True),
'envvar': EnvVarXRefRole(),
@@ -491,7 +491,7 @@ class StandardDomain(Domain):
'keyword': XRefRole(warn_dangling=True),
# links to documents
'doc': XRefRole(warn_dangling=True, innernodeclass=nodes.inline),
- } # type: Dict[unicode, Union[RoleFunction, XRefRole]]
+ } # type: Dict[str, Union[RoleFunction, XRefRole]]
initial_data = {
'progoptions': {}, # (program, name) -> docname, labelid
@@ -525,7 +525,7 @@ class StandardDomain(Domain):
nodes.figure: ('figure', None),
nodes.table: ('table', None),
nodes.container: ('code-block', None),
- } # type: Dict[Type[nodes.Node], Tuple[unicode, Callable]]
+ } # type: Dict[Type[nodes.Node], Tuple[str, Callable]]
def __init__(self, env):
# type: (BuildEnvironment) -> None
@@ -537,7 +537,7 @@ class StandardDomain(Domain):
self.enumerable_nodes[node] = settings
def clear_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
for key, (fn, _l) in list(self.data['progoptions'].items()):
if fn == docname:
del self.data['progoptions'][key]
@@ -560,7 +560,7 @@ class StandardDomain(Domain):
del self.data['anonlabels'][key]
def merge_domaindata(self, docnames, otherdata):
- # type: (List[unicode], Dict) -> None
+ # type: (List[str], Dict) -> None
# XXX duplicates?
for key, data in otherdata['progoptions'].items():
if data[0] in docnames:
@@ -584,13 +584,13 @@ class StandardDomain(Domain):
self.data['anonlabels'][key] = data
def process_doc(self, env, docname, document):
- # type: (BuildEnvironment, unicode, nodes.document) -> None
+ # type: (BuildEnvironment, str, nodes.document) -> None
self.note_citations(env, docname, document)
self.note_citation_refs(env, docname, document)
self.note_labels(env, docname, document)
def note_citations(self, env, docname, document):
- # type: (BuildEnvironment, unicode, nodes.document) -> None
+ # type: (BuildEnvironment, str, nodes.document) -> None
for node in document.traverse(nodes.citation):
node['docname'] = docname
label = cast(nodes.label, node[0]).astext()
@@ -601,7 +601,7 @@ class StandardDomain(Domain):
self.data['citations'][label] = (docname, node['ids'][0], node.line)
def note_citation_refs(self, env, docname, document):
- # type: (BuildEnvironment, unicode, nodes.document) -> None
+ # type: (BuildEnvironment, str, nodes.document) -> None
for node in document.traverse(addnodes.pending_xref):
if node['refdomain'] == 'std' and node['reftype'] == 'citation':
label = node['reftarget']
@@ -609,7 +609,7 @@ class StandardDomain(Domain):
citation_refs.append(docname)
def note_labels(self, env, docname, document):
- # type: (BuildEnvironment, unicode, nodes.document) -> None
+ # type: (BuildEnvironment, str, nodes.document) -> None
labels, anonlabels = self.data['labels'], self.data['anonlabels']
for name, explicit in document.nametypes.items():
if not explicit:
@@ -652,11 +652,11 @@ class StandardDomain(Domain):
labels[name] = docname, labelid, sectname
def add_object(self, objtype, name, docname, labelid):
- # type: (unicode, unicode, unicode, unicode) -> None
+ # type: (str, str, str, str) -> None
self.data['objects'][objtype, name] = (docname, labelid)
def add_program_option(self, program, name, docname, labelid):
- # type: (unicode, unicode, unicode, unicode) -> None
+ # type: (str, str, str, str) -> None
self.data['progoptions'][program, name] = (docname, labelid)
def check_consistency(self):
@@ -669,7 +669,7 @@ class StandardDomain(Domain):
def build_reference_node(self, fromdocname, builder, docname, labelid,
sectname, rolename, **options):
- # type: (unicode, Builder, unicode, unicode, unicode, unicode, Any) -> nodes.Element
+ # type: (str, Builder, str, str, str, str, Any) -> nodes.Element
nodeclass = options.pop('nodeclass', nodes.reference)
newnode = nodeclass('', '', internal=True, **options)
innernode = nodes.inline(sectname, sectname)
@@ -693,7 +693,7 @@ class StandardDomain(Domain):
return newnode
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
if typ == 'ref':
resolver = self._resolve_ref_xref
elif typ == 'numref':
@@ -712,7 +712,7 @@ class StandardDomain(Domain):
return resolver(env, fromdocname, builder, typ, target, node, contnode)
def _resolve_ref_xref(self, env, fromdocname, builder, typ, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
if node['refexplicit']:
# reference to anonymous label; the reference uses
# the supplied link caption
@@ -730,7 +730,7 @@ class StandardDomain(Domain):
docname, labelid, sectname, 'ref')
def _resolve_numref_xref(self, env, fromdocname, builder, typ, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
if target in self.data['labels']:
docname, labelid, figname = self.data['labels'].get(target, ('', '', ''))
else:
@@ -791,7 +791,7 @@ class StandardDomain(Domain):
title=title)
def _resolve_keyword_xref(self, env, fromdocname, builder, typ, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# keywords are oddballs: they are referenced by named labels
docname, labelid, _ = self.data['labels'].get(target, ('', '', ''))
if not docname:
@@ -800,7 +800,7 @@ class StandardDomain(Domain):
labelid, contnode)
def _resolve_doc_xref(self, env, fromdocname, builder, typ, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# directly reference to document by source name; can be absolute or relative
refdoc = node.get('refdoc', fromdocname)
docname = docname_join(refdoc, node['reftarget'])
@@ -816,7 +816,7 @@ class StandardDomain(Domain):
return make_refnode(builder, fromdocname, docname, None, innernode)
def _resolve_option_xref(self, env, fromdocname, builder, typ, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
progname = node.get('std:program')
target = target.strip()
docname, labelid = self.data['progoptions'].get((progname, target), ('', ''))
@@ -838,7 +838,7 @@ class StandardDomain(Domain):
labelid, contnode)
def _resolve_citation_xref(self, env, fromdocname, builder, typ, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
from sphinx.environment import NoUri
docname, labelid, lineno = self.data['citations'].get(target, ('', '', 0))
@@ -861,7 +861,7 @@ class StandardDomain(Domain):
raise
def _resolve_obj_xref(self, env, fromdocname, builder, typ, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
objtypes = self.objtypes_for_role(typ) or []
for objtype in objtypes:
if (objtype, target) in self.data['objects']:
@@ -875,8 +875,8 @@ class StandardDomain(Domain):
labelid, contnode)
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, addnodes.pending_xref, nodes.Element) -> List[Tuple[unicode, nodes.Element]] # NOQA
- results = [] # type: List[Tuple[unicode, nodes.Element]]
+ # type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
+ results = [] # type: List[Tuple[str, nodes.Element]]
ltarget = target.lower() # :ref: lowercases its target automatically
for role in ('ref', 'option'): # do not try "keyword"
res = self.resolve_xref(env, fromdocname, builder, role,
@@ -897,7 +897,7 @@ class StandardDomain(Domain):
return results
def get_objects(self):
- # type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
+ # type: () -> Iterator[Tuple[str, str, str, str, str, int]]
# handle the special 'doc' reference here
for doc in self.env.all_docs:
yield (doc, clean_astext(self.env.titles[doc]), 'doc', doc, '', -1)
@@ -919,7 +919,7 @@ class StandardDomain(Domain):
yield (name, name, 'label', info[0], info[1], -1)
def get_type_name(self, type, primary=False):
- # type: (ObjType, bool) -> unicode
+ # type: (ObjType, bool) -> str
# never prepend "Default"
return type.lname
@@ -928,7 +928,7 @@ class StandardDomain(Domain):
return node.__class__ in self.enumerable_nodes
def get_numfig_title(self, node):
- # type: (nodes.Node) -> unicode
+ # type: (nodes.Node) -> str
"""Get the title of enumerable nodes to refer them using its title"""
if self.is_enumerable_node(node):
_, title_getter = self.enumerable_nodes.get(node.__class__, (None, None))
@@ -942,7 +942,7 @@ class StandardDomain(Domain):
return None
def get_enumerable_node_type(self, node):
- # type: (nodes.Node) -> unicode
+ # type: (nodes.Node) -> str
"""Get type of enumerable nodes."""
def has_child(node, cls):
# type: (nodes.Element, Type) -> bool
@@ -960,7 +960,7 @@ class StandardDomain(Domain):
return figtype
def get_figtype(self, node):
- # type: (nodes.Node) -> unicode
+ # type: (nodes.Node) -> str
"""Get figure type of nodes.
.. deprecated:: 1.8
@@ -971,7 +971,7 @@ class StandardDomain(Domain):
return self.get_enumerable_node_type(node)
def get_fignumber(self, env, builder, figtype, docname, target_node):
- # type: (BuildEnvironment, Builder, unicode, unicode, nodes.Element) -> Tuple[int, ...]
+ # type: (BuildEnvironment, Builder, str, str, nodes.Element) -> Tuple[int, ...]
if figtype == 'section':
if builder.name == 'latex':
return tuple()
@@ -994,7 +994,7 @@ class StandardDomain(Domain):
raise ValueError
def get_full_qualified_name(self, node):
- # type: (nodes.Element) -> unicode
+ # type: (nodes.Element) -> str
if node.get('reftype') == 'option':
progname = node.get('std:program')
command = ws_re.split(node.get('reftarget'))
@@ -1010,7 +1010,7 @@ class StandardDomain(Domain):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_domain(StandardDomain)
return {
diff --git a/sphinx/environment/__init__.py b/sphinx/environment/__init__.py
index 6797ce28c..50e47c388 100644
--- a/sphinx/environment/__init__.py
+++ b/sphinx/environment/__init__.py
@@ -40,7 +40,6 @@ if False:
from sphinx.config import Config # NOQA
from sphinx.domains import Domain # NOQA
from sphinx.project import Project # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -80,7 +79,7 @@ versioning_conditions = {
'none': False,
'text': is_translatable,
'commentable': is_commentable,
-} # type: Dict[unicode, Union[bool, Callable]]
+}
class NoUri(Exception):
@@ -95,19 +94,19 @@ class BuildEnvironment:
transformations to resolve links to them.
"""
- domains = None # type: Dict[unicode, Domain]
+ domains = None # type: Dict[str, Domain]
# --------- ENVIRONMENT INITIALIZATION -------------------------------------
def __init__(self, app=None):
# type: (Sphinx) -> None
self.app = None # type: Sphinx
- self.doctreedir = None # type: unicode
- self.srcdir = None # type: unicode
+ self.doctreedir = None # type: str
+ self.srcdir = None # type: str
self.config = None # type: Config
self.config_status = None # type: int
self.project = None # type: Project
- self.version = None # type: Dict[unicode, unicode]
+ self.version = None # type: Dict[str, str]
# the method of doctree versioning; see set_versioning_method
self.versioning_condition = None # type: Union[bool, Callable]
@@ -123,60 +122,60 @@ class BuildEnvironment:
# All "docnames" here are /-separated and relative and exclude
# the source suffix.
- self.all_docs = {} # type: Dict[unicode, float]
+ self.all_docs = {} # type: Dict[str, float]
# docname -> mtime at the time of reading
# contains all read docnames
- self.dependencies = defaultdict(set) # type: Dict[unicode, Set[unicode]]
+ self.dependencies = defaultdict(set) # type: Dict[str, Set[str]]
# docname -> set of dependent file
# names, relative to documentation root
- self.included = defaultdict(set) # type: Dict[unicode, Set[unicode]]
+ self.included = defaultdict(set) # type: Dict[str, Set[str]]
# docname -> set of included file
# docnames included from other documents
- self.reread_always = set() # type: Set[unicode]
+ self.reread_always = set() # type: Set[str]
# docnames to re-read unconditionally on
# next build
# File metadata
- self.metadata = defaultdict(dict) # type: Dict[unicode, Dict[unicode, Any]]
+ self.metadata = defaultdict(dict) # type: Dict[str, Dict[str, Any]]
# docname -> dict of metadata items
# TOC inventory
- self.titles = {} # type: Dict[unicode, nodes.title]
+ self.titles = {} # type: Dict[str, nodes.title]
# docname -> title node
- self.longtitles = {} # type: Dict[unicode, nodes.title]
+ self.longtitles = {} # type: Dict[str, nodes.title]
# docname -> title node; only different if
# set differently with title directive
- self.tocs = {} # type: Dict[unicode, nodes.bullet_list]
+ self.tocs = {} # type: Dict[str, nodes.bullet_list]
# docname -> table of contents nodetree
- self.toc_num_entries = {} # type: Dict[unicode, int]
+ self.toc_num_entries = {} # type: Dict[str, int]
# docname -> number of real entries
# used to determine when to show the TOC
# in a sidebar (don't show if it's only one item)
- self.toc_secnumbers = {} # type: Dict[unicode, Dict[unicode, Tuple[int, ...]]]
+ self.toc_secnumbers = {} # type: Dict[str, Dict[str, Tuple[int, ...]]]
# docname -> dict of sectionid -> number
- self.toc_fignumbers = {} # type: Dict[unicode, Dict[unicode, Dict[unicode, Tuple[int, ...]]]] # NOQA
+ self.toc_fignumbers = {} # type: Dict[str, Dict[str, Dict[str, Tuple[int, ...]]]]
# docname -> dict of figtype ->
# dict of figureid -> number
- self.toctree_includes = {} # type: Dict[unicode, List[unicode]]
+ self.toctree_includes = {} # type: Dict[str, List[str]]
# docname -> list of toctree includefiles
- self.files_to_rebuild = {} # type: Dict[unicode, Set[unicode]]
+ self.files_to_rebuild = {} # type: Dict[str, Set[str]]
# docname -> set of files
# (containing its TOCs) to rebuild too
- self.glob_toctrees = set() # type: Set[unicode]
+ self.glob_toctrees = set() # type: Set[str]
# docnames that have :glob: toctrees
- self.numbered_toctrees = set() # type: Set[unicode]
+ self.numbered_toctrees = set() # type: Set[str]
# docnames that have :numbered: toctrees
# domain-specific inventories, here to be pickled
- self.domaindata = {} # type: Dict[unicode, Dict]
+ self.domaindata = {} # type: Dict[str, Dict]
# domainname -> domain-specific dict
# Other inventories
- self.indexentries = {} # type: Dict[unicode, List[Tuple[unicode, unicode, unicode, unicode, unicode]]] # NOQA
+ self.indexentries = {} # type: Dict[str, List[Tuple[str, str, str, str, str]]]
# docname -> list of
- # (type, unicode, target, aliasname)
+ # (type, str, target, aliasname)
# these map absolute path -> (docnames, unique filename)
self.images = FilenameUniqDict() # type: FilenameUniqDict
@@ -184,14 +183,14 @@ class BuildEnvironment:
# filename -> (set of docnames, destination)
# the original URI for images
- self.original_image_uri = {} # type: Dict[unicode, unicode]
+ self.original_image_uri = {} # type: Dict[str, str]
# temporary data storage while reading a document
- self.temp_data = {} # type: Dict[unicode, Any]
+ self.temp_data = {} # type: Dict[str, Any]
# context for cross-references (e.g. current module or class)
# this is similar to temp_data, but will for example be copied to
# attributes of "any" cross references
- self.ref_context = {} # type: Dict[unicode, Any]
+ self.ref_context = {} # type: Dict[str, Any]
# set up environment
if app:
@@ -268,7 +267,7 @@ class BuildEnvironment:
self.settings.setdefault('smart_quotes', True)
def set_versioning_method(self, method, compare):
- # type: (unicode, bool) -> None
+ # type: (str, bool) -> None
"""This sets the doctree versioning method for this environment.
Versioning methods are a builder property; only builders with the same
@@ -287,7 +286,7 @@ class BuildEnvironment:
self.versioning_compare = compare
def clear_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Remove all traces of a source file in the inventory."""
if docname in self.all_docs:
self.all_docs.pop(docname, None)
@@ -298,7 +297,7 @@ class BuildEnvironment:
domain.clear_doc(docname)
def merge_info_from(self, docnames, other, app):
- # type: (List[unicode], BuildEnvironment, Sphinx) -> None
+ # type: (List[str], BuildEnvironment, Sphinx) -> None
"""Merge global information gathered about *docnames* while reading them
from the *other* environment.
@@ -320,7 +319,7 @@ class BuildEnvironment:
app.emit('env-merge-info', self, docnames, other)
def path2doc(self, filename):
- # type: (unicode) -> Optional[unicode]
+ # type: (str) -> Optional[str]
"""Return the docname for the filename if the file is document.
*filename* should be absolute or relative to the source directory.
@@ -328,7 +327,7 @@ class BuildEnvironment:
return self.project.path2doc(filename)
def doc2path(self, docname, base=True, suffix=None):
- # type: (unicode, Union[bool, unicode], unicode) -> unicode
+ # type: (str, Union[bool, str], str) -> str
"""Return the filename for the document name.
If *base* is True, return absolute path under self.srcdir.
@@ -352,7 +351,7 @@ class BuildEnvironment:
return pathname
def relfn2path(self, filename, docname=None):
- # type: (unicode, unicode) -> Tuple[unicode, unicode]
+ # type: (str, str) -> Tuple[str, str]
"""Return paths to a file referenced from a document, relative to
documentation root and absolute.
@@ -378,7 +377,7 @@ class BuildEnvironment:
@property
def found_docs(self):
- # type: () -> Set[unicode]
+ # type: () -> Set[str]
"""contains all existing docnames."""
return self.project.docnames
@@ -414,13 +413,13 @@ class BuildEnvironment:
raise DocumentError(__('Failed to scan documents in %s: %r') % (self.srcdir, exc))
def get_outdated_files(self, config_changed):
- # type: (bool) -> Tuple[Set[unicode], Set[unicode], Set[unicode]]
+ # type: (bool) -> Tuple[Set[str], Set[str], Set[str]]
"""Return (added, changed, removed) sets."""
# clear all files no longer present
removed = set(self.all_docs) - self.found_docs
- added = set() # type: Set[unicode]
- changed = set() # type: Set[unicode]
+ added = set() # type: Set[str]
+ changed = set() # type: Set[str]
if config_changed:
# config values affect e.g. substitutions
@@ -465,8 +464,8 @@ class BuildEnvironment:
return added, changed, removed
def check_dependents(self, app, already):
- # type: (Sphinx, Set[unicode]) -> Iterator[unicode]
- to_rewrite = [] # type: List[unicode]
+ # type: (Sphinx, Set[str]) -> Iterator[str]
+ to_rewrite = [] # type: List[str]
for docnames in app.emit('env-get-updated', self):
to_rewrite.extend(docnames)
for docname in set(to_rewrite):
@@ -476,7 +475,7 @@ class BuildEnvironment:
# --------- SINGLE FILE READING --------------------------------------------
def prepare_settings(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Prepare to set up environment for reading."""
self.temp_data['docname'] = docname
# defaults to the global default, but can be re-set in a document
@@ -488,12 +487,12 @@ class BuildEnvironment:
@property
def docname(self):
- # type: () -> unicode
+ # type: () -> str
"""Returns the docname of the document currently being parsed."""
return self.temp_data['docname']
def new_serialno(self, category=''):
- # type: (unicode) -> int
+ # type: (str) -> int
"""Return a serial number, e.g. for index entry targets.
The number is guaranteed to be unique in the current document.
@@ -504,7 +503,7 @@ class BuildEnvironment:
return cur
def note_dependency(self, filename):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Add *filename* as a dependency of the current document.
This means that the document will be rebuilt if this file changes.
@@ -514,7 +513,7 @@ class BuildEnvironment:
self.dependencies[self.docname].add(filename)
def note_included(self, filename):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Add *filename* as a included from other document.
This means the document is not orphaned.
@@ -531,7 +530,7 @@ class BuildEnvironment:
self.reread_always.add(self.docname)
def get_domain(self, domainname):
- # type: (unicode) -> Domain
+ # type: (str) -> Domain
"""Return the domain instance with the specified name.
Raises an ExtensionError if the domain is not registered.
@@ -544,7 +543,7 @@ class BuildEnvironment:
# --------- RESOLVING REFERENCES AND TOCTREES ------------------------------
def get_doctree(self, docname):
- # type: (unicode) -> nodes.document
+ # type: (str) -> nodes.document
"""Read the doctree for a file from the pickle and return it."""
filename = path.join(self.doctreedir, docname + '.doctree')
with open(filename, 'rb') as f:
@@ -555,7 +554,7 @@ class BuildEnvironment:
def get_and_resolve_doctree(self, docname, builder, doctree=None,
prune_toctrees=True, includehidden=False):
- # type: (unicode, Builder, nodes.document, bool, bool) -> nodes.document
+ # type: (str, Builder, nodes.document, bool, bool) -> nodes.document
"""Read the doctree from the pickle, resolve cross-references and
toctrees and return it.
"""
@@ -579,7 +578,7 @@ class BuildEnvironment:
def resolve_toctree(self, docname, builder, toctree, prune=True, maxdepth=0,
titles_only=False, collapse=False, includehidden=False):
- # type: (unicode, Builder, addnodes.toctree, bool, int, bool, bool, bool) -> nodes.Node
+ # type: (str, Builder, addnodes.toctree, bool, int, bool, bool, bool) -> nodes.Node
"""Resolve a *toctree* node into individual bullet lists with titles
as items, returning None (if no containing titles are found) or
a new node.
@@ -596,11 +595,11 @@ class BuildEnvironment:
includehidden)
def resolve_references(self, doctree, fromdocname, builder):
- # type: (nodes.document, unicode, Builder) -> None
+ # type: (nodes.document, str, Builder) -> None
self.apply_post_transforms(doctree, fromdocname)
def apply_post_transforms(self, doctree, docname):
- # type: (nodes.document, unicode) -> None
+ # type: (nodes.document, str) -> None
"""Apply all post-transforms."""
try:
# set env.docname during applying post-transforms
@@ -618,11 +617,11 @@ class BuildEnvironment:
self.app.emit('doctree-resolved', doctree, docname)
def collect_relations(self):
- # type: () -> Dict[unicode, List[unicode]]
+ # type: () -> Dict[str, List[str]]
traversed = set()
def traverse_toctree(parent, docname):
- # type: (unicode, unicode) -> Iterator[Tuple[unicode, unicode]]
+ # type: (str, str) -> Iterator[Tuple[str, str]]
if parent == docname:
logger.warning(__('self referenced toctree found. Ignored.'), location=docname)
return
@@ -676,31 +675,31 @@ class BuildEnvironment:
# --------- METHODS FOR COMPATIBILITY --------------------------------------
def update(self, config, srcdir, doctreedir):
- # type: (Config, unicode, unicode) -> List[unicode]
+ # type: (Config, str, str) -> List[str]
warnings.warn('env.update() is deprecated. Please use builder.read() instead.',
RemovedInSphinx30Warning, stacklevel=2)
return self.app.builder.read()
def _read_serial(self, docnames, app):
- # type: (List[unicode], Sphinx) -> None
+ # type: (List[str], Sphinx) -> None
warnings.warn('env._read_serial() is deprecated. Please use builder.read() instead.',
RemovedInSphinx30Warning, stacklevel=2)
return self.app.builder._read_serial(docnames)
def _read_parallel(self, docnames, app, nproc):
- # type: (List[unicode], Sphinx, int) -> None
+ # type: (List[str], Sphinx, int) -> None
warnings.warn('env._read_parallel() is deprecated. Please use builder.read() instead.',
RemovedInSphinx30Warning, stacklevel=2)
return self.app.builder._read_parallel(docnames, nproc)
def read_doc(self, docname, app=None):
- # type: (unicode, Sphinx) -> None
+ # type: (str, Sphinx) -> None
warnings.warn('env.read_doc() is deprecated. Please use builder.read_doc() instead.',
RemovedInSphinx30Warning, stacklevel=2)
self.app.builder.read_doc(docname)
def write_doctree(self, docname, doctree):
- # type: (unicode, nodes.document) -> None
+ # type: (str, nodes.document) -> None
warnings.warn('env.write_doctree() is deprecated. '
'Please use builder.write_doctree() instead.',
RemovedInSphinx30Warning, stacklevel=2)
@@ -708,7 +707,7 @@ class BuildEnvironment:
@property
def _nitpick_ignore(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
warnings.warn('env._nitpick_ignore is deprecated. '
'Please use config.nitpick_ignore instead.',
RemovedInSphinx30Warning, stacklevel=2)
@@ -742,7 +741,7 @@ class BuildEnvironment:
@classmethod
def frompickle(cls, filename, app):
- # type: (unicode, Sphinx) -> BuildEnvironment
+ # type: (str, Sphinx) -> BuildEnvironment
warnings.warn('BuildEnvironment.frompickle() is deprecated. '
'Please use pickle.load() instead.',
RemovedInSphinx30Warning, stacklevel=2)
@@ -768,7 +767,7 @@ class BuildEnvironment:
return io.getvalue()
def topickle(self, filename):
- # type: (unicode) -> None
+ # type: (str) -> None
warnings.warn('env.topickle() is deprecated. '
'Please use pickle.dump() instead.',
RemovedInSphinx30Warning, stacklevel=2)
@@ -777,14 +776,14 @@ class BuildEnvironment:
@property
def versionchanges(self):
- # type: () -> Dict[unicode, List[Tuple[unicode, unicode, int, unicode, unicode, unicode]]] # NOQA
+ # type: () -> Dict[str, List[Tuple[str, str, int, str, str, str]]]
warnings.warn('env.versionchanges() is deprecated. '
'Please use ChangeSetDomain instead.',
RemovedInSphinx30Warning, stacklevel=2)
return self.domaindata['changeset']['changes']
def note_versionchange(self, type, version, node, lineno):
- # type: (unicode, unicode, addnodes.versionmodified, int) -> None
+ # type: (str, str, addnodes.versionmodified, int) -> None
warnings.warn('env.note_versionchange() is deprecated. '
'Please use ChangeSetDomain.note_changeset() instead.',
RemovedInSphinx30Warning, stacklevel=2)
diff --git a/sphinx/environment/adapters/asset.py b/sphinx/environment/adapters/asset.py
index d1e7ab995..c729263f7 100644
--- a/sphinx/environment/adapters/asset.py
+++ b/sphinx/environment/adapters/asset.py
@@ -12,7 +12,6 @@
if False:
# For type annotation
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
class ImageAdapter:
@@ -21,7 +20,7 @@ class ImageAdapter:
self.env = env
def get_original_image_uri(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Get the original image URI."""
while name in self.env.original_image_uri:
name = self.env.original_image_uri[name]
diff --git a/sphinx/environment/adapters/indexentries.py b/sphinx/environment/adapters/indexentries.py
index 5235ef706..3b6bba826 100644
--- a/sphinx/environment/adapters/indexentries.py
+++ b/sphinx/environment/adapters/indexentries.py
@@ -23,7 +23,6 @@ if False:
from typing import Any, Dict, Pattern, List, Tuple # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -35,14 +34,14 @@ class IndexEntries:
def create_index(self, builder, group_entries=True,
_fixre=re.compile(r'(.*) ([(][^()]*[)])')):
- # type: (Builder, bool, Pattern) -> List[Tuple[unicode, List[Tuple[unicode, Any]]]] # NOQA
+ # type: (Builder, bool, Pattern) -> List[Tuple[str, List[Tuple[str, Any]]]]
"""Create the real index from the collected index entries."""
from sphinx.environment import NoUri
- new = {} # type: Dict[unicode, List]
+ new = {} # type: Dict[str, List]
def add_entry(word, subword, main, link=True, dic=new, key=None):
- # type: (unicode, unicode, unicode, bool, Dict, unicode) -> None
+ # type: (str, str, str, bool, Dict, str) -> None
# Force the word to be unicode if it's a ASCII bytestring.
# This will solve problems with unicode normalization later.
# For instance the RFC role will add bytestrings at the moment
@@ -97,7 +96,7 @@ class IndexEntries:
# sort the index entries; put all symbols at the front, even those
# following the letters in ASCII, this is where the chr(127) comes from
def keyfunc(entry):
- # type: (Tuple[unicode, List]) -> Tuple[unicode, unicode]
+ # type: (Tuple[str, List]) -> Tuple[str, str]
key, (void, void, category_key) = entry
if category_key:
# using specified category key to sort
@@ -120,8 +119,8 @@ class IndexEntries:
# func()
# (in module foo)
# (in module bar)
- oldkey = '' # type: unicode
- oldsubitems = None # type: Dict[unicode, List]
+ oldkey = ''
+ oldsubitems = None # type: Dict[str, List]
i = 0
while i < len(newlist):
key, (targets, subitems, _key) = newlist[i]
@@ -144,7 +143,7 @@ class IndexEntries:
# group the entries by letter
def keyfunc2(item):
- # type: (Tuple[unicode, List]) -> unicode
+ # type: (Tuple[str, List]) -> str
# hack: mutating the subitems dicts to a list in the keyfunc
k, v = item
v[1] = sorted((si, se) for (si, (se, void, void)) in v[1].items())
diff --git a/sphinx/environment/adapters/toctree.py b/sphinx/environment/adapters/toctree.py
index b5e6d3938..08098b5d7 100644
--- a/sphinx/environment/adapters/toctree.py
+++ b/sphinx/environment/adapters/toctree.py
@@ -24,7 +24,6 @@ if False:
from typing import Any, Dict, List # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -35,7 +34,7 @@ class TocTree:
self.env = env
def note(self, docname, toctreenode):
- # type: (unicode, addnodes.toctree) -> None
+ # type: (str, addnodes.toctree) -> None
"""Note a TOC tree directive in a document and gather information about
file relations from it.
"""
@@ -52,7 +51,7 @@ class TocTree:
def resolve(self, docname, builder, toctree, prune=True, maxdepth=0,
titles_only=False, collapse=False, includehidden=False):
- # type: (unicode, Builder, addnodes.toctree, bool, int, bool, bool, bool) -> nodes.Element # NOQA
+ # type: (str, Builder, addnodes.toctree, bool, int, bool, bool, bool) -> nodes.Element
"""Resolve a *toctree* node into individual bullet lists with titles
as items, returning None (if no containing titles are found) or
a new node.
@@ -120,7 +119,7 @@ class TocTree:
subnode = subnode.parent
def _entries_from_toctree(toctreenode, parents, separate=False, subtree=False):
- # type: (addnodes.toctree, List[unicode], bool, bool) -> List[nodes.Element]
+ # type: (addnodes.toctree, List[str], bool, bool) -> List[nodes.Element]
"""Return TOC entries for a toctree node."""
refs = [(e[0], e[1]) for e in toctreenode['entries']]
entries = [] # type: List[nodes.Element]
@@ -268,12 +267,12 @@ class TocTree:
return newnode
def get_toctree_ancestors(self, docname):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
parent = {}
for p, children in self.env.toctree_includes.items():
for child in children:
parent[child] = p
- ancestors = [] # type: List[unicode]
+ ancestors = [] # type: List[str]
d = docname
while d in parent and d not in ancestors:
ancestors.append(d)
@@ -303,7 +302,7 @@ class TocTree:
self._toctree_prune(subnode, depth + 1, maxdepth, collapse)
def get_toc_for(self, docname, builder):
- # type: (unicode, Builder) -> nodes.Node
+ # type: (str, Builder) -> nodes.Node
"""Return a TOC nodetree -- for use on the same page only!"""
tocdepth = self.env.metadata[docname].get('tocdepth', 0)
try:
@@ -319,7 +318,7 @@ class TocTree:
return toc
def get_toctree_for(self, docname, builder, collapse, **kwds):
- # type: (unicode, Builder, bool, Any) -> nodes.Element
+ # type: (str, Builder, bool, Any) -> nodes.Element
"""Return the global TOC nodetree."""
doctree = self.env.get_doctree(self.env.config.master_doc)
toctrees = [] # type: List[nodes.Element]
diff --git a/sphinx/environment/collectors/__init__.py b/sphinx/environment/collectors/__init__.py
index f29b8e9af..619de8e6d 100644
--- a/sphinx/environment/collectors/__init__.py
+++ b/sphinx/environment/collectors/__init__.py
@@ -15,7 +15,6 @@ if False:
from docutils import nodes # NOQA
from sphinx.sphinx import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
class EnvironmentCollector:
@@ -27,7 +26,7 @@ class EnvironmentCollector:
entries and toctrees, etc.
"""
- listener_ids = None # type: Dict[unicode, int]
+ listener_ids = None # type: Dict[str, int]
def enable(self, app):
# type: (Sphinx) -> None
@@ -48,14 +47,14 @@ class EnvironmentCollector:
self.listener_ids = None
def clear_doc(self, app, env, docname):
- # type: (Sphinx, BuildEnvironment, unicode) -> None
+ # type: (Sphinx, BuildEnvironment, str) -> None
"""Remove specified data of a document.
This method is called on the removal of the document."""
raise NotImplementedError
def merge_other(self, app, env, docnames, other):
- # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
+ # type: (Sphinx, BuildEnvironment, Set[str], BuildEnvironment) -> None
"""Merge in specified data regarding docnames from a different `BuildEnvironment`
object which coming from a subprocess in parallel builds."""
raise NotImplementedError
@@ -68,7 +67,7 @@ class EnvironmentCollector:
raise NotImplementedError
def get_updated_docs(self, app, env):
- # type: (Sphinx, BuildEnvironment) -> List[unicode]
+ # type: (Sphinx, BuildEnvironment) -> List[str]
"""Return a list of docnames to re-read.
This methods is called after reading the whole of documents (experimental).
@@ -76,7 +75,7 @@ class EnvironmentCollector:
return []
def get_outdated_docs(self, app, env, added, changed, removed):
- # type: (Sphinx, BuildEnvironment, unicode, Set[unicode], Set[unicode], Set[unicode]) -> List[unicode] # NOQA
+ # type: (Sphinx, BuildEnvironment, str, Set[str], Set[str], Set[str]) -> List[str]
"""Return a list of docnames to re-read.
This methods is called before reading the documents.
diff --git a/sphinx/environment/collectors/asset.py b/sphinx/environment/collectors/asset.py
index 3ebd0cfe0..e199444e8 100644
--- a/sphinx/environment/collectors/asset.py
+++ b/sphinx/environment/collectors/asset.py
@@ -29,7 +29,6 @@ if False:
from docutils import nodes # NOQA
from sphinx.sphinx import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -38,11 +37,11 @@ class ImageCollector(EnvironmentCollector):
"""Image files collector for sphinx.environment."""
def clear_doc(self, app, env, docname):
- # type: (Sphinx, BuildEnvironment, unicode) -> None
+ # type: (Sphinx, BuildEnvironment, str) -> None
env.images.purge_doc(docname)
def merge_other(self, app, env, docnames, other):
- # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
+ # type: (Sphinx, BuildEnvironment, Set[str], BuildEnvironment) -> None
env.images.merge_other(docnames, other.images)
def process_doc(self, app, doctree):
@@ -55,7 +54,7 @@ class ImageCollector(EnvironmentCollector):
# choose the best image from these candidates. The special key * is
# set if there is only single candidate to be used by a writer.
# The special key ? is set for nonlocal URIs.
- candidates = {} # type: Dict[unicode, unicode]
+ candidates = {} # type: Dict[str, str]
node['candidates'] = candidates
imguri = node['uri']
if imguri.startswith('data:'):
@@ -96,8 +95,8 @@ class ImageCollector(EnvironmentCollector):
app.env.images.add_file(docname, imgpath)
def collect_candidates(self, env, imgpath, candidates, node):
- # type: (BuildEnvironment, unicode, Dict[unicode, unicode], nodes.Node) -> None
- globbed = {} # type: Dict[unicode, List[unicode]]
+ # type: (BuildEnvironment, str, Dict[str, str], nodes.Node) -> None
+ globbed = {} # type: Dict[str, List[str]]
for filename in glob(imgpath):
new_imgpath = relative_path(path.join(env.srcdir, 'dummy'),
filename)
@@ -116,11 +115,11 @@ class DownloadFileCollector(EnvironmentCollector):
"""Download files collector for sphinx.environment."""
def clear_doc(self, app, env, docname):
- # type: (Sphinx, BuildEnvironment, unicode) -> None
+ # type: (Sphinx, BuildEnvironment, str) -> None
env.dlfiles.purge_doc(docname)
def merge_other(self, app, env, docnames, other):
- # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
+ # type: (Sphinx, BuildEnvironment, Set[str], BuildEnvironment) -> None
env.dlfiles.merge_other(docnames, other.dlfiles)
def process_doc(self, app, doctree):
diff --git a/sphinx/environment/collectors/dependencies.py b/sphinx/environment/collectors/dependencies.py
index da3b954de..ce103fada 100644
--- a/sphinx/environment/collectors/dependencies.py
+++ b/sphinx/environment/collectors/dependencies.py
@@ -23,18 +23,17 @@ if False:
from docutils import nodes # NOQA
    from sphinx.application import Sphinx  # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
class DependenciesCollector(EnvironmentCollector):
"""dependencies collector for sphinx.environment."""
def clear_doc(self, app, env, docname):
- # type: (Sphinx, BuildEnvironment, unicode) -> None
+ # type: (Sphinx, BuildEnvironment, str) -> None
env.dependencies.pop(docname, None)
def merge_other(self, app, env, docnames, other):
- # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
+ # type: (Sphinx, BuildEnvironment, Set[str], BuildEnvironment) -> None
for docname in docnames:
if docname in other.dependencies:
env.dependencies[docname] = other.dependencies[docname]
diff --git a/sphinx/environment/collectors/indexentries.py b/sphinx/environment/collectors/indexentries.py
index 662965c67..45f2c6742 100644
--- a/sphinx/environment/collectors/indexentries.py
+++ b/sphinx/environment/collectors/indexentries.py
@@ -21,7 +21,6 @@ if False:
from docutils import nodes # NOQA
    from sphinx.application import Sphinx  # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -30,11 +29,11 @@ class IndexEntriesCollector(EnvironmentCollector):
name = 'indices'
def clear_doc(self, app, env, docname):
- # type: (Sphinx, BuildEnvironment, unicode) -> None
+ # type: (Sphinx, BuildEnvironment, str) -> None
env.indexentries.pop(docname, None)
def merge_other(self, app, env, docnames, other):
- # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
+ # type: (Sphinx, BuildEnvironment, Set[str], BuildEnvironment) -> None
for docname in docnames:
env.indexentries[docname] = other.indexentries[docname]
diff --git a/sphinx/environment/collectors/metadata.py b/sphinx/environment/collectors/metadata.py
index c55b8458e..00df134ba 100644
--- a/sphinx/environment/collectors/metadata.py
+++ b/sphinx/environment/collectors/metadata.py
@@ -21,18 +21,17 @@ if False:
from docutils import nodes # NOQA
    from sphinx.application import Sphinx  # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
class MetadataCollector(EnvironmentCollector):
"""metadata collector for sphinx.environment."""
def clear_doc(self, app, env, docname):
- # type: (Sphinx, BuildEnvironment, unicode) -> None
+ # type: (Sphinx, BuildEnvironment, str) -> None
env.metadata.pop(docname, None)
def merge_other(self, app, env, docnames, other):
- # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
+ # type: (Sphinx, BuildEnvironment, Set[str], BuildEnvironment) -> None
for docname in docnames:
env.metadata[docname] = other.metadata[docname]
diff --git a/sphinx/environment/collectors/title.py b/sphinx/environment/collectors/title.py
index 175e05c8d..3f7c1f55e 100644
--- a/sphinx/environment/collectors/title.py
+++ b/sphinx/environment/collectors/title.py
@@ -20,19 +20,18 @@ if False:
from docutils import nodes # NOQA
    from sphinx.application import Sphinx  # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
class TitleCollector(EnvironmentCollector):
"""title collector for sphinx.environment."""
def clear_doc(self, app, env, docname):
- # type: (Sphinx, BuildEnvironment, unicode) -> None
+ # type: (Sphinx, BuildEnvironment, str) -> None
env.titles.pop(docname, None)
env.longtitles.pop(docname, None)
def merge_other(self, app, env, docnames, other):
- # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
+ # type: (Sphinx, BuildEnvironment, Set[str], BuildEnvironment) -> None
for docname in docnames:
env.titles[docname] = other.titles[docname]
env.longtitles[docname] = other.longtitles[docname]
diff --git a/sphinx/environment/collectors/toctree.py b/sphinx/environment/collectors/toctree.py
index ed10fdf4c..783e1749a 100644
--- a/sphinx/environment/collectors/toctree.py
+++ b/sphinx/environment/collectors/toctree.py
@@ -26,7 +26,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
N = TypeVar('N')
@@ -35,7 +34,7 @@ logger = logging.getLogger(__name__)
class TocTreeCollector(EnvironmentCollector):
def clear_doc(self, app, env, docname):
- # type: (Sphinx, BuildEnvironment, unicode) -> None
+ # type: (Sphinx, BuildEnvironment, str) -> None
env.tocs.pop(docname, None)
env.toc_secnumbers.pop(docname, None)
env.toc_fignumbers.pop(docname, None)
@@ -50,7 +49,7 @@ class TocTreeCollector(EnvironmentCollector):
del env.files_to_rebuild[subfn]
def merge_other(self, app, env, docnames, other):
- # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
+ # type: (Sphinx, BuildEnvironment, Set[str], BuildEnvironment) -> None
for docname in docnames:
env.tocs[docname] = other.tocs[docname]
env.toc_num_entries[docname] = other.toc_num_entries[docname]
@@ -139,16 +138,16 @@ class TocTreeCollector(EnvironmentCollector):
app.env.toc_num_entries[docname] = numentries[0]
def get_updated_docs(self, app, env):
- # type: (Sphinx, BuildEnvironment) -> List[unicode]
+ # type: (Sphinx, BuildEnvironment) -> List[str]
return self.assign_section_numbers(env) + self.assign_figure_numbers(env)
def assign_section_numbers(self, env):
- # type: (BuildEnvironment) -> List[unicode]
+ # type: (BuildEnvironment) -> List[str]
"""Assign a section number to each heading under a numbered toctree."""
# a list of all docnames whose section numbers changed
rewrite_needed = []
- assigned = set() # type: Set[unicode]
+ assigned = set() # type: Set[str]
old_secnumbers = env.toc_secnumbers
env.toc_secnumbers = {}
@@ -200,7 +199,7 @@ class TocTreeCollector(EnvironmentCollector):
'(nested numbered toctree?)'), ref,
location=toctreenode, type='toc', subtype='secnum')
elif ref in env.tocs:
- secnums = {} # type: Dict[unicode, Tuple[int, ...]]
+ secnums = {} # type: Dict[str, Tuple[int, ...]]
env.toc_secnumbers[ref] = secnums
assigned.add(ref)
_walk_toc(env.tocs[ref], secnums, depth, env.titles.get(ref))
@@ -220,18 +219,18 @@ class TocTreeCollector(EnvironmentCollector):
return rewrite_needed
def assign_figure_numbers(self, env):
- # type: (BuildEnvironment) -> List[unicode]
+ # type: (BuildEnvironment) -> List[str]
"""Assign a figure number to each figure under a numbered toctree."""
rewrite_needed = []
- assigned = set() # type: Set[unicode]
+ assigned = set() # type: Set[str]
old_fignumbers = env.toc_fignumbers
env.toc_fignumbers = {}
- fignum_counter = {} # type: Dict[unicode, Dict[Tuple[int, ...], int]]
+ fignum_counter = {} # type: Dict[str, Dict[Tuple[int, ...], int]]
def get_figtype(node):
- # type: (nodes.Node) -> unicode
+ # type: (nodes.Node) -> str
for domain in env.domains.values():
figtype = domain.get_enumerable_node_type(node)
if figtype:
@@ -240,7 +239,7 @@ class TocTreeCollector(EnvironmentCollector):
return None
def get_section_number(docname, section):
- # type: (unicode, nodes.section) -> Tuple[int, ...]
+ # type: (str, nodes.section) -> Tuple[int, ...]
anchorname = '#' + section['ids'][0]
secnumbers = env.toc_secnumbers.get(docname, {})
if anchorname in secnumbers:
@@ -251,7 +250,7 @@ class TocTreeCollector(EnvironmentCollector):
return secnum or tuple()
def get_next_fignumber(figtype, secnum):
- # type: (unicode, Tuple[int, ...]) -> Tuple[int, ...]
+ # type: (str, Tuple[int, ...]) -> Tuple[int, ...]
counter = fignum_counter.setdefault(figtype, {})
secnum = secnum[:env.config.numfig_secnum_depth]
@@ -259,7 +258,7 @@ class TocTreeCollector(EnvironmentCollector):
return secnum + (counter[secnum],)
def register_fignumber(docname, secnum, figtype, fignode):
- # type: (unicode, Tuple[int, ...], unicode, nodes.Element) -> None
+ # type: (str, Tuple[int, ...], str, nodes.Element) -> None
env.toc_fignumbers.setdefault(docname, {})
fignumbers = env.toc_fignumbers[docname].setdefault(figtype, {})
figure_id = fignode['ids'][0]
@@ -267,7 +266,7 @@ class TocTreeCollector(EnvironmentCollector):
fignumbers[figure_id] = get_next_fignumber(figtype, secnum)
def _walk_doctree(docname, doctree, secnum):
- # type: (unicode, nodes.Element, Tuple[int, ...]) -> None
+ # type: (str, nodes.Element, Tuple[int, ...]) -> None
for subnode in doctree.children:
if isinstance(subnode, nodes.section):
next_secnum = get_section_number(docname, subnode)
@@ -290,7 +289,7 @@ class TocTreeCollector(EnvironmentCollector):
_walk_doctree(docname, subnode, secnum)
def _walk_doc(docname, secnum):
- # type: (unicode, Tuple[int, ...]) -> None
+ # type: (str, Tuple[int, ...]) -> None
if docname not in assigned:
assigned.add(docname)
doctree = env.get_doctree(docname)
diff --git a/sphinx/errors.py b/sphinx/errors.py
index dead3f1c8..df858fa87 100644
--- a/sphinx/errors.py
+++ b/sphinx/errors.py
@@ -13,7 +13,6 @@
if False:
# For type annotation
from typing import Any # NOQA
- from sphinx.util.typing import unicode # NOQA
class SphinxError(Exception):
@@ -54,7 +53,7 @@ class ExtensionError(SphinxError):
category = 'Extension error'
def __init__(self, message, orig_exc=None):
- # type: (unicode, Exception) -> None
+ # type: (str, Exception) -> None
super(ExtensionError, self).__init__(message)
self.message = message
self.orig_exc = orig_exc
diff --git a/sphinx/events.py b/sphinx/events.py
index fca304773..61e30dd83 100644
--- a/sphinx/events.py
+++ b/sphinx/events.py
@@ -10,7 +10,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
from collections import OrderedDict, defaultdict
@@ -20,7 +19,6 @@ from sphinx.locale import __
if False:
# For type annotation
from typing import Any, Callable, Dict, List # NOQA
- from sphinx.util.typing import unicode # NOQA
# List of all known core events. Maps name to arguments description.
@@ -41,24 +39,24 @@ core_events = {
'html-collect-pages': 'builder',
'html-page-context': 'pagename, context, doctree or None',
'build-finished': 'exception',
-} # type: Dict[unicode, unicode]
+}
class EventManager:
def __init__(self):
# type: () -> None
self.events = core_events.copy()
- self.listeners = defaultdict(OrderedDict) # type: Dict[unicode, Dict[int, Callable]]
+ self.listeners = defaultdict(OrderedDict) # type: Dict[str, Dict[int, Callable]]
self.next_listener_id = 0
def add(self, name):
- # type: (unicode) -> None
+ # type: (str) -> None
if name in self.events:
raise ExtensionError(__('Event %r already present') % name)
self.events[name] = ''
def connect(self, name, callback):
- # type: (unicode, Callable) -> int
+ # type: (str, Callable) -> int
if name not in self.events:
raise ExtensionError(__('Unknown event name: %s') % name)
@@ -73,14 +71,14 @@ class EventManager:
event.pop(listener_id, None)
def emit(self, name, *args):
- # type: (unicode, Any) -> List
+ # type: (str, Any) -> List
results = []
for callback in self.listeners[name].values():
results.append(callback(*args))
return results
def emit_firstresult(self, name, *args):
- # type: (unicode, Any) -> Any
+ # type: (str, Any) -> Any
for result in self.emit(name, *args):
if result is not None:
return result
diff --git a/sphinx/ext/apidoc.py b/sphinx/ext/apidoc.py
index 85a41e650..65b611b97 100644
--- a/sphinx/ext/apidoc.py
+++ b/sphinx/ext/apidoc.py
@@ -15,8 +15,6 @@
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
-
import argparse
import glob
import locale
@@ -35,7 +33,6 @@ from sphinx.util.osutil import FileAvoidWrite, ensuredir
if False:
# For type annotation
from typing import Any, List, Tuple # NOQA
- from sphinx.util.typing import unicode # NOQA
# automodule options
if 'SPHINX_APIDOC_OPTIONS' in os.environ:
@@ -53,7 +50,7 @@ PY_SUFFIXES = set(['.py', '.pyx'])
def makename(package, module):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
"""Join package and module with a dot."""
# Both package and module can be None/empty.
if package:
@@ -66,7 +63,7 @@ def makename(package, module):
def write_file(name, text, opts):
- # type: (unicode, unicode, Any) -> None
+ # type: (str, str, Any) -> None
"""Write the output file for module/package <name>."""
fname = path.join(opts.destdir, '%s.%s' % (name, opts.suffix))
if opts.dryrun:
@@ -81,7 +78,7 @@ def write_file(name, text, opts):
def format_heading(level, text, escape=True):
- # type: (int, unicode, bool) -> unicode
+ # type: (int, str, bool) -> str
"""Create a heading of <level> [1, 2 or 3 supported]."""
if escape:
text = rst.escape(text)
@@ -90,7 +87,7 @@ def format_heading(level, text, escape=True):
def format_directive(module, package=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
"""Create the automodule directive and add the options."""
directive = '.. automodule:: %s\n' % makename(package, module)
for option in OPTIONS:
@@ -99,7 +96,7 @@ def format_directive(module, package=None):
def create_module_file(package, module, opts):
- # type: (unicode, unicode, Any) -> None
+ # type: (str, str, Any) -> None
"""Build the text of the file and write the file."""
if not opts.noheadings:
text = format_heading(1, '%s module' % module)
@@ -111,7 +108,7 @@ def create_module_file(package, module, opts):
def create_package_file(root, master_package, subroot, py_files, opts, subs, is_namespace, excludes=[]): # NOQA
- # type: (unicode, unicode, unicode, List[unicode], Any, List[unicode], bool, List[unicode]) -> None # NOQA
+ # type: (str, str, str, List[str], Any, List[str], bool, List[str]) -> None
"""Build the text of the file and write the file."""
text = format_heading(1, ('%s package' if not is_namespace else "%s namespace")
% makename(master_package, subroot))
@@ -171,14 +168,14 @@ def create_package_file(root, master_package, subroot, py_files, opts, subs, is_
def create_modules_toc_file(modules, opts, name='modules'):
- # type: (List[unicode], Any, unicode) -> None
+ # type: (List[str], Any, str) -> None
"""Create the module's index."""
text = format_heading(1, '%s' % opts.header, escape=False)
text += '.. toctree::\n'
text += ' :maxdepth: %s\n\n' % opts.maxdepth
modules.sort()
- prev_module = '' # type: unicode
+ prev_module = ''
for module in modules:
# look if the module is a subpackage and, if yes, ignore it
if module.startswith(prev_module + '.'):
@@ -190,7 +187,7 @@ def create_modules_toc_file(modules, opts, name='modules'):
def shall_skip(module, opts, excludes=[]):
- # type: (unicode, Any, List[unicode]) -> bool
+ # type: (str, Any, List[str]) -> bool
"""Check if we want to skip this module."""
# skip if the file doesn't exist and not using implicit namespaces
if not opts.implicit_namespaces and not path.exists(module):
@@ -217,7 +214,7 @@ def shall_skip(module, opts, excludes=[]):
def recurse_tree(rootpath, excludes, opts):
- # type: (unicode, List[unicode], Any) -> List[unicode]
+ # type: (str, List[str], Any) -> List[str]
"""
Look for every file in the directory tree and create the corresponding
ReST files.
@@ -252,7 +249,7 @@ def recurse_tree(rootpath, excludes, opts):
# remove hidden ('.') and private ('_') directories, as well as
# excluded dirs
if includeprivate:
- exclude_prefixes = ('.',) # type: Tuple[unicode, ...]
+ exclude_prefixes = ('.',) # type: Tuple[str, ...]
else:
exclude_prefixes = ('.', '_')
subs[:] = sorted(sub for sub in subs if not sub.startswith(exclude_prefixes) and
@@ -282,7 +279,7 @@ def recurse_tree(rootpath, excludes, opts):
def is_excluded(root, excludes):
- # type: (unicode, List[unicode]) -> bool
+ # type: (str, List[str]) -> bool
"""Check if the directory is in the exclude list.
Note: by having trailing slashes, we avoid common prefix issues, like
@@ -411,7 +408,7 @@ def main(argv=sys.argv[1:]):
if args.full:
from sphinx.cmd import quickstart as qs
modules.sort()
- prev_module = '' # type: unicode
+ prev_module = ''
text = ''
for module in modules:
if module.startswith(prev_module + '.'):
diff --git a/sphinx/ext/autodoc/__init__.py b/sphinx/ext/autodoc/__init__.py
index 578c1d968..45be2ab10 100644
--- a/sphinx/ext/autodoc/__init__.py
+++ b/sphinx/ext/autodoc/__init__.py
@@ -43,7 +43,6 @@ if False:
from sphinx.config import Config # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.ext.autodoc.directive import DocumenterBridge # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -74,7 +73,7 @@ INSTANCEATTR = object()
def members_option(arg):
- # type: (Any) -> Union[object, List[unicode]]
+ # type: (Any) -> Union[object, List[str]]
"""Used to convert the :members: option to auto directives."""
if arg is None:
return ALL
@@ -82,7 +81,7 @@ def members_option(arg):
def members_set_option(arg):
- # type: (Any) -> Union[object, Set[unicode]]
+ # type: (Any) -> Union[object, Set[str]]
"""Used to convert the :members: option to auto directives."""
if arg is None:
return ALL
@@ -126,7 +125,7 @@ def merge_special_members_option(options):
# Some useful event listener factories for autodoc-process-docstring.
def cut_lines(pre, post=0, what=None):
- # type: (int, int, unicode) -> Callable
+ # type: (int, int, str) -> Callable
"""Return a listener that removes the first *pre* and last *post*
lines of every docstring. If *what* is a sequence of strings,
only docstrings of a type in *what* will be processed.
@@ -139,7 +138,7 @@ def cut_lines(pre, post=0, what=None):
This can (and should) be used in place of :confval:`automodule_skip_lines`.
"""
def process(app, what_, name, obj, options, lines):
- # type: (Sphinx, unicode, unicode, Any, Any, List[unicode]) -> None
+ # type: (Sphinx, str, str, Any, Any, List[str]) -> None
if what and what_ not in what:
return
del lines[:pre]
@@ -155,7 +154,7 @@ def cut_lines(pre, post=0, what=None):
def between(marker, what=None, keepempty=False, exclude=False):
- # type: (unicode, Sequence[unicode], bool, bool) -> Callable
+ # type: (str, Sequence[str], bool, bool) -> Callable
"""Return a listener that either keeps, or if *exclude* is True excludes,
lines between lines that match the *marker* regular expression. If no line
matches, the resulting docstring would be empty, so no change will be made
@@ -167,7 +166,7 @@ def between(marker, what=None, keepempty=False, exclude=False):
marker_re = re.compile(marker)
def process(app, what_, name, obj, options, lines):
- # type: (Sphinx, unicode, unicode, Any, Any, List[unicode]) -> None
+ # type: (Sphinx, str, str, Any, Any, List[str]) -> None
if what and what_ not in what:
return
deleted = 0
@@ -195,7 +194,7 @@ def between(marker, what=None, keepempty=False, exclude=False):
class Options(dict):
"""A dict/attribute hybrid that returns None on nonexisting keys."""
def __getattr__(self, name):
- # type: (unicode) -> Any
+ # type: (str) -> Any
try:
return self[name.replace('_', '-')]
except KeyError:
@@ -229,21 +228,21 @@ class Documenter:
#: true if the generated content may contain titles
titles_allowed = False
- option_spec = {'noindex': bool_option} # type: Dict[unicode, Callable]
+ option_spec = {'noindex': bool_option} # type: Dict[str, Callable]
def get_attr(self, obj, name, *defargs):
- # type: (Any, unicode, Any) -> Any
+ # type: (Any, str, Any) -> Any
"""getattr() override for types such as Zope interfaces."""
return autodoc_attrgetter(self.env.app, obj, name, *defargs)
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
+ # type: (Any, str, bool, Any) -> bool
"""Called to see if a member can be documented by this documenter."""
raise NotImplementedError('must be implemented in subclasses')
def __init__(self, directive, name, indent=u''):
- # type: (DocumenterBridge, unicode, unicode) -> None
+ # type: (DocumenterBridge, str, str) -> None
self.directive = directive
self.env = directive.env # type: BuildEnvironment
self.options = directive.genopt
@@ -253,15 +252,15 @@ class Documenter:
# qualified name (all set after resolve_name succeeds)
self.modname = None # type: str
self.module = None # type: ModuleType
- self.objpath = None # type: List[unicode]
- self.fullname = None # type: unicode
+ self.objpath = None # type: List[str]
+ self.fullname = None # type: str
# extra signature items (arguments and return annotation,
# also set after resolve_name succeeds)
- self.args = None # type: unicode
- self.retann = None # type: unicode
+ self.args = None # type: str
+ self.retann = None # type: str
# the object to document (set after import_object succeeds)
self.object = None # type: Any
- self.object_name = None # type: unicode
+ self.object_name = None # type: str
# the parent/owner of the object to document
self.parent = None # type: Any
# the module analyzer to get at attribute docs, or None
@@ -269,17 +268,17 @@ class Documenter:
@property
def documenters(self):
- # type: () -> Dict[unicode, Type[Documenter]]
+ # type: () -> Dict[str, Type[Documenter]]
"""Returns registered Documenter classes"""
return get_documenters(self.env.app)
def add_line(self, line, source, *lineno):
- # type: (unicode, unicode, int) -> None
+ # type: (str, str, int) -> None
"""Append one line of generated reST to the output."""
self.directive.result.append(self.indent + line, source, *lineno)
def resolve_name(self, modname, parents, path, base):
- # type: (str, Any, str, Any) -> Tuple[str, List[unicode]]
+ # type: (str, Any, str, Any) -> Tuple[str, List[str]]
"""Resolve the module and name of the object to document given by the
arguments and the current module/class.
@@ -370,7 +369,7 @@ class Documenter:
return True
def format_args(self):
- # type: () -> unicode
+ # type: () -> str
"""Format the argument signature of *self.object*.
Should return None if the object does not have a signature.
@@ -378,7 +377,7 @@ class Documenter:
return None
def format_name(self):
- # type: () -> unicode
+ # type: () -> str
"""Format the name of *self.object*.
This normally should be something that can be parsed by the generated
@@ -390,14 +389,14 @@ class Documenter:
return '.'.join(self.objpath) or self.modname
def format_signature(self):
- # type: () -> unicode
+ # type: () -> str
"""Format the signature (arguments and return annotation) of the object.
Let the user process it via the ``autodoc-process-signature`` event.
"""
if self.args is not None:
# signature given explicitly
- args = "(%s)" % self.args # type: unicode
+ args = "(%s)" % self.args
else:
# try to introspect the signature
try:
@@ -421,7 +420,7 @@ class Documenter:
return ''
def add_directive_header(self, sig):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Add the directive header and options to the generated content."""
domain = getattr(self, 'domain', 'py')
directive = getattr(self, 'directivetype', self.objtype)
@@ -437,7 +436,7 @@ class Documenter:
self.add_line(u' :module: %s' % self.modname, sourcename)
def get_doc(self, encoding=None, ignore=1):
- # type: (unicode, int) -> List[List[unicode]]
+ # type: (str, int) -> List[List[str]]
"""Decode and return lines of the docstring(s) for the object."""
if encoding is not None:
warnings.warn("The 'encoding' argument to autodoc.%s.get_doc() is deprecated."
@@ -450,7 +449,7 @@ class Documenter:
return []
def process_doc(self, docstrings):
- # type: (List[List[unicode]]) -> Iterator[unicode]
+ # type: (List[List[str]]) -> Iterator[str]
"""Let the user process the docstrings before adding them."""
for docstringlines in docstrings:
if self.env.app:
@@ -458,11 +457,10 @@ class Documenter:
self.env.app.emit('autodoc-process-docstring',
self.objtype, self.fullname, self.object,
self.options, docstringlines)
- for line in docstringlines:
- yield line
+ yield from docstringlines
def get_sourcename(self):
- # type: () -> unicode
+ # type: () -> str
if self.analyzer:
# prevent encoding errors when the file name is non-ASCII
if not isinstance(self.analyzer.srcname, text_type):
@@ -505,7 +503,7 @@ class Documenter:
self.add_line(line, src[0], src[1])
def get_object_members(self, want_all):
- # type: (bool) -> Tuple[bool, List[Tuple[unicode, Any]]]
+ # type: (bool) -> Tuple[bool, List[Tuple[str, Any]]]
"""Return `(members_check_module, members)` where `members` is a
list of `(membername, member)` pairs of the members of *self.object*.
@@ -532,7 +530,7 @@ class Documenter:
if m.directly_defined)
def filter_members(self, members, want_all):
- # type: (List[Tuple[unicode, Any]], bool) -> List[Tuple[unicode, Any, bool]]
+ # type: (List[Tuple[str, Any]], bool) -> List[Tuple[str, Any, bool]]
"""Filter the given member list.
Members are skipped if
@@ -780,7 +778,7 @@ class ModuleDocumenter(Documenter):
'member-order': identity, 'exclude-members': members_set_option,
'private-members': bool_option, 'special-members': members_option,
'imported-members': bool_option, 'ignore-module-all': bool_option
- } # type: Dict[unicode, Callable]
+ } # type: Dict[str, Callable]
def __init__(self, *args):
# type: (Any) -> None
@@ -789,12 +787,12 @@ class ModuleDocumenter(Documenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
+ # type: (Any, str, bool, Any) -> bool
# don't document submodules automatically
return False
def resolve_name(self, modname, parents, path, base):
- # type: (str, Any, str, Any) -> Tuple[str, List[unicode]]
+ # type: (str, Any, str, Any) -> Tuple[str, List[str]]
if modname is not None:
logger.warning(__('"::" in automodule name doesn\'t make sense'),
type='autodoc')
@@ -810,7 +808,7 @@ class ModuleDocumenter(Documenter):
return ret
def add_directive_header(self, sig):
- # type: (unicode) -> None
+ # type: (str) -> None
Documenter.add_directive_header(self, sig)
sourcename = self.get_sourcename()
@@ -826,7 +824,7 @@ class ModuleDocumenter(Documenter):
self.add_line(u' :deprecated:', sourcename)
def get_object_members(self, want_all):
- # type: (bool) -> Tuple[bool, List[Tuple[unicode, object]]]
+ # type: (bool) -> Tuple[bool, List[Tuple[str, object]]]
if want_all:
if (self.options.ignore_module_all or not
hasattr(self.object, '__all__')):
@@ -868,7 +866,7 @@ class ModuleLevelDocumenter(Documenter):
classes, data/constants).
"""
def resolve_name(self, modname, parents, path, base):
- # type: (str, Any, str, Any) -> Tuple[str, List[unicode]]
+ # type: (str, Any, str, Any) -> Tuple[str, List[str]]
if modname is None:
if path:
modname = path.rstrip('.')
@@ -889,7 +887,7 @@ class ClassLevelDocumenter(Documenter):
attributes).
"""
def resolve_name(self, modname, parents, path, base):
- # type: (str, Any, str, Any) -> Tuple[str, List[unicode]]
+ # type: (str, Any, str, Any) -> Tuple[str, List[str]]
if modname is None:
if path:
mod_cls = path.rstrip('.')
@@ -923,7 +921,7 @@ class DocstringSignatureMixin:
"""
def _find_signature(self, encoding=None):
- # type: (unicode) -> Tuple[str, str]
+ # type: (str) -> Tuple[str, str]
if encoding is not None:
warnings.warn("The 'encoding' argument to autodoc.%s._find_signature() is "
"deprecated." % self.__class__.__name__,
@@ -956,7 +954,7 @@ class DocstringSignatureMixin:
return result
def get_doc(self, encoding=None, ignore=1):
- # type: (unicode, int) -> List[List[unicode]]
+ # type: (str, int) -> List[List[str]]
if encoding is not None:
warnings.warn("The 'encoding' argument to autodoc.%s.get_doc() is deprecated."
% self.__class__.__name__,
@@ -967,7 +965,7 @@ class DocstringSignatureMixin:
return super(DocstringSignatureMixin, self).get_doc(None, ignore) # type: ignore
def format_signature(self):
- # type: () -> unicode
+ # type: () -> str
if self.args is None and self.env.config.autodoc_docstring_signature: # type: ignore
# only act if a signature is not explicitly given already, and if
# the feature is enabled
@@ -983,7 +981,7 @@ class DocstringStripSignatureMixin(DocstringSignatureMixin):
feature of stripping any function signature from the docstring.
"""
def format_signature(self):
- # type: () -> unicode
+ # type: () -> str
if self.args is None and self.env.config.autodoc_docstring_signature: # type: ignore
# only act if a signature is not explicitly given already, and if
# the feature is enabled
@@ -1005,11 +1003,11 @@ class FunctionDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # typ
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
+ # type: (Any, str, bool, Any) -> bool
return isfunction(member) or isbuiltin(member)
def format_args(self):
- # type: () -> unicode
+ # type: () -> str
if isbuiltin(self.object) or inspect.ismethoddescriptor(self.object):
# cannot introspect arguments of a C function or method
return None
@@ -1057,7 +1055,7 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type:
'show-inheritance': bool_option, 'member-order': identity,
'exclude-members': members_set_option,
'private-members': bool_option, 'special-members': members_option,
- } # type: Dict[unicode, Callable]
+ } # type: Dict[str, Callable]
def __init__(self, *args):
# type: (Any) -> None
@@ -1066,7 +1064,7 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type:
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
+ # type: (Any, str, bool, Any) -> bool
return isinstance(member, type)
def import_object(self):
@@ -1082,7 +1080,7 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type:
return ret
def format_args(self):
- # type: () -> unicode
+ # type: () -> str
# for classes, the relevant signature is the __init__ method's
initmeth = self.get_attr(self.object, '__init__', None)
# classes without __init__ method, default __init__ or
@@ -1099,14 +1097,14 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type:
return None
def format_signature(self):
- # type: () -> unicode
+ # type: () -> str
if self.doc_as_attr:
return ''
return super(ClassDocumenter, self).format_signature()
def add_directive_header(self, sig):
- # type: (unicode) -> None
+ # type: (str) -> None
if self.doc_as_attr:
self.directivetype = 'attribute'
super(ClassDocumenter, self).add_directive_header(sig)
@@ -1124,7 +1122,7 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type:
sourcename)
def get_doc(self, encoding=None, ignore=1):
- # type: (unicode, int) -> List[List[unicode]]
+ # type: (str, int) -> List[List[str]]
if encoding is not None:
warnings.warn("The 'encoding' argument to autodoc.%s.get_doc() is deprecated."
% self.__class__.__name__,
@@ -1213,7 +1211,7 @@ class ExceptionDocumenter(ClassDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
+ # type: (Any, str, bool, Any) -> bool
return isinstance(member, type) and issubclass(member, BaseException)
@@ -1229,11 +1227,11 @@ class DataDocumenter(ModuleLevelDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
+ # type: (Any, str, bool, Any) -> bool
return isinstance(parent, ModuleDocumenter) and isattr
def add_directive_header(self, sig):
- # type: (unicode) -> None
+ # type: (str) -> None
super(DataDocumenter, self).add_directive_header(sig)
sourcename = self.get_sourcename()
if not self.options.annotation:
@@ -1269,7 +1267,7 @@ class MethodDocumenter(DocstringSignatureMixin, ClassLevelDocumenter): # type:
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
+ # type: (Any, str, bool, Any) -> bool
return inspect.isroutine(member) and \
not isinstance(parent, ModuleDocumenter)
@@ -1297,7 +1295,7 @@ class MethodDocumenter(DocstringSignatureMixin, ClassLevelDocumenter): # type:
return ret
def format_args(self):
- # type: () -> unicode
+ # type: () -> str
if isbuiltin(self.object) or inspect.ismethoddescriptor(self.object):
# can never get arguments of a C function or method
return None
@@ -1334,7 +1332,7 @@ class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
+ # type: (Any, str, bool, Any) -> bool
non_attr_types = (type, MethodDescriptorType)
isdatadesc = isdescriptor(member) and not \
cls.is_function_or_method(member) and not \
@@ -1370,7 +1368,7 @@ class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter):
or self.modname
def add_directive_header(self, sig):
- # type: (unicode) -> None
+ # type: (str) -> None
super(AttributeDocumenter, self).add_directive_header(sig)
sourcename = self.get_sourcename()
if not self.options.annotation:
@@ -1410,7 +1408,7 @@ class InstanceAttributeDocumenter(AttributeDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
+ # type: (Any, str, bool, Any) -> bool
"""This documents only INSTANCEATTR members."""
return isattr and (member is INSTANCEATTR)
@@ -1429,13 +1427,13 @@ class InstanceAttributeDocumenter(AttributeDocumenter):
def get_documenters(app):
- # type: (Sphinx) -> Dict[unicode, Type[Documenter]]
+ # type: (Sphinx) -> Dict[str, Type[Documenter]]
"""Returns registered Documenter classes"""
return app.registry.documenters
def autodoc_attrgetter(app, obj, name, *defargs):
- # type: (Sphinx, Any, unicode, Any) -> Any
+ # type: (Sphinx, Any, str, Any) -> Any
"""Alternative getattr() for types"""
for typ, func in app.registry.autodoc_attrgettrs.items():
if isinstance(obj, typ):
@@ -1469,7 +1467,7 @@ def merge_autodoc_default_flags(app, config):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_autodocumenter(ModuleDocumenter)
app.add_autodocumenter(ClassDocumenter)
app.add_autodocumenter(ExceptionDocumenter)
diff --git a/sphinx/ext/autodoc/directive.py b/sphinx/ext/autodoc/directive.py
index bb6e05751..2f207a251 100644
--- a/sphinx/ext/autodoc/directive.py
+++ b/sphinx/ext/autodoc/directive.py
@@ -25,7 +25,6 @@ if False:
from sphinx.config import Config # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.ext.autodoc import Documenter # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -58,11 +57,11 @@ class DocumenterBridge:
self.reporter = reporter
self.genopt = options
self.lineno = lineno
- self.filename_set = set() # type: Set[unicode]
+ self.filename_set = set() # type: Set[str]
self.result = StringList()
def warn(self, msg):
- # type: (unicode) -> None
+ # type: (str) -> None
logger.warning(msg, location=(self.env.docname, self.lineno))
diff --git a/sphinx/ext/autodoc/importer.py b/sphinx/ext/autodoc/importer.py
index ef2b9368d..7f906a6b9 100644
--- a/sphinx/ext/autodoc/importer.py
+++ b/sphinx/ext/autodoc/importer.py
@@ -25,7 +25,6 @@ from sphinx.util.inspect import isenumclass, safe_getattr
if False:
# For type annotation
from typing import Any, Callable, Dict, Generator, Iterator, List, Optional, Sequence, Tuple, Union # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -220,7 +219,7 @@ def import_module(modname, warningiserror=False):
def import_object(modname, objpath, objtype='', attrgetter=safe_getattr, warningiserror=False):
- # type: (str, List[unicode], str, Callable[[Any, unicode], Any], bool) -> Any
+ # type: (str, List[str], str, Callable[[Any, str], Any], bool) -> Any
if objpath:
logger.debug('[autodoc] from %s import %s', modname, '.'.join(objpath))
else:
@@ -287,7 +286,7 @@ Attribute = namedtuple('Attribute', ['name', 'directly_defined', 'value'])
def get_object_members(subject, objpath, attrgetter, analyzer=None):
- # type: (Any, List[unicode], Callable, Any) -> Dict[str, Attribute] # NOQA
+ # type: (Any, List[str], Callable, Any) -> Dict[str, Attribute] # NOQA
"""Get members and attributes of target object."""
# the members directly defined in the class
obj_dict = attrgetter(subject, '__dict__', {})
diff --git a/sphinx/ext/autosectionlabel.py b/sphinx/ext/autosectionlabel.py
index db4185331..1a3424a48 100644
--- a/sphinx/ext/autosectionlabel.py
+++ b/sphinx/ext/autosectionlabel.py
@@ -29,7 +29,6 @@ if False:
# For type annotation
from typing import Any, Dict # NOQA
from sphinx.application import Sphinx # NOQA
- from sphinx.util.typing import unicode # NOQA
def register_sections_as_label(app, document):
@@ -57,7 +56,7 @@ def register_sections_as_label(app, document):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_config_value('autosectionlabel_prefix_document', False, 'env')
app.connect('doctree-read', register_sections_as_label)
diff --git a/sphinx/ext/autosummary/__init__.py b/sphinx/ext/autosummary/__init__.py
index f76ec3ba7..4717f998b 100644
--- a/sphinx/ext/autosummary/__init__.py
+++ b/sphinx/ext/autosummary/__init__.py
@@ -90,7 +90,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.ext.autodoc import Documenter # NOQA
- from sphinx.util.typing import unicode # NOQA
from sphinx.writers.html import HTMLTranslator # NOQA
logger = logging.getLogger(__name__)
@@ -275,13 +274,13 @@ class Autosummary(SphinxDirective):
return nodes
def get_items(self, names):
- # type: (List[unicode]) -> List[Tuple[unicode, unicode, unicode, unicode]]
+ # type: (List[str]) -> List[Tuple[str, str, str, str]]
"""Try to import the given names, and return a list of
``[(name, signature, summary_string, real_name), ...]``.
"""
prefixes = get_import_prefixes_from_env(self.env)
- items = [] # type: List[Tuple[unicode, unicode, unicode, unicode]]
+ items = [] # type: List[Tuple[str, str, str, str]]
max_item_chars = 50
@@ -350,7 +349,7 @@ class Autosummary(SphinxDirective):
return items
def get_table(self, items):
- # type: (List[Tuple[unicode, unicode, unicode, unicode]]) -> List[nodes.Node]
+ # type: (List[Tuple[str, str, str, str]]) -> List[nodes.Node]
"""Generate a proper list of table nodes for autosummary:: directive.
*items* is a list produced by :meth:`get_items`.
@@ -369,7 +368,7 @@ class Autosummary(SphinxDirective):
group.append(body)
def append_row(*column_texts):
- # type: (unicode) -> None
+ # type: (str) -> None
row = nodes.row('')
source, line = self.state_machine.get_source_and_line()
for text in column_texts:
@@ -389,7 +388,7 @@ class Autosummary(SphinxDirective):
for name, sig, summary, real_name in items:
qualifier = 'obj'
if 'nosignatures' not in self.options:
- col1 = ':%s:`%s <%s>`\\ %s' % (qualifier, name, real_name, rst.escape(sig)) # type: unicode # NOQA
+ col1 = ':%s:`%s <%s>`\\ %s' % (qualifier, name, real_name, rst.escape(sig))
else:
col1 = ':%s:`%s <%s>`' % (qualifier, name, real_name)
col2 = summary
@@ -398,7 +397,7 @@ class Autosummary(SphinxDirective):
return [table_spec, table]
def warn(self, msg):
- # type: (unicode) -> None
+ # type: (str) -> None
warnings.warn('Autosummary.warn() is deprecated',
RemovedInSphinx40Warning, stacklevel=2)
logger.warning(msg)
@@ -426,13 +425,13 @@ class Autosummary(SphinxDirective):
def strip_arg_typehint(s):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Strip a type hint from argument definition."""
return s.split(':')[0].strip()
def mangle_signature(sig, max_chars=30):
- # type: (unicode, int) -> unicode
+ # type: (str, int) -> str
"""Reformat a function signature to a more compact form."""
# Strip return type annotation
s = re.sub(r"\)\s*->\s.*$", ")", sig)
@@ -446,8 +445,8 @@ def mangle_signature(sig, max_chars=30):
s = re.sub(r"'[^']*'", "", s)
# Parse the signature to arguments + options
- args = [] # type: List[unicode]
- opts = [] # type: List[unicode]
+ args = [] # type: List[str]
+ opts = [] # type: List[str]
opt_re = re.compile(r"^(.*, |)([a-zA-Z0-9_*]+)=")
while s:
@@ -480,7 +479,7 @@ def mangle_signature(sig, max_chars=30):
def extract_summary(doc, document):
- # type: (List[unicode], Any) -> unicode
+ # type: (List[str], Any) -> str
"""Extract summary from docstring."""
# Skip a blank lines at the top
@@ -529,7 +528,7 @@ def extract_summary(doc, document):
def limited_join(sep, items, max_chars=30, overflow_marker="..."):
- # type: (unicode, List[unicode], int, unicode) -> unicode
+ # type: (str, List[str], int, str) -> str
"""Join a number of strings to one, limiting the length to *max_chars*.
If the string overflows this limit, replace the last fitting item by
@@ -578,7 +577,7 @@ def get_import_prefixes_from_env(env):
def import_by_name(name, prefixes=[None]):
- # type: (unicode, List) -> Tuple[unicode, Any, Any, unicode]
+ # type: (str, List) -> Tuple[str, Any, Any, str]
"""Import a Python object that has the given *name*, under one of the
*prefixes*. The first name that succeeds is used.
"""
@@ -597,7 +596,7 @@ def import_by_name(name, prefixes=[None]):
def _import_by_name(name):
- # type: (str) -> Tuple[Any, Any, unicode]
+ # type: (str) -> Tuple[Any, Any, str]
"""Import a Python object given its full name."""
try:
name_parts = name.split('.')
@@ -641,7 +640,7 @@ def _import_by_name(name):
# -- :autolink: (smart default role) -------------------------------------------
def autolink_role(typ, rawtext, etext, lineno, inliner, options={}, content=[]):
- # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
+ # type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
"""Smart linking role.
Expands to ':obj:`text`' if `text` is an object that can be imported;
@@ -666,9 +665,9 @@ def autolink_role(typ, rawtext, etext, lineno, inliner, options={}, content=[]):
def get_rst_suffix(app):
- # type: (Sphinx) -> unicode
+ # type: (Sphinx) -> str
def get_supported_format(suffix):
- # type: (unicode) -> Tuple[unicode, ...]
+ # type: (str) -> Tuple[str, ...]
parser_class = app.registry.get_source_parsers().get(suffix)
if parser_class is None:
return ('restructuredtext',)
@@ -676,7 +675,7 @@ def get_rst_suffix(app):
parser_class = import_object(parser_class, 'source parser')
return parser_class.supported
- suffix = None # type: unicode
+ suffix = None # type: str
for suffix in app.config.source_suffix:
if 'restructuredtext' in get_supported_format(suffix):
return suffix
@@ -715,7 +714,7 @@ def process_generate_options(app):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
# I need autodoc
app.setup_extension('sphinx.ext.autodoc')
app.add_node(autosummary_toc,
diff --git a/sphinx/ext/autosummary/generate.py b/sphinx/ext/autosummary/generate.py
index 04ba43d94..8718b774b 100644
--- a/sphinx/ext/autosummary/generate.py
+++ b/sphinx/ext/autosummary/generate.py
@@ -17,7 +17,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import argparse
import locale
@@ -47,7 +46,6 @@ if False:
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.ext.autodoc import Documenter # NOQA
- from sphinx.util.typing import unicode # NOQA
class DummyApplication:
@@ -75,17 +73,17 @@ def setup_documenters(app):
def _simple_info(msg):
- # type: (unicode) -> None
+ # type: (str) -> None
print(msg)
def _simple_warn(msg):
- # type: (unicode) -> None
+ # type: (str) -> None
print('WARNING: ' + msg, file=sys.stderr)
def _underline(title, line='='):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
if '\n' in title:
raise ValueError('Can only underline single lines')
return title + '\n' + line * len(title)
@@ -97,7 +95,7 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
warn=_simple_warn, info=_simple_info,
base_path=None, builder=None, template_dir=None,
imported_members=False, app=None):
- # type: (List[unicode], unicode, unicode, Callable, Callable, unicode, Builder, unicode, bool, Any) -> None # NOQA
+ # type: (List[str], str, str, Callable, Callable, str, Builder, str, bool, Any) -> None
showed_sources = list(sorted(sources))
if len(showed_sources) > 20:
@@ -112,7 +110,7 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
sources = [os.path.join(base_path, filename) for filename in sources]
# create our own templating environment
- template_dirs = None # type: List[unicode]
+ template_dirs = None # type: List[str]
template_dirs = [os.path.join(package_dir, 'ext',
'autosummary', 'templates')]
@@ -175,8 +173,8 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
template = template_env.get_template('autosummary/base.rst')
def get_members(obj, typ, include_public=[], imported=True):
- # type: (Any, unicode, List[unicode], bool) -> Tuple[List[unicode], List[unicode]] # NOQA
- items = [] # type: List[unicode]
+ # type: (Any, str, List[str], bool) -> Tuple[List[str], List[str]]
+ items = [] # type: List[str]
for name in dir(obj):
try:
value = safe_getattr(obj, name)
@@ -191,7 +189,7 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
if x in include_public or not x.startswith('_')]
return public, items
- ns = {} # type: Dict[unicode, Any]
+ ns = {} # type: Dict[str, Any]
if doc.objtype == 'module':
ns['members'] = dir(obj)
@@ -241,12 +239,12 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
# -- Finding documented entries in files ---------------------------------------
def find_autosummary_in_files(filenames):
- # type: (List[unicode]) -> List[Tuple[unicode, unicode, unicode]]
+ # type: (List[str]) -> List[Tuple[str, str, str]]
"""Find out what items are documented in source/*.rst.
See `find_autosummary_in_lines`.
"""
- documented = [] # type: List[Tuple[unicode, unicode, unicode]]
+ documented = [] # type: List[Tuple[str, str, str]]
for filename in filenames:
with open(filename, encoding='utf-8', errors='ignore') as f:
lines = f.read().splitlines()
@@ -255,7 +253,7 @@ def find_autosummary_in_files(filenames):
def find_autosummary_in_docstring(name, module=None, filename=None):
- # type: (unicode, Any, unicode) -> List[Tuple[unicode, unicode, unicode]]
+ # type: (str, Any, str) -> List[Tuple[str, str, str]]
"""Find out what items are documented in the given object's docstring.
See `find_autosummary_in_lines`.
@@ -275,7 +273,7 @@ def find_autosummary_in_docstring(name, module=None, filename=None):
def find_autosummary_in_lines(lines, module=None, filename=None):
- # type: (List[unicode], Any, unicode) -> List[Tuple[unicode, unicode, unicode]]
+ # type: (List[str], Any, str) -> List[Tuple[str, str, str]]
"""Find out what items appear in autosummary:: directives in the
given lines.
@@ -295,13 +293,13 @@ def find_autosummary_in_lines(lines, module=None, filename=None):
toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$')
template_arg_re = re.compile(r'^\s+:template:\s*(.*?)\s*$')
- documented = [] # type: List[Tuple[unicode, unicode, unicode]]
+ documented = [] # type: List[Tuple[str, str, str]]
- toctree = None # type: unicode
+ toctree = None # type: str
template = None
current_module = module
in_autosummary = False
- base_indent = "" # type: unicode
+ base_indent = ""
for line in lines:
if in_autosummary:
diff --git a/sphinx/ext/coverage.py b/sphinx/ext/coverage.py
index fa9fac03b..1d7c87ec5 100644
--- a/sphinx/ext/coverage.py
+++ b/sphinx/ext/coverage.py
@@ -26,20 +26,19 @@ if False:
# For type annotation
from typing import Any, Callable, Dict, IO, List, Pattern, Set, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
# utility
def write_header(f, text, char='-'):
- # type:(IO, unicode, unicode) -> None
+ # type:(IO, str, str) -> None
f.write(text + '\n')
f.write(char * len(text) + '\n')
def compile_regex_list(name, exps):
- # type: (unicode, unicode) -> List[Pattern]
+ # type: (str, str) -> List[Pattern]
lst = []
for exp in exps:
try:
@@ -59,19 +58,19 @@ class CoverageBuilder(Builder):
def init(self):
# type: () -> None
- self.c_sourcefiles = [] # type: List[unicode]
+ self.c_sourcefiles = [] # type: List[str]
for pattern in self.config.coverage_c_path:
pattern = path.join(self.srcdir, pattern)
self.c_sourcefiles.extend(glob.glob(pattern))
- self.c_regexes = [] # type: List[Tuple[unicode, Pattern]]
+ self.c_regexes = [] # type: List[Tuple[str, Pattern]]
for (name, exp) in self.config.coverage_c_regexes.items():
try:
self.c_regexes.append((name, re.compile(exp)))
except Exception:
logger.warning(__('invalid regex %r in coverage_c_regexes'), exp)
- self.c_ignorexps = {} # type: Dict[unicode, List[Pattern]]
+ self.c_ignorexps = {} # type: Dict[str, List[Pattern]]
for (name, exps) in self.config.coverage_ignore_c_items.items():
self.c_ignorexps[name] = compile_regex_list('coverage_ignore_c_items',
exps)
@@ -83,16 +82,16 @@ class CoverageBuilder(Builder):
self.config.coverage_ignore_functions)
def get_outdated_docs(self):
- # type: () -> unicode
+ # type: () -> str
return 'coverage overview'
def write(self, *ignored):
# type: (Any) -> None
- self.py_undoc = {} # type: Dict[unicode, Dict[unicode, Any]]
+ self.py_undoc = {} # type: Dict[str, Dict[str, Any]]
self.build_py_coverage()
self.write_py_coverage()
- self.c_undoc = {} # type: Dict[unicode, Set[Tuple[unicode, unicode]]]
+ self.c_undoc = {} # type: Dict[str, Set[Tuple[str, str]]]
self.build_c_coverage()
self.write_c_coverage()
@@ -101,7 +100,7 @@ class CoverageBuilder(Builder):
# Fetch all the info from the header files
c_objects = self.env.domaindata['c']['objects']
for filename in self.c_sourcefiles:
- undoc = set() # type: Set[Tuple[unicode, unicode]]
+ undoc = set() # type: Set[Tuple[str, str]]
with open(filename) as f:
for line in f:
for key, regex in self.c_regexes:
@@ -156,7 +155,7 @@ class CoverageBuilder(Builder):
continue
funcs = []
- classes = {} # type: Dict[unicode, List[unicode]]
+ classes = {} # type: Dict[str, List[str]]
for name, obj in inspect.getmembers(mod):
# diverse module attributes are ignored:
@@ -193,7 +192,7 @@ class CoverageBuilder(Builder):
classes[name] = []
continue
- attrs = [] # type: List[unicode]
+ attrs = [] # type: List[str]
for attr_name in dir(obj):
if attr_name not in obj.__dict__:
@@ -267,7 +266,7 @@ class CoverageBuilder(Builder):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_builder(CoverageBuilder)
app.add_config_value('coverage_ignore_modules', [], False)
app.add_config_value('coverage_ignore_functions', [], False)
diff --git a/sphinx/ext/doctest.py b/sphinx/ext/doctest.py
index a4f7ef731..cd2698d99 100644
--- a/sphinx/ext/doctest.py
+++ b/sphinx/ext/doctest.py
@@ -16,13 +16,13 @@ import re
import sys
import time
import warnings
+from io import StringIO
from os import path
from docutils import nodes
from docutils.parsers.rst import directives
from packaging.specifiers import SpecifierSet, InvalidSpecifier
from packaging.version import Version
-from six import StringIO
import sphinx
from sphinx.builders import Builder
@@ -38,7 +38,6 @@ if False:
# For type annotation
from typing import Any, Callable, Dict, IO, Iterable, List, Optional, Sequence, Set, Tuple, Type # NOQA
from sphinx.application import Sphinx # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -47,14 +46,14 @@ doctestopt_re = re.compile(r'#\s*doctest:.+$', re.MULTILINE)
def doctest_encode(text, encoding):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
warnings.warn('doctest_encode() is deprecated.',
RemovedInSphinx40Warning)
return text
def is_allowed_version(spec, version):
- # type: (unicode, unicode) -> bool
+ # type: (str, str) -> bool
"""Check `spec` satisfies `version` or not.
This obeys PEP-440 specifiers:
@@ -207,7 +206,7 @@ parser = doctest.DocTestParser()
class TestGroup:
def __init__(self, name):
- # type: (unicode) -> None
+ # type: (str) -> None
self.name = name
self.setup = [] # type: List[TestCode]
self.tests = [] # type: List[List[TestCode]]
@@ -233,14 +232,14 @@ class TestGroup:
raise RuntimeError(__('invalid TestCode type'))
def __repr__(self):
- # type: () -> unicode
+ # type: () -> str
return 'TestGroup(name=%r, setup=%r, cleanup=%r, tests=%r)' % (
self.name, self.setup, self.cleanup, self.tests)
class TestCode:
def __init__(self, code, type, filename, lineno, options=None):
- # type: (unicode, unicode, Optional[str], int, Optional[Dict]) -> None
+ # type: (str, str, Optional[str], int, Optional[Dict]) -> None
self.code = code
self.type = type
self.filename = filename
@@ -248,7 +247,7 @@ class TestCode:
self.options = options or {}
def __repr__(self):
- # type: () -> unicode
+ # type: () -> str
return 'TestCode(%r, %r, filename=%r, lineno=%r, options=%r)' % (
self.code, self.type, self.filename, self.lineno, self.options)
@@ -268,7 +267,7 @@ class SphinxDocTestRunner(doctest.DocTestRunner):
def _DocTestRunner__patched_linecache_getlines(self, filename,
module_globals=None):
- # type: (unicode, Any) -> Any
+ # type: (str, Any) -> Any
# this is overridden from DocTestRunner adding the try-except below
m = self._DocTestRunner__LINECACHE_FILENAME_RE.match(filename) # type: ignore
if m and m.group('name') == self.test.name:
@@ -325,12 +324,12 @@ class DocTestBuilder(Builder):
(date, '=' * len(date)))
def _out(self, text):
- # type: (unicode) -> None
+ # type: (str) -> None
logger.info(text, nonl=True)
self.outfile.write(text)
def _warn_out(self, text):
- # type: (unicode) -> None
+ # type: (str) -> None
if self.app.quiet or self.app.warningiserror:
logger.warning(text)
else:
@@ -338,18 +337,18 @@ class DocTestBuilder(Builder):
self.outfile.write(text)
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
return ''
def get_outdated_docs(self):
- # type: () -> Set[unicode]
+ # type: () -> Set[str]
return self.env.found_docs
def finish(self):
# type: () -> None
# write executive summary
def s(v):
- # type: (int) -> unicode
+ # type: (int) -> str
return v != 1 and 's' or ''
repl = (self.total_tries, s(self.total_tries),
self.total_failures, s(self.total_failures),
@@ -369,7 +368,7 @@ Doctest summary
self.app.statuscode = 1
def write(self, build_docnames, updated_docnames, method='update'):
- # type: (Iterable[unicode], Sequence[unicode], unicode) -> None
+ # type: (Iterable[str], Sequence[str], str) -> None
if build_docnames is None:
build_docnames = sorted(self.env.all_docs)
@@ -380,7 +379,7 @@ Doctest summary
self.test_doc(docname, doctree)
def get_filename_for_node(self, node, docname):
- # type: (nodes.Node, unicode) -> unicode
+ # type: (nodes.Node, str) -> str
"""Try to get the file which actually contains the doctest, not the
filename of the document it's included in."""
try:
@@ -408,8 +407,8 @@ Doctest summary
return None
def test_doc(self, docname, doctree):
- # type: (unicode, nodes.Node) -> None
- groups = {} # type: Dict[unicode, TestGroup]
+ # type: (str, nodes.Node) -> None
+ groups = {} # type: Dict[str, TestGroup]
add_to_all_groups = []
self.setup_runner = SphinxDocTestRunner(verbose=False,
optionflags=self.opt)
@@ -487,7 +486,7 @@ Doctest summary
self.cleanup_tries += res_t
def compile(self, code, name, type, flags, dont_inherit):
- # type: (unicode, unicode, unicode, Any, bool) -> Any
+ # type: (str, str, str, Any, bool) -> Any
return compile(code, name, self.type, flags, dont_inherit)
def test_group(self, group):
@@ -565,7 +564,7 @@ Doctest summary
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_directive('testsetup', TestsetupDirective)
app.add_directive('testcleanup', TestcleanupDirective)
app.add_directive('doctest', DoctestDirective)
diff --git a/sphinx/ext/extlinks.py b/sphinx/ext/extlinks.py
index d353e3116..f3173e4af 100644
--- a/sphinx/ext/extlinks.py
+++ b/sphinx/ext/extlinks.py
@@ -35,13 +35,12 @@ if False:
from docutils.parsers.rst.states import Inliner # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.typing import RoleFunction # NOQA
- from sphinx.util.typing import unicode # NOQA
def make_link_role(base_url, prefix):
- # type: (unicode, unicode) -> RoleFunction
+ # type: (str, str) -> RoleFunction
def role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
- # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
+ # type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
text = utils.unescape(text)
has_explicit_title, title, part = split_explicit_title(text)
try:
@@ -69,7 +68,7 @@ def setup_link_roles(app):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_config_value('extlinks', {}, 'env')
app.connect('builder-inited', setup_link_roles)
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
diff --git a/sphinx/ext/githubpages.py b/sphinx/ext/githubpages.py
index f11846e08..006d22c5a 100644
--- a/sphinx/ext/githubpages.py
+++ b/sphinx/ext/githubpages.py
@@ -18,7 +18,6 @@ if False:
from typing import Any, Dict # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
def create_nojekyll(app, env):
@@ -29,6 +28,6 @@ def create_nojekyll(app, env):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.connect('env-updated', create_nojekyll)
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
diff --git a/sphinx/ext/graphviz.py b/sphinx/ext/graphviz.py
index 3532377b1..878e4bfc3 100644
--- a/sphinx/ext/graphviz.py
+++ b/sphinx/ext/graphviz.py
@@ -36,7 +36,6 @@ if False:
from typing import Any, Dict, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.docutils import SphinxTranslator # NOQA
- from sphinx.util.typing import unicode # NOQA
from sphinx.writers.html import HTMLTranslator # NOQA
from sphinx.writers.latex import LaTeXTranslator # NOQA
from sphinx.writers.manpage import ManualPageTranslator # NOQA
@@ -56,16 +55,16 @@ class ClickableMapDefinition:
href_re = re.compile('href=".*?"')
def __init__(self, filename, content, dot=''):
- # type: (unicode, unicode, unicode) -> None
- self.id = None # type: unicode
+ # type: (str, str, str) -> None
+ self.id = None # type: str
self.filename = filename
self.content = content.splitlines()
- self.clickable = [] # type: List[unicode]
+ self.clickable = [] # type: List[str]
self.parse(dot=dot)
def parse(self, dot=None):
- # type: (unicode) -> None
+ # type: (str) -> None
matched = self.maptag_re.match(self.content[0])
if not matched:
raise GraphvizError('Invalid clickable map file found: %s' % self.filename)
@@ -83,7 +82,7 @@ class ClickableMapDefinition:
self.clickable.append(line)
def generate_clickable_map(self):
- # type: () -> unicode
+ # type: () -> str
"""Generate clickable map tags if clickable item exists.
If not exists, this only returns empty string.
@@ -99,7 +98,7 @@ class graphviz(nodes.General, nodes.Inline, nodes.Element):
def figure_wrapper(directive, node, caption):
- # type: (Directive, graphviz, unicode) -> nodes.figure
+ # type: (Directive, graphviz, str) -> nodes.figure
figure_node = nodes.figure('', node)
if 'align' in node:
figure_node['align'] = node.attributes.pop('align')
@@ -218,7 +217,7 @@ class GraphvizSimple(SphinxDirective):
def render_dot(self, code, options, format, prefix='graphviz'):
- # type: (SphinxTranslator, unicode, Dict, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
+ # type: (SphinxTranslator, str, Dict, str, str) -> Tuple[str, str]
"""Render graphviz code into a PNG or PDF output file."""
graphviz_dot = options.get('graphviz_dot', self.builder.config.graphviz_dot)
hashkey = (code + str(options) + str(graphviz_dot) +
@@ -279,7 +278,7 @@ def render_dot(self, code, options, format, prefix='graphviz'):
def render_dot_html(self, node, code, options, prefix='graphviz',
imgcls=None, alt=None):
- # type: (HTMLTranslator, graphviz, unicode, Dict, unicode, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
+ # type: (HTMLTranslator, graphviz, str, Dict, str, str, str) -> Tuple[str, str]
format = self.builder.config.graphviz_output_format
try:
if format not in ('png', 'svg'):
@@ -337,7 +336,7 @@ def html_visit_graphviz(self, node):
def render_dot_latex(self, node, code, options, prefix='graphviz'):
- # type: (LaTeXTranslator, graphviz, unicode, Dict, unicode) -> None
+ # type: (LaTeXTranslator, graphviz, str, Dict, str) -> None
try:
fname, outfn = render_dot(self, code, options, 'pdf', prefix)
except GraphvizError as exc:
@@ -375,7 +374,7 @@ def latex_visit_graphviz(self, node):
def render_dot_texinfo(self, node, code, options, prefix='graphviz'):
- # type: (TexinfoTranslator, graphviz, unicode, Dict, unicode) -> None
+ # type: (TexinfoTranslator, graphviz, str, Dict, str) -> None
try:
fname, outfn = render_dot(self, code, options, 'png', prefix)
except GraphvizError as exc:
@@ -418,7 +417,7 @@ def on_build_finished(app, exc):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_node(graphviz,
html=(html_visit_graphviz, None),
latex=(latex_visit_graphviz, None),
diff --git a/sphinx/ext/ifconfig.py b/sphinx/ext/ifconfig.py
index bc6634557..89f8629af 100644
--- a/sphinx/ext/ifconfig.py
+++ b/sphinx/ext/ifconfig.py
@@ -30,7 +30,6 @@ if False:
# For type annotation
from typing import Any, Dict, List # NOQA
from sphinx.application import Sphinx # NOQA
- from sphinx.util.typing import unicode # NOQA
class ifconfig(nodes.Element):
@@ -57,7 +56,7 @@ class IfConfig(SphinxDirective):
def process_ifconfig_nodes(app, doctree, docname):
- # type: (Sphinx, nodes.document, unicode) -> None
+ # type: (Sphinx, nodes.document, str) -> None
ns = dict((confval.name, confval.value) for confval in app.config)
ns.update(app.config.__dict__.copy())
ns['builder'] = app.builder.name
@@ -80,7 +79,7 @@ def process_ifconfig_nodes(app, doctree, docname):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_node(ifconfig)
app.add_directive('ifconfig', IfConfig)
app.connect('doctree-resolved', process_ifconfig_nodes)
diff --git a/sphinx/ext/imgconverter.py b/sphinx/ext/imgconverter.py
index b651e6b69..71d74cee3 100644
--- a/sphinx/ext/imgconverter.py
+++ b/sphinx/ext/imgconverter.py
@@ -21,7 +21,6 @@ if False:
# For type annotation
from typing import Any, Dict # NOQA
from sphinx.application import Sphinx # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -64,7 +63,7 @@ class ImagemagickConverter(ImageConverter):
return True
def convert(self, _from, _to):
- # type: (unicode, unicode) -> bool
+ # type: (str, str) -> bool
"""Converts the image to expected one."""
try:
if _from.lower().endswith('.gif'):
@@ -100,7 +99,7 @@ class ImagemagickConverter(ImageConverter):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_post_transform(ImagemagickConverter)
app.add_config_value('image_converter', 'convert', 'env')
app.add_config_value('image_converter_args', [], 'env')
diff --git a/sphinx/ext/imgmath.py b/sphinx/ext/imgmath.py
index 3b70cb8fa..5e2f22511 100644
--- a/sphinx/ext/imgmath.py
+++ b/sphinx/ext/imgmath.py
@@ -36,7 +36,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.config import Config # NOQA
- from sphinx.util.typing import unicode # NOQA
from sphinx.writers.html import HTMLTranslator # NOQA
logger = logging.getLogger(__name__)
@@ -46,7 +45,7 @@ class MathExtError(SphinxError):
category = 'Math extension error'
def __init__(self, msg, stderr=None, stdout=None):
- # type: (unicode, bytes, bytes) -> None
+ # type: (str, bytes, bytes) -> None
if stderr:
msg += '\n[stderr]\n' + stderr.decode(sys_encoding, 'replace')
if stdout:
@@ -91,7 +90,7 @@ depth_re = re.compile(br'\[\d+ depth=(-?\d+)\]')
def generate_latex_macro(math, config):
- # type: (unicode, Config) -> unicode
+ # type: (str, Config) -> str
"""Generate LaTeX macro."""
fontsize = config.imgmath_font_size
baselineskip = int(round(fontsize * 1.2))
@@ -106,7 +105,7 @@ def generate_latex_macro(math, config):
def ensure_tempdir(builder):
- # type: (Builder) -> unicode
+ # type: (Builder) -> str
"""Create temporary directory.
use only one tempdir per build -- the use of a directory is cleaner
@@ -120,7 +119,7 @@ def ensure_tempdir(builder):
def compile_math(latex, builder):
- # type: (unicode, Builder) -> unicode
+ # type: (str, Builder) -> str
"""Compile LaTeX macros for math to DVI."""
tempdir = ensure_tempdir(builder)
filename = path.join(tempdir, 'math.tex')
@@ -154,7 +153,7 @@ def compile_math(latex, builder):
def convert_dvi_to_image(command, name):
- # type: (List[unicode], unicode) -> Tuple[bytes, bytes]
+ # type: (List[str], str) -> Tuple[bytes, bytes]
"""Convert DVI file to specific image format."""
try:
p = Popen(command, stdout=PIPE, stderr=PIPE)
@@ -174,7 +173,7 @@ def convert_dvi_to_image(command, name):
def convert_dvi_to_png(dvipath, builder):
- # type: (unicode, Builder) -> Tuple[unicode, int]
+ # type: (str, Builder) -> Tuple[str, int]
"""Convert DVI file to PNG image."""
tempdir = ensure_tempdir(builder)
filename = path.join(tempdir, 'math.png')
@@ -201,7 +200,7 @@ def convert_dvi_to_png(dvipath, builder):
def convert_dvi_to_svg(dvipath, builder):
- # type: (unicode, Builder) -> Tuple[unicode, int]
+ # type: (str, Builder) -> Tuple[str, int]
"""Convert DVI file to SVG image."""
tempdir = ensure_tempdir(builder)
filename = path.join(tempdir, 'math.svg')
@@ -216,7 +215,7 @@ def convert_dvi_to_svg(dvipath, builder):
def render_math(self, math):
- # type: (HTMLTranslator, unicode) -> Tuple[unicode, int]
+ # type: (HTMLTranslator, str) -> Tuple[str, int]
"""Render the LaTeX math expression *math* using latex and dvipng or
dvisvgm.
@@ -285,7 +284,7 @@ def cleanup_tempdir(app, exc):
def get_tooltip(self, node):
- # type: (HTMLTranslator, Union[nodes.math, nodes.math_block]) -> unicode
+ # type: (HTMLTranslator, Union[nodes.math, nodes.math_block]) -> str
if self.builder.config.imgmath_add_tooltips:
return ' alt="%s"' % self.encode(node.astext()).strip()
return ''
@@ -347,7 +346,7 @@ def html_visit_displaymath(self, node):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_html_math_renderer('imgmath',
(html_visit_math, None),
(html_visit_displaymath, None))
diff --git a/sphinx/ext/inheritance_diagram.py b/sphinx/ext/inheritance_diagram.py
index ab3cc4d0f..c315ba8d3 100644
--- a/sphinx/ext/inheritance_diagram.py
+++ b/sphinx/ext/inheritance_diagram.py
@@ -59,7 +59,6 @@ if False:
from typing import Any, Dict, List, Tuple, Dict, Optional # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
from sphinx.writers.html import HTMLTranslator # NOQA
from sphinx.writers.latex import LaTeXTranslator # NOQA
from sphinx.writers.texinfo import TexinfoTranslator # NOQA
@@ -71,7 +70,7 @@ module_sig_re = re.compile(r'''^(?:([\w.]*)\.)? # module names
def try_import(objname):
- # type: (unicode) -> Any
+ # type: (str) -> Any
"""Import a object or module using *name* and *currentmodule*.
*name* should be a relative name from *currentmodule* or
a fully-qualified name.
@@ -99,7 +98,7 @@ def try_import(objname):
def import_classes(name, currmodule):
- # type: (unicode, unicode) -> Any
+ # type: (str, str) -> Any
"""Import a class using its fully-qualified *name*."""
target = None
@@ -142,7 +141,7 @@ class InheritanceGraph:
"""
def __init__(self, class_names, currmodule, show_builtins=False,
private_bases=False, parts=0, aliases=None, top_classes=[]):
- # type: (List[unicode], str, bool, bool, int, Optional[Dict[unicode, unicode]], List[Any]) -> None # NOQA
+ # type: (List[str], str, bool, bool, int, Optional[Dict[str, str]], List[Any]) -> None
"""*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
@@ -157,7 +156,7 @@ class InheritanceGraph:
'inheritance diagram')
def _import_classes(self, class_names, currmodule):
- # type: (List[unicode], str) -> List[Any]
+ # type: (List[str], str) -> List[Any]
"""Import a list of classes."""
classes = [] # type: List[Any]
for name in class_names:
@@ -165,7 +164,7 @@ class InheritanceGraph:
return classes
def _class_info(self, classes, show_builtins, private_bases, parts, aliases, top_classes):
- # type: (List[Any], bool, bool, int, Optional[Dict[unicode, unicode]], List[Any]) -> List[Tuple[unicode, unicode, List[unicode], unicode]] # NOQA
+ # type: (List[Any], bool, bool, int, Optional[Dict[str, str]], List[Any]) -> List[Tuple[str, str, List[str], str]] # NOQA
"""Return name and bases for all classes that are ancestors of
*classes*.
@@ -198,7 +197,7 @@ class InheritanceGraph:
except Exception: # might raise AttributeError for strange classes
pass
- baselist = [] # type: List[unicode]
+ baselist = [] # type: List[str]
all_classes[cls] = (nodename, fullname, baselist, tooltip)
if fullname in top_classes:
@@ -219,7 +218,7 @@ class InheritanceGraph:
return list(all_classes.values())
def class_name(self, cls, parts=0, aliases=None):
- # type: (Any, int, Optional[Dict[unicode, unicode]]) -> unicode
+ # type: (Any, int, Optional[Dict[str, str]]) -> str
"""Given a class object, return a fully-qualified name.
This works for things I've tested in matplotlib so far, but may not be
@@ -240,7 +239,7 @@ class InheritanceGraph:
return result
def get_all_class_names(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
"""Get all of the class names involved in the graph."""
return [fullname for (_, fullname, _, _) in self.class_info]
@@ -263,16 +262,16 @@ class InheritanceGraph:
}
def _format_node_attrs(self, attrs):
- # type: (Dict) -> unicode
+ # type: (Dict) -> str
return ','.join(['%s=%s' % x for x in sorted(attrs.items())])
def _format_graph_attrs(self, attrs):
- # type: (Dict) -> unicode
+ # type: (Dict) -> str
return ''.join(['%s=%s;\n' % x for x in sorted(attrs.items())])
def generate_dot(self, name, urls={}, env=None,
graph_attrs={}, node_attrs={}, edge_attrs={}):
- # type: (unicode, Dict, BuildEnvironment, Dict, Dict, Dict) -> unicode
+ # type: (str, Dict, BuildEnvironment, Dict, Dict, Dict) -> str
"""Generate a graphviz dot graph from the classes that were passed in
to __init__.
@@ -294,7 +293,7 @@ class InheritanceGraph:
n_attrs.update(env.config.inheritance_node_attrs)
e_attrs.update(env.config.inheritance_edge_attrs)
- res = [] # type: List[unicode]
+ res = [] # type: List[str]
res.append('digraph %s {\n' % name)
res.append(self._format_graph_attrs(g_attrs))
@@ -389,7 +388,7 @@ class InheritanceDiagram(SphinxDirective):
def get_graph_hash(node):
- # type: (inheritance_diagram) -> unicode
+ # type: (inheritance_diagram) -> str
encoded = (node['content'] + str(node['parts'])).encode('utf-8')
return md5(encoded).hexdigest()[-10:]
@@ -466,7 +465,7 @@ def skip(self, node):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.setup_extension('sphinx.ext.graphviz')
app.add_node(
inheritance_diagram,
diff --git a/sphinx/ext/intersphinx.py b/sphinx/ext/intersphinx.py
index 2c7d7bf51..b40edabbf 100644
--- a/sphinx/ext/intersphinx.py
+++ b/sphinx/ext/intersphinx.py
@@ -24,8 +24,6 @@
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
-
import functools
import posixpath
import sys
@@ -49,7 +47,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
Inventory = Dict[text_type, Dict[text_type, Tuple[text_type, text_type, text_type, text_type]]] # NOQA
@@ -70,7 +67,7 @@ class InventoryAdapter:
@property
def cache(self):
- # type: () -> Dict[unicode, Tuple[unicode, int, Inventory]]
+ # type: () -> Dict[str, Tuple[str, int, Inventory]]
return self.env.intersphinx_cache # type: ignore
@property
@@ -80,7 +77,7 @@ class InventoryAdapter:
@property
def named_inventory(self):
- # type: () -> Dict[unicode, Inventory]
+ # type: () -> Dict[str, Inventory]
return self.env.intersphinx_named_inventory # type: ignore
def clear(self):
@@ -90,7 +87,7 @@ class InventoryAdapter:
def _strip_basic_auth(url):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Returns *url* with basic auth credentials removed. Also returns the
basic auth username and password if they're present in *url*.
@@ -112,7 +109,7 @@ def _strip_basic_auth(url):
def _read_from_url(url, config=None):
- # type: (unicode, Config) -> IO
+ # type: (str, Config) -> IO
"""Reads data from *url* with an HTTP *GET*.
This function supports fetching from resources which use basic HTTP auth as
@@ -138,7 +135,7 @@ def _read_from_url(url, config=None):
def _get_safe_url(url):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Gets version of *url* with basic auth passwords obscured. This function
returns results suitable for printing and logging.
@@ -164,7 +161,7 @@ def _get_safe_url(url):
def fetch_inventory(app, uri, inv):
- # type: (Sphinx, unicode, Any) -> Any
+ # type: (Sphinx, str, Any) -> Any
"""Fetch, parse and return an intersphinx inventory file."""
# both *uri* (base URI of the links to generate) and *inv* (actual
# location of the inventory file) can be local or remote URIs
@@ -211,9 +208,9 @@ def load_mappings(app):
inventories = InventoryAdapter(app.builder.env)
update = False
for key, value in app.config.intersphinx_mapping.items():
- name = None # type: unicode
- uri = None # type: unicode
- inv = None # type: Union[unicode, Tuple[unicode, ...]]
+ name = None # type: str
+ uri = None # type: str
+ inv = None # type: Union[str, Tuple[str, ...]]
if isinstance(value, (list, tuple)):
# new format
@@ -291,7 +288,7 @@ def missing_reference(app, env, node, contnode):
"""Attempt to resolve a missing reference via intersphinx references."""
target = node['reftarget']
inventories = InventoryAdapter(env)
- objtypes = None # type: List[unicode]
+ objtypes = None # type: List[str]
if node['reftype'] == 'any':
# we search anything!
objtypes = ['%s:%s' % (domain.name, objtype)
@@ -365,7 +362,7 @@ def missing_reference(app, env, node, contnode):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_config_value('intersphinx_mapping', {}, True)
app.add_config_value('intersphinx_cache_limit', 5, False)
app.add_config_value('intersphinx_timeout', None, False)
@@ -379,7 +376,7 @@ def setup(app):
def inspect_main(argv):
- # type: (List[unicode]) -> None
+ # type: (List[str]) -> None
"""Debug functionality to print out an inventory"""
if len(argv) < 1:
print("Print out an inventory file.\n"
@@ -396,7 +393,7 @@ def inspect_main(argv):
config = MockConfig()
def warn(self, msg):
- # type: (unicode) -> None
+ # type: (str) -> None
print(msg, file=sys.stderr)
try:
diff --git a/sphinx/ext/jsmath.py b/sphinx/ext/jsmath.py
index ed4f7739e..b4312985c 100644
--- a/sphinx/ext/jsmath.py
+++ b/sphinx/ext/jsmath.py
@@ -27,7 +27,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.writers.html import HTMLTranslator # NOQA
- from sphinx.util.typing import unicode # NOQA
def html_visit_math(self, node):
@@ -81,7 +80,7 @@ def install_jsmath(app, env):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_html_math_renderer('jsmath',
(html_visit_math, None),
(html_visit_displaymath, None))
diff --git a/sphinx/ext/linkcode.py b/sphinx/ext/linkcode.py
index f13ba7277..c9f36cfbe 100644
--- a/sphinx/ext/linkcode.py
+++ b/sphinx/ext/linkcode.py
@@ -20,7 +20,6 @@ if False:
# For type annotation
from typing import Any, Dict, Set # NOQA
from sphinx.application import Sphinx # NOQA
- from sphinx.util.typing import unicode # NOQA
class LinkcodeError(SphinxError):
@@ -45,7 +44,7 @@ def doctree_read(app, doctree):
for objnode in doctree.traverse(addnodes.desc):
domain = objnode.get('domain')
- uris = set() # type: Set[unicode]
+ uris = set() # type: Set[str]
for signode in objnode:
if not isinstance(signode, addnodes.desc_signature):
continue
@@ -78,7 +77,7 @@ def doctree_read(app, doctree):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.connect('doctree-read', doctree_read)
app.add_config_value('linkcode_resolve', None, '')
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
diff --git a/sphinx/ext/mathbase.py b/sphinx/ext/mathbase.py
index 341f07486..e5ed2d6db 100644
--- a/sphinx/ext/mathbase.py
+++ b/sphinx/ext/mathbase.py
@@ -25,7 +25,6 @@ if False:
# For type annotation
from typing import Any, Callable, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
- from sphinx.util.typing import unicode # NOQA
from sphinx.writers.html import HTMLTranslator # NOQA
@@ -45,7 +44,7 @@ def math_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
def get_node_equation_number(writer, node):
- # type: (HTMLTranslator, nodes.math_block) -> unicode
+ # type: (HTMLTranslator, nodes.math_block) -> str
warnings.warn('sphinx.ext.mathbase.get_node_equation_number() is moved to '
'sphinx.util.math package.',
RemovedInSphinx30Warning, stacklevel=2)
@@ -54,7 +53,7 @@ def get_node_equation_number(writer, node):
def wrap_displaymath(text, label, numbering):
- # type: (unicode, unicode, bool) -> unicode
+ # type: (str, str, bool) -> str
warnings.warn('sphinx.ext.mathbase.wrap_displaymath() is moved to '
'sphinx.util.math package.',
RemovedInSphinx30Warning, stacklevel=2)
diff --git a/sphinx/ext/mathjax.py b/sphinx/ext/mathjax.py
index d4303378c..dcaf385d7 100644
--- a/sphinx/ext/mathjax.py
+++ b/sphinx/ext/mathjax.py
@@ -29,7 +29,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.writers.html import HTMLTranslator # NOQA
- from sphinx.util.typing import unicode # NOQA
def html_visit_math(self, node):
@@ -97,7 +96,7 @@ def install_mathjax(app, env):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_html_math_renderer('mathjax',
(html_visit_math, None),
(html_visit_displaymath, None))
diff --git a/sphinx/ext/napoleon/__init__.py b/sphinx/ext/napoleon/__init__.py
index cbe02b015..f001f4a4d 100644
--- a/sphinx/ext/napoleon/__init__.py
+++ b/sphinx/ext/napoleon/__init__.py
@@ -16,7 +16,6 @@ from sphinx.ext.napoleon.docstring import GoogleDocstring, NumpyDocstring
if False:
# For type annotation
from typing import Any, Dict, List # NOQA
- from sphinx.util.typing import unicode # NOQA
class Config:
@@ -278,7 +277,7 @@ class Config:
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
"""Sphinx extension setup function.
When the extension is loaded, Sphinx imports this module and executes
@@ -336,7 +335,7 @@ def _patch_python_domain():
def _process_docstring(app, what, name, obj, options, lines):
- # type: (Sphinx, unicode, unicode, Any, Any, List[unicode]) -> None
+ # type: (Sphinx, str, str, Any, Any, List[str]) -> None
"""Process the docstring for a given python object.
Called when autodoc has read and processed a docstring. `lines` is a list
@@ -386,7 +385,7 @@ def _process_docstring(app, what, name, obj, options, lines):
def _skip_member(app, what, name, obj, skip, options):
- # type: (Sphinx, unicode, unicode, Any, bool, Any) -> bool
+ # type: (Sphinx, str, str, Any, bool, Any) -> bool
"""Determine if private and special class members are included in docs.
The following settings in conf.py determine if private and special class
diff --git a/sphinx/ext/napoleon/docstring.py b/sphinx/ext/napoleon/docstring.py
index 5e5f96abe..3f971aa75 100644
--- a/sphinx/ext/napoleon/docstring.py
+++ b/sphinx/ext/napoleon/docstring.py
@@ -24,7 +24,6 @@ if False:
from typing import Any, Callable, Dict, List, Tuple, Type, Union # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config as SphinxConfig # NOQA
- from sphinx.util.typing import unicode # NOQA
_directive_regex = re.compile(r'\.\. \S+::')
@@ -108,7 +107,7 @@ class GoogleDocstring(UnicodeMixin):
def __init__(self, docstring, config=None, app=None, what='', name='',
obj=None, options=None):
- # type: (Union[unicode, List[unicode]], SphinxConfig, Sphinx, unicode, unicode, Any, Any) -> None # NOQA
+ # type: (Union[str, List[str]], SphinxConfig, Sphinx, str, str, Any, Any) -> None
self._config = config
self._app = app
@@ -135,11 +134,11 @@ class GoogleDocstring(UnicodeMixin):
else:
lines = docstring
self._line_iter = modify_iter(lines, modifier=lambda s: s.rstrip())
- self._parsed_lines = [] # type: List[unicode]
+ self._parsed_lines = [] # type: List[str]
self._is_in_section = False
self._section_indent = 0
if not hasattr(self, '_directive_sections'):
- self._directive_sections = [] # type: List[unicode]
+ self._directive_sections = [] # type: List[str]
if not hasattr(self, '_sections'):
self._sections = {
'args': self._parse_parameters_section,
@@ -172,14 +171,14 @@ class GoogleDocstring(UnicodeMixin):
'warns': self._parse_warns_section,
'yield': self._parse_yields_section,
'yields': self._parse_yields_section,
- } # type: Dict[unicode, Callable]
+ } # type: Dict[str, Callable]
self._load_custom_sections()
self._parse()
def __unicode__(self):
- # type: () -> unicode
+ # type: () -> str
"""Return the parsed docstring in reStructuredText format.
Returns
@@ -191,7 +190,7 @@ class GoogleDocstring(UnicodeMixin):
return '\n'.join(self.lines())
def lines(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
"""Return the parsed lines of the docstring in reStructuredText format.
Returns
@@ -203,7 +202,7 @@ class GoogleDocstring(UnicodeMixin):
return self._parsed_lines
def _consume_indented_block(self, indent=1):
- # type: (int) -> List[unicode]
+ # type: (int) -> List[str]
lines = []
line = self._line_iter.peek()
while(not self._is_section_break() and
@@ -213,7 +212,7 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _consume_contiguous(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
lines = []
while (self._line_iter.has_next() and
self._line_iter.peek() and
@@ -222,7 +221,7 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _consume_empty(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
lines = []
line = self._line_iter.peek()
while self._line_iter.has_next() and not line:
@@ -231,11 +230,11 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _consume_field(self, parse_type=True, prefer_type=False):
- # type: (bool, bool) -> Tuple[unicode, unicode, List[unicode]]
+ # type: (bool, bool) -> Tuple[str, str, List[str]]
line = next(self._line_iter)
before, colon, after = self._partition_field_on_colon(line)
- _name, _type, _desc = before, '', after # type: unicode, unicode, unicode
+ _name, _type, _desc = before, '', after
if parse_type:
match = _google_typed_arg_regex.match(before)
@@ -253,7 +252,7 @@ class GoogleDocstring(UnicodeMixin):
return _name, _type, _descs
def _consume_fields(self, parse_type=True, prefer_type=False):
- # type: (bool, bool) -> List[Tuple[unicode, unicode, List[unicode]]]
+ # type: (bool, bool) -> List[Tuple[str, str, List[str]]]
self._consume_empty()
fields = []
while not self._is_section_break():
@@ -263,7 +262,7 @@ class GoogleDocstring(UnicodeMixin):
return fields
def _consume_inline_attribute(self):
- # type: () -> Tuple[unicode, List[unicode]]
+ # type: () -> Tuple[str, List[str]]
line = next(self._line_iter)
_type, colon, _desc = self._partition_field_on_colon(line)
if not colon or not _desc:
@@ -274,11 +273,11 @@ class GoogleDocstring(UnicodeMixin):
return _type, _descs
def _consume_returns_section(self):
- # type: () -> List[Tuple[unicode, unicode, List[unicode]]]
+ # type: () -> List[Tuple[str, str, List[str]]]
lines = self._dedent(self._consume_to_next_section())
if lines:
before, colon, after = self._partition_field_on_colon(lines[0])
- _name, _type, _desc = '', '', lines # type: unicode, unicode, List[unicode]
+ _name, _type, _desc = '', '', lines
if colon:
if after:
@@ -294,12 +293,12 @@ class GoogleDocstring(UnicodeMixin):
return []
def _consume_usage_section(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
lines = self._dedent(self._consume_to_next_section())
return lines
def _consume_section_header(self):
- # type: () -> unicode
+ # type: () -> str
section = next(self._line_iter)
stripped_section = section.strip(':')
if stripped_section.lower() in self._sections:
@@ -307,14 +306,14 @@ class GoogleDocstring(UnicodeMixin):
return section
def _consume_to_end(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
lines = []
while self._line_iter.has_next():
lines.append(next(self._line_iter))
return lines
def _consume_to_next_section(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
self._consume_empty()
lines = []
while not self._is_section_break():
@@ -322,7 +321,7 @@ class GoogleDocstring(UnicodeMixin):
return lines + self._consume_empty()
def _dedent(self, lines, full=False):
- # type: (List[unicode], bool) -> List[unicode]
+ # type: (List[str], bool) -> List[str]
if full:
return [line.lstrip() for line in lines]
else:
@@ -330,7 +329,7 @@ class GoogleDocstring(UnicodeMixin):
return [line[min_indent:] for line in lines]
def _escape_args_and_kwargs(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
if name[:2] == '**':
return r'\*\*' + name[2:]
elif name[:1] == '*':
@@ -339,7 +338,7 @@ class GoogleDocstring(UnicodeMixin):
return name
def _fix_field_desc(self, desc):
- # type: (List[unicode]) -> List[unicode]
+ # type: (List[str]) -> List[str]
if self._is_list(desc):
desc = [u''] + desc
elif desc[0].endswith('::'):
@@ -353,7 +352,7 @@ class GoogleDocstring(UnicodeMixin):
return desc
def _format_admonition(self, admonition, lines):
- # type: (unicode, List[unicode]) -> List[unicode]
+ # type: (str, List[str]) -> List[str]
lines = self._strip_empty(lines)
if len(lines) == 1:
return ['.. %s:: %s' % (admonition, lines[0].strip()), '']
@@ -364,7 +363,7 @@ class GoogleDocstring(UnicodeMixin):
return [u'.. %s::' % admonition, u'']
def _format_block(self, prefix, lines, padding=None):
- # type: (unicode, List[unicode], unicode) -> List[unicode]
+ # type: (str, List[str], str) -> List[str]
if lines:
if padding is None:
padding = ' ' * len(prefix)
@@ -382,7 +381,7 @@ class GoogleDocstring(UnicodeMixin):
def _format_docutils_params(self, fields, field_role='param',
type_role='type'):
- # type: (List[Tuple[unicode, unicode, List[unicode]]], unicode, unicode) -> List[unicode] # NOQA
+ # type: (List[Tuple[str, str, List[str]]], str, str) -> List[str]
lines = []
for _name, _type, _desc in fields:
_desc = self._strip_empty(_desc)
@@ -398,14 +397,14 @@ class GoogleDocstring(UnicodeMixin):
return lines + ['']
def _format_field(self, _name, _type, _desc):
- # type: (unicode, unicode, List[unicode]) -> List[unicode]
+ # type: (str, str, List[str]) -> List[str]
_desc = self._strip_empty(_desc)
has_desc = any(_desc)
separator = has_desc and ' -- ' or ''
if _name:
if _type:
if '`' in _type:
- field = '**%s** (%s)%s' % (_name, _type, separator) # type: unicode
+ field = '**%s** (%s)%s' % (_name, _type, separator)
else:
field = '**%s** (*%s*)%s' % (_name, _type, separator)
else:
@@ -428,11 +427,11 @@ class GoogleDocstring(UnicodeMixin):
return [field]
def _format_fields(self, field_type, fields):
- # type: (unicode, List[Tuple[unicode, unicode, List[unicode]]]) -> List[unicode]
+ # type: (str, List[Tuple[str, str, List[str]]]) -> List[str]
field_type = ':%s:' % field_type.strip()
padding = ' ' * len(field_type)
multi = len(fields) > 1
- lines = [] # type: List[unicode]
+ lines = [] # type: List[str]
for _name, _type, _desc in fields:
field = self._format_field(_name, _type, _desc)
if multi:
@@ -457,21 +456,21 @@ class GoogleDocstring(UnicodeMixin):
return 0
def _get_indent(self, line):
- # type: (unicode) -> int
+ # type: (str) -> int
for i, s in enumerate(line):
if not s.isspace():
return i
return len(line)
def _get_initial_indent(self, lines):
- # type: (List[unicode]) -> int
+ # type: (List[str]) -> int
for line in lines:
if line:
return self._get_indent(line)
return 0
def _get_min_indent(self, lines):
- # type: (List[unicode]) -> int
+ # type: (List[str]) -> int
min_indent = None
for line in lines:
if line:
@@ -483,11 +482,11 @@ class GoogleDocstring(UnicodeMixin):
return min_indent or 0
def _indent(self, lines, n=4):
- # type: (List[unicode], int) -> List[unicode]
+ # type: (List[str], int) -> List[str]
return [(' ' * n) + line for line in lines]
def _is_indented(self, line, indent=1):
- # type: (unicode, int) -> bool
+ # type: (str, int) -> bool
for i, s in enumerate(line):
if i >= indent:
return True
@@ -496,7 +495,7 @@ class GoogleDocstring(UnicodeMixin):
return False
def _is_list(self, lines):
- # type: (List[unicode]) -> bool
+ # type: (List[str]) -> bool
if not lines:
return False
if _bullet_list_regex.match(lines[0]):
@@ -561,7 +560,7 @@ class GoogleDocstring(UnicodeMixin):
if self._name and (self._what == 'attribute' or self._what == 'data'):
# Implicit stop using StopIteration no longer allowed in
# Python 3.7; see PEP 479
- res = [] # type: List[unicode]
+ res = [] # type: List[str]
try:
res = self._parse_attribute_docstring()
except StopIteration:
@@ -590,12 +589,12 @@ class GoogleDocstring(UnicodeMixin):
self._parsed_lines.extend(lines)
def _parse_admonition(self, admonition, section):
- # type (unicode, unicode) -> List[unicode]
+ # type (str, str) -> List[str]
lines = self._consume_to_next_section()
return self._format_admonition(admonition, lines)
def _parse_attribute_docstring(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
_type, _desc = self._consume_inline_attribute()
lines = self._format_field('', '', _desc)
if _type:
@@ -603,12 +602,12 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _parse_attributes_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
lines = []
for _name, _type, _desc in self._consume_fields():
if self._config.napoleon_use_ivar:
_name = self._qualify_name(_name, self._obj)
- field = ':ivar %s: ' % _name # type: unicode
+ field = ':ivar %s: ' % _name
lines.extend(self._format_block(field, _desc))
if _type:
lines.append(':vartype %s: %s' % (_name, _type))
@@ -625,11 +624,11 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _parse_examples_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
labels = {
'example': _('Example'),
'examples': _('Examples'),
- } # type: Dict[unicode, unicode]
+ }
use_admonition = self._config.napoleon_use_admonition_for_examples
label = labels.get(section.lower(), section)
return self._parse_generic_section(label, use_admonition)
@@ -639,19 +638,19 @@ class GoogleDocstring(UnicodeMixin):
return self._parse_generic_section(section, False)
def _parse_usage_section(self, section):
- # type: (unicode) -> List[unicode]
- header = ['.. rubric:: Usage:', ''] # type: List[unicode]
- block = ['.. code-block:: python', ''] # type: List[unicode]
+ # type: (str) -> List[str]
+ header = ['.. rubric:: Usage:', '']
+ block = ['.. code-block:: python', '']
lines = self._consume_usage_section()
lines = self._indent(lines, 3)
return header + block + lines + ['']
def _parse_generic_section(self, section, use_admonition):
- # type: (unicode, bool) -> List[unicode]
+ # type: (str, bool) -> List[str]
lines = self._strip_empty(self._consume_to_next_section())
lines = self._dedent(lines)
if use_admonition:
- header = '.. admonition:: %s' % section # type: unicode
+ header = '.. admonition:: %s' % section
lines = self._indent(lines, 3)
else:
header = '.. rubric:: %s' % section
@@ -661,7 +660,7 @@ class GoogleDocstring(UnicodeMixin):
return [header, '']
def _parse_keyword_arguments_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
fields = self._consume_fields()
if self._config.napoleon_use_keyword:
return self._format_docutils_params(
@@ -672,8 +671,8 @@ class GoogleDocstring(UnicodeMixin):
return self._format_fields(_('Keyword Arguments'), fields)
def _parse_methods_section(self, section):
- # type: (unicode) -> List[unicode]
- lines = [] # type: List[unicode]
+ # type: (str) -> List[str]
+ lines = [] # type: List[str]
for _name, _type, _desc in self._consume_fields(parse_type=False):
lines.append('.. method:: %s' % _name)
if _desc:
@@ -682,16 +681,16 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _parse_notes_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
use_admonition = self._config.napoleon_use_admonition_for_notes
return self._parse_generic_section(_('Notes'), use_admonition)
def _parse_other_parameters_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
return self._format_fields(_('Other Parameters'), self._consume_fields())
def _parse_parameters_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
fields = self._consume_fields()
if self._config.napoleon_use_param:
return self._format_docutils_params(fields)
@@ -699,9 +698,9 @@ class GoogleDocstring(UnicodeMixin):
return self._format_fields(_('Parameters'), fields)
def _parse_raises_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
fields = self._consume_fields(parse_type=False, prefer_type=True)
- lines = [] # type: List[unicode]
+ lines = [] # type: List[str]
for _name, _type, _desc in fields:
m = self._name_rgx.match(_type).groupdict()
if m['role']:
@@ -715,12 +714,12 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _parse_references_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
use_admonition = self._config.napoleon_use_admonition_for_references
return self._parse_generic_section(_('References'), use_admonition)
def _parse_returns_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
fields = self._consume_returns_section()
multi = len(fields) > 1
if multi:
@@ -728,7 +727,7 @@ class GoogleDocstring(UnicodeMixin):
else:
use_rtype = self._config.napoleon_use_rtype
- lines = [] # type: List[unicode]
+ lines = [] # type: List[str]
for _name, _type, _desc in fields:
if use_rtype:
field = self._format_field(_name, '', _desc)
@@ -749,23 +748,23 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _parse_see_also_section(self, section):
- # type (unicode) -> List[unicode]
+ # type (str) -> List[str]
return self._parse_admonition('seealso', section)
def _parse_warns_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
return self._format_fields(_('Warns'), self._consume_fields())
def _parse_yields_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
fields = self._consume_returns_section()
return self._format_fields(_('Yields'), fields)
def _partition_field_on_colon(self, line):
- # type: (unicode) -> Tuple[unicode, unicode, unicode]
+ # type: (str) -> Tuple[str, str, str]
before_colon = []
after_colon = []
- colon = '' # type: unicode
+ colon = ''
found_colon = False
for i, source in enumerate(_xref_regex.split(line)):
if found_colon:
@@ -785,7 +784,7 @@ class GoogleDocstring(UnicodeMixin):
"".join(after_colon).strip())
def _qualify_name(self, attr_name, klass):
- # type: (unicode, Type) -> unicode
+ # type: (str, Type) -> str
if klass and '.' not in attr_name:
if attr_name.startswith('~'):
attr_name = attr_name[1:]
@@ -797,7 +796,7 @@ class GoogleDocstring(UnicodeMixin):
return attr_name
def _strip_empty(self, lines):
- # type: (List[unicode]) -> List[unicode]
+ # type: (List[str]) -> List[str]
if lines:
start = -1
for i, line in enumerate(lines):
@@ -912,13 +911,13 @@ class NumpyDocstring(GoogleDocstring):
"""
def __init__(self, docstring, config=None, app=None, what='', name='',
obj=None, options=None):
- # type: (Union[unicode, List[unicode]], SphinxConfig, Sphinx, unicode, unicode, Any, Any) -> None # NOQA
+ # type: (Union[str, List[str]], SphinxConfig, Sphinx, str, str, Any, Any) -> None
self._directive_sections = ['.. index::']
super(NumpyDocstring, self).__init__(docstring, config, app, what,
name, obj, options)
def _consume_field(self, parse_type=True, prefer_type=False):
- # type: (bool, bool) -> Tuple[unicode, unicode, List[unicode]]
+ # type: (bool, bool) -> Tuple[str, str, List[str]]
line = next(self._line_iter)
if parse_type:
_name, _, _type = self._partition_field_on_colon(line)
@@ -935,11 +934,11 @@ class NumpyDocstring(GoogleDocstring):
return _name, _type, _desc
def _consume_returns_section(self):
- # type: () -> List[Tuple[unicode, unicode, List[unicode]]]
+ # type: () -> List[Tuple[str, str, List[str]]]
return self._consume_fields(prefer_type=True)
def _consume_section_header(self):
- # type: () -> unicode
+ # type: () -> str
section = next(self._line_iter)
if not _directive_regex.match(section):
# Consume the header underline
@@ -970,7 +969,7 @@ class NumpyDocstring(GoogleDocstring):
return False
def _parse_see_also_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
lines = self._consume_to_next_section()
try:
return self._parse_numpydoc_see_also_section(lines)
@@ -978,7 +977,7 @@ class NumpyDocstring(GoogleDocstring):
return self._format_admonition('seealso', lines)
def _parse_numpydoc_see_also_section(self, content):
- # type: (List[unicode]) -> List[unicode]
+ # type: (List[str]) -> List[str]
"""
Derived from the NumpyDoc implementation of _parse_see_also.
@@ -993,7 +992,7 @@ class NumpyDocstring(GoogleDocstring):
items = []
def parse_item_name(text):
- # type: (unicode) -> Tuple[unicode, unicode]
+ # type: (str) -> Tuple[str, str]
"""Match ':role:`name`' or 'name'"""
m = self._name_rgx.match(text)
if m:
@@ -1005,7 +1004,7 @@ class NumpyDocstring(GoogleDocstring):
raise ValueError("%s is not a item name" % text)
def push_item(name, rest):
- # type: (unicode, List[unicode]) -> None
+ # type: (str, List[str]) -> None
if not name:
return
name, role = parse_item_name(name)
@@ -1013,7 +1012,7 @@ class NumpyDocstring(GoogleDocstring):
del rest[:]
current_func = None
- rest = [] # type: List[unicode]
+ rest = [] # type: List[str]
for line in content:
if not line.strip():
@@ -1059,12 +1058,12 @@ class NumpyDocstring(GoogleDocstring):
'const': 'const',
'attribute': 'attr',
'attr': 'attr'
- } # type: Dict[unicode, unicode]
+ }
if self._what is None:
- func_role = 'obj' # type: unicode
+ func_role = 'obj'
else:
func_role = roles.get(self._what, '')
- lines = [] # type: List[unicode]
+ lines = [] # type: List[str]
last_had_desc = True
for func, desc, role in items:
if role:
diff --git a/sphinx/ext/todo.py b/sphinx/ext/todo.py
index 00ff00a26..fb5f19e54 100644
--- a/sphinx/ext/todo.py
+++ b/sphinx/ext/todo.py
@@ -31,7 +31,6 @@ if False:
from typing import Any, Dict, Iterable, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
from sphinx.writers.html import HTMLTranslator # NOQA
from sphinx.writers.latex import LaTeXTranslator # NOQA
@@ -133,7 +132,7 @@ class TodoList(SphinxDirective):
def process_todo_nodes(app, doctree, fromdocname):
- # type: (Sphinx, nodes.document, unicode) -> None
+ # type: (Sphinx, nodes.document, str) -> None
node = None # type: nodes.Element
if not app.config['todo_include_todos']:
for node in doctree.traverse(todo_node):
@@ -201,7 +200,7 @@ def process_todo_nodes(app, doctree, fromdocname):
def purge_todos(app, env, docname):
- # type: (Sphinx, BuildEnvironment, unicode) -> None
+ # type: (Sphinx, BuildEnvironment, str) -> None
if not hasattr(env, 'todo_all_todos'):
return
env.todo_all_todos = [todo for todo in env.todo_all_todos # type: ignore
@@ -209,7 +208,7 @@ def purge_todos(app, env, docname):
def merge_info(app, env, docnames, other):
- # type: (Sphinx, BuildEnvironment, Iterable[unicode], BuildEnvironment) -> None
+ # type: (Sphinx, BuildEnvironment, Iterable[str], BuildEnvironment) -> None
if not hasattr(other, 'todo_all_todos'):
return
if not hasattr(env, 'todo_all_todos'):
@@ -247,7 +246,7 @@ def latex_depart_todo_node(self, node):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_event('todo-defined')
app.add_config_value('todo_include_todos', False, 'html')
app.add_config_value('todo_link_only', False, 'html')
diff --git a/sphinx/ext/viewcode.py b/sphinx/ext/viewcode.py
index ec75c2166..409ff4a86 100644
--- a/sphinx/ext/viewcode.py
+++ b/sphinx/ext/viewcode.py
@@ -29,13 +29,12 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
def _get_full_modname(app, modname, attribute):
- # type: (Sphinx, str, unicode) -> unicode
+ # type: (Sphinx, str, str) -> str
try:
return get_full_modname(modname, attribute)
except AttributeError:
@@ -97,7 +96,7 @@ def doctree_read(app, doctree):
for objnode in doctree.traverse(addnodes.desc):
if objnode.get('domain') != 'py':
continue
- names = set() # type: Set[unicode]
+ names = set() # type: Set[str]
for signode in objnode:
if not isinstance(signode, addnodes.desc_signature):
continue
@@ -130,7 +129,7 @@ def doctree_read(app, doctree):
def env_merge_info(app, env, docnames, other):
- # type: (Sphinx, BuildEnvironment, Iterable[unicode], BuildEnvironment) -> None
+ # type: (Sphinx, BuildEnvironment, Iterable[str], BuildEnvironment) -> None
if not hasattr(other, '_viewcode_modules'):
return
# create a _viewcode_modules dict on the main environment
@@ -151,7 +150,7 @@ def missing_reference(app, env, node, contnode):
def collect_pages(app):
- # type: (Sphinx) -> Iterator[Tuple[unicode, Dict[unicode, Any], unicode]]
+ # type: (Sphinx) -> Iterator[Tuple[str, Dict[str, Any], str]]
env = app.builder.env
if not hasattr(env, '_viewcode_modules'):
return
@@ -216,7 +215,7 @@ def collect_pages(app):
'title': modname,
'body': (_('<h1>Source code for %s</h1>') % modname +
'\n'.join(lines)),
- } # type: Dict[unicode, Any]
+ }
yield (pagename, context, 'page.html')
if not modnames:
@@ -257,7 +256,7 @@ def migrate_viewcode_import(app, config):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_config_value('viewcode_import', None, False)
app.add_config_value('viewcode_enable_epub', False, False)
app.add_config_value('viewcode_follow_imported_members', True, False)
diff --git a/sphinx/extension.py b/sphinx/extension.py
index c6f1d044f..9df449eef 100644
--- a/sphinx/extension.py
+++ b/sphinx/extension.py
@@ -18,14 +18,13 @@ if False:
from typing import Any, Dict # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
class Extension:
def __init__(self, name, module, **kwargs):
- # type: (unicode, Any, Any) -> None
+ # type: (str, Any, Any) -> None
self.name = name
self.module = module
self.metadata = kwargs
@@ -63,7 +62,7 @@ def verify_needs_extensions(app, config):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.connect('config-inited', verify_needs_extensions)
return {
diff --git a/sphinx/highlighting.py b/sphinx/highlighting.py
index 72d4942a0..40244215f 100644
--- a/sphinx/highlighting.py
+++ b/sphinx/highlighting.py
@@ -34,7 +34,6 @@ if False:
# For type annotation
from typing import Any, Dict # NOQA
from pygments.formatter import Formatter # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -47,7 +46,7 @@ lexers = {
'pycon3': PythonConsoleLexer(python3=True, stripnl=False),
'rest': RstLexer(stripnl=False),
'c': CLexer(stripnl=False),
-} # type: Dict[unicode, Lexer]
+} # type: Dict[str, Lexer]
for _lexer in lexers.values():
_lexer.add_filter('raiseonerror')
@@ -70,7 +69,7 @@ class PygmentsBridge:
latex_formatter = LatexFormatter
def __init__(self, dest='html', stylename='sphinx', trim_doctest_flags=None):
- # type: (unicode, unicode, bool) -> None
+ # type: (str, str, bool) -> None
self.dest = dest
if stylename is None or stylename == 'sphinx':
style = SphinxStyle
@@ -82,7 +81,7 @@ class PygmentsBridge:
stylename)
else:
style = get_style_by_name(stylename)
- self.formatter_args = {'style': style} # type: Dict[unicode, Any]
+ self.formatter_args = {'style': style} # type: Dict[str, Any]
if dest == 'html':
self.formatter = self.html_formatter
else:
@@ -100,7 +99,7 @@ class PygmentsBridge:
return self.formatter(**kwargs)
def unhighlighted(self, source):
- # type: (unicode) -> unicode
+ # type: (str) -> str
warnings.warn('PygmentsBridge.unhighlighted() is now deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
if self.dest == 'html':
@@ -114,7 +113,7 @@ class PygmentsBridge:
source + '\\end{Verbatim}\n'
def highlight_block(self, source, lang, opts=None, location=None, force=False, **kwargs):
- # type: (unicode, unicode, Any, Any, bool, Any) -> unicode
+ # type: (str, str, Any, Any, bool, Any) -> str
if not isinstance(source, text_type):
source = source.decode()
@@ -174,7 +173,7 @@ class PygmentsBridge:
return hlsource.translate(tex_hl_escape_map_new)
def get_stylesheet(self):
- # type: () -> unicode
+ # type: () -> str
formatter = self.get_formatter()
if self.dest == 'html':
return formatter.get_style_defs('.highlight')
diff --git a/sphinx/io.py b/sphinx/io.py
index b4a99fe41..d405081b1 100644
--- a/sphinx/io.py
+++ b/sphinx/io.py
@@ -48,7 +48,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -170,7 +169,7 @@ class SphinxBaseFileInput(FileInput):
super(SphinxBaseFileInput, self).__init__(*args, **kwds)
def read(self):
- # type: () -> unicode
+ # type: () -> str
"""Reads the contents from file.
After reading, it emits Sphinx event ``source-read``.
@@ -213,7 +212,7 @@ class SphinxRSTFileInput(SphinxBaseFileInput):
supported = ('restructuredtext',)
def prepend_prolog(self, text, prolog):
- # type: (StringList, unicode) -> None
+ # type: (StringList, str) -> None
docinfo = self.count_docinfo_lines(text)
if docinfo:
# insert a blank line after docinfo
@@ -227,7 +226,7 @@ class SphinxRSTFileInput(SphinxBaseFileInput):
text.insert(docinfo + lineno + 1, '', '<generated>', 0)
def append_epilog(self, text, epilog):
- # type: (StringList, unicode) -> None
+ # type: (StringList, str) -> None
# append a blank line and rst_epilog
text.append('', '<generated>', 0)
for lineno, line in enumerate(epilog.splitlines()):
@@ -265,7 +264,7 @@ class FiletypeNotFoundError(Exception):
def get_filetype(source_suffix, filename):
- # type: (Dict[unicode, unicode], unicode) -> unicode
+ # type: (Dict[str, str], str) -> str
for suffix, filetype in source_suffix.items():
if filename.endswith(suffix):
# If default filetype (None), considered as restructuredtext.
@@ -275,7 +274,7 @@ def get_filetype(source_suffix, filename):
def read_doc(app, env, filename):
- # type: (Sphinx, BuildEnvironment, unicode) -> nodes.document
+ # type: (Sphinx, BuildEnvironment, str) -> nodes.document
"""Parse a document and convert to doctree."""
# set up error_handler for the target document
error_handler = UnicodeDecodeErrorHandler(env.docname)
@@ -307,7 +306,7 @@ def read_doc(app, env, filename):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.registry.add_source_input(SphinxFileInput)
return {
diff --git a/sphinx/jinja2glue.py b/sphinx/jinja2glue.py
index b6826f3db..8544937d5 100644
--- a/sphinx/jinja2glue.py
+++ b/sphinx/jinja2glue.py
@@ -28,18 +28,17 @@ if False:
from jinja2.environment import Environment # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.theming import Theme # NOQA
- from sphinx.util.typing import unicode # NOQA
def _tobool(val):
- # type: (unicode) -> bool
+ # type: (str) -> bool
if isinstance(val, str):
return val.lower() in ('true', '1', 'yes', 'on')
return bool(val)
def _toint(val):
- # type: (unicode) -> int
+ # type: (str) -> int
try:
return int(val)
except ValueError:
@@ -47,7 +46,7 @@ def _toint(val):
def _todim(val):
- # type: (Union[int, unicode]) -> unicode
+ # type: (Union[int, str]) -> str
"""
Make val a css dimension. In particular the following transformations
are performed:
@@ -88,7 +87,7 @@ def _slice_index(values, slices):
def accesskey(context, key):
- # type: (Any, unicode) -> unicode
+ # type: (Any, str) -> str
"""Helper to output each access key only once."""
if '_accesskeys' not in context:
context.vars['_accesskeys'] = {}
@@ -116,7 +115,7 @@ class idgen:
@contextfunction
def warning(context, message, *args, **kwargs):
- # type: (Dict, unicode, Any, Any) -> unicode
+ # type: (Dict, str, Any, Any) -> str
if 'pagename' in context:
filename = context.get('pagename') + context.get('file_suffix', '')
message = 'in rendering %s: %s' % (filename, message)
@@ -132,7 +131,7 @@ class SphinxFileSystemLoader(FileSystemLoader):
"""
def get_source(self, environment, template):
- # type: (Environment, unicode) -> Tuple[unicode, unicode, Callable]
+ # type: (Environment, str) -> Tuple[str, str, Callable]
for searchpath in self.searchpath:
filename = path.join(searchpath, template)
f = open_if_exists(filename)
@@ -161,7 +160,7 @@ class BuiltinTemplateLoader(TemplateBridge, BaseLoader):
# TemplateBridge interface
def init(self, builder, theme=None, dirs=None):
- # type: (Builder, Theme, List[unicode]) -> None
+ # type: (Builder, Theme, List[str]) -> None
# create a chain of paths to search
if theme:
# the theme's own dir and its bases' dirs
@@ -205,11 +204,11 @@ class BuiltinTemplateLoader(TemplateBridge, BaseLoader):
self.environment.install_gettext_translations(builder.app.translator) # type: ignore # NOQA
def render(self, template, context): # type: ignore
- # type: (unicode, Dict) -> unicode
+ # type: (str, Dict) -> str
return self.environment.get_template(template).render(context)
def render_string(self, source, context):
- # type: (unicode, Dict) -> unicode
+ # type: (str, Dict) -> str
return self.environment.from_string(source).render(context)
def newest_template_mtime(self):
@@ -219,7 +218,7 @@ class BuiltinTemplateLoader(TemplateBridge, BaseLoader):
# Loader interface
def get_source(self, environment, template):
- # type: (Environment, unicode) -> Tuple[unicode, unicode, Callable]
+ # type: (Environment, str) -> Tuple[str, str, Callable]
loaders = self.loaders
# exclamation mark starts search from theme
if template.startswith('!'):
diff --git a/sphinx/locale/__init__.py b/sphinx/locale/__init__.py
index e10bcfdf4..162a2f0e4 100644
--- a/sphinx/locale/__init__.py
+++ b/sphinx/locale/__init__.py
@@ -12,18 +12,16 @@
import gettext
import locale
import warnings
-from collections import defaultdict
+from collections import UserString, defaultdict
from gettext import NullTranslations
from six import text_type
-from six.moves import UserString
from sphinx.deprecation import RemovedInSphinx30Warning
if False:
# For type annotation
from typing import Any, Callable, Dict, Iterator, List, Tuple # NOQA
- from sphinx.util.typing import unicode # NOQA
class _TranslationProxy(UserString):
@@ -41,7 +39,7 @@ class _TranslationProxy(UserString):
__slots__ = ('_func', '_args')
def __new__(cls, func, *args):
- # type: (Callable, unicode) -> object
+ # type: (Callable, str) -> object
if not args:
# not called with "function" and "arguments", but a plain string
return text_type(func)
@@ -52,20 +50,20 @@ class _TranslationProxy(UserString):
return (self._func,) + self._args # type: ignore
def __init__(self, func, *args):
- # type: (Callable, unicode) -> None
+ # type: (Callable, str) -> None
self._func = func
self._args = args
@property
def data(self): # type: ignore
- # type: () -> unicode
+ # type: () -> str
return self._func(*self._args)
# replace function from UserString; it instantiates a self.__class__
# for the encoding result
def encode(self, encoding=None, errors=None): # type: ignore
- # type: (unicode, unicode) -> bytes
+ # type: (str, str) -> bytes
if encoding:
if errors:
return self.data.encode(encoding, errors)
@@ -83,45 +81,45 @@ class _TranslationProxy(UserString):
return str(self.data)
def __unicode__(self):
- # type: () -> unicode
+ # type: () -> str
return text_type(self.data)
def __add__(self, other): # type: ignore
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.data + other
def __radd__(self, other):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return other + self.data
def __mod__(self, other): # type: ignore
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.data % other
def __rmod__(self, other):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return other % self.data
def __mul__(self, other): # type: ignore
- # type: (Any) -> unicode
+ # type: (Any) -> str
return self.data * other
def __rmul__(self, other):
- # type: (Any) -> unicode
+ # type: (Any) -> str
return other * self.data
def __getattr__(self, name):
- # type: (unicode) -> Any
+ # type: (str) -> Any
if name == '__members__':
return self.__dir__()
return getattr(self.data, name)
def __getstate__(self):
- # type: () -> Tuple[Callable, Tuple[unicode, ...]]
+ # type: () -> Tuple[Callable, Tuple[str, ...]]
return self._func, self._args
def __setstate__(self, tup):
- # type: (Tuple[Callable, Tuple[unicode]]) -> None
+ # type: (Tuple[Callable, Tuple[str]]) -> None
self._func, self._args = tup
def __copy__(self):
@@ -137,7 +135,7 @@ class _TranslationProxy(UserString):
def mygettext(string):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Used instead of _ when creating TranslationProxies, because _ is
not bound yet at that time.
"""
@@ -147,7 +145,7 @@ def mygettext(string):
def lazy_gettext(string):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""A lazy version of `gettext`."""
# if isinstance(string, _TranslationProxy):
# return string
@@ -156,11 +154,11 @@ def lazy_gettext(string):
return _TranslationProxy(mygettext, string) # type: ignore
-translators = defaultdict(NullTranslations) # type: Dict[Tuple[unicode, unicode], NullTranslations] # NOQA
+translators = defaultdict(NullTranslations) # type: Dict[Tuple[str, str], NullTranslations]
def init(locale_dirs, language, catalog='sphinx', namespace='general'):
- # type: (List[unicode], unicode, unicode, unicode) -> Tuple[NullTranslations, bool]
+ # type: (List[str], str, str, str) -> Tuple[NullTranslations, bool]
"""Look for message catalogs in `locale_dirs` and *ensure* that there is at
least a NullTranslations catalog set in `translators`. If called multiple
times or if several ``.mo`` files are found, their contents are merged
@@ -202,7 +200,7 @@ def init(locale_dirs, language, catalog='sphinx', namespace='general'):
def init_console(locale_dir, catalog):
- # type: (unicode, unicode) -> Tuple[NullTranslations, bool]
+ # type: (str, str) -> Tuple[NullTranslations, bool]
"""Initialize locale for console.
.. versionadded:: 1.8
@@ -218,17 +216,17 @@ def init_console(locale_dir, catalog):
def get_translator(catalog='sphinx', namespace='general'):
- # type: (unicode, unicode) -> NullTranslations
+ # type: (str, str) -> NullTranslations
return translators[(namespace, catalog)]
def is_translator_registered(catalog='sphinx', namespace='general'):
- # type: (unicode, unicode) -> bool
+ # type: (str, str) -> bool
return (namespace, catalog) in translators
def _lazy_translate(catalog, namespace, message):
- # type: (unicode, unicode, unicode) -> unicode
+ # type: (str, str, str) -> str
"""Used instead of _ when creating TranslationProxy, because _ is
not bound yet at that time.
"""
@@ -261,7 +259,7 @@ def get_translation(catalog, namespace='general'):
.. versionadded:: 1.8
"""
def gettext(message, *args):
- # type: (unicode, *Any) -> unicode
+ # type: (str, *Any) -> str
if not is_translator_registered(catalog, namespace):
# not initialized yet
return _TranslationProxy(_lazy_translate, catalog, namespace, message) # type: ignore # NOQA
@@ -302,10 +300,10 @@ admonitionlabels = {
'seealso': _('See also'),
'tip': _('Tip'),
'warning': _('Warning'),
-} # type: Dict[unicode, unicode]
+}
# Moved to sphinx.directives.other (will be overrided later)
-versionlabels = {} # type: Dict[unicode, unicode]
+versionlabels = {} # type: Dict[str, str]
# Moved to sphinx.domains.python (will be overrided later)
-pairindextypes = {} # type: Dict[unicode, unicode]
+pairindextypes = {} # type: Dict[str, str]
diff --git a/sphinx/parsers.py b/sphinx/parsers.py
index c3fb2f1da..142bd5d6a 100644
--- a/sphinx/parsers.py
+++ b/sphinx/parsers.py
@@ -23,7 +23,6 @@ if False:
from docutils import nodes # NOQA
from docutils.transforms import Transform # NOQA
from sphinx.application import Sphinx # NOQA
- from sphinx.util.typing import unicode # NOQA
class Parser(docutils.parsers.Parser):
@@ -104,7 +103,7 @@ class RSTParser(docutils.parsers.rst.Parser, Parser):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_source_parser(RSTParser)
return {
diff --git a/sphinx/project.py b/sphinx/project.py
index b0277723f..d5f299598 100644
--- a/sphinx/project.py
+++ b/sphinx/project.py
@@ -20,17 +20,16 @@ from sphinx.util.osutil import SEP, relpath
if TYPE_CHECKING:
from typing import Dict, List, Set # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
-EXCLUDE_PATHS = ['**/_sources', '.#*', '**/.#*', '*.lproj/**'] # type: List[unicode]
+EXCLUDE_PATHS = ['**/_sources', '.#*', '**/.#*', '*.lproj/**']
class Project(object):
"""A project is source code set of Sphinx document."""
def __init__(self, srcdir, source_suffix):
- # type: (unicode, Dict[unicode, unicode]) -> None
+ # type: (str, Dict[str, str]) -> None
#: Source directory.
self.srcdir = srcdir
@@ -38,7 +37,7 @@ class Project(object):
self.source_suffix = source_suffix
#: The name of documents belongs to this project.
- self.docnames = set() # type: Set[unicode]
+ self.docnames = set() # type: Set[str]
def restore(self, other):
# type: (Project) -> None
@@ -46,7 +45,7 @@ class Project(object):
self.docnames = other.docnames
def discover(self, exclude_paths=[]):
- # type: (List[unicode]) -> Set[unicode]
+ # type: (List[str]) -> Set[str]
"""Find all document files in the source directory and put them in
:attr:`docnames`.
"""
@@ -63,7 +62,7 @@ class Project(object):
return self.docnames
def path2doc(self, filename):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Return the docname for the filename if the file is document.
*filename* should be absolute or relative to the source directory.
@@ -78,7 +77,7 @@ class Project(object):
return None
def doc2path(self, docname, basedir=True):
- # type: (unicode, bool) -> unicode
+ # type: (str, bool) -> str
"""Return the filename for the document name.
If *basedir* is True, return as an absolute path.
diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py
index fe6b5f7e1..e3e80772c 100644
--- a/sphinx/pycode/__init__.py
+++ b/sphinx/pycode/__init__.py
@@ -8,14 +8,11 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import re
-from io import BytesIO
+from io import BytesIO, StringIO
from zipfile import ZipFile
-from six import StringIO
-
from sphinx.errors import PycodeError
from sphinx.pycode.parser import Parser
from sphinx.util import get_module_source, detect_encoding
@@ -23,23 +20,22 @@ from sphinx.util import get_module_source, detect_encoding
if False:
# For type annotation
from typing import Any, Dict, IO, List, Tuple # NOQA
- from sphinx.util.typing import unicode # NOQA
class ModuleAnalyzer:
# cache for analyzer objects -- caches both by module and file name
- cache = {} # type: Dict[Tuple[unicode, unicode], Any]
+ cache = {} # type: Dict[Tuple[str, str], Any]
@classmethod
def for_string(cls, string, modname, srcname='<string>'):
- # type: (unicode, unicode, unicode) -> ModuleAnalyzer
+ # type: (str, str, str) -> ModuleAnalyzer
if isinstance(string, bytes):
return cls(BytesIO(string), modname, srcname)
return cls(StringIO(string), modname, srcname, decoded=True)
@classmethod
def for_file(cls, filename, modname):
- # type: (unicode, unicode) -> ModuleAnalyzer
+ # type: (str, str) -> ModuleAnalyzer
if ('file', filename) in cls.cache:
return cls.cache['file', filename]
try:
@@ -55,7 +51,7 @@ class ModuleAnalyzer:
@classmethod
def for_egg(cls, filename, modname):
- # type: (unicode, unicode) -> ModuleAnalyzer
+ # type: (str, str) -> ModuleAnalyzer
eggpath, relpath = re.split('(?<=\\.egg)/', filename)
try:
with ZipFile(eggpath) as egg:
@@ -86,7 +82,7 @@ class ModuleAnalyzer:
return obj
def __init__(self, source, modname, srcname, decoded=False):
- # type: (IO, unicode, unicode, bool) -> None
+ # type: (IO, str, str, bool) -> None
self.modname = modname # name of the module
self.srcname = srcname # name of the source file
@@ -101,9 +97,9 @@ class ModuleAnalyzer:
self.code = source.read()
# will be filled by parse()
- self.attr_docs = None # type: Dict[Tuple[unicode, unicode], List[unicode]]
- self.tagorder = None # type: Dict[unicode, int]
- self.tags = None # type: Dict[unicode, Tuple[unicode, int, int]]
+ self.attr_docs = None # type: Dict[Tuple[str, str], List[str]]
+ self.tagorder = None # type: Dict[str, int]
+ self.tags = None # type: Dict[str, Tuple[str, int, int]]
def parse(self):
# type: () -> None
@@ -125,7 +121,7 @@ class ModuleAnalyzer:
raise PycodeError('parsing %r failed: %r' % (self.srcname, exc))
def find_attr_docs(self):
- # type: () -> Dict[Tuple[unicode, unicode], List[unicode]]
+ # type: () -> Dict[Tuple[str, str], List[str]]
"""Find class and module-level attributes and their documentation."""
if self.attr_docs is None:
self.parse()
@@ -133,7 +129,7 @@ class ModuleAnalyzer:
return self.attr_docs
def find_tags(self):
- # type: () -> Dict[unicode, Tuple[unicode, int, int]]
+ # type: () -> Dict[str, Tuple[str, int, int]]
"""Find class, function and method definitions and their location."""
if self.tags is None:
self.parse()
diff --git a/sphinx/pycode/parser.py b/sphinx/pycode/parser.py
index e0ebe164e..71d7df781 100644
--- a/sphinx/pycode/parser.py
+++ b/sphinx/pycode/parser.py
@@ -22,7 +22,6 @@ from six import text_type
if False:
# For type annotation
from typing import Any, Dict, IO, List, Tuple # NOQA
- from sphinx.util.typing import unicode # NOQA
comment_re = re.compile(u'^\\s*#: ?(.*)\r?\n?$')
indent_re = re.compile(u'^\\s*$')
@@ -36,7 +35,7 @@ else:
def filter_whitespace(code):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return code.replace('\f', ' ') # replace FF (form feed) with whitespace
@@ -50,7 +49,7 @@ def get_assign_targets(node):
def get_lvar_names(node, self=None):
- # type: (ast.AST, ast.arg) -> List[unicode]
+ # type: (ast.AST, ast.arg) -> List[str]
"""Convert assignment-AST to variable names.
This raises `TypeError` if the assignment does not create new variable::
@@ -93,7 +92,7 @@ def get_lvar_names(node, self=None):
def dedent_docstring(s):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Remove common leading indentation from docstring."""
def dummy():
# type: () -> None
@@ -109,7 +108,7 @@ class Token:
"""Better token wrapper for tokenize module."""
def __init__(self, kind, value, start, end, source):
- # type: (int, Any, Tuple[int, int], Tuple[int, int], unicode) -> None # NOQA
+ # type: (int, Any, Tuple[int, int], Tuple[int, int], str) -> None
self.kind = kind
self.value = value
self.start = start
@@ -141,7 +140,7 @@ class Token:
class TokenProcessor:
def __init__(self, buffers):
- # type: (List[unicode]) -> None
+ # type: (List[str]) -> None
lines = iter(buffers)
self.buffers = buffers
self.tokens = tokenize.generate_tokens(lambda: next(lines))
@@ -149,7 +148,7 @@ class TokenProcessor:
self.previous = None # type: Token
def get_line(self, lineno):
- # type: (int) -> unicode
+ # type: (int) -> str
"""Returns specified line."""
return self.buffers[lineno - 1]
@@ -196,9 +195,9 @@ class AfterCommentParser(TokenProcessor):
"""
def __init__(self, lines):
- # type: (List[unicode]) -> None
+ # type: (List[str]) -> None
super(AfterCommentParser, self).__init__(lines)
- self.comment = None # type: unicode
+ self.comment = None # type: str
def fetch_rvalue(self):
# type: () -> List[Token]
@@ -240,20 +239,20 @@ class VariableCommentPicker(ast.NodeVisitor):
"""Python source code parser to pick up variable comments."""
def __init__(self, buffers, encoding):
- # type: (List[unicode], unicode) -> None
+ # type: (List[str], str) -> None
self.counter = itertools.count()
self.buffers = buffers
self.encoding = encoding
- self.context = [] # type: List[unicode]
- self.current_classes = [] # type: List[unicode]
+ self.context = [] # type: List[str]
+ self.current_classes = [] # type: List[str]
self.current_function = None # type: ast.FunctionDef
- self.comments = {} # type: Dict[Tuple[unicode, unicode], unicode]
+ self.comments = {} # type: Dict[Tuple[str, str], str]
self.previous = None # type: ast.AST
- self.deforders = {} # type: Dict[unicode, int]
+ self.deforders = {} # type: Dict[str, int]
super(VariableCommentPicker, self).__init__()
def add_entry(self, name):
- # type: (unicode) -> None
+ # type: (str) -> None
if self.current_function:
if self.current_classes and self.context[-1] == "__init__":
# store variable comments inside __init__ method of classes
@@ -266,7 +265,7 @@ class VariableCommentPicker(ast.NodeVisitor):
self.deforders[".".join(definition)] = next(self.counter)
def add_variable_comment(self, name, comment):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
if self.current_function:
if self.current_classes and self.context[-1] == "__init__":
# store variable comments inside __init__ method of classes
@@ -287,7 +286,7 @@ class VariableCommentPicker(ast.NodeVisitor):
return None
def get_line(self, lineno):
- # type: (int) -> unicode
+ # type: (int) -> str
"""Returns specified line."""
return self.buffers[lineno - 1]
@@ -388,15 +387,15 @@ class VariableCommentPicker(ast.NodeVisitor):
class DefinitionFinder(TokenProcessor):
def __init__(self, lines):
- # type: (List[unicode]) -> None
+ # type: (List[str]) -> None
super(DefinitionFinder, self).__init__(lines)
self.decorator = None # type: Token
- self.context = [] # type: List[unicode]
+ self.context = [] # type: List[str]
self.indents = [] # type: List
- self.definitions = {} # type: Dict[unicode, Tuple[unicode, int, int]]
+ self.definitions = {} # type: Dict[str, Tuple[str, int, int]]
def add_definition(self, name, entry):
- # type: (unicode, Tuple[unicode, int, int]) -> None
+ # type: (str, Tuple[str, int, int]) -> None
if self.indents and self.indents[-1][0] == 'def' and entry[0] == 'def':
# ignore definition of inner function
pass
@@ -425,7 +424,7 @@ class DefinitionFinder(TokenProcessor):
self.finalize_block()
def parse_definition(self, typ):
- # type: (unicode) -> None
+ # type: (str) -> None
name = self.fetch_token()
self.context.append(name.value)
funcname = '.'.join(self.context)
@@ -465,12 +464,12 @@ class Parser:
"""
def __init__(self, code, encoding='utf-8'):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
self.code = filter_whitespace(code)
self.encoding = encoding
- self.comments = {} # type: Dict[Tuple[unicode, unicode], unicode]
- self.deforders = {} # type: Dict[unicode, int]
- self.definitions = {} # type: Dict[unicode, Tuple[unicode, int, int]]
+ self.comments = {} # type: Dict[Tuple[str, str], str]
+ self.deforders = {} # type: Dict[str, int]
+ self.definitions = {} # type: Dict[str, Tuple[str, int, int]]
def parse(self):
# type: () -> None
diff --git a/sphinx/registry.py b/sphinx/registry.py
index 4ba90d5f7..89e87cdbd 100644
--- a/sphinx/registry.py
+++ b/sphinx/registry.py
@@ -8,7 +8,6 @@
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import traceback
import warnings
@@ -44,7 +43,7 @@ if False:
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.ext.autodoc import Documenter # NOQA
from sphinx.io import SphinxFileInput # NOQA
- from sphinx.util.typing import RoleFunction, TitleGetter, unicode # NOQA
+ from sphinx.util.typing import RoleFunction, TitleGetter # NOQA
logger = logging.getLogger(__name__)
@@ -52,76 +51,76 @@ logger = logging.getLogger(__name__)
# Values are Sphinx version that merge the extension.
EXTENSION_BLACKLIST = {
"sphinxjp.themecore": "1.2"
-} # type: Dict[unicode, unicode]
+}
class SphinxComponentRegistry:
def __init__(self):
# type: () -> None
#: special attrgetter for autodoc; class object -> attrgetter
- self.autodoc_attrgettrs = {} # type: Dict[Type, Callable[[Any, unicode, Any], Any]]
+ self.autodoc_attrgettrs = {} # type: Dict[Type, Callable[[Any, str, Any], Any]]
#: builders; a dict of builder name -> bulider class
- self.builders = {} # type: Dict[unicode, Type[Builder]]
+ self.builders = {} # type: Dict[str, Type[Builder]]
#: autodoc documenters; a dict of documenter name -> documenter class
- self.documenters = {} # type: Dict[unicode, Type[Documenter]]
+ self.documenters = {} # type: Dict[str, Type[Documenter]]
#: css_files; a list of tuple of filename and attributes
- self.css_files = [] # type: List[Tuple[unicode, Dict[unicode, unicode]]]
+ self.css_files = [] # type: List[Tuple[str, Dict[str, str]]]
#: domains; a dict of domain name -> domain class
- self.domains = {} # type: Dict[unicode, Type[Domain]]
+ self.domains = {} # type: Dict[str, Type[Domain]]
#: additional directives for domains
#: a dict of domain name -> dict of directive name -> directive
- self.domain_directives = {} # type: Dict[unicode, Dict[unicode, Any]]
+ self.domain_directives = {} # type: Dict[str, Dict[str, Any]]
#: additional indices for domains
#: a dict of domain name -> list of index class
- self.domain_indices = {} # type: Dict[unicode, List[Type[Index]]]
+ self.domain_indices = {} # type: Dict[str, List[Type[Index]]]
#: additional object types for domains
#: a dict of domain name -> dict of objtype name -> objtype
- self.domain_object_types = {} # type: Dict[unicode, Dict[unicode, ObjType]]
+ self.domain_object_types = {} # type: Dict[str, Dict[str, ObjType]]
#: additional roles for domains
#: a dict of domain name -> dict of role name -> role impl.
- self.domain_roles = {} # type: Dict[unicode, Dict[unicode, Union[RoleFunction, XRefRole]]] # NOQA
+ self.domain_roles = {} # type: Dict[str, Dict[str, Union[RoleFunction, XRefRole]]] # NOQA
#: additional enumerable nodes
#: a dict of node class -> tuple of figtype and title_getter function
- self.enumerable_nodes = {} # type: Dict[Type[nodes.Node], Tuple[unicode, TitleGetter]] # NOQA
+ self.enumerable_nodes = {} # type: Dict[Type[nodes.Node], Tuple[str, TitleGetter]]
#: HTML inline and block math renderers
#: a dict of name -> tuple of visit function and depart function
- self.html_inline_math_renderers = {} # type: Dict[unicode, Tuple[Callable, Callable]] # NOQA
- self.html_block_math_renderers = {} # type: Dict[unicode, Tuple[Callable, Callable]] # NOQA
+ self.html_inline_math_renderers = {} # type: Dict[str, Tuple[Callable, Callable]]
+ self.html_block_math_renderers = {} # type: Dict[str, Tuple[Callable, Callable]]
#: js_files; list of JS paths or URLs
- self.js_files = [] # type: List[Tuple[unicode, Dict[unicode, unicode]]]
+ self.js_files = [] # type: List[Tuple[str, Dict[str, str]]]
#: LaTeX packages; list of package names and its options
- self.latex_packages = [] # type: List[Tuple[unicode, unicode]]
+ self.latex_packages = [] # type: List[Tuple[str, str]]
#: post transforms; list of transforms
self.post_transforms = [] # type: List[Type[Transform]]
#: source paresrs; file type -> parser class
- self.source_parsers = {} # type: Dict[unicode, Type[Parser]]
+ self.source_parsers = {} # type: Dict[str, Type[Parser]]
#: source inputs; file type -> input class
- self.source_inputs = {} # type: Dict[unicode, Type[Input]]
+ self.source_inputs = {} # type: Dict[str, Type[Input]]
#: source suffix: suffix -> file type
- self.source_suffix = {} # type: Dict[unicode, unicode]
+ self.source_suffix = {} # type: Dict[str, str]
#: custom translators; builder name -> translator class
- self.translators = {} # type: Dict[unicode, Type[nodes.NodeVisitor]]
+ self.translators = {} # type: Dict[str, Type[nodes.NodeVisitor]]
#: custom handlers for translators
#: a dict of builder name -> dict of node name -> visitor and departure functions
- self.translation_handlers = {} # type: Dict[unicode, Dict[unicode, Tuple[Callable, Callable]]] # NOQA
+ self.translation_handlers = {} # type: Dict[str, Dict[str, Tuple[Callable, Callable]]]
#: additional transforms; list of transforms
self.transforms = [] # type: List[Type[Transform]]
@@ -137,7 +136,7 @@ class SphinxComponentRegistry:
self.builders[builder.name] = builder
def preload_builder(self, app, name):
- # type: (Sphinx, unicode) -> None
+ # type: (Sphinx, str) -> None
if name is None:
return
@@ -152,7 +151,7 @@ class SphinxComponentRegistry:
self.load_extension(app, entry_point.module_name)
def create_builder(self, app, name):
- # type: (Sphinx, unicode) -> Builder
+ # type: (Sphinx, str) -> Builder
if name not in self.builders:
raise SphinxError(__('Builder name %s not registered') % name)
@@ -166,7 +165,7 @@ class SphinxComponentRegistry:
self.domains[domain.name] = domain
def has_domain(self, domain):
- # type: (unicode) -> bool
+ # type: (str) -> bool
return domain in self.domains
def create_domains(self, env):
@@ -192,7 +191,7 @@ class SphinxComponentRegistry:
def add_directive_to_domain(self, domain, name, obj, has_content=None, argument_spec=None,
override=False, **option_spec):
- # type: (unicode, unicode, Any, bool, Any, bool, Any) -> None
+ # type: (str, str, Any, bool, Any, bool, Any) -> None
logger.debug('[app] adding directive to domain: %r',
(domain, name, obj, has_content, argument_spec, option_spec))
if domain not in self.domains:
@@ -208,7 +207,7 @@ class SphinxComponentRegistry:
directives[name] = obj
def add_role_to_domain(self, domain, name, role, override=False):
- # type: (unicode, unicode, Union[RoleFunction, XRefRole], bool) -> None
+ # type: (str, str, Union[RoleFunction, XRefRole], bool) -> None
logger.debug('[app] adding role to domain: %r', (domain, name, role))
if domain not in self.domains:
raise ExtensionError(__('domain %s not yet registered') % domain)
@@ -219,7 +218,7 @@ class SphinxComponentRegistry:
roles[name] = role
def add_index_to_domain(self, domain, index, override=False):
- # type: (unicode, Type[Index], bool) -> None
+ # type: (str, Type[Index], bool) -> None
logger.debug('[app] adding index to domain: %r', (domain, index))
if domain not in self.domains:
raise ExtensionError(__('domain %s not yet registered') % domain)
@@ -232,7 +231,7 @@ class SphinxComponentRegistry:
def add_object_type(self, directivename, rolename, indextemplate='',
parse_node=None, ref_nodeclass=None, objname='',
doc_field_types=[], override=False):
- # type: (unicode, unicode, unicode, Callable, Type[nodes.TextElement], unicode, List, bool) -> None # NOQA
+ # type: (str, str, str, Callable, Type[nodes.TextElement], str, List, bool) -> None
logger.debug('[app] adding object type: %r',
(directivename, rolename, indextemplate, parse_node,
ref_nodeclass, objname, doc_field_types))
@@ -255,7 +254,7 @@ class SphinxComponentRegistry:
def add_crossref_type(self, directivename, rolename, indextemplate='',
ref_nodeclass=None, objname='', override=False):
- # type: (unicode, unicode, unicode, Type[nodes.TextElement], unicode, bool) -> None
+ # type: (str, str, str, Type[nodes.TextElement], str, bool) -> None
logger.debug('[app] adding crossref type: %r',
(directivename, rolename, indextemplate, ref_nodeclass, objname))
@@ -274,7 +273,7 @@ class SphinxComponentRegistry:
object_types[directivename] = ObjType(objname or directivename, rolename)
def add_source_suffix(self, suffix, filetype, override=False):
- # type: (unicode, unicode, bool) -> None
+ # type: (str, str, bool) -> None
logger.debug('[app] adding source_suffix: %r, %r', suffix, filetype)
if suffix in self.source_suffix and not override:
raise ExtensionError(__('source_suffix %r is already registered') % suffix)
@@ -286,7 +285,7 @@ class SphinxComponentRegistry:
logger.debug('[app] adding search source_parser: %r', args)
if len(args) == 1:
# new sytle arguments: (source_parser)
- suffix = None # type: unicode
+ suffix = None # type: str
parser = args[0] # type: Type[Parser]
else:
# old style arguments: (suffix, source_parser)
@@ -319,18 +318,18 @@ class SphinxComponentRegistry:
self.source_parsers[suffix] = parser
def get_source_parser(self, filetype):
- # type: (unicode) -> Type[Parser]
+ # type: (str) -> Type[Parser]
try:
return self.source_parsers[filetype]
except KeyError:
raise SphinxError(__('Source parser for %s not registered') % filetype)
def get_source_parsers(self):
- # type: () -> Dict[unicode, Type[Parser]]
+ # type: () -> Dict[str, Type[Parser]]
return self.source_parsers
def create_source_parser(self, app, filename):
- # type: (Sphinx, unicode) -> Parser
+ # type: (Sphinx, str) -> Parser
parser_class = self.get_source_parser(filename)
parser = parser_class()
if isinstance(parser, SphinxParser):
@@ -346,7 +345,7 @@ class SphinxComponentRegistry:
self.source_inputs[filetype] = input_class
def get_source_input(self, filetype):
- # type: (unicode) -> Type[Input]
+ # type: (str) -> Type[Input]
try:
return self.source_inputs[filetype]
except KeyError:
@@ -357,7 +356,7 @@ class SphinxComponentRegistry:
raise SphinxError(__('source_input for %s not registered') % filetype)
def add_translator(self, name, translator, override=False):
- # type: (unicode, Type[nodes.NodeVisitor], bool) -> None
+ # type: (str, Type[nodes.NodeVisitor], bool) -> None
logger.debug('[app] Change of translator for the %s builder.' % name)
if name in self.translators and not override:
raise ExtensionError(__('Translator for %r already exists') % name)
@@ -418,35 +417,35 @@ class SphinxComponentRegistry:
return self.post_transforms
def add_documenter(self, objtype, documenter):
- # type: (unicode, Type[Documenter]) -> None
+ # type: (str, Type[Documenter]) -> None
self.documenters[objtype] = documenter
def add_autodoc_attrgetter(self, typ, attrgetter):
- # type: (Type, Callable[[Any, unicode, Any], Any]) -> None
+ # type: (Type, Callable[[Any, str, Any], Any]) -> None
self.autodoc_attrgettrs[typ] = attrgetter
def add_css_files(self, filename, **attributes):
self.css_files.append((filename, attributes))
def add_js_file(self, filename, **attributes):
- # type: (unicode, **unicode) -> None
+ # type: (str, **str) -> None
logger.debug('[app] adding js_file: %r, %r', filename, attributes)
self.js_files.append((filename, attributes))
def add_latex_package(self, name, options):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
logger.debug('[app] adding latex package: %r', name)
self.latex_packages.append((name, options))
def add_enumerable_node(self, node, figtype, title_getter=None, override=False):
- # type: (Type[nodes.Node], unicode, TitleGetter, bool) -> None
+ # type: (Type[nodes.Node], str, TitleGetter, bool) -> None
logger.debug('[app] adding enumerable node: (%r, %r, %r)', node, figtype, title_getter)
if node in self.enumerable_nodes and not override:
raise ExtensionError(__('enumerable_node %r already registered') % node)
self.enumerable_nodes[node] = (figtype, title_getter)
def add_html_math_renderer(self, name, inline_renderers, block_renderers):
- # type: (unicode, Tuple[Callable, Callable], Tuple[Callable, Callable]) -> None
+ # type: (str, Tuple[Callable, Callable], Tuple[Callable, Callable]) -> None
logger.debug('[app] adding html_math_renderer: %s, %r, %r',
name, inline_renderers, block_renderers)
if name in self.html_inline_math_renderers:
@@ -456,7 +455,7 @@ class SphinxComponentRegistry:
self.html_block_math_renderers[name] = block_renderers
def load_extension(self, app, extname):
- # type: (Sphinx, unicode) -> None
+ # type: (Sphinx, str) -> None
"""Load a Sphinx extension."""
if extname in app.extensions: # alread loaded
return
@@ -478,7 +477,7 @@ class SphinxComponentRegistry:
if not hasattr(mod, 'setup'):
logger.warning(__('extension %r has no setup() function; is it really '
'a Sphinx extension module?'), extname)
- metadata = {} # type: Dict[unicode, Any]
+ metadata = {} # type: Dict[str, Any]
else:
try:
metadata = mod.setup(app)
@@ -501,7 +500,7 @@ class SphinxComponentRegistry:
app.extensions[extname] = Extension(extname, mod, **metadata)
def get_envversion(self, app):
- # type: (Sphinx) -> Dict[unicode, unicode]
+ # type: (Sphinx) -> Dict[str, str]
from sphinx.environment import ENV_VERSION
envversion = {ext.name: ext.metadata['env_version'] for ext in app.extensions.values()
if ext.metadata.get('env_version')}
@@ -525,7 +524,7 @@ def merge_source_suffix(app, config):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.connect('config-inited', merge_source_suffix)
return {
diff --git a/sphinx/roles.py b/sphinx/roles.py
index 9f66478dc..59f5b297e 100644
--- a/sphinx/roles.py
+++ b/sphinx/roles.py
@@ -26,7 +26,7 @@ if False:
from docutils.parsers.rst.states import Inliner # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import RoleFunction, unicode # NOQA
+ from sphinx.util.typing import RoleFunction # NOQA
generic_docroles = {
@@ -84,7 +84,7 @@ class XRefRole:
self.innernodeclass = innernodeclass
def _fix_parens(self, env, has_explicit_title, title, target):
- # type: (BuildEnvironment, bool, unicode, unicode) -> Tuple[unicode, unicode]
+ # type: (BuildEnvironment, bool, str, str) -> Tuple[str, str]
if not has_explicit_title:
if title.endswith('()'):
# remove parentheses
@@ -99,7 +99,7 @@ class XRefRole:
def __call__(self, typ, rawtext, text, lineno, inliner,
options={}, content=[]):
- # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
+ # type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
env = inliner.document.settings.env
if not typ:
typ = env.temp_data.get('default_role')
@@ -110,7 +110,7 @@ class XRefRole:
else:
typ = typ.lower()
if ':' not in typ:
- domain, role = '', typ # type: unicode, unicode
+ domain, role = '', typ
classes = ['xref', role]
else:
domain, role = typ.split(':', 1)
@@ -150,7 +150,7 @@ class XRefRole:
# methods that can be overwritten
def process_link(self, env, refnode, has_explicit_title, title, target):
- # type: (BuildEnvironment, nodes.Element, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
+ # type: (BuildEnvironment, nodes.Element, bool, str, str) -> Tuple[str, str]
"""Called after parsing title and target text, and creating the
reference node (given in *refnode*). This method can alter the
reference node and must return a new (or the same) ``(title, target)``
@@ -170,7 +170,7 @@ class XRefRole:
class AnyXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
- # type: (BuildEnvironment, nodes.Element, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
+ # type: (BuildEnvironment, nodes.Element, bool, str, str) -> Tuple[str, str]
result = super(AnyXRefRole, self).process_link(env, refnode, has_explicit_title,
title, target)
# add all possible context info (i.e. std:program, py:module etc.)
@@ -179,7 +179,7 @@ class AnyXRefRole(XRefRole):
def indexmarkup_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
- # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
+ # type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
"""Role for PEP/RFC references that generate an index entry."""
env = inliner.document.settings.env
if not typ:
@@ -199,7 +199,7 @@ def indexmarkup_role(typ, rawtext, text, lineno, inliner, options={}, content=[]
indexnode['entries'] = [
('single', _('Python Enhancement Proposals; PEP %s') % target,
targetid, '', None)]
- anchor = '' # type: unicode
+ anchor = ''
anchorindex = target.find('#')
if anchorindex > 0:
target, anchor = target[:anchorindex], target[anchorindex:]
@@ -248,7 +248,7 @@ _amp_re = re.compile(r'(?<!&)&(?![&\s])')
def menusel_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
- # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
+ # type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
env = inliner.document.settings.env
if not typ:
assert env.temp_data['default_role']
@@ -287,7 +287,7 @@ parens_re = re.compile(r'(\\*{|\\*})')
def emph_literal_role(typ, rawtext, text, lineno, inliner,
options={}, content=[]):
- # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
+ # type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
env = inliner.document.settings.env
if not typ:
assert env.temp_data['default_role']
@@ -339,20 +339,20 @@ _abbr_re = re.compile(r'\((.*)\)$', re.S)
def abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
- # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
+ # type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
text = utils.unescape(text)
m = _abbr_re.search(text)
if m is None:
- return [addnodes.abbreviation(text, text, **options)], []
+ return [nodes.abbreviation(text, text, **options)], []
abbr = text[:m.start()].strip()
expl = m.group(1)
options = options.copy()
options['explanation'] = expl
- return [addnodes.abbreviation(abbr, abbr, **options)], []
+ return [nodes.abbreviation(abbr, abbr, **options)], []
def index_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
- # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
+ # type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
# create new reference target
env = inliner.document.settings.env
targetid = 'index-%s' % env.new_serialno('index')
@@ -398,7 +398,7 @@ specific_docroles = {
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
from docutils.parsers.rst import roles
for rolename, nodeclass in generic_docroles.items():
diff --git a/sphinx/search/__init__.py b/sphinx/search/__init__.py
index 6e852682e..7d8156bea 100644
--- a/sphinx/search/__init__.py
+++ b/sphinx/search/__init__.py
@@ -29,7 +29,6 @@ if False:
from typing import Any, Dict, IO, Iterable, List, Tuple, Type, Set # NOQA
from docutils import nodes # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
class SearchLanguage:
@@ -54,10 +53,10 @@ class SearchLanguage:
This class is used to preprocess search word which Sphinx HTML readers
type, before searching index. Default implementation does nothing.
"""
- lang = None # type: unicode
- language_name = None # type: unicode
- stopwords = set() # type: Set[unicode]
- js_stemmer_rawcode = None # type: unicode
+ lang = None # type: str
+ language_name = None # type: str
+ stopwords = set() # type: Set[str]
+ js_stemmer_rawcode = None # type: str
js_stemmer_code = """
/**
* Dummy stemmer for languages without stemming rules.
@@ -67,7 +66,7 @@ var Stemmer = function() {
return w;
}
}
-""" # type: unicode
+"""
_word_re = re.compile(r'(?u)\w+')
@@ -83,7 +82,7 @@ var Stemmer = function() {
"""
def split(self, input):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
"""
This method splits a sentence into words. Default splitter splits input
at white spaces, which should be enough for most languages except CJK
@@ -92,7 +91,7 @@ var Stemmer = function() {
return self._word_re.findall(input)
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""
This method implements stemming algorithm of the Python version.
@@ -106,7 +105,7 @@ var Stemmer = function() {
return word
def word_filter(self, word):
- # type: (unicode) -> bool
+ # type: (str) -> bool
"""
Return true if the target word should be registered in the search index.
This method is called after stemming.
@@ -124,13 +123,13 @@ from sphinx.search.en import SearchEnglish
def parse_stop_word(source):
- # type: (unicode) -> Set[unicode]
+ # type: (str) -> Set[str]
"""
parse snowball style word list like this:
* http://snowball.tartarus.org/algorithms/finnish/stop.txt
"""
- result = set() # type: Set[unicode]
+ result = set() # type: Set[str]
for line in source.splitlines():
line = line.split('|')[0] # remove comment
result.update(line.split())
@@ -156,7 +155,7 @@ languages = {
'sv': 'sphinx.search.sv.SearchSwedish',
'tr': 'sphinx.search.tr.SearchTurkish',
'zh': 'sphinx.search.zh.SearchChinese',
-} # type: Dict[unicode, Any]
+} # type: Dict[str, Any]
class _JavaScriptIndex:
@@ -169,7 +168,7 @@ class _JavaScriptIndex:
SUFFIX = ')'
def dumps(self, data):
- # type: (Any) -> unicode
+ # type: (Any) -> str
return self.PREFIX + jsdump.dumps(data) + self.SUFFIX
def loads(self, s):
@@ -200,8 +199,8 @@ class WordCollector(nodes.NodeVisitor):
def __init__(self, document, lang):
# type: (nodes.document, SearchLanguage) -> None
super(WordCollector, self).__init__(document)
- self.found_words = [] # type: List[unicode]
- self.found_title_words = [] # type: List[unicode]
+ self.found_words = [] # type: List[str]
+ self.found_title_words = [] # type: List[str]
self.lang = lang
def is_meta_keywords(self, node, nodetype=None):
@@ -251,24 +250,24 @@ class IndexBuilder:
formats = {
'jsdump': jsdump,
'pickle': pickle
- } # type: Dict[unicode, Any]
+ }
def __init__(self, env, lang, options, scoring):
- # type: (BuildEnvironment, unicode, Dict, unicode) -> None
+ # type: (BuildEnvironment, str, Dict, str) -> None
self.env = env
- self._titles = {} # type: Dict[unicode, unicode]
+ self._titles = {} # type: Dict[str, str]
# docname -> title
- self._filenames = {} # type: Dict[unicode, unicode]
+ self._filenames = {} # type: Dict[str, str]
# docname -> filename
- self._mapping = {} # type: Dict[unicode, Set[unicode]]
+ self._mapping = {} # type: Dict[str, Set[str]]
# stemmed word -> set(docname)
- self._title_mapping = {} # type: Dict[unicode, Set[unicode]]
+ self._title_mapping = {} # type: Dict[str, Set[str]]
# stemmed words in titles -> set(docname)
- self._stem_cache = {} # type: Dict[unicode, unicode]
+ self._stem_cache = {} # type: Dict[str, str]
# word -> stemmed word
- self._objtypes = {} # type: Dict[Tuple[unicode, unicode], int]
+ self._objtypes = {} # type: Dict[Tuple[str, str], int]
# objtype -> index
- self._objnames = {} # type: Dict[int, Tuple[unicode, unicode, unicode]]
+ self._objnames = {} # type: Dict[int, Tuple[str, str, str]]
# objtype index -> (domain, type, objname (localized))
lang_class = languages.get(lang) # type: Type[SearchLanguage]
# add language-specific SearchLanguage instance
@@ -310,7 +309,7 @@ class IndexBuilder:
self._titles = dict(zip(index2fn, frozen['titles']))
def load_terms(mapping):
- # type: (Dict[unicode, Any]) -> Dict[unicode, Set[unicode]]
+ # type: (Dict[str, Any]) -> Dict[str, Set[str]]
rv = {}
for k, v in mapping.items():
if isinstance(v, int):
@@ -331,8 +330,8 @@ class IndexBuilder:
format.dump(self.freeze(), stream)
def get_objects(self, fn2index):
- # type: (Dict[unicode, int]) -> Dict[unicode, Dict[unicode, Tuple[int, int, int, unicode]]] # NOQA
- rv = {} # type: Dict[unicode, Dict[unicode, Tuple[int, int, int, unicode]]]
+ # type: (Dict[str, int]) -> Dict[str, Dict[str, Tuple[int, int, int, str]]]
+ rv = {} # type: Dict[str, Dict[str, Tuple[int, int, int, str]]]
otypes = self._objtypes
onames = self._objnames
for domainname, domain in sorted(self.env.domains.items()):
@@ -359,7 +358,7 @@ class IndexBuilder:
else:
onames[typeindex] = (domainname, type, type)
if anchor == fullname:
- shortanchor = '' # type: unicode
+ shortanchor = ''
elif anchor == type + '-' + fullname:
shortanchor = '-'
else:
@@ -368,8 +367,8 @@ class IndexBuilder:
return rv
def get_terms(self, fn2index):
- # type: (Dict) -> Tuple[Dict[unicode, List[unicode]], Dict[unicode, List[unicode]]]
- rvs = {}, {} # type: Tuple[Dict[unicode, List[unicode]], Dict[unicode, List[unicode]]]
+ # type: (Dict) -> Tuple[Dict[str, List[str]], Dict[str, List[str]]]
+ rvs = {}, {} # type: Tuple[Dict[str, List[str]], Dict[str, List[str]]]
for rv, mapping in zip(rvs, (self._mapping, self._title_mapping)):
for k, v in mapping.items():
if len(v) == 1:
@@ -381,7 +380,7 @@ class IndexBuilder:
return rvs
def freeze(self):
- # type: () -> Dict[unicode, Any]
+ # type: () -> Dict[str, Any]
"""Create a usable data structure for serializing."""
docnames, titles = zip(*sorted(self._titles.items()))
filenames = [self._filenames.get(docname) for docname in docnames]
@@ -397,11 +396,11 @@ class IndexBuilder:
titleterms=title_terms, envversion=self.env.version)
def label(self):
- # type: () -> unicode
+ # type: () -> str
return "%s (code: %s)" % (self.lang.language_name, self.lang.lang)
def prune(self, docnames):
- # type: (Iterable[unicode]) -> None
+ # type: (Iterable[str]) -> None
"""Remove data for all docnames not in the list."""
new_titles = {}
new_filenames = {}
@@ -417,7 +416,7 @@ class IndexBuilder:
wordnames.intersection_update(docnames)
def feed(self, docname, filename, title, doctree):
- # type: (unicode, unicode, unicode, nodes.document) -> None
+ # type: (str, str, str, nodes.document) -> None
"""Feed a doctree to the index."""
self._titles[docname] = title
self._filenames[docname] = filename
@@ -427,7 +426,7 @@ class IndexBuilder:
# memoize self.lang.stem
def stem(word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
try:
return self._stem_cache[word]
except KeyError:
@@ -452,7 +451,7 @@ class IndexBuilder:
self._mapping.setdefault(stemmed_word, set()).add(docname)
def context_for_searchtool(self):
- # type: () -> Dict[unicode, Any]
+ # type: () -> Dict[str, Any]
return {
'search_language_stemming_code': self.lang.js_stemmer_code,
'search_language_stop_words': jsdump.dumps(sorted(self.lang.stopwords)),
@@ -461,7 +460,7 @@ class IndexBuilder:
}
def get_js_stemmer_rawcode(self):
- # type: () -> unicode
+ # type: () -> str
if self.lang.js_stemmer_rawcode:
return path.join(package_dir, 'search', 'non-minified-js',
self.lang.js_stemmer_rawcode)
diff --git a/sphinx/search/da.py b/sphinx/search/da.py
index 3ab763ec5..4a0131957 100644
--- a/sphinx/search/da.py
+++ b/sphinx/search/da.py
@@ -16,7 +16,6 @@ import snowballstemmer
if False:
# For type annotation
from typing import Any # NOQA
- from sphinx.util.typing import unicode # NOQA
danish_stopwords = parse_stop_word(u'''
@@ -135,5 +134,5 @@ class SearchDanish(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('danish')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/de.py b/sphinx/search/de.py
index df758d697..3aead0886 100644
--- a/sphinx/search/de.py
+++ b/sphinx/search/de.py
@@ -16,7 +16,6 @@ import snowballstemmer
if False:
# For type annotation
from typing import Any # NOQA
- from sphinx.util.typing import unicode # NOQA
german_stopwords = parse_stop_word(u'''
@@ -318,5 +317,5 @@ class SearchGerman(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('german')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/en.py b/sphinx/search/en.py
index 384c2bb48..835d8ff9f 100644
--- a/sphinx/search/en.py
+++ b/sphinx/search/en.py
@@ -15,7 +15,6 @@ from sphinx.util.stemmer import get_stemmer
if False:
# For type annotation
from typing import Dict # NOQA
- from sphinx.util.typing import unicode # NOQA
english_stopwords = set(u"""
a and are as at
@@ -227,5 +226,5 @@ class SearchEnglish(SearchLanguage):
self.stemmer = get_stemmer()
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stem(word.lower())
diff --git a/sphinx/search/es.py b/sphinx/search/es.py
index 8e7f44983..b8d291d25 100644
--- a/sphinx/search/es.py
+++ b/sphinx/search/es.py
@@ -16,7 +16,6 @@ import snowballstemmer
if False:
# For type annotation
from typing import Any # NOQA
- from sphinx.util.typing import unicode # NOQA
spanish_stopwords = parse_stop_word(u'''
@@ -378,5 +377,5 @@ class SearchSpanish(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('spanish')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/fi.py b/sphinx/search/fi.py
index b92bb450b..d6f07d477 100644
--- a/sphinx/search/fi.py
+++ b/sphinx/search/fi.py
@@ -16,7 +16,6 @@ import snowballstemmer
if False:
# For type annotation
from typing import Any # NOQA
- from sphinx.util.typing import unicode # NOQA
finnish_stopwords = parse_stop_word(u'''
@@ -128,5 +127,5 @@ class SearchFinnish(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('finnish')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/fr.py b/sphinx/search/fr.py
index 6d23fa685..5479cc07e 100644
--- a/sphinx/search/fr.py
+++ b/sphinx/search/fr.py
@@ -16,7 +16,6 @@ import snowballstemmer
if False:
# For type annotation
from typing import Any # NOQA
- from sphinx.util.typing import unicode # NOQA
french_stopwords = parse_stop_word(u'''
@@ -214,5 +213,5 @@ class SearchFrench(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('french')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/hu.py b/sphinx/search/hu.py
index 77b9d5dac..bd7d05f04 100644
--- a/sphinx/search/hu.py
+++ b/sphinx/search/hu.py
@@ -16,7 +16,6 @@ import snowballstemmer
if False:
# For type annotation
from typing import Any # NOQA
- from sphinx.util.typing import unicode # NOQA
hungarian_stopwords = parse_stop_word(u'''
@@ -242,5 +241,5 @@ class SearchHungarian(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('hungarian')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/it.py b/sphinx/search/it.py
index 800aa6df4..6749863b8 100644
--- a/sphinx/search/it.py
+++ b/sphinx/search/it.py
@@ -16,7 +16,6 @@ import snowballstemmer
if False:
# For type annotation
from typing import Any # NOQA
- from sphinx.util.typing import unicode # NOQA
italian_stopwords = parse_stop_word(u'''
@@ -331,5 +330,5 @@ class SearchItalian(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('italian')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/ja.py b/sphinx/search/ja.py
index 0a1b389c3..7c87ffe44 100644
--- a/sphinx/search/ja.py
+++ b/sphinx/search/ja.py
@@ -42,7 +42,6 @@ from sphinx.util import import_object
if False:
# For type annotation
from typing import Any, Dict, List # NOQA
- from sphinx.util.typing import unicode # NOQA
class BaseSplitter:
@@ -52,7 +51,7 @@ class BaseSplitter:
self.options = options
def split(self, input):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
"""
:param str input:
@@ -75,7 +74,7 @@ class MecabSplitter(BaseSplitter):
self.dict_encode = options.get('dic_enc', 'utf-8')
def split(self, input):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
if native_module:
result = self.native.parse(input)
else:
@@ -151,7 +150,7 @@ class JanomeSplitter(BaseSplitter):
self.tokenizer = janome.tokenizer.Tokenizer(udic=self.user_dict, udic_enc=self.user_dict_enc)
def split(self, input):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
result = u' '.join(token.surface for token in self.tokenizer.tokenize(input))
return result.split(u' ')
@@ -428,7 +427,7 @@ class DefaultSplitter(BaseSplitter):
# ctype_
def ctype_(self, char):
- # type: (unicode) -> unicode
+ # type: (str) -> str
for pattern, value in self.patterns_.items():
if pattern.match(char):
return value
@@ -436,14 +435,14 @@ class DefaultSplitter(BaseSplitter):
# ts_
def ts_(self, dict, key):
- # type: (Dict[unicode, int], unicode) -> int
+ # type: (Dict[str, int], str) -> int
if key in dict:
return dict[key]
return 0
# segment
def split(self, input):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
if not input:
return []
@@ -568,13 +567,13 @@ class SearchJapanese(SearchLanguage):
dotted_path)
def split(self, input):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
return self.splitter.split(input)
def word_filter(self, stemmed_word):
- # type: (unicode) -> bool
+ # type: (str) -> bool
return len(stemmed_word) > 1
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return word
diff --git a/sphinx/search/nl.py b/sphinx/search/nl.py
index 0ed58a485..421b47c1e 100644
--- a/sphinx/search/nl.py
+++ b/sphinx/search/nl.py
@@ -16,7 +16,6 @@ import snowballstemmer
if False:
# For type annotation
from typing import Any # NOQA
- from sphinx.util.typing import unicode # NOQA
dutch_stopwords = parse_stop_word(u'''
@@ -142,5 +141,5 @@ class SearchDutch(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('dutch')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/no.py b/sphinx/search/no.py
index cf689f473..b0512467d 100644
--- a/sphinx/search/no.py
+++ b/sphinx/search/no.py
@@ -16,7 +16,6 @@ import snowballstemmer
if False:
# For type annotation
from typing import Any # NOQA
- from sphinx.util.typing import unicode # NOQA
norwegian_stopwords = parse_stop_word(u'''
@@ -217,5 +216,5 @@ class SearchNorwegian(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('norwegian')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/pt.py b/sphinx/search/pt.py
index 94d72bb49..068d56bc4 100644
--- a/sphinx/search/pt.py
+++ b/sphinx/search/pt.py
@@ -16,7 +16,6 @@ import snowballstemmer
if False:
# For type annotation
from typing import Any # NOQA
- from sphinx.util.typing import unicode # NOQA
portuguese_stopwords = parse_stop_word(u'''
@@ -277,5 +276,5 @@ class SearchPortuguese(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('portuguese')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/ro.py b/sphinx/search/ro.py
index 1ff72c207..ff88807df 100644
--- a/sphinx/search/ro.py
+++ b/sphinx/search/ro.py
@@ -16,7 +16,6 @@ import snowballstemmer
if False:
# For type annotation
from typing import Dict, Set # NOQA
- from sphinx.util.typing import unicode # NOQA
js_stemmer = u"""
var JSX={};(function(j){function l(b,e){var a=function(){};a.prototype=e.prototype;var c=new a;for(var d in b){b[d].prototype=c}}function L(c,b){for(var a in b.prototype)if(b.prototype.hasOwnProperty(a))c.prototype[a]=b.prototype[a]}function h(a,b,d){function c(a,b,c){delete a[b];a[b]=c;return c}Object.defineProperty(a,b,{get:function(){return c(a,b,d())},set:function(d){c(a,b,d)},enumerable:true,configurable:true})}function M(a,b,c){return a[b]=a[b]/c|0}var E=parseInt;var C=parseFloat;function N(a){return a!==a}var A=isFinite;var z=encodeURIComponent;var y=decodeURIComponent;var x=encodeURI;var w=decodeURI;var u=Object.prototype.toString;var D=Object.prototype.hasOwnProperty;function k(){}j.require=function(b){var a=r[b];return a!==undefined?a:null};j.profilerIsRunning=function(){return k.getResults!=null};j.getProfileResults=function(){return(k.getResults||function(){return{}})()};j.postProfileResults=function(a,b){if(k.postResults==null)throw new Error('profiler has not been turned on');return k.postResults(a,b)};j.resetProfileResults=function(){if(k.resetResults==null)throw new Error('profiler has not been turned on');return k.resetResults()};j.DEBUG=false;function t(){};l([t],Error);function a(a,b,c){this.F=a.length;this.K=a;this.L=b;this.I=c;this.H=null;this.P=null};l([a],Object);function n(){};l([n],Object);function g(){var a;var b;var c;this.G={};a=this.E='';b=this._=0;c=this.A=a.length;this.D=0;this.B=b;this.C=c};l([g],n);function v(a,b){a.E=b.E;a._=b._;a.A=b.A;a.D=b.D;a.B=b.B;a.C=b.C};function d(b,d,c,e){var a;if(b._>=b.A){return false}a=b.E.charCodeAt(b._);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._++;return true};function e(a,d,c,e){var b;if(a._>=a.A){return false}b=a.E.charCodeAt(a._);if(b>e||b<c){a._++;return true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._++;return true}return false};function p(a,d,c,e){var b;if(a._<=a.D){return false}b=a.E.charCodeAt(a._-1);if(b>e||b<c){a._--;return 
true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._--;return true}return false};function m(a,b,d){var c;if(a.A-a._<b){return false}if(a.E.slice(c=a._,c+b)!==d){return false}a._+=b;return true};function i(a,b,d){var c;if(a._-a.D<b){return false}if(a.E.slice((c=a._)-b,c)!==d){return false}a._-=b;return true};function q(f,m,p){var b;var d;var e;var n;var g;var k;var l;var i;var h;var c;var a;var j;var o;b=0;d=p;e=f._;n=f.A;g=0;k=0;l=false;while(true){i=b+(d-b>>>1);h=0;c=g<k?g:k;a=m[i];for(j=c;j<a.F;j++){if(e+c===n){h=-1;break}h=f.E.charCodeAt(e+c)-a.K.charCodeAt(j);if(h!==0){break}c++}if(h<0){d=i;k=c}else{b=i;g=c}if(d-b<=1){if(b>0){break}if(d===b){break}if(l){break}l=true}}while(true){a=m[b];if(g>=a.F){f._=e+a.F|0;if(a.H==null){return a.I}o=a.H(a.P);f._=e+a.F|0;if(o){return a.I}}b=a.L;if(b<0){return 0}}return-1};function f(d,m,p){var b;var g;var e;var n;var f;var k;var l;var i;var h;var c;var a;var j;var o;b=0;g=p;e=d._;n=d.D;f=0;k=0;l=false;while(true){i=b+(g-b>>1);h=0;c=f<k?f:k;a=m[i];for(j=a.F-1-c;j>=0;j--){if(e-c===n){h=-1;break}h=d.E.charCodeAt(e-1-c)-a.K.charCodeAt(j);if(h!==0){break}c++}if(h<0){g=i;k=c}else{b=i;f=c}if(g-b<=1){if(b>0){break}if(g===b){break}if(l){break}l=true}}while(true){a=m[b];if(f>=a.F){d._=e-a.F|0;if(a.H==null){return a.I}o=a.H(d);d._=e-a.F|0;if(o){return a.I}}b=a.L;if(b<0){return 0}}return-1};function s(a,b,d,e){var c;c=e.length-(d-b);a.E=a.E.slice(0,b)+e+a.E.slice(d);a.A+=c|0;if(a._>=d){a._+=c|0}else if(a._>b){a._=b}return c|0};function c(a,f){var b;var c;var d;var e;b=false;if((c=a.B)<0||c>(d=a.C)||d>(e=a.A)||e>a.E.length?false:true){s(a,a.B,a.C,f);b=true}return b};g.prototype.J=function(){return false};g.prototype.b=function(b){var a;var c;var d;var e;a=this.G['.'+b];if(a==null){c=this.E=b;d=this._=0;e=this.A=c.length;this.D=0;this.B=d;this.C=e;this.J();a=this.E;this.G['.'+b]=a}return a};g.prototype.stemWord=g.prototype.b;g.prototype.c=function(e){var d;var b;var c;var a;var f;var g;var 
h;d=[];for(b=0;b<e.length;b++){c=e[b];a=this.G['.'+c];if(a==null){f=this.E=c;g=this._=0;h=this.A=f.length;this.D=0;this.B=g;this.C=h;this.J();a=this.E;this.G['.'+c]=a}d.push(a)}return d};g.prototype.stemWords=g.prototype.c;function b(){g.call(this);this.B_standard_suffix_removed=false;this.I_p2=0;this.I_p1=0;this.I_pV=0};l([b],g);b.prototype.M=function(a){this.B_standard_suffix_removed=a.B_standard_suffix_removed;this.I_p2=a.I_p2;this.I_p1=a.I_p1;this.I_pV=a.I_pV;v(this,a)};b.prototype.copy_from=b.prototype.M;b.prototype.W=function(){var i;var a;var j;var e;var f;var g;var h;var k;b:while(true){i=this._;e=true;d:while(e===true){e=false;e:while(true){a=this._;f=true;a:while(f===true){f=false;if(!d(this,b.g_v,97,259)){break a}this.B=this._;g=true;f:while(g===true){g=false;j=this._;h=true;c:while(h===true){h=false;if(!m(this,1,'u')){break c}this.C=this._;if(!d(this,b.g_v,97,259)){break c}if(!c(this,'U')){return false}break f}this._=j;if(!m(this,1,'i')){break a}this.C=this._;if(!d(this,b.g_v,97,259)){break a}if(!c(this,'I')){return false}}this._=a;break e}k=this._=a;if(k>=this.A){break d}this._++}continue b}this._=i;break b}return true};b.prototype.r_prelude=b.prototype.W;function G(a){var j;var e;var k;var f;var g;var h;var i;var l;b:while(true){j=a._;f=true;d:while(f===true){f=false;e:while(true){e=a._;g=true;a:while(g===true){g=false;if(!d(a,b.g_v,97,259)){break a}a.B=a._;h=true;f:while(h===true){h=false;k=a._;i=true;c:while(i===true){i=false;if(!m(a,1,'u')){break c}a.C=a._;if(!d(a,b.g_v,97,259)){break c}if(!c(a,'U')){return false}break f}a._=k;if(!m(a,1,'i')){break a}a.C=a._;if(!d(a,b.g_v,97,259)){break a}if(!c(a,'I')){return false}}a._=e;break e}l=a._=e;if(l>=a.A){break d}a._++}continue b}a._=j;break b}return true};b.prototype.U=function(){var u;var w;var x;var y;var t;var l;var f;var g;var h;var i;var c;var j;var k;var a;var m;var n;var o;var p;var q;var r;var s;var 
v;this.I_pV=s=this.A;this.I_p1=s;this.I_p2=s;u=this._;l=true;a:while(l===true){l=false;f=true;g:while(f===true){f=false;w=this._;g=true;b:while(g===true){g=false;if(!d(this,b.g_v,97,259)){break b}h=true;f:while(h===true){h=false;x=this._;i=true;c:while(i===true){i=false;if(!e(this,b.g_v,97,259)){break c}d:while(true){c=true;e:while(c===true){c=false;if(!d(this,b.g_v,97,259)){break e}break d}if(this._>=this.A){break c}this._++}break f}this._=x;if(!d(this,b.g_v,97,259)){break b}c:while(true){j=true;d:while(j===true){j=false;if(!e(this,b.g_v,97,259)){break d}break c}if(this._>=this.A){break b}this._++}}break g}this._=w;if(!e(this,b.g_v,97,259)){break a}k=true;c:while(k===true){k=false;y=this._;a=true;b:while(a===true){a=false;if(!e(this,b.g_v,97,259)){break b}e:while(true){m=true;d:while(m===true){m=false;if(!d(this,b.g_v,97,259)){break d}break e}if(this._>=this.A){break b}this._++}break c}this._=y;if(!d(this,b.g_v,97,259)){break a}if(this._>=this.A){break a}this._++}}this.I_pV=this._}v=this._=u;t=v;n=true;a:while(n===true){n=false;b:while(true){o=true;c:while(o===true){o=false;if(!d(this,b.g_v,97,259)){break c}break b}if(this._>=this.A){break a}this._++}b:while(true){p=true;c:while(p===true){p=false;if(!e(this,b.g_v,97,259)){break c}break b}if(this._>=this.A){break a}this._++}this.I_p1=this._;b:while(true){q=true;c:while(q===true){q=false;if(!d(this,b.g_v,97,259)){break c}break b}if(this._>=this.A){break a}this._++}c:while(true){r=true;b:while(r===true){r=false;if(!e(this,b.g_v,97,259)){break b}break c}if(this._>=this.A){break a}this._++}this.I_p2=this._}this._=t;return true};b.prototype.r_mark_regions=b.prototype.U;function H(a){var x;var y;var z;var u;var v;var l;var f;var g;var h;var i;var j;var k;var c;var m;var n;var o;var p;var q;var r;var s;var t;var w;a.I_pV=t=a.A;a.I_p1=t;a.I_p2=t;x=a._;l=true;a:while(l===true){l=false;f=true;g:while(f===true){f=false;y=a._;g=true;b:while(g===true){g=false;if(!d(a,b.g_v,97,259)){break 
b}h=true;f:while(h===true){h=false;z=a._;i=true;c:while(i===true){i=false;if(!e(a,b.g_v,97,259)){break c}d:while(true){j=true;e:while(j===true){j=false;if(!d(a,b.g_v,97,259)){break e}break d}if(a._>=a.A){break c}a._++}break f}a._=z;if(!d(a,b.g_v,97,259)){break b}c:while(true){k=true;d:while(k===true){k=false;if(!e(a,b.g_v,97,259)){break d}break c}if(a._>=a.A){break b}a._++}}break g}a._=y;if(!e(a,b.g_v,97,259)){break a}c=true;c:while(c===true){c=false;u=a._;m=true;b:while(m===true){m=false;if(!e(a,b.g_v,97,259)){break b}e:while(true){n=true;d:while(n===true){n=false;if(!d(a,b.g_v,97,259)){break d}break e}if(a._>=a.A){break b}a._++}break c}a._=u;if(!d(a,b.g_v,97,259)){break a}if(a._>=a.A){break a}a._++}}a.I_pV=a._}w=a._=x;v=w;o=true;a:while(o===true){o=false;b:while(true){p=true;c:while(p===true){p=false;if(!d(a,b.g_v,97,259)){break c}break b}if(a._>=a.A){break a}a._++}b:while(true){q=true;c:while(q===true){q=false;if(!e(a,b.g_v,97,259)){break c}break b}if(a._>=a.A){break a}a._++}a.I_p1=a._;b:while(true){r=true;c:while(r===true){r=false;if(!d(a,b.g_v,97,259)){break c}break b}if(a._>=a.A){break a}a._++}c:while(true){s=true;b:while(s===true){s=false;if(!e(a,b.g_v,97,259)){break b}break c}if(a._>=a.A){break a}a._++}a.I_p2=a._}a._=v;return true};b.prototype.V=function(){var a;var e;var d;b:while(true){e=this._;d=true;a:while(d===true){d=false;this.B=this._;a=q(this,b.a_0,3);if(a===0){break a}this.C=this._;switch(a){case 0:break a;case 1:if(!c(this,'i')){return false}break;case 2:if(!c(this,'u')){return false}break;case 3:if(this._>=this.A){break a}this._++;break}continue b}this._=e;break b}return true};b.prototype.r_postlude=b.prototype.V;function I(a){var d;var f;var e;b:while(true){f=a._;e=true;a:while(e===true){e=false;a.B=a._;d=q(a,b.a_0,3);if(d===0){break a}a.C=a._;switch(d){case 0:break a;case 1:if(!c(a,'i')){return false}break;case 2:if(!c(a,'u')){return false}break;case 3:if(a._>=a.A){break a}a._++;break}continue b}a._=f;break b}return 
true};b.prototype.S=function(){return!(this.I_pV<=this._)?false:true};b.prototype.r_RV=b.prototype.S;b.prototype.Q=function(){return!(this.I_p1<=this._)?false:true};b.prototype.r_R1=b.prototype.Q;b.prototype.R=function(){return!(this.I_p2<=this._)?false:true};b.prototype.r_R2=b.prototype.R;b.prototype.Y=function(){var a;var e;var d;var g;this.C=this._;a=f(this,b.a_1,16);if(a===0){return false}this.B=g=this._;if(!(!(this.I_p1<=g)?false:true)){return false}switch(a){case 0:return false;case 1:if(!c(this,'')){return false}break;case 2:if(!c(this,'a')){return false}break;case 3:if(!c(this,'e')){return false}break;case 4:if(!c(this,'i')){return false}break;case 5:e=this.A-this._;d=true;a:while(d===true){d=false;if(!i(this,2,'ab')){break a}return false}this._=this.A-e;if(!c(this,'i')){return false}break;case 6:if(!c(this,'at')){return false}break;case 7:if(!c(this,'aţi')){return false}break}return true};b.prototype.r_step_0=b.prototype.Y;function J(a){var d;var g;var e;var h;a.C=a._;d=f(a,b.a_1,16);if(d===0){return false}a.B=h=a._;if(!(!(a.I_p1<=h)?false:true)){return false}switch(d){case 0:return false;case 1:if(!c(a,'')){return false}break;case 2:if(!c(a,'a')){return false}break;case 3:if(!c(a,'e')){return false}break;case 4:if(!c(a,'i')){return false}break;case 5:g=a.A-a._;e=true;a:while(e===true){e=false;if(!i(a,2,'ab')){break a}return false}a._=a.A-g;if(!c(a,'i')){return false}break;case 6:if(!c(a,'at')){return false}break;case 7:if(!c(a,'aţi')){return false}break}return true};b.prototype.T=function(){var a;var d;var e;var g;d=this.A-(e=this._);this.C=e;a=f(this,b.a_2,46);if(a===0){return false}this.B=g=this._;if(!(!(this.I_p1<=g)?false:true)){return false}switch(a){case 0:return false;case 1:if(!c(this,'abil')){return false}break;case 2:if(!c(this,'ibil')){return false}break;case 3:if(!c(this,'iv')){return false}break;case 4:if(!c(this,'ic')){return false}break;case 5:if(!c(this,'at')){return false}break;case 6:if(!c(this,'it')){return 
false}break}this.B_standard_suffix_removed=true;this._=this.A-d;return true};b.prototype.r_combo_suffix=b.prototype.T;function o(a){var d;var e;var g;var h;e=a.A-(g=a._);a.C=g;d=f(a,b.a_2,46);if(d===0){return false}a.B=h=a._;if(!(!(a.I_p1<=h)?false:true)){return false}switch(d){case 0:return false;case 1:if(!c(a,'abil')){return false}break;case 2:if(!c(a,'ibil')){return false}break;case 3:if(!c(a,'iv')){return false}break;case 4:if(!c(a,'ic')){return false}break;case 5:if(!c(a,'at')){return false}break;case 6:if(!c(a,'it')){return false}break}a.B_standard_suffix_removed=true;a._=a.A-e;return true};b.prototype.X=function(){var a;var e;var d;var g;this.B_standard_suffix_removed=false;a:while(true){e=this.A-this._;d=true;b:while(d===true){d=false;if(!o(this)){break b}continue a}this._=this.A-e;break a}this.C=this._;a=f(this,b.a_3,62);if(a===0){return false}this.B=g=this._;if(!(!(this.I_p2<=g)?false:true)){return false}switch(a){case 0:return false;case 1:if(!c(this,'')){return false}break;case 2:if(!i(this,1,'ţ')){return false}this.B=this._;if(!c(this,'t')){return false}break;case 3:if(!c(this,'ist')){return false}break}this.B_standard_suffix_removed=true;return true};b.prototype.r_standard_suffix=b.prototype.X;function K(a){var d;var g;var e;var h;a.B_standard_suffix_removed=false;a:while(true){g=a.A-a._;e=true;b:while(e===true){e=false;if(!o(a)){break b}continue a}a._=a.A-g;break a}a.C=a._;d=f(a,b.a_3,62);if(d===0){return false}a.B=h=a._;if(!(!(a.I_p2<=h)?false:true)){return false}switch(d){case 0:return false;case 1:if(!c(a,'')){return false}break;case 2:if(!i(a,1,'ţ')){return false}a.B=a._;if(!c(a,'t')){return false}break;case 3:if(!c(a,'ist')){return false}break}a.B_standard_suffix_removed=true;return true};b.prototype.Z=function(){var d;var h;var a;var j;var e;var g;var k;var l;var m;h=this.A-(k=this._);if(k<this.I_pV){return false}l=this._=this.I_pV;a=this.D;this.D=l;m=this._=this.A-h;this.C=m;d=f(this,b.a_4,94);if(d===0){this.D=a;return 
false}this.B=this._;switch(d){case 0:this.D=a;return false;case 1:e=true;a:while(e===true){e=false;j=this.A-this._;g=true;b:while(g===true){g=false;if(!p(this,b.g_v,97,259)){break b}break a}this._=this.A-j;if(!i(this,1,'u')){this.D=a;return false}}if(!c(this,'')){return false}break;case 2:if(!c(this,'')){return false}break}this.D=a;return true};b.prototype.r_verb_suffix=b.prototype.Z;function F(a){var e;var l;var d;var j;var g;var h;var m;var n;var k;l=a.A-(m=a._);if(m<a.I_pV){return false}n=a._=a.I_pV;d=a.D;a.D=n;k=a._=a.A-l;a.C=k;e=f(a,b.a_4,94);if(e===0){a.D=d;return false}a.B=a._;switch(e){case 0:a.D=d;return false;case 1:g=true;a:while(g===true){g=false;j=a.A-a._;h=true;b:while(h===true){h=false;if(!p(a,b.g_v,97,259)){break b}break a}a._=a.A-j;if(!i(a,1,'u')){a.D=d;return false}}if(!c(a,'')){return false}break;case 2:if(!c(a,'')){return false}break}a.D=d;return true};b.prototype.a=function(){var a;var d;this.C=this._;a=f(this,b.a_5,5);if(a===0){return false}this.B=d=this._;if(!(!(this.I_pV<=d)?false:true)){return false}switch(a){case 0:return false;case 1:if(!c(this,'')){return false}break}return true};b.prototype.r_vowel_suffix=b.prototype.a;function B(a){var d;var e;a.C=a._;d=f(a,b.a_5,5);if(d===0){return false}a.B=e=a._;if(!(!(a.I_pV<=e)?false:true)){return false}switch(d){case 0:return false;case 1:if(!c(a,'')){return false}break}return true};b.prototype.J=function(){var n;var j;var k;var l;var m;var o;var p;var b;var c;var d;var e;var f;var a;var g;var h;var i;var r;var s;var t;var u;var v;var w;var x;var y;var q;n=this._;b=true;a:while(b===true){b=false;if(!G(this)){break a}}r=this._=n;j=r;c=true;a:while(c===true){c=false;if(!H(this)){break a}}s=this._=j;this.D=s;u=this._=t=this.A;k=t-u;d=true;a:while(d===true){d=false;if(!J(this)){break a}}w=this._=(v=this.A)-k;l=v-w;e=true;a:while(e===true){e=false;if(!K(this)){break 
a}}y=this._=(x=this.A)-l;m=x-y;f=true;a:while(f===true){f=false;a=true;b:while(a===true){a=false;o=this.A-this._;g=true;c:while(g===true){g=false;if(!this.B_standard_suffix_removed){break c}break b}this._=this.A-o;if(!F(this)){break a}}}this._=this.A-m;h=true;a:while(h===true){h=false;if(!B(this)){break a}}q=this._=this.D;p=q;i=true;a:while(i===true){i=false;if(!I(this)){break a}}this._=p;return true};b.prototype.stem=b.prototype.J;b.prototype.N=function(a){return a instanceof b};b.prototype.equals=b.prototype.N;b.prototype.O=function(){var c;var a;var b;var d;c='RomanianStemmer';a=0;for(b=0;b<c.length;b++){d=c.charCodeAt(b);a=(a<<5)-a+d;a=a&a}return a|0};b.prototype.hashCode=b.prototype.O;b.serialVersionUID=1;h(b,'methodObject',function(){return new b});h(b,'a_0',function(){return[new a('',-1,3),new a('I',0,1),new a('U',0,2)]});h(b,'a_1',function(){return[new a('ea',-1,3),new a('aţia',-1,7),new a('aua',-1,2),new a('iua',-1,4),new a('aţie',-1,7),new a('ele',-1,3),new a('ile',-1,5),new a('iile',6,4),new a('iei',-1,4),new a('atei',-1,6),new a('ii',-1,4),new a('ului',-1,1),new a('ul',-1,1),new a('elor',-1,3),new a('ilor',-1,4),new a('iilor',14,4)]});h(b,'a_2',function(){return[new a('icala',-1,4),new a('iciva',-1,4),new a('ativa',-1,5),new a('itiva',-1,6),new a('icale',-1,4),new a('aţiune',-1,5),new a('iţiune',-1,6),new a('atoare',-1,5),new a('itoare',-1,6),new a('ătoare',-1,5),new a('icitate',-1,4),new a('abilitate',-1,1),new a('ibilitate',-1,2),new a('ivitate',-1,3),new a('icive',-1,4),new a('ative',-1,5),new a('itive',-1,6),new a('icali',-1,4),new a('atori',-1,5),new a('icatori',18,4),new a('itori',-1,6),new a('ători',-1,5),new a('icitati',-1,4),new a('abilitati',-1,1),new a('ivitati',-1,3),new a('icivi',-1,4),new a('ativi',-1,5),new a('itivi',-1,6),new a('icităi',-1,4),new a('abilităi',-1,1),new a('ivităi',-1,3),new a('icităţi',-1,4),new a('abilităţi',-1,1),new a('ivităţi',-1,3),new a('ical',-1,4),new a('ator',-1,5),new a('icator',35,4),new a('itor',-1,6),new 
a('ător',-1,5),new a('iciv',-1,4),new a('ativ',-1,5),new a('itiv',-1,6),new a('icală',-1,4),new a('icivă',-1,4),new a('ativă',-1,5),new a('itivă',-1,6)]});h(b,'a_3',function(){return[new a('ica',-1,1),new a('abila',-1,1),new a('ibila',-1,1),new a('oasa',-1,1),new a('ata',-1,1),new a('ita',-1,1),new a('anta',-1,1),new a('ista',-1,3),new a('uta',-1,1),new a('iva',-1,1),new a('ic',-1,1),new a('ice',-1,1),new a('abile',-1,1),new a('ibile',-1,1),new a('isme',-1,3),new a('iune',-1,2),new a('oase',-1,1),new a('ate',-1,1),new a('itate',17,1),new a('ite',-1,1),new a('ante',-1,1),new a('iste',-1,3),new a('ute',-1,1),new a('ive',-1,1),new a('ici',-1,1),new a('abili',-1,1),new a('ibili',-1,1),new a('iuni',-1,2),new a('atori',-1,1),new a('osi',-1,1),new a('ati',-1,1),new a('itati',30,1),new a('iti',-1,1),new a('anti',-1,1),new a('isti',-1,3),new a('uti',-1,1),new a('işti',-1,3),new a('ivi',-1,1),new a('ităi',-1,1),new a('oşi',-1,1),new a('ităţi',-1,1),new a('abil',-1,1),new a('ibil',-1,1),new a('ism',-1,3),new a('ator',-1,1),new a('os',-1,1),new a('at',-1,1),new a('it',-1,1),new a('ant',-1,1),new a('ist',-1,3),new a('ut',-1,1),new a('iv',-1,1),new a('ică',-1,1),new a('abilă',-1,1),new a('ibilă',-1,1),new a('oasă',-1,1),new a('ată',-1,1),new a('ită',-1,1),new a('antă',-1,1),new a('istă',-1,3),new a('ută',-1,1),new a('ivă',-1,1)]});h(b,'a_4',function(){return[new a('ea',-1,1),new a('ia',-1,1),new a('esc',-1,1),new a('ăsc',-1,1),new a('ind',-1,1),new a('ând',-1,1),new a('are',-1,1),new a('ere',-1,1),new a('ire',-1,1),new a('âre',-1,1),new a('se',-1,2),new a('ase',10,1),new a('sese',10,2),new a('ise',10,1),new a('use',10,1),new a('âse',10,1),new a('eşte',-1,1),new a('ăşte',-1,1),new a('eze',-1,1),new a('ai',-1,1),new a('eai',19,1),new a('iai',19,1),new a('sei',-1,2),new a('eşti',-1,1),new a('ăşti',-1,1),new a('ui',-1,1),new a('ezi',-1,1),new a('âi',-1,1),new a('aşi',-1,1),new a('seşi',-1,2),new a('aseşi',29,1),new a('seseşi',29,2),new a('iseşi',29,1),new a('useşi',29,1),new 
a('âseşi',29,1),new a('işi',-1,1),new a('uşi',-1,1),new a('âşi',-1,1),new a('aţi',-1,2),new a('eaţi',38,1),new a('iaţi',38,1),new a('eţi',-1,2),new a('iţi',-1,2),new a('âţi',-1,2),new a('arăţi',-1,1),new a('serăţi',-1,2),new a('aserăţi',45,1),new a('seserăţi',45,2),new a('iserăţi',45,1),new a('userăţi',45,1),new a('âserăţi',45,1),new a('irăţi',-1,1),new a('urăţi',-1,1),new a('ârăţi',-1,1),new a('am',-1,1),new a('eam',54,1),new a('iam',54,1),new a('em',-1,2),new a('asem',57,1),new a('sesem',57,2),new a('isem',57,1),new a('usem',57,1),new a('âsem',57,1),new a('im',-1,2),new a('âm',-1,2),new a('ăm',-1,2),new a('arăm',65,1),new a('serăm',65,2),new a('aserăm',67,1),new a('seserăm',67,2),new a('iserăm',67,1),new a('userăm',67,1),new a('âserăm',67,1),new a('irăm',65,1),new a('urăm',65,1),new a('ârăm',65,1),new a('au',-1,1),new a('eau',76,1),new a('iau',76,1),new a('indu',-1,1),new a('ându',-1,1),new a('ez',-1,1),new a('ească',-1,1),new a('ară',-1,1),new a('seră',-1,2),new a('aseră',84,1),new a('seseră',84,2),new a('iseră',84,1),new a('useră',84,1),new a('âseră',84,1),new a('iră',-1,1),new a('ură',-1,1),new a('âră',-1,1),new a('ează',-1,1)]});h(b,'a_5',function(){return[new a('a',-1,1),new a('e',-1,1),new a('ie',1,1),new a('i',-1,1),new a('ă',-1,1)]});h(b,'g_v',function(){return[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,2,32,0,0,4]});var r={'src/stemmer.jsx':{Stemmer:n},'src/romanian-stemmer.jsx':{RomanianStemmer:b}}}(JSX))
@@ -29,12 +28,12 @@ class SearchRomanian(SearchLanguage):
language_name = 'Romanian'
js_stemmer_rawcode = 'romanian-stemmer.js'
js_stemmer_code = js_stemmer
- stopwords = set() # type: Set[unicode]
+ stopwords = set() # type: Set[str]
def init(self, options):
# type: (Dict) -> None
self.stemmer = snowballstemmer.stemmer('romanian')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/ru.py b/sphinx/search/ru.py
index 62c178544..e3a9db7ce 100644
--- a/sphinx/search/ru.py
+++ b/sphinx/search/ru.py
@@ -16,7 +16,6 @@ import snowballstemmer
if False:
# For type annotation
from typing import Any # NOQA
- from sphinx.util.typing import unicode # NOQA
russian_stopwords = parse_stop_word(u'''
@@ -266,5 +265,5 @@ class SearchRussian(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('russian')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/sv.py b/sphinx/search/sv.py
index 06f69269f..80911beb6 100644
--- a/sphinx/search/sv.py
+++ b/sphinx/search/sv.py
@@ -16,7 +16,6 @@ import snowballstemmer
if False:
# For type annotation
from typing import Any
- from sphinx.util.typing import unicode # NOQA
swedish_stopwords = parse_stop_word(u'''
| source: http://snowball.tartarus.org/algorithms/swedish/stop.txt
@@ -154,5 +153,5 @@ class SearchSwedish(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('swedish')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/tr.py b/sphinx/search/tr.py
index 8a182ff18..3574f1e20 100644
--- a/sphinx/search/tr.py
+++ b/sphinx/search/tr.py
@@ -16,7 +16,6 @@ import snowballstemmer
if False:
# For type annotation
from typing import Dict, Set # NOQA
- from sphinx.util.typing import unicode # NOQA
js_stemmer = u"""
var JSX={};(function(q){function r(b,e){var a=function(){};a.prototype=e.prototype;var c=new a;for(var d in b){b[d].prototype=c}}function Q(c,b){for(var a in b.prototype)if(b.prototype.hasOwnProperty(a))c.prototype[a]=b.prototype[a]}function j(a,b,d){function c(a,b,c){delete a[b];a[b]=c;return c}Object.defineProperty(a,b,{get:function(){return c(a,b,d())},set:function(d){c(a,b,d)},enumerable:true,configurable:true})}function R(a,b,c){return a[b]=a[b]/c|0}var M=parseInt;var K=parseFloat;function P(a){return a!==a}var A=isFinite;var G=encodeURIComponent;var F=decodeURIComponent;var E=encodeURI;var D=decodeURI;var C=Object.prototype.toString;var H=Object.prototype.hasOwnProperty;function p(){}q.require=function(b){var a=y[b];return a!==undefined?a:null};q.profilerIsRunning=function(){return p.getResults!=null};q.getProfileResults=function(){return(p.getResults||function(){return{}})()};q.postProfileResults=function(a,b){if(p.postResults==null)throw new Error('profiler has not been turned on');return p.postResults(a,b)};q.resetProfileResults=function(){if(p.resetResults==null)throw new Error('profiler has not been turned on');return p.resetResults()};q.DEBUG=false;function I(){};r([I],Error);function d(a,b,c){this.G=a.length;this.A_=a;this.D_=b;this.J=c;this.I=null;this.E_=null};r([d],Object);function u(){};r([u],Object);function m(){var a;var b;var c;this.F={};a=this.E='';b=this._=0;c=this.A=a.length;this.D=0;this.B=b;this.C=c};r([m],u);function B(a,b){a.E=b.E;a._=b._;a.A=b.A;a.D=b.D;a.B=b.B;a.C=b.C};function v(b,d,c,e){var a;if(b._>=b.A){return false}a=b.E.charCodeAt(b._);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._++;return true};function f(b,d,c,e){var a;if(b._<=b.D){return false}a=b.E.charCodeAt(b._-1);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._--;return true};function t(a,d,c,e){var b;if(a._<=a.D){return false}b=a.E.charCodeAt(a._-1);if(b>e||b<c){a._--;return 
true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._--;return true}return false};function s(a,b,d){var c;if(a.A-a._<b){return false}if(a.E.slice(c=a._,c+b)!==d){return false}a._+=b;return true};function g(a,b,d){var c;if(a._-a.D<b){return false}if(a.E.slice((c=a._)-b,c)!==d){return false}a._-=b;return true};function b(d,m,p){var b;var g;var e;var n;var f;var k;var l;var i;var h;var c;var a;var j;var o;b=0;g=p;e=d._;n=d.D;f=0;k=0;l=false;while(true){i=b+(g-b>>1);h=0;c=f<k?f:k;a=m[i];for(j=a.G-1-c;j>=0;j--){if(e-c===n){h=-1;break}h=d.E.charCodeAt(e-1-c)-a.A_.charCodeAt(j);if(h!==0){break}c++}if(h<0){g=i;k=c}else{b=i;f=c}if(g-b<=1){if(b>0){break}if(g===b){break}if(l){break}l=true}}while(true){a=m[b];if(f>=a.G){d._=e-a.G|0;if(a.I==null){return a.J}o=a.I(d);d._=e-a.G|0;if(o){return a.J}}b=a.D_;if(b<0){return 0}}return-1};function n(a,b,d,e){var c;c=e.length-(d-b);a.E=a.E.slice(0,b)+e+a.E.slice(d);a.A+=c|0;if(a._>=d){a._+=c|0}else if(a._>b){a._=b}return c|0};function e(a,f){var b;var c;var d;var e;b=false;if((c=a.B)<0||c>(d=a.C)||d>(e=a.A)||e>a.E.length?false:true){n(a,a.B,a.C,f);b=true}return b};m.prototype.H=function(){return false};m.prototype.B_=function(b){var a;var c;var d;var e;a=this.F['.'+b];if(a==null){c=this.E=b;d=this._=0;e=this.A=c.length;this.D=0;this.B=d;this.C=e;this.H();a=this.E;this.F['.'+b]=a}return a};m.prototype.stemWord=m.prototype.B_;m.prototype.C_=function(e){var d;var b;var c;var a;var f;var g;var h;d=[];for(b=0;b<e.length;b++){c=e[b];a=this.F['.'+c];if(a==null){f=this.E=c;g=this._=0;h=this.A=f.length;this.D=0;this.B=g;this.C=h;this.H();a=this.E;this.F['.'+c]=a}d.push(a)}return d};m.prototype.stemWords=m.prototype.C_;function a(){m.call(this);this.B_continue_stemming_noun_suffixes=false;this.I_strlen=0};r([a],m);a.prototype.K=function(a){this.B_continue_stemming_noun_suffixes=a.B_continue_stemming_noun_suffixes;this.I_strlen=a.I_strlen;B(this,a)};a.prototype.copy_from=a.prototype.K;a.prototype.O=function(){var E;var q;var b;var e;var h;var i;var j;var k;var 
l;var m;var n;var o;var p;var c;var r;var s;var t;var u;var d;var v;var w;var x;var y;var z;var A;var B;var C;var D;var G;var H;var I;var J;var K;var L;var M;var N;var F;E=this.A-this._;b:while(true){q=this.A-this._;o=true;a:while(o===true){o=false;if(!f(this,a.g_vowel,97,305)){break a}this._=this.A-q;break b}G=this._=this.A-q;if(G<=this.D){return false}this._--}p=true;a:while(p===true){p=false;b=this.A-this._;c=true;b:while(c===true){c=false;if(!g(this,1,'a')){break b}c:while(true){e=this.A-this._;r=true;d:while(r===true){r=false;if(!f(this,a.g_vowel1,97,305)){break d}this._=this.A-e;break c}H=this._=this.A-e;if(H<=this.D){break b}this._--}break a}this._=this.A-b;s=true;b:while(s===true){s=false;if(!g(this,1,'e')){break b}c:while(true){h=this.A-this._;t=true;d:while(t===true){t=false;if(!f(this,a.g_vowel2,101,252)){break d}this._=this.A-h;break c}I=this._=this.A-h;if(I<=this.D){break b}this._--}break a}this._=this.A-b;u=true;b:while(u===true){u=false;if(!g(this,1,'ı')){break b}c:while(true){i=this.A-this._;d=true;d:while(d===true){d=false;if(!f(this,a.g_vowel3,97,305)){break d}this._=this.A-i;break c}J=this._=this.A-i;if(J<=this.D){break b}this._--}break a}this._=this.A-b;v=true;b:while(v===true){v=false;if(!g(this,1,'i')){break b}c:while(true){j=this.A-this._;w=true;d:while(w===true){w=false;if(!f(this,a.g_vowel4,101,105)){break d}this._=this.A-j;break c}K=this._=this.A-j;if(K<=this.D){break b}this._--}break a}this._=this.A-b;x=true;b:while(x===true){x=false;if(!g(this,1,'o')){break b}c:while(true){k=this.A-this._;y=true;d:while(y===true){y=false;if(!f(this,a.g_vowel5,111,117)){break d}this._=this.A-k;break c}L=this._=this.A-k;if(L<=this.D){break b}this._--}break a}this._=this.A-b;z=true;b:while(z===true){z=false;if(!g(this,1,'ö')){break b}c:while(true){l=this.A-this._;A=true;d:while(A===true){A=false;if(!f(this,a.g_vowel6,246,252)){break d}this._=this.A-l;break c}M=this._=this.A-l;if(M<=this.D){break b}this._--}break 
a}this._=this.A-b;B=true;b:while(B===true){B=false;if(!g(this,1,'u')){break b}c:while(true){m=this.A-this._;C=true;d:while(C===true){C=false;if(!f(this,a.g_vowel5,111,117)){break d}this._=this.A-m;break c}N=this._=this.A-m;if(N<=this.D){break b}this._--}break a}this._=this.A-b;if(!g(this,1,'ü')){return false}b:while(true){n=this.A-this._;D=true;c:while(D===true){D=false;if(!f(this,a.g_vowel6,246,252)){break c}this._=this.A-n;break b}F=this._=this.A-n;if(F<=this.D){return false}this._--}}this._=this.A-E;return true};a.prototype.r_check_vowel_harmony=a.prototype.O;function c(b){var F;var r;var c;var e;var h;var i;var j;var k;var l;var m;var n;var o;var p;var q;var d;var s;var t;var u;var v;var w;var x;var y;var z;var A;var B;var C;var D;var E;var H;var I;var J;var K;var L;var M;var N;var O;var G;F=b.A-b._;b:while(true){r=b.A-b._;o=true;a:while(o===true){o=false;if(!f(b,a.g_vowel,97,305)){break a}b._=b.A-r;break b}H=b._=b.A-r;if(H<=b.D){return false}b._--}p=true;a:while(p===true){p=false;c=b.A-b._;q=true;b:while(q===true){q=false;if(!g(b,1,'a')){break b}c:while(true){e=b.A-b._;d=true;d:while(d===true){d=false;if(!f(b,a.g_vowel1,97,305)){break d}b._=b.A-e;break c}I=b._=b.A-e;if(I<=b.D){break b}b._--}break a}b._=b.A-c;s=true;b:while(s===true){s=false;if(!g(b,1,'e')){break b}c:while(true){h=b.A-b._;t=true;d:while(t===true){t=false;if(!f(b,a.g_vowel2,101,252)){break d}b._=b.A-h;break c}J=b._=b.A-h;if(J<=b.D){break b}b._--}break a}b._=b.A-c;u=true;b:while(u===true){u=false;if(!g(b,1,'ı')){break b}c:while(true){i=b.A-b._;v=true;d:while(v===true){v=false;if(!f(b,a.g_vowel3,97,305)){break d}b._=b.A-i;break c}K=b._=b.A-i;if(K<=b.D){break b}b._--}break a}b._=b.A-c;w=true;b:while(w===true){w=false;if(!g(b,1,'i')){break b}c:while(true){j=b.A-b._;x=true;d:while(x===true){x=false;if(!f(b,a.g_vowel4,101,105)){break d}b._=b.A-j;break c}L=b._=b.A-j;if(L<=b.D){break b}b._--}break a}b._=b.A-c;y=true;b:while(y===true){y=false;if(!g(b,1,'o')){break 
b}c:while(true){k=b.A-b._;z=true;d:while(z===true){z=false;if(!f(b,a.g_vowel5,111,117)){break d}b._=b.A-k;break c}M=b._=b.A-k;if(M<=b.D){break b}b._--}break a}b._=b.A-c;A=true;b:while(A===true){A=false;if(!g(b,1,'ö')){break b}c:while(true){l=b.A-b._;B=true;d:while(B===true){B=false;if(!f(b,a.g_vowel6,246,252)){break d}b._=b.A-l;break c}N=b._=b.A-l;if(N<=b.D){break b}b._--}break a}b._=b.A-c;C=true;b:while(C===true){C=false;if(!g(b,1,'u')){break b}c:while(true){m=b.A-b._;D=true;d:while(D===true){D=false;if(!f(b,a.g_vowel5,111,117)){break d}b._=b.A-m;break c}O=b._=b.A-m;if(O<=b.D){break b}b._--}break a}b._=b.A-c;if(!g(b,1,'ü')){return false}b:while(true){n=b.A-b._;E=true;c:while(E===true){E=false;if(!f(b,a.g_vowel6,246,252)){break c}b._=b.A-n;break b}G=b._=b.A-n;if(G<=b.D){return false}b._--}}b._=b.A-F;return true};a.prototype.j=function(){var k;var h;var l;var i;var m;var j;var b;var e;var d;var n;var o;var p;var q;var c;b=true;b:while(b===true){b=false;k=this.A-this._;e=true;a:while(e===true){e=false;h=this.A-this._;if(!g(this,1,'n')){break a}n=this._=this.A-h;if(n<=this.D){break a}this._--;l=this.A-this._;if(!f(this,a.g_vowel,97,305)){break a}this._=this.A-l;break b}p=this._=(o=this.A)-k;i=o-p;d=true;a:while(d===true){d=false;m=this.A-this._;if(!g(this,1,'n')){break a}this._=this.A-m;return false}c=this._=(q=this.A)-i;j=q-c;if(c<=this.D){return false}this._--;if(!f(this,a.g_vowel,97,305)){return false}this._=this.A-j}return true};a.prototype.r_mark_suffix_with_optional_n_consonant=a.prototype.j;function o(b){var i;var m;var l;var j;var n;var k;var c;var e;var d;var o;var p;var q;var r;var h;c=true;b:while(c===true){c=false;i=b.A-b._;e=true;a:while(e===true){e=false;m=b.A-b._;if(!g(b,1,'n')){break a}o=b._=b.A-m;if(o<=b.D){break a}b._--;l=b.A-b._;if(!f(b,a.g_vowel,97,305)){break a}b._=b.A-l;break b}q=b._=(p=b.A)-i;j=p-q;d=true;a:while(d===true){d=false;n=b.A-b._;if(!g(b,1,'n')){break a}b._=b.A-n;return false}h=b._=(r=b.A)-j;k=r-h;if(h<=b.D){return 
false}b._--;if(!f(b,a.g_vowel,97,305)){return false}b._=b.A-k}return true};a.prototype.k=function(){var k;var h;var l;var i;var m;var j;var b;var e;var d;var n;var o;var p;var q;var c;b=true;b:while(b===true){b=false;k=this.A-this._;e=true;a:while(e===true){e=false;h=this.A-this._;if(!g(this,1,'s')){break a}n=this._=this.A-h;if(n<=this.D){break a}this._--;l=this.A-this._;if(!f(this,a.g_vowel,97,305)){break a}this._=this.A-l;break b}p=this._=(o=this.A)-k;i=o-p;d=true;a:while(d===true){d=false;m=this.A-this._;if(!g(this,1,'s')){break a}this._=this.A-m;return false}c=this._=(q=this.A)-i;j=q-c;if(c<=this.D){return false}this._--;if(!f(this,a.g_vowel,97,305)){return false}this._=this.A-j}return true};a.prototype.r_mark_suffix_with_optional_s_consonant=a.prototype.k;function l(b){var i;var m;var l;var j;var n;var k;var c;var e;var d;var o;var p;var q;var r;var h;c=true;b:while(c===true){c=false;i=b.A-b._;e=true;a:while(e===true){e=false;m=b.A-b._;if(!g(b,1,'s')){break a}o=b._=b.A-m;if(o<=b.D){break a}b._--;l=b.A-b._;if(!f(b,a.g_vowel,97,305)){break a}b._=b.A-l;break b}q=b._=(p=b.A)-i;j=p-q;d=true;a:while(d===true){d=false;n=b.A-b._;if(!g(b,1,'s')){break a}b._=b.A-n;return false}h=b._=(r=b.A)-j;k=r-h;if(h<=b.D){return false}b._--;if(!f(b,a.g_vowel,97,305)){return false}b._=b.A-k}return true};a.prototype.l=function(){var k;var h;var l;var i;var m;var j;var b;var e;var d;var n;var o;var p;var q;var c;b=true;b:while(b===true){b=false;k=this.A-this._;e=true;a:while(e===true){e=false;h=this.A-this._;if(!g(this,1,'y')){break a}n=this._=this.A-h;if(n<=this.D){break a}this._--;l=this.A-this._;if(!f(this,a.g_vowel,97,305)){break a}this._=this.A-l;break b}p=this._=(o=this.A)-k;i=o-p;d=true;a:while(d===true){d=false;m=this.A-this._;if(!g(this,1,'y')){break a}this._=this.A-m;return false}c=this._=(q=this.A)-i;j=q-c;if(c<=this.D){return false}this._--;if(!f(this,a.g_vowel,97,305)){return false}this._=this.A-j}return 
true};a.prototype.r_mark_suffix_with_optional_y_consonant=a.prototype.l;function h(b){var i;var m;var l;var j;var n;var k;var c;var e;var d;var o;var p;var q;var r;var h;c=true;b:while(c===true){c=false;i=b.A-b._;e=true;a:while(e===true){e=false;m=b.A-b._;if(!g(b,1,'y')){break a}o=b._=b.A-m;if(o<=b.D){break a}b._--;l=b.A-b._;if(!f(b,a.g_vowel,97,305)){break a}b._=b.A-l;break b}q=b._=(p=b.A)-i;j=p-q;d=true;a:while(d===true){d=false;n=b.A-b._;if(!g(b,1,'y')){break a}b._=b.A-n;return false}h=b._=(r=b.A)-j;k=r-h;if(h<=b.D){return false}b._--;if(!f(b,a.g_vowel,97,305)){return false}b._=b.A-k}return true};a.prototype.i=function(){var j;var g;var k;var h;var l;var i;var b;var e;var d;var m;var n;var o;var p;var c;b=true;b:while(b===true){b=false;j=this.A-this._;e=true;a:while(e===true){e=false;g=this.A-this._;if(!f(this,a.g_U,105,305)){break a}m=this._=this.A-g;if(m<=this.D){break a}this._--;k=this.A-this._;if(!t(this,a.g_vowel,97,305)){break a}this._=this.A-k;break b}o=this._=(n=this.A)-j;h=n-o;d=true;a:while(d===true){d=false;l=this.A-this._;if(!f(this,a.g_U,105,305)){break a}this._=this.A-l;return false}c=this._=(p=this.A)-h;i=p-c;if(c<=this.D){return false}this._--;if(!t(this,a.g_vowel,97,305)){return false}this._=this.A-i}return true};a.prototype.r_mark_suffix_with_optional_U_vowel=a.prototype.i;function k(b){var h;var l;var k;var i;var m;var j;var c;var e;var d;var n;var o;var p;var q;var g;c=true;b:while(c===true){c=false;h=b.A-b._;e=true;a:while(e===true){e=false;l=b.A-b._;if(!f(b,a.g_U,105,305)){break a}n=b._=b.A-l;if(n<=b.D){break a}b._--;k=b.A-b._;if(!t(b,a.g_vowel,97,305)){break a}b._=b.A-k;break b}p=b._=(o=b.A)-h;i=o-p;d=true;a:while(d===true){d=false;m=b.A-b._;if(!f(b,a.g_U,105,305)){break a}b._=b.A-m;return false}g=b._=(q=b.A)-i;j=q-g;if(g<=b.D){return false}b._--;if(!t(b,a.g_vowel,97,305)){return false}b._=b.A-j}return true};a.prototype.e=function(){return 
b(this,a.a_0,10)===0?false:!k(this)?false:true};a.prototype.r_mark_possessives=a.prototype.e;a.prototype.f=function(){return!c(this)?false:!f(this,a.g_U,105,305)?false:!l(this)?false:true};a.prototype.r_mark_sU=a.prototype.f;a.prototype.W=function(){return b(this,a.a_1,2)===0?false:true};a.prototype.r_mark_lArI=a.prototype.W;a.prototype.o=function(){return!c(this)?false:!f(this,a.g_U,105,305)?false:!h(this)?false:true};a.prototype.r_mark_yU=a.prototype.o;a.prototype.Y=function(){return!c(this)?false:b(this,a.a_2,4)===0?false:true};a.prototype.r_mark_nU=a.prototype.Y;a.prototype.Z=function(){return!c(this)?false:b(this,a.a_3,4)===0?false:!o(this)?false:true};a.prototype.r_mark_nUn=a.prototype.Z;a.prototype.m=function(){return!c(this)?false:b(this,a.a_4,2)===0?false:!h(this)?false:true};a.prototype.r_mark_yA=a.prototype.m;a.prototype.X=function(){return!c(this)?false:b(this,a.a_5,2)===0?false:true};a.prototype.r_mark_nA=a.prototype.X;a.prototype.Q=function(){return!c(this)?false:b(this,a.a_6,4)===0?false:true};a.prototype.r_mark_DA=a.prototype.Q;a.prototype.c=function(){return!c(this)?false:b(this,a.a_7,2)===0?false:true};a.prototype.r_mark_ndA=a.prototype.c;a.prototype.R=function(){return!c(this)?false:b(this,a.a_8,4)===0?false:true};a.prototype.r_mark_DAn=a.prototype.R;a.prototype.d=function(){return!c(this)?false:b(this,a.a_9,2)===0?false:true};a.prototype.r_mark_ndAn=a.prototype.d;a.prototype.s=function(){return!c(this)?false:b(this,a.a_10,2)===0?false:!h(this)?false:true};a.prototype.r_mark_ylA=a.prototype.s;a.prototype.U=function(){return!g(this,2,'ki')?false:true};a.prototype.r_mark_ki=a.prototype.U;a.prototype.b=function(){return!c(this)?false:b(this,a.a_11,2)===0?false:!o(this)?false:true};a.prototype.r_mark_ncA=a.prototype.b;a.prototype.p=function(){return!c(this)?false:b(this,a.a_12,4)===0?false:!h(this)?false:true};a.prototype.r_mark_yUm=a.prototype.p;a.prototype.g=function(){return!c(this)?false:b(this,a.a_13,4)===0?false:true};a.prototype.r_mark_sUn=a.pr
ototype.g;a.prototype.q=function(){return!c(this)?false:b(this,a.a_14,4)===0?false:!h(this)?false:true};a.prototype.r_mark_yUz=a.prototype.q;a.prototype.h=function(){return b(this,a.a_15,4)===0?false:true};a.prototype.r_mark_sUnUz=a.prototype.h;a.prototype.V=function(){return!c(this)?false:b(this,a.a_16,2)===0?false:true};a.prototype.r_mark_lAr=a.prototype.V;a.prototype.a=function(){return!c(this)?false:b(this,a.a_17,4)===0?false:true};a.prototype.r_mark_nUz=a.prototype.a;a.prototype.S=function(){return!c(this)?false:b(this,a.a_18,8)===0?false:true};a.prototype.r_mark_DUr=a.prototype.S;a.prototype.T=function(){return b(this,a.a_19,2)===0?false:true};a.prototype.r_mark_cAsInA=a.prototype.T;a.prototype.n=function(){return!c(this)?false:b(this,a.a_20,32)===0?false:!h(this)?false:true};a.prototype.r_mark_yDU=a.prototype.n;a.prototype.u=function(){return b(this,a.a_21,8)===0?false:!h(this)?false:true};a.prototype.r_mark_ysA=a.prototype.u;a.prototype.t=function(){return!c(this)?false:b(this,a.a_22,4)===0?false:!h(this)?false:true};a.prototype.r_mark_ymUs_=a.prototype.t;a.prototype.r=function(){return!g(this,3,'ken')?false:!h(this)?false:true};a.prototype.r_mark_yken=a.prototype.r;a.prototype.y=function(){var i;var j;var d;var Y;var k;var X;var l;var W;var V;var f;var r;var s;var t;var u;var v;var w;var x;var y;var z;var A;var B;var C;var m;var E;var F;var G;var H;var I;var J;var K;var L;var M;var N;var O;var P;var Q;var R;var S;var T;var U;var p;var o;var D;var n;var q;this.C=this._;this.B_continue_stemming_noun_suffixes=true;r=true;a:while(r===true){r=false;i=this.A-this._;s=true;d:while(s===true){s=false;t=true;b:while(t===true){t=false;j=this.A-this._;u=true;c:while(u===true){u=false;if(!(!c(this)?false:b(this,a.a_22,4)===0?false:!h(this)?false:true)){break c}break b}this._=this.A-j;v=true;c:while(v===true){v=false;if(!(!c(this)?false:b(this,a.a_20,32)===0?false:!h(this)?false:true)){break c}break 
b}this._=this.A-j;w=true;c:while(w===true){w=false;if(!(b(this,a.a_21,8)===0?false:!h(this)?false:true)){break c}break b}this._=this.A-j;if(!(!g(this,3,'ken')?false:!h(this)?false:true)){break d}}break a}this._=this.A-i;x=true;c:while(x===true){x=false;if(!(b(this,a.a_19,2)===0?false:true)){break c}y=true;b:while(y===true){y=false;d=this.A-this._;z=true;d:while(z===true){z=false;if(!(b(this,a.a_15,4)===0?false:true)){break d}break b}this._=this.A-d;A=true;d:while(A===true){A=false;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){break d}break b}this._=this.A-d;B=true;d:while(B===true){B=false;if(!(!c(this)?false:b(this,a.a_12,4)===0?false:!h(this)?false:true)){break d}break b}this._=this.A-d;C=true;d:while(C===true){C=false;if(!(!c(this)?false:b(this,a.a_13,4)===0?false:true)){break d}break b}this._=this.A-d;m=true;d:while(m===true){m=false;if(!(!c(this)?false:b(this,a.a_14,4)===0?false:!h(this)?false:true)){break d}break b}this._=this.A-d}if(!(!c(this)?false:b(this,a.a_22,4)===0?false:!h(this)?false:true)){break c}break a}this._=this.A-i;E=true;c:while(E===true){E=false;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){break c}this.B=this._;if(!e(this,'')){return false}Y=this.A-this._;F=true;d:while(F===true){F=false;this.C=this._;G=true;b:while(G===true){G=false;k=this.A-this._;H=true;e:while(H===true){H=false;if(!(!c(this)?false:b(this,a.a_18,8)===0?false:true)){break e}break b}this._=this.A-k;I=true;e:while(I===true){I=false;if(!(!c(this)?false:b(this,a.a_20,32)===0?false:!h(this)?false:true)){break e}break b}this._=this.A-k;J=true;e:while(J===true){J=false;if(!(b(this,a.a_21,8)===0?false:!h(this)?false:true)){break e}break b}this._=this.A-k;if(!(!c(this)?false:b(this,a.a_22,4)===0?false:!h(this)?false:true)){this._=this.A-Y;break d}}}this.B_continue_stemming_noun_suffixes=false;break a}this._=this.A-i;K=true;b:while(K===true){K=false;if(!(!c(this)?false:b(this,a.a_17,4)===0?false:true)){break 
b}L=true;c:while(L===true){L=false;X=this.A-this._;M=true;d:while(M===true){M=false;if(!(!c(this)?false:b(this,a.a_20,32)===0?false:!h(this)?false:true)){break d}break c}this._=this.A-X;if(!(b(this,a.a_21,8)===0?false:!h(this)?false:true)){break b}}break a}this._=this.A-i;N=true;c:while(N===true){N=false;O=true;b:while(O===true){O=false;l=this.A-this._;P=true;d:while(P===true){P=false;if(!(b(this,a.a_15,4)===0?false:true)){break d}break b}this._=this.A-l;Q=true;d:while(Q===true){Q=false;if(!(!c(this)?false:b(this,a.a_14,4)===0?false:!h(this)?false:true)){break d}break b}this._=this.A-l;R=true;d:while(R===true){R=false;if(!(!c(this)?false:b(this,a.a_13,4)===0?false:true)){break d}break b}this._=this.A-l;if(!(!c(this)?false:b(this,a.a_12,4)===0?false:!h(this)?false:true)){break c}}this.B=this._;if(!e(this,'')){return false}W=this.A-this._;S=true;b:while(S===true){S=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_22,4)===0?false:!h(this)?false:true)){this._=this.A-W;break b}}break a}this._=this.A-i;if(!(!c(this)?false:b(this,a.a_18,8)===0?false:true)){return false}this.B=this._;if(!e(this,'')){return false}V=this.A-this._;T=true;d:while(T===true){T=false;this.C=this._;U=true;b:while(U===true){U=false;f=this.A-this._;p=true;c:while(p===true){p=false;if(!(b(this,a.a_15,4)===0?false:true)){break c}break b}this._=this.A-f;o=true;c:while(o===true){o=false;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){break c}break b}this._=this.A-f;D=true;c:while(D===true){D=false;if(!(!c(this)?false:b(this,a.a_12,4)===0?false:!h(this)?false:true)){break c}break b}this._=this.A-f;n=true;c:while(n===true){n=false;if(!(!c(this)?false:b(this,a.a_13,4)===0?false:true)){break c}break b}this._=this.A-f;q=true;c:while(q===true){q=false;if(!(!c(this)?false:b(this,a.a_14,4)===0?false:!h(this)?false:true)){break c}break b}this._=this.A-f}if(!(!c(this)?false:b(this,a.a_22,4)===0?false:!h(this)?false:true)){this._=this.A-V;break 
d}}}this.B=this._;return!e(this,'')?false:true};a.prototype.r_stem_nominal_verb_suffixes=a.prototype.y;function J(d){var f;var k;var i;var Z;var l;var Y;var m;var X;var W;var j;var s;var t;var u;var v;var w;var x;var y;var z;var A;var B;var C;var n;var E;var F;var G;var H;var I;var J;var K;var L;var M;var N;var O;var P;var Q;var R;var S;var T;var U;var V;var q;var p;var D;var o;var r;d.C=d._;d.B_continue_stemming_noun_suffixes=true;s=true;a:while(s===true){s=false;f=d.A-d._;t=true;d:while(t===true){t=false;u=true;b:while(u===true){u=false;k=d.A-d._;v=true;c:while(v===true){v=false;if(!(!c(d)?false:b(d,a.a_22,4)===0?false:!h(d)?false:true)){break c}break b}d._=d.A-k;w=true;c:while(w===true){w=false;if(!(!c(d)?false:b(d,a.a_20,32)===0?false:!h(d)?false:true)){break c}break b}d._=d.A-k;x=true;c:while(x===true){x=false;if(!(b(d,a.a_21,8)===0?false:!h(d)?false:true)){break c}break b}d._=d.A-k;if(!(!g(d,3,'ken')?false:!h(d)?false:true)){break d}}break a}d._=d.A-f;y=true;c:while(y===true){y=false;if(!(b(d,a.a_19,2)===0?false:true)){break c}z=true;b:while(z===true){z=false;i=d.A-d._;A=true;d:while(A===true){A=false;if(!(b(d,a.a_15,4)===0?false:true)){break d}break b}d._=d.A-i;B=true;d:while(B===true){B=false;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){break d}break b}d._=d.A-i;C=true;d:while(C===true){C=false;if(!(!c(d)?false:b(d,a.a_12,4)===0?false:!h(d)?false:true)){break d}break b}d._=d.A-i;n=true;d:while(n===true){n=false;if(!(!c(d)?false:b(d,a.a_13,4)===0?false:true)){break d}break b}d._=d.A-i;E=true;d:while(E===true){E=false;if(!(!c(d)?false:b(d,a.a_14,4)===0?false:!h(d)?false:true)){break d}break b}d._=d.A-i}if(!(!c(d)?false:b(d,a.a_22,4)===0?false:!h(d)?false:true)){break c}break a}d._=d.A-f;F=true;c:while(F===true){F=false;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){break c}d.B=d._;if(!e(d,'')){return 
false}Z=d.A-d._;G=true;d:while(G===true){G=false;d.C=d._;H=true;b:while(H===true){H=false;l=d.A-d._;I=true;e:while(I===true){I=false;if(!(!c(d)?false:b(d,a.a_18,8)===0?false:true)){break e}break b}d._=d.A-l;J=true;e:while(J===true){J=false;if(!(!c(d)?false:b(d,a.a_20,32)===0?false:!h(d)?false:true)){break e}break b}d._=d.A-l;K=true;e:while(K===true){K=false;if(!(b(d,a.a_21,8)===0?false:!h(d)?false:true)){break e}break b}d._=d.A-l;if(!(!c(d)?false:b(d,a.a_22,4)===0?false:!h(d)?false:true)){d._=d.A-Z;break d}}}d.B_continue_stemming_noun_suffixes=false;break a}d._=d.A-f;L=true;b:while(L===true){L=false;if(!(!c(d)?false:b(d,a.a_17,4)===0?false:true)){break b}M=true;c:while(M===true){M=false;Y=d.A-d._;N=true;d:while(N===true){N=false;if(!(!c(d)?false:b(d,a.a_20,32)===0?false:!h(d)?false:true)){break d}break c}d._=d.A-Y;if(!(b(d,a.a_21,8)===0?false:!h(d)?false:true)){break b}}break a}d._=d.A-f;O=true;c:while(O===true){O=false;P=true;b:while(P===true){P=false;m=d.A-d._;Q=true;d:while(Q===true){Q=false;if(!(b(d,a.a_15,4)===0?false:true)){break d}break b}d._=d.A-m;R=true;d:while(R===true){R=false;if(!(!c(d)?false:b(d,a.a_14,4)===0?false:!h(d)?false:true)){break d}break b}d._=d.A-m;S=true;d:while(S===true){S=false;if(!(!c(d)?false:b(d,a.a_13,4)===0?false:true)){break d}break b}d._=d.A-m;if(!(!c(d)?false:b(d,a.a_12,4)===0?false:!h(d)?false:true)){break c}}d.B=d._;if(!e(d,'')){return false}X=d.A-d._;T=true;b:while(T===true){T=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_22,4)===0?false:!h(d)?false:true)){d._=d.A-X;break b}}break a}d._=d.A-f;if(!(!c(d)?false:b(d,a.a_18,8)===0?false:true)){return false}d.B=d._;if(!e(d,'')){return false}W=d.A-d._;U=true;d:while(U===true){U=false;d.C=d._;V=true;b:while(V===true){V=false;j=d.A-d._;q=true;c:while(q===true){q=false;if(!(b(d,a.a_15,4)===0?false:true)){break c}break b}d._=d.A-j;p=true;c:while(p===true){p=false;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){break c}break 
b}d._=d.A-j;D=true;c:while(D===true){D=false;if(!(!c(d)?false:b(d,a.a_12,4)===0?false:!h(d)?false:true)){break c}break b}d._=d.A-j;o=true;c:while(o===true){o=false;if(!(!c(d)?false:b(d,a.a_13,4)===0?false:true)){break c}break b}d._=d.A-j;r=true;c:while(r===true){r=false;if(!(!c(d)?false:b(d,a.a_14,4)===0?false:!h(d)?false:true)){break c}break b}d._=d.A-j}if(!(!c(d)?false:b(d,a.a_22,4)===0?false:!h(d)?false:true)){d._=d.A-W;break d}}}d.B=d._;return!e(d,'')?false:true};a.prototype.__=function(){var z;var N;var M;var L;var p;var K;var r;var J;var t;var u;var v;var w;var x;var y;var d;var A;var B;var C;var D;var E;var F;var G;var H;var I;var s;var q;var n;var m;var j;var h;this.C=this._;if(!(!g(this,2,'ki')?false:true)){return false}w=true;b:while(w===true){w=false;z=this.A-this._;x=true;c:while(x===true){x=false;if(!(!c(this)?false:b(this,a.a_6,4)===0?false:true)){break c}this.B=this._;if(!e(this,'')){return false}N=this.A-this._;y=true;f:while(y===true){y=false;this.C=this._;d=true;e:while(d===true){d=false;M=this.A-this._;A=true;d:while(A===true){A=false;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){break d}this.B=this._;if(!e(this,'')){return false}L=this.A-this._;B=true;a:while(B===true){B=false;if(!i(this)){this._=this.A-L;break a}}break e}this._=this.A-M;if(!(b(this,a.a_0,10)===0?false:!k(this)?false:true)){this._=this.A-N;break f}this.B=this._;if(!e(this,'')){return false}p=this.A-this._;C=true;a:while(C===true){C=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-p;break a}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-p;break a}}}}break b}this._=this.A-z;D=true;d:while(D===true){D=false;if(!(!c(this)?false:b(this,a.a_3,4)===0?false:!o(this)?false:true)){break d}this.B=this._;if(!e(this,'')){return false}K=this.A-this._;E=true;e:while(E===true){E=false;this.C=this._;F=true;a:while(F===true){F=false;r=this.A-this._;G=true;c:while(G===true){G=false;if(!(b(this,a.a_1,2)===0?false:true)){break 
c}this.B=this._;if(!e(this,'')){return false}break a}this._=this.A-r;H=true;f:while(H===true){H=false;this.C=this._;I=true;g:while(I===true){I=false;J=this.A-this._;s=true;c:while(s===true){s=false;if(!(b(this,a.a_0,10)===0?false:!k(this)?false:true)){break c}break g}this._=this.A-J;if(!(!c(this)?false:!f(this,a.g_U,105,305)?false:!l(this)?false:true)){break f}}this.B=this._;if(!e(this,'')){return false}t=this.A-this._;q=true;c:while(q===true){q=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-t;break c}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-t;break c}}break a}this._=this.A-r;if(!i(this)){this._=this.A-K;break e}}}break b}this._=this.A-z;if(!(!c(this)?false:b(this,a.a_7,2)===0?false:true)){return false}n=true;a:while(n===true){n=false;u=this.A-this._;m=true;c:while(m===true){m=false;if(!(b(this,a.a_1,2)===0?false:true)){break c}this.B=this._;if(!e(this,'')){return false}break a}this._=this.A-u;j=true;d:while(j===true){j=false;if(!(!c(this)?false:!f(this,a.g_U,105,305)?false:!l(this)?false:true)){break d}this.B=this._;if(!e(this,'')){return false}v=this.A-this._;h=true;c:while(h===true){h=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-v;break c}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-v;break c}}break a}this._=this.A-u;if(!i(this)){return false}}}return true};a.prototype.r_stem_suffix_chain_before_ki=a.prototype.__;function i(d){var j;var O;var N;var M;var q;var L;var s;var K;var u;var v;var w;var x;var y;var z;var h;var B;var C;var D;var E;var F;var G;var H;var I;var J;var t;var r;var p;var n;var m;var A;d.C=d._;if(!(!g(d,2,'ki')?false:true)){return false}x=true;b:while(x===true){x=false;j=d.A-d._;y=true;c:while(y===true){y=false;if(!(!c(d)?false:b(d,a.a_6,4)===0?false:true)){break c}d.B=d._;if(!e(d,'')){return 
false}O=d.A-d._;z=true;f:while(z===true){z=false;d.C=d._;h=true;e:while(h===true){h=false;N=d.A-d._;B=true;d:while(B===true){B=false;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){break d}d.B=d._;if(!e(d,'')){return false}M=d.A-d._;C=true;a:while(C===true){C=false;if(!i(d)){d._=d.A-M;break a}}break e}d._=d.A-N;if(!(b(d,a.a_0,10)===0?false:!k(d)?false:true)){d._=d.A-O;break f}d.B=d._;if(!e(d,'')){return false}q=d.A-d._;D=true;a:while(D===true){D=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-q;break a}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-q;break a}}}}break b}d._=d.A-j;E=true;d:while(E===true){E=false;if(!(!c(d)?false:b(d,a.a_3,4)===0?false:!o(d)?false:true)){break d}d.B=d._;if(!e(d,'')){return false}L=d.A-d._;F=true;e:while(F===true){F=false;d.C=d._;G=true;a:while(G===true){G=false;s=d.A-d._;H=true;c:while(H===true){H=false;if(!(b(d,a.a_1,2)===0?false:true)){break c}d.B=d._;if(!e(d,'')){return false}break a}d._=d.A-s;I=true;f:while(I===true){I=false;d.C=d._;J=true;g:while(J===true){J=false;K=d.A-d._;t=true;c:while(t===true){t=false;if(!(b(d,a.a_0,10)===0?false:!k(d)?false:true)){break c}break g}d._=d.A-K;if(!(!c(d)?false:!f(d,a.g_U,105,305)?false:!l(d)?false:true)){break f}}d.B=d._;if(!e(d,'')){return false}u=d.A-d._;r=true;c:while(r===true){r=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-u;break c}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-u;break c}}break a}d._=d.A-s;if(!i(d)){d._=d.A-L;break e}}}break b}d._=d.A-j;if(!(!c(d)?false:b(d,a.a_7,2)===0?false:true)){return false}p=true;a:while(p===true){p=false;v=d.A-d._;n=true;c:while(n===true){n=false;if(!(b(d,a.a_1,2)===0?false:true)){break c}d.B=d._;if(!e(d,'')){return false}break a}d._=d.A-v;m=true;d:while(m===true){m=false;if(!(!c(d)?false:!f(d,a.g_U,105,305)?false:!l(d)?false:true)){break d}d.B=d._;if(!e(d,'')){return false}w=d.A-d._;A=true;c:while(A===true){A=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-w;break 
c}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-w;break c}}break a}d._=d.A-v;if(!i(d)){return false}}}return true};a.prototype.z=function(){var d;var ar;var S;var j;var av;var m;var aq;var n;var p;var ax;var ay;var q;var ap;var r;var s;var as;var at;var au;var t;var aw;var u;var v;var w;var aA;var aB;var ao;var x;var y;var z;var A;var B;var C;var D;var E;var F;var G;var H;var I;var J;var K;var L;var M;var N;var O;var P;var Q;var R;var g;var T;var U;var V;var W;var X;var Y;var Z;var _;var $;var a0;var a1;var a2;var a3;var a4;var a5;var a6;var a7;var a8;var a9;var aa;var ab;var ac;var ad;var ae;var af;var ag;var ah;var ai;var aj;var ak;var al;var am;var an;var aC;var az;y=true;a:while(y===true){y=false;d=this.A-this._;z=true;b:while(z===true){z=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){break b}this.B=this._;if(!e(this,'')){return false}ar=this.A-this._;A=true;c:while(A===true){A=false;if(!i(this)){this._=this.A-ar;break c}}break a}this._=this.A-d;B=true;g:while(B===true){B=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_11,2)===0?false:!o(this)?false:true)){break g}this.B=this._;if(!e(this,'')){return false}S=this.A-this._;C=true;b:while(C===true){C=false;D=true;c:while(D===true){D=false;j=this.A-this._;E=true;d:while(E===true){E=false;this.C=this._;if(!(b(this,a.a_1,2)===0?false:true)){break d}this.B=this._;if(!e(this,'')){return false}break c}this._=this.A-j;F=true;f:while(F===true){F=false;this.C=this._;G=true;d:while(G===true){G=false;av=this.A-this._;H=true;e:while(H===true){H=false;if(!(b(this,a.a_0,10)===0?false:!k(this)?false:true)){break e}break d}this._=this.A-av;if(!(!c(this)?false:!f(this,a.g_U,105,305)?false:!l(this)?false:true)){break f}}this.B=this._;if(!e(this,'')){return false}m=this.A-this._;I=true;d:while(I===true){I=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-m;break d}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-m;break d}}break 
c}aC=this._=this.A-j;this.C=aC;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-S;break b}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-S;break b}}}break a}this._=this.A-d;J=true;b:while(J===true){J=false;this.C=this._;K=true;d:while(K===true){K=false;aq=this.A-this._;L=true;c:while(L===true){L=false;if(!(!c(this)?false:b(this,a.a_7,2)===0?false:true)){break c}break d}this._=this.A-aq;if(!(!c(this)?false:b(this,a.a_5,2)===0?false:true)){break b}}M=true;c:while(M===true){M=false;n=this.A-this._;N=true;d:while(N===true){N=false;if(!(b(this,a.a_1,2)===0?false:true)){break d}this.B=this._;if(!e(this,'')){return false}break c}this._=this.A-n;O=true;e:while(O===true){O=false;if(!(!c(this)?false:!f(this,a.g_U,105,305)?false:!l(this)?false:true)){break e}this.B=this._;if(!e(this,'')){return false}p=this.A-this._;P=true;d:while(P===true){P=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-p;break d}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-p;break d}}break c}this._=this.A-n;if(!i(this)){break b}}break a}this._=this.A-d;Q=true;c:while(Q===true){Q=false;this.C=this._;R=true;b:while(R===true){R=false;ax=this.A-this._;g=true;d:while(g===true){g=false;if(!(!c(this)?false:b(this,a.a_9,2)===0?false:true)){break d}break b}this._=this.A-ax;if(!(!c(this)?false:b(this,a.a_2,4)===0?false:true)){break c}}T=true;d:while(T===true){T=false;ay=this.A-this._;U=true;e:while(U===true){U=false;if(!(!c(this)?false:!f(this,a.g_U,105,305)?false:!l(this)?false:true)){break e}this.B=this._;if(!e(this,'')){return false}q=this.A-this._;V=true;b:while(V===true){V=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-q;break b}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-q;break b}}break d}this._=this.A-ay;if(!(b(this,a.a_1,2)===0?false:true)){break c}}break 
a}this._=this.A-d;W=true;d:while(W===true){W=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_8,4)===0?false:true)){break d}this.B=this._;if(!e(this,'')){return false}ap=this.A-this._;X=true;e:while(X===true){X=false;this.C=this._;Y=true;c:while(Y===true){Y=false;r=this.A-this._;Z=true;f:while(Z===true){Z=false;if(!(b(this,a.a_0,10)===0?false:!k(this)?false:true)){break f}this.B=this._;if(!e(this,'')){return false}s=this.A-this._;_=true;b:while(_===true){_=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-s;break b}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-s;break b}}break c}this._=this.A-r;$=true;b:while($===true){$=false;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){break b}this.B=this._;if(!e(this,'')){return false}as=this.A-this._;a0=true;f:while(a0===true){a0=false;if(!i(this)){this._=this.A-as;break f}}break c}this._=this.A-r;if(!i(this)){this._=this.A-ap;break e}}}break a}this._=this.A-d;a1=true;d:while(a1===true){a1=false;this.C=this._;a2=true;b:while(a2===true){a2=false;at=this.A-this._;a3=true;c:while(a3===true){a3=false;if(!(!c(this)?false:b(this,a.a_3,4)===0?false:!o(this)?false:true)){break c}break b}this._=this.A-at;if(!(!c(this)?false:b(this,a.a_10,2)===0?false:!h(this)?false:true)){break d}}this.B=this._;if(!e(this,'')){return false}au=this.A-this._;a4=true;e:while(a4===true){a4=false;a5=true;c:while(a5===true){a5=false;t=this.A-this._;a6=true;b:while(a6===true){a6=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){break b}this.B=this._;if(!e(this,'')){return false}if(!i(this)){break b}break c}this._=this.A-t;a7=true;f:while(a7===true){a7=false;this.C=this._;a8=true;b:while(a8===true){a8=false;aw=this.A-this._;a9=true;g:while(a9===true){a9=false;if(!(b(this,a.a_0,10)===0?false:!k(this)?false:true)){break g}break b}this._=this.A-aw;if(!(!c(this)?false:!f(this,a.g_U,105,305)?false:!l(this)?false:true)){break f}}this.B=this._;if(!e(this,'')){return 
false}u=this.A-this._;aa=true;b:while(aa===true){aa=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-u;break b}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-u;break b}}break c}this._=this.A-t;if(!i(this)){this._=this.A-au;break e}}}break a}this._=this.A-d;ab=true;b:while(ab===true){ab=false;this.C=this._;if(!(b(this,a.a_1,2)===0?false:true)){break b}this.B=this._;if(!e(this,'')){return false}break a}this._=this.A-d;ac=true;b:while(ac===true){ac=false;if(!i(this)){break b}break a}this._=this.A-d;ad=true;c:while(ad===true){ad=false;this.C=this._;ae=true;b:while(ae===true){ae=false;v=this.A-this._;af=true;d:while(af===true){af=false;if(!(!c(this)?false:b(this,a.a_6,4)===0?false:true)){break d}break b}this._=this.A-v;ag=true;d:while(ag===true){ag=false;if(!(!c(this)?false:!f(this,a.g_U,105,305)?false:!h(this)?false:true)){break d}break b}this._=this.A-v;if(!(!c(this)?false:b(this,a.a_4,2)===0?false:!h(this)?false:true)){break c}}this.B=this._;if(!e(this,'')){return false}w=this.A-this._;ah=true;b:while(ah===true){ah=false;this.C=this._;ai=true;d:while(ai===true){ai=false;aA=this.A-this._;aj=true;e:while(aj===true){aj=false;if(!(b(this,a.a_0,10)===0?false:!k(this)?false:true)){break e}this.B=this._;if(!e(this,'')){return false}aB=this.A-this._;ak=true;f:while(ak===true){ak=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-aB;break f}}break d}this._=this.A-aA;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-w;break b}}this.B=this._;if(!e(this,'')){return false}this.C=this._;if(!i(this)){this._=this.A-w;break b}}break a}az=this._=this.A-d;this.C=az;al=true;b:while(al===true){al=false;ao=this.A-this._;am=true;c:while(am===true){am=false;if(!(b(this,a.a_0,10)===0?false:!k(this)?false:true)){break c}break b}this._=this.A-ao;if(!(!c(this)?false:!f(this,a.g_U,105,305)?false:!l(this)?false:true)){return false}}this.B=this._;if(!e(this,'')){return 
false}x=this.A-this._;an=true;b:while(an===true){an=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-x;break b}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-x;break b}}}return true};a.prototype.r_stem_noun_suffixes=a.prototype.z;function L(d){var g;var as;var S;var m;var aw;var n;var ar;var p;var q;var ay;var az;var r;var aq;var s;var t;var at;var au;var av;var u;var ax;var v;var w;var x;var aB;var aC;var ap;var y;var z;var A;var B;var C;var D;var E;var F;var G;var H;var I;var J;var K;var L;var M;var N;var O;var P;var Q;var R;var j;var T;var U;var V;var W;var X;var Y;var Z;var _;var $;var a0;var a1;var a2;var a3;var a4;var a5;var a6;var a7;var a8;var a9;var aa;var ab;var ac;var ad;var ae;var af;var ag;var ah;var ai;var aj;var ak;var al;var am;var an;var ao;var aD;var aA;z=true;a:while(z===true){z=false;g=d.A-d._;A=true;b:while(A===true){A=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){break b}d.B=d._;if(!e(d,'')){return false}as=d.A-d._;B=true;c:while(B===true){B=false;if(!i(d)){d._=d.A-as;break c}}break a}d._=d.A-g;C=true;g:while(C===true){C=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_11,2)===0?false:!o(d)?false:true)){break g}d.B=d._;if(!e(d,'')){return false}S=d.A-d._;D=true;b:while(D===true){D=false;E=true;c:while(E===true){E=false;m=d.A-d._;F=true;d:while(F===true){F=false;d.C=d._;if(!(b(d,a.a_1,2)===0?false:true)){break d}d.B=d._;if(!e(d,'')){return false}break c}d._=d.A-m;G=true;f:while(G===true){G=false;d.C=d._;H=true;d:while(H===true){H=false;aw=d.A-d._;I=true;e:while(I===true){I=false;if(!(b(d,a.a_0,10)===0?false:!k(d)?false:true)){break e}break d}d._=d.A-aw;if(!(!c(d)?false:!f(d,a.g_U,105,305)?false:!l(d)?false:true)){break f}}d.B=d._;if(!e(d,'')){return false}n=d.A-d._;J=true;d:while(J===true){J=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-n;break d}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-n;break d}}break 
c}aD=d._=d.A-m;d.C=aD;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-S;break b}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-S;break b}}}break a}d._=d.A-g;K=true;b:while(K===true){K=false;d.C=d._;L=true;d:while(L===true){L=false;ar=d.A-d._;M=true;c:while(M===true){M=false;if(!(!c(d)?false:b(d,a.a_7,2)===0?false:true)){break c}break d}d._=d.A-ar;if(!(!c(d)?false:b(d,a.a_5,2)===0?false:true)){break b}}N=true;c:while(N===true){N=false;p=d.A-d._;O=true;d:while(O===true){O=false;if(!(b(d,a.a_1,2)===0?false:true)){break d}d.B=d._;if(!e(d,'')){return false}break c}d._=d.A-p;P=true;e:while(P===true){P=false;if(!(!c(d)?false:!f(d,a.g_U,105,305)?false:!l(d)?false:true)){break e}d.B=d._;if(!e(d,'')){return false}q=d.A-d._;Q=true;d:while(Q===true){Q=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-q;break d}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-q;break d}}break c}d._=d.A-p;if(!i(d)){break b}}break a}d._=d.A-g;R=true;c:while(R===true){R=false;d.C=d._;j=true;b:while(j===true){j=false;ay=d.A-d._;T=true;d:while(T===true){T=false;if(!(!c(d)?false:b(d,a.a_9,2)===0?false:true)){break d}break b}d._=d.A-ay;if(!(!c(d)?false:b(d,a.a_2,4)===0?false:true)){break c}}U=true;d:while(U===true){U=false;az=d.A-d._;V=true;e:while(V===true){V=false;if(!(!c(d)?false:!f(d,a.g_U,105,305)?false:!l(d)?false:true)){break e}d.B=d._;if(!e(d,'')){return false}r=d.A-d._;W=true;b:while(W===true){W=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-r;break b}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-r;break b}}break d}d._=d.A-az;if(!(b(d,a.a_1,2)===0?false:true)){break c}}break a}d._=d.A-g;X=true;d:while(X===true){X=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_8,4)===0?false:true)){break d}d.B=d._;if(!e(d,'')){return false}aq=d.A-d._;Y=true;e:while(Y===true){Y=false;d.C=d._;Z=true;c:while(Z===true){Z=false;s=d.A-d._;_=true;f:while(_===true){_=false;if(!(b(d,a.a_0,10)===0?false:!k(d)?false:true)){break f}d.B=d._;if(!e(d,'')){return 
false}t=d.A-d._;$=true;b:while($===true){$=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-t;break b}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-t;break b}}break c}d._=d.A-s;a0=true;b:while(a0===true){a0=false;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){break b}d.B=d._;if(!e(d,'')){return false}at=d.A-d._;a1=true;f:while(a1===true){a1=false;if(!i(d)){d._=d.A-at;break f}}break c}d._=d.A-s;if(!i(d)){d._=d.A-aq;break e}}}break a}d._=d.A-g;a2=true;d:while(a2===true){a2=false;d.C=d._;a3=true;b:while(a3===true){a3=false;au=d.A-d._;a4=true;c:while(a4===true){a4=false;if(!(!c(d)?false:b(d,a.a_3,4)===0?false:!o(d)?false:true)){break c}break b}d._=d.A-au;if(!(!c(d)?false:b(d,a.a_10,2)===0?false:!h(d)?false:true)){break d}}d.B=d._;if(!e(d,'')){return false}av=d.A-d._;a5=true;e:while(a5===true){a5=false;a6=true;c:while(a6===true){a6=false;u=d.A-d._;a7=true;b:while(a7===true){a7=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){break b}d.B=d._;if(!e(d,'')){return false}if(!i(d)){break b}break c}d._=d.A-u;a8=true;f:while(a8===true){a8=false;d.C=d._;a9=true;b:while(a9===true){a9=false;ax=d.A-d._;aa=true;g:while(aa===true){aa=false;if(!(b(d,a.a_0,10)===0?false:!k(d)?false:true)){break g}break b}d._=d.A-ax;if(!(!c(d)?false:!f(d,a.g_U,105,305)?false:!l(d)?false:true)){break f}}d.B=d._;if(!e(d,'')){return false}v=d.A-d._;ab=true;b:while(ab===true){ab=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-v;break b}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-v;break b}}break c}d._=d.A-u;if(!i(d)){d._=d.A-av;break e}}}break a}d._=d.A-g;ac=true;b:while(ac===true){ac=false;d.C=d._;if(!(b(d,a.a_1,2)===0?false:true)){break b}d.B=d._;if(!e(d,'')){return false}break a}d._=d.A-g;ad=true;b:while(ad===true){ad=false;if(!i(d)){break b}break a}d._=d.A-g;ae=true;c:while(ae===true){ae=false;d.C=d._;af=true;b:while(af===true){af=false;w=d.A-d._;ag=true;d:while(ag===true){ag=false;if(!(!c(d)?false:b(d,a.a_6,4)===0?false:true)){break 
d}break b}d._=d.A-w;ah=true;d:while(ah===true){ah=false;if(!(!c(d)?false:!f(d,a.g_U,105,305)?false:!h(d)?false:true)){break d}break b}d._=d.A-w;if(!(!c(d)?false:b(d,a.a_4,2)===0?false:!h(d)?false:true)){break c}}d.B=d._;if(!e(d,'')){return false}x=d.A-d._;ai=true;b:while(ai===true){ai=false;d.C=d._;aj=true;d:while(aj===true){aj=false;aB=d.A-d._;ak=true;e:while(ak===true){ak=false;if(!(b(d,a.a_0,10)===0?false:!k(d)?false:true)){break e}d.B=d._;if(!e(d,'')){return false}aC=d.A-d._;al=true;f:while(al===true){al=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-aC;break f}}break d}d._=d.A-aB;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-x;break b}}d.B=d._;if(!e(d,'')){return false}d.C=d._;if(!i(d)){d._=d.A-x;break b}}break a}aA=d._=d.A-g;d.C=aA;am=true;b:while(am===true){am=false;ap=d.A-d._;an=true;c:while(an===true){an=false;if(!(b(d,a.a_0,10)===0?false:!k(d)?false:true)){break c}break b}d._=d.A-ap;if(!(!c(d)?false:!f(d,a.g_U,105,305)?false:!l(d)?false:true)){return false}}d.B=d._;if(!e(d,'')){return false}y=d.A-d._;ao=true;b:while(ao===true){ao=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-y;break b}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-y;break b}}}return true};a.prototype.w=function(){var c;this.C=this._;c=b(this,a.a_23,4);if(c===0){return false}this.B=this._;switch(c){case 0:return false;case 1:if(!e(this,'p')){return false}break;case 2:if(!e(this,'ç')){return false}break;case 3:if(!e(this,'t')){return false}break;case 4:if(!e(this,'k')){return false}break}return true};a.prototype.r_post_process_last_consonants=a.prototype.w;function w(c){var d;c.C=c._;d=b(c,a.a_23,4);if(d===0){return false}c.B=c._;switch(d){case 0:return false;case 1:if(!e(c,'p')){return false}break;case 2:if(!e(c,'ç')){return false}break;case 3:if(!e(c,'t')){return false}break;case 4:if(!e(c,'k')){return false}break}return true};a.prototype.N=function(){var L;var _;var i;var Y;var B;var W;var K;var l;var S;var Q;var p;var O;var 
M;var s;var U;var u;var v;var w;var x;var y;var z;var A;var b;var C;var D;var j;var F;var G;var H;var I;var J;var E;var t;var r;var N;var q;var P;var o;var R;var m;var T;var k;var V;var h;var X;var e;var Z;var d;var $;var a0;var a1;var c;L=this.A-this._;u=true;a:while(u===true){u=false;_=this.A-this._;v=true;b:while(v===true){v=false;if(!g(this,1,'d')){break b}break a}this._=this.A-_;if(!g(this,1,'g')){return false}}this._=this.A-L;w=true;a:while(w===true){w=false;i=this.A-this._;x=true;b:while(x===true){x=false;Y=this.A-this._;d:while(true){B=this.A-this._;y=true;c:while(y===true){y=false;if(!f(this,a.g_vowel,97,305)){break c}this._=this.A-B;break d}V=this._=this.A-B;if(V<=this.D){break b}this._--}z=true;c:while(z===true){z=false;W=this.A-this._;A=true;d:while(A===true){A=false;if(!g(this,1,'a')){break d}break c}this._=this.A-W;if(!g(this,1,'ı')){break b}}h=this._=this.A-Y;b=h;N=h;q=n(this,h,h,'ı');if(h<=this.B){this.B+=q|0}if(N<=this.C){this.C+=q|0}this._=b;break a}this._=this.A-i;C=true;b:while(C===true){C=false;K=this.A-this._;c:while(true){l=this.A-this._;D=true;d:while(D===true){D=false;if(!f(this,a.g_vowel,97,305)){break d}this._=this.A-l;break c}X=this._=this.A-l;if(X<=this.D){break b}this._--}j=true;c:while(j===true){j=false;S=this.A-this._;F=true;d:while(F===true){F=false;if(!g(this,1,'e')){break d}break c}this._=this.A-S;if(!g(this,1,'i')){break b}}e=this._=this.A-K;b=e;P=e;o=n(this,e,e,'i');if(e<=this.B){this.B+=o|0}if(P<=this.C){this.C+=o|0}this._=b;break a}this._=this.A-i;G=true;b:while(G===true){G=false;Q=this.A-this._;c:while(true){p=this.A-this._;H=true;d:while(H===true){H=false;if(!f(this,a.g_vowel,97,305)){break d}this._=this.A-p;break c}Z=this._=this.A-p;if(Z<=this.D){break b}this._--}I=true;c:while(I===true){I=false;O=this.A-this._;J=true;d:while(J===true){J=false;if(!g(this,1,'o')){break d}break c}this._=this.A-O;if(!g(this,1,'u')){break 
b}}d=this._=this.A-Q;b=d;R=d;m=n(this,d,d,'u');if(d<=this.B){this.B+=m|0}if(R<=this.C){this.C+=m|0}this._=b;break a}a1=this._=(a0=this.A)-i;M=a0-a1;b:while(true){s=this.A-this._;E=true;c:while(E===true){E=false;if(!f(this,a.g_vowel,97,305)){break c}this._=this.A-s;break b}$=this._=this.A-s;if($<=this.D){return false}this._--}t=true;b:while(t===true){t=false;U=this.A-this._;r=true;c:while(r===true){r=false;if(!g(this,1,'ö')){break c}break b}this._=this.A-U;if(!g(this,1,'ü')){return false}}c=this._=this.A-M;b=c;T=c;k=n(this,c,c,'ü');if(c<=this.B){this.B+=k|0}if(T<=this.C){this.C+=k|0}this._=b}return true};a.prototype.r_append_U_to_stems_ending_with_d_or_g=a.prototype.N;function z(b){var $;var Z;var j;var X;var F;var L;var T;var m;var R;var P;var q;var N;var V;var t;var M;var v;var w;var x;var y;var z;var A;var B;var c;var D;var E;var C;var G;var H;var I;var J;var K;var u;var s;var r;var O;var p;var Q;var o;var S;var l;var U;var k;var W;var i;var Y;var h;var _;var e;var a0;var a1;var a2;var d;$=b.A-b._;v=true;a:while(v===true){v=false;Z=b.A-b._;w=true;b:while(w===true){w=false;if(!g(b,1,'d')){break b}break a}b._=b.A-Z;if(!g(b,1,'g')){return false}}b._=b.A-$;x=true;a:while(x===true){x=false;j=b.A-b._;y=true;b:while(y===true){y=false;X=b.A-b._;d:while(true){F=b.A-b._;z=true;c:while(z===true){z=false;if(!f(b,a.g_vowel,97,305)){break c}b._=b.A-F;break d}W=b._=b.A-F;if(W<=b.D){break b}b._--}A=true;c:while(A===true){A=false;L=b.A-b._;B=true;d:while(B===true){B=false;if(!g(b,1,'a')){break d}break c}b._=b.A-L;if(!g(b,1,'ı')){break b}}i=b._=b.A-X;c=i;O=i;p=n(b,i,i,'ı');if(i<=b.B){b.B+=p|0}if(O<=b.C){b.C+=p|0}b._=c;break a}b._=b.A-j;D=true;b:while(D===true){D=false;T=b.A-b._;c:while(true){m=b.A-b._;E=true;d:while(E===true){E=false;if(!f(b,a.g_vowel,97,305)){break d}b._=b.A-m;break c}Y=b._=b.A-m;if(Y<=b.D){break b}b._--}C=true;c:while(C===true){C=false;R=b.A-b._;G=true;d:while(G===true){G=false;if(!g(b,1,'e')){break d}break c}b._=b.A-R;if(!g(b,1,'i')){break 
b}}h=b._=b.A-T;c=h;Q=h;o=n(b,h,h,'i');if(h<=b.B){b.B+=o|0}if(Q<=b.C){b.C+=o|0}b._=c;break a}b._=b.A-j;H=true;b:while(H===true){H=false;P=b.A-b._;c:while(true){q=b.A-b._;I=true;d:while(I===true){I=false;if(!f(b,a.g_vowel,97,305)){break d}b._=b.A-q;break c}_=b._=b.A-q;if(_<=b.D){break b}b._--}J=true;c:while(J===true){J=false;N=b.A-b._;K=true;d:while(K===true){K=false;if(!g(b,1,'o')){break d}break c}b._=b.A-N;if(!g(b,1,'u')){break b}}e=b._=b.A-P;c=e;S=e;l=n(b,e,e,'u');if(e<=b.B){b.B+=l|0}if(S<=b.C){b.C+=l|0}b._=c;break a}a2=b._=(a1=b.A)-j;V=a1-a2;b:while(true){t=b.A-b._;u=true;c:while(u===true){u=false;if(!f(b,a.g_vowel,97,305)){break c}b._=b.A-t;break b}a0=b._=b.A-t;if(a0<=b.D){return false}b._--}s=true;b:while(s===true){s=false;M=b.A-b._;r=true;c:while(r===true){r=false;if(!g(b,1,'ö')){break c}break b}b._=b.A-M;if(!g(b,1,'ü')){return false}}d=b._=b.A-V;c=d;U=d;k=n(b,d,d,'ü');if(d<=b.B){b.B+=k|0}if(U<=b.C){b.C+=k|0}b._=c}return true};a.prototype.v=function(){var e;var f;var b;var c;var d;e=this._;b=2;a:while(true){f=this._;c=true;b:while(c===true){c=false;c:while(true){d=true;d:while(d===true){d=false;if(!v(this,a.g_vowel,97,305)){break d}break c}if(this._>=this.A){break b}this._++}b--;continue a}this._=f;break a}if(b>0){return false}this._=e;return true};a.prototype.r_more_than_one_syllable_word=a.prototype.v;function N(b){var f;var g;var c;var d;var e;f=b._;c=2;a:while(true){g=b._;d=true;b:while(d===true){d=false;c:while(true){e=true;d:while(e===true){e=false;if(!v(b,a.g_vowel,97,305)){break d}break c}if(b._>=b.A){break b}b._++}c--;continue a}b._=g;break a}if(c>0){return false}b._=f;return true};a.prototype.P=function(){var f;var g;var h;var b;var a;var c;var d;var i;var j;var e;b=true;b:while(b===true){b=false;f=this._;a=true;a:while(a===true){a=false;g=this._;c:while(true){c=true;d:while(c===true){c=false;if(!s(this,2,'ad')){break d}break c}if(this._>=this.A){break a}this._++}i=this.I_strlen=2;if(!(i===this.A)){break a}this._=g;break 
b}j=this._=f;h=j;a:while(true){d=true;c:while(d===true){d=false;if(!s(this,5,'soyad')){break c}break a}if(this._>=this.A){return false}this._++}e=this.I_strlen=5;if(!(e===this.A)){return false}this._=h}return true};a.prototype.r_is_reserved_word=a.prototype.P;function x(a){var g;var h;var i;var c;var b;var d;var e;var j;var k;var f;c=true;b:while(c===true){c=false;g=a._;b=true;a:while(b===true){b=false;h=a._;c:while(true){d=true;d:while(d===true){d=false;if(!s(a,2,'ad')){break d}break c}if(a._>=a.A){break a}a._++}j=a.I_strlen=2;if(!(j===a.A)){break a}a._=h;break b}k=a._=g;i=k;a:while(true){e=true;c:while(e===true){e=false;if(!s(a,5,'soyad')){break c}break a}if(a._>=a.A){return false}a._++}f=a.I_strlen=5;if(!(f===a.A)){return false}a._=i}return true};a.prototype.x=function(){var d;var e;var a;var b;var c;var f;var g;var h;d=this._;a=true;a:while(a===true){a=false;if(!x(this)){break a}return false}f=this._=d;this.D=f;h=this._=g=this.A;e=g-h;b=true;a:while(b===true){b=false;if(!z(this)){break a}}this._=this.A-e;c=true;a:while(c===true){c=false;if(!w(this)){break a}}this._=this.D;return true};a.prototype.r_postlude=a.prototype.x;function O(a){var e;var f;var b;var c;var d;var g;var h;var i;e=a._;b=true;a:while(b===true){b=false;if(!x(a)){break a}return false}g=a._=e;a.D=g;i=a._=h=a.A;f=h-i;c=true;a:while(c===true){c=false;if(!z(a)){break a}}a._=a.A-f;d=true;a:while(d===true){d=false;if(!w(a)){break a}}a._=a.D;return true};a.prototype.H=function(){var c;var a;var b;var d;var e;if(!N(this)){return false}this.D=this._;e=this._=d=this.A;c=d-e;a=true;a:while(a===true){a=false;if(!J(this)){break a}}this._=this.A-c;if(!this.B_continue_stemming_noun_suffixes){return false}b=true;a:while(b===true){b=false;if(!L(this)){break a}}this._=this.D;return!O(this)?false:true};a.prototype.stem=a.prototype.H;a.prototype.L=function(b){return b instanceof a};a.prototype.equals=a.prototype.L;a.prototype.M=function(){var c;var a;var b;var 
d;c='TurkishStemmer';a=0;for(b=0;b<c.length;b++){d=c.charCodeAt(b);a=(a<<5)-a+d;a=a&a}return a|0};a.prototype.hashCode=a.prototype.M;a.serialVersionUID=1;j(a,'methodObject',function(){return new a});j(a,'a_0',function(){return[new d('m',-1,-1),new d('n',-1,-1),new d('miz',-1,-1),new d('niz',-1,-1),new d('muz',-1,-1),new d('nuz',-1,-1),new d('müz',-1,-1),new d('nüz',-1,-1),new d('mız',-1,-1),new d('nız',-1,-1)]});j(a,'a_1',function(){return[new d('leri',-1,-1),new d('ları',-1,-1)]});j(a,'a_2',function(){return[new d('ni',-1,-1),new d('nu',-1,-1),new d('nü',-1,-1),new d('nı',-1,-1)]});j(a,'a_3',function(){return[new d('in',-1,-1),new d('un',-1,-1),new d('ün',-1,-1),new d('ın',-1,-1)]});j(a,'a_4',function(){return[new d('a',-1,-1),new d('e',-1,-1)]});j(a,'a_5',function(){return[new d('na',-1,-1),new d('ne',-1,-1)]});j(a,'a_6',function(){return[new d('da',-1,-1),new d('ta',-1,-1),new d('de',-1,-1),new d('te',-1,-1)]});j(a,'a_7',function(){return[new d('nda',-1,-1),new d('nde',-1,-1)]});j(a,'a_8',function(){return[new d('dan',-1,-1),new d('tan',-1,-1),new d('den',-1,-1),new d('ten',-1,-1)]});j(a,'a_9',function(){return[new d('ndan',-1,-1),new d('nden',-1,-1)]});j(a,'a_10',function(){return[new d('la',-1,-1),new d('le',-1,-1)]});j(a,'a_11',function(){return[new d('ca',-1,-1),new d('ce',-1,-1)]});j(a,'a_12',function(){return[new d('im',-1,-1),new d('um',-1,-1),new d('üm',-1,-1),new d('ım',-1,-1)]});j(a,'a_13',function(){return[new d('sin',-1,-1),new d('sun',-1,-1),new d('sün',-1,-1),new d('sın',-1,-1)]});j(a,'a_14',function(){return[new d('iz',-1,-1),new d('uz',-1,-1),new d('üz',-1,-1),new d('ız',-1,-1)]});j(a,'a_15',function(){return[new d('siniz',-1,-1),new d('sunuz',-1,-1),new d('sünüz',-1,-1),new d('sınız',-1,-1)]});j(a,'a_16',function(){return[new d('lar',-1,-1),new d('ler',-1,-1)]});j(a,'a_17',function(){return[new d('niz',-1,-1),new d('nuz',-1,-1),new d('nüz',-1,-1),new d('nız',-1,-1)]});j(a,'a_18',function(){return[new d('dir',-1,-1),new d('tir',-1,-1),new 
d('dur',-1,-1),new d('tur',-1,-1),new d('dür',-1,-1),new d('tür',-1,-1),new d('dır',-1,-1),new d('tır',-1,-1)]});j(a,'a_19',function(){return[new d('casına',-1,-1),new d('cesine',-1,-1)]});j(a,'a_20',function(){return[new d('di',-1,-1),new d('ti',-1,-1),new d('dik',-1,-1),new d('tik',-1,-1),new d('duk',-1,-1),new d('tuk',-1,-1),new d('dük',-1,-1),new d('tük',-1,-1),new d('dık',-1,-1),new d('tık',-1,-1),new d('dim',-1,-1),new d('tim',-1,-1),new d('dum',-1,-1),new d('tum',-1,-1),new d('düm',-1,-1),new d('tüm',-1,-1),new d('dım',-1,-1),new d('tım',-1,-1),new d('din',-1,-1),new d('tin',-1,-1),new d('dun',-1,-1),new d('tun',-1,-1),new d('dün',-1,-1),new d('tün',-1,-1),new d('dın',-1,-1),new d('tın',-1,-1),new d('du',-1,-1),new d('tu',-1,-1),new d('dü',-1,-1),new d('tü',-1,-1),new d('dı',-1,-1),new d('tı',-1,-1)]});j(a,'a_21',function(){return[new d('sa',-1,-1),new d('se',-1,-1),new d('sak',-1,-1),new d('sek',-1,-1),new d('sam',-1,-1),new d('sem',-1,-1),new d('san',-1,-1),new d('sen',-1,-1)]});j(a,'a_22',function(){return[new d('miş',-1,-1),new d('muş',-1,-1),new d('müş',-1,-1),new d('mış',-1,-1)]});j(a,'a_23',function(){return[new d('b',-1,1),new d('c',-1,2),new d('d',-1,3),new d('ğ',-1,4)]});j(a,'g_vowel',function(){return[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,32,8,0,0,0,0,0,0,1]});j(a,'g_U',function(){return[1,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8,0,0,0,0,0,0,1]});j(a,'g_vowel1',function(){return[1,64,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1]});j(a,'g_vowel2',function(){return[17,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,130]});j(a,'g_vowel3',function(){return[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1]});j(a,'g_vowel4',function(){return[17]});j(a,'g_vowel5',function(){return[65]});j(a,'g_vowel6',function(){return[65]});var y={'src/stemmer.jsx':{Stemmer:u},'src/turkish-stemmer.jsx':{TurkishStemmer:a}}}(JSX))
@@ -29,12 +28,12 @@ class SearchTurkish(SearchLanguage):
language_name = 'Turkish'
js_stemmer_rawcode = 'turkish-stemmer.js'
js_stemmer_code = js_stemmer
- stopwords = set() # type: Set[unicode]
+ stopwords = set() # type: Set[str]
def init(self, options):
# type: (Dict) -> None
self.stemmer = snowballstemmer.stemmer('turkish')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/zh.py b/sphinx/search/zh.py
index cb75436ee..51fdc21d2 100644
--- a/sphinx/search/zh.py
+++ b/sphinx/search/zh.py
@@ -24,7 +24,6 @@ except ImportError:
if False:
# For type annotation
from typing import Dict, List # NOQA
- from sphinx.util.typing import unicode # NOQA
english_stopwords = set(u"""
a and are as at
@@ -246,8 +245,8 @@ class SearchChinese(SearchLanguage):
self.stemmer = get_stemmer()
def split(self, input):
- # type: (unicode) -> List[unicode]
- chinese = [] # type: List[unicode]
+ # type: (str) -> List[str]
+ chinese = [] # type: List[str]
if JIEBA:
chinese = list(jieba.cut_for_search(input))
@@ -255,9 +254,9 @@ class SearchChinese(SearchLanguage):
return chinese + latin1
def word_filter(self, stemmed_word):
- # type: (unicode) -> bool
+ # type: (str) -> bool
return len(stemmed_word) > 1
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stem(word.lower())
diff --git a/sphinx/setup_command.py b/sphinx/setup_command.py
index 87944167a..dfe251d18 100644
--- a/sphinx/setup_command.py
+++ b/sphinx/setup_command.py
@@ -11,14 +11,12 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import os
import sys
from distutils.cmd import Command
from distutils.errors import DistutilsOptionError, DistutilsExecError
-
-from six import StringIO
+from io import StringIO
from sphinx.application import Sphinx
from sphinx.cmd.build import handle_exception
@@ -29,7 +27,6 @@ from sphinx.util.osutil import abspath
if False:
# For type annotation
from typing import Any, Dict, List, Tuple # NOQA
- from sphinx.util.typing import unicode # NOQA
class BuildDoc(Command):
@@ -97,14 +94,14 @@ class BuildDoc(Command):
# type: () -> None
self.fresh_env = self.all_files = False
self.pdb = False
- self.source_dir = self.build_dir = None # type: unicode
+ self.source_dir = self.build_dir = None # type: str
self.builder = 'html'
self.warning_is_error = False
self.project = ''
self.version = ''
self.release = ''
self.today = ''
- self.config_dir = None # type: unicode
+ self.config_dir = None # type: str
self.link_index = False
self.copyright = ''
self.verbosity = 0
@@ -112,7 +109,7 @@ class BuildDoc(Command):
self.nitpicky = False
def _guess_source_dir(self):
- # type: () -> unicode
+ # type: () -> str
for guess in ('doc', 'docs'):
if not os.path.isdir(guess):
continue
@@ -125,7 +122,7 @@ class BuildDoc(Command):
# unicode, causing finalize_options to fail if invoked again. Workaround
# for https://bugs.python.org/issue19570
def _ensure_stringlike(self, option, what, default=None):
- # type: (unicode, unicode, Any) -> Any
+ # type: (str, str, Any) -> Any
val = getattr(self, option)
if val is None:
setattr(self, option, default)
@@ -156,7 +153,7 @@ class BuildDoc(Command):
self.builder_target_dirs = [
(builder, os.path.join(self.build_dir, builder))
- for builder in self.builder] # type: List[Tuple[str, unicode]]
+ for builder in self.builder]
def run(self):
# type: () -> None
@@ -166,7 +163,7 @@ class BuildDoc(Command):
status_stream = StringIO()
else:
status_stream = sys.stdout # type: ignore
- confoverrides = {} # type: Dict[unicode, Any]
+ confoverrides = {} # type: Dict[str, Any]
if self.project:
confoverrides['project'] = self.project
if self.version:
diff --git a/sphinx/testing/fixtures.py b/sphinx/testing/fixtures.py
index 968807788..399747826 100644
--- a/sphinx/testing/fixtures.py
+++ b/sphinx/testing/fixtures.py
@@ -8,16 +8,15 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import os
import subprocess
import sys
from collections import namedtuple
+from io import StringIO
from tempfile import gettempdir
import pytest
-from six import StringIO
from . import util
diff --git a/sphinx/testing/path.py b/sphinx/testing/path.py
index 438a42d53..77df1ab9b 100644
--- a/sphinx/testing/path.py
+++ b/sphinx/testing/path.py
@@ -16,7 +16,6 @@ if False:
# For type annotation
import builtins # NOQA
from typing import Any, Callable, IO, List # NOQA
- from sphinx.util.typing import unicode # NOQA
FILESYSTEMENCODING = sys.getfilesystemencoding() or sys.getdefaultencoding()
@@ -36,7 +35,7 @@ class path(text_type):
return self.__class__(os.path.dirname(self))
def basename(self):
- # type: () -> unicode
+ # type: () -> str
return os.path.basename(self)
def abspath(self):
@@ -101,7 +100,7 @@ class path(text_type):
shutil.rmtree(self, ignore_errors=ignore_errors, onerror=onerror)
def copytree(self, destination, symlinks=False):
- # type: (unicode, bool) -> None
+ # type: (str, bool) -> None
"""
Recursively copy a directory to the given `destination`. If the given
`destination` does not exist it will be created.
@@ -114,7 +113,7 @@ class path(text_type):
shutil.copytree(self, destination, symlinks=symlinks)
def movetree(self, destination):
- # type: (unicode) -> None
+ # type: (str) -> None
"""
Recursively move the file or directory to the given `destination`
similar to the Unix "mv" command.
@@ -145,11 +144,11 @@ class path(text_type):
os.utime(self, arg)
def open(self, mode='r', **kwargs):
- # type: (unicode, Any) -> IO
+ # type: (str, Any) -> IO
return open(self, mode, **kwargs)
def write_text(self, text, encoding='utf-8', **kwargs):
- # type: (unicode, unicode, Any) -> None
+ # type: (str, str, Any) -> None
"""
Writes the given `text` to the file.
"""
@@ -159,7 +158,7 @@ class path(text_type):
f.write(text)
def text(self, encoding='utf-8', **kwargs):
- # type: (unicode, Any) -> unicode
+ # type: (str, Any) -> str
"""
Returns the text in the file.
"""
@@ -219,7 +218,7 @@ class path(text_type):
return self.__class__(os.path.join(self, *map(self.__class__, args)))
def listdir(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
return os.listdir(self)
__div__ = __truediv__ = joinpath
diff --git a/sphinx/testing/util.py b/sphinx/testing/util.py
index f4d44ecde..13147a3f9 100644
--- a/sphinx/testing/util.py
+++ b/sphinx/testing/util.py
@@ -28,7 +28,6 @@ if False:
# For type annotation
from typing import List # NOQA
from typing import Any, Dict, Generator, IO, List, Pattern # NOQA
- from sphinx.util.typing import unicode # NOQA
__all__ = [
@@ -39,25 +38,25 @@ __all__ = [
def assert_re_search(regex, text, flags=0):
- # type: (Pattern, unicode, int) -> None
+ # type: (Pattern, str, int) -> None
if not re.search(regex, text, flags):
assert False, '%r did not match %r' % (regex, text)
def assert_not_re_search(regex, text, flags=0):
- # type: (Pattern, unicode, int) -> None
+ # type: (Pattern, str, int) -> None
if re.search(regex, text, flags):
assert False, '%r did match %r' % (regex, text)
def assert_startswith(thing, prefix):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
if not thing.startswith(prefix):
assert False, '%r does not start with %r' % (thing, prefix)
def assert_node(node, cls=None, xpath="", **kwargs):
- # type: (nodes.Node, Any, unicode, Any) -> None
+ # type: (nodes.Node, Any, str, Any) -> None
if cls:
if isinstance(cls, list):
assert_node(node, cls[0], xpath=xpath, **kwargs)
@@ -96,7 +95,7 @@ def assert_node(node, cls=None, xpath="", **kwargs):
def etree_parse(path):
- # type: (unicode) -> Any
+ # type: (str) -> Any
with warnings.catch_warnings(record=False):
warnings.filterwarnings("ignore", category=DeprecationWarning)
return ElementTree.parse(path)
@@ -117,7 +116,7 @@ class SphinxTestApp(application.Sphinx):
def __init__(self, buildername='html', srcdir=None,
freshenv=False, confoverrides=None, status=None, warning=None,
tags=None, docutilsconf=None):
- # type: (unicode, path, bool, Dict, IO, IO, List[unicode], unicode) -> None
+ # type: (str, path, bool, Dict, IO, IO, List[str], str) -> None
if docutilsconf is not None:
(srcdir / 'docutils.conf').write_text(docutilsconf)
@@ -194,14 +193,14 @@ _unicode_literals_re = re.compile(r'u(".*?")|u(\'.*?\')')
def remove_unicode_literals(s):
- # type: (unicode) -> unicode
+ # type: (str) -> str
warnings.warn('remove_unicode_literals() is deprecated.',
RemovedInSphinx40Warning)
return _unicode_literals_re.sub(lambda x: x.group(1) or x.group(2), s)
def find_files(root, suffix=None):
- # type: (unicode, bool) -> Generator
+ # type: (str, bool) -> Generator
for dirpath, dirs, files in os.walk(root, followlinks=True):
dirpath = path(dirpath)
for f in [f for f in files if not suffix or f.endswith(suffix)]: # type: ignore
@@ -210,5 +209,5 @@ def find_files(root, suffix=None):
def strip_escseq(text):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return re.sub('\x1b.*?m', '', text)
diff --git a/sphinx/theming.py b/sphinx/theming.py
index 1ab5b8124..75e972cd0 100644
--- a/sphinx/theming.py
+++ b/sphinx/theming.py
@@ -30,14 +30,13 @@ if False:
# For type annotation
from typing import Any, Dict, Iterator, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
- from sphinx.util.typing import unicode # NOQA
NODEFAULT = object()
THEMECONF = 'theme.conf'
def extract_zip(filename, targetdir):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Extract zip file to target directory."""
ensuredir(targetdir)
@@ -57,7 +56,7 @@ class Theme:
This class supports both theme directory and theme archive (zipped theme)."""
def __init__(self, name, theme_path, factory):
- # type: (unicode, unicode, HTMLThemeFactory) -> None
+ # type: (str, str, HTMLThemeFactory) -> None
self.name = name
self.base = None
self.rootdir = None
@@ -90,7 +89,7 @@ class Theme:
(inherit, name))
def get_theme_dirs(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
"""Return a list of theme directories, beginning with this theme's,
then the base theme's, then that one's base theme's, etc.
"""
@@ -100,7 +99,7 @@ class Theme:
return [self.themedir] + self.base.get_theme_dirs()
def get_config(self, section, name, default=NODEFAULT):
- # type: (unicode, unicode, Any) -> Any
+ # type: (str, str, Any) -> Any
"""Return the value for a theme configuration setting, searching the
base theme chain.
"""
@@ -117,7 +116,7 @@ class Theme:
return default
def get_options(self, overrides={}):
- # type: (Dict[unicode, Any]) -> Dict[unicode, Any]
+ # type: (Dict[str, Any]) -> Dict[str, Any]
"""Return a dictionary of theme options and their values."""
if self.base:
options = self.base.get_options()
@@ -150,7 +149,7 @@ class Theme:
def is_archived_theme(filename):
- # type: (unicode) -> bool
+ # type: (str) -> bool
"""Check the specified file is an archived theme file or not."""
try:
with ZipFile(filename) as f:
@@ -178,7 +177,7 @@ class HTMLThemeFactory:
self.themes[name] = theme
def load_additional_themes(self, theme_paths):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Load additional themes placed at specified directories."""
for theme_path in theme_paths:
abs_theme_path = path.abspath(path.join(self.app.confdir, theme_path))
@@ -187,7 +186,7 @@ class HTMLThemeFactory:
self.themes[name] = theme
def load_extra_theme(self, name):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Try to load a theme having specifed name."""
if name == 'alabaster':
self.load_alabaster_theme()
@@ -213,7 +212,7 @@ class HTMLThemeFactory:
pass
def load_external_theme(self, name):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Try to load a theme using entry_points.
Sphinx refers to ``sphinx_themes`` entry_points.
@@ -228,9 +227,9 @@ class HTMLThemeFactory:
pass
def find_themes(self, theme_path):
- # type: (unicode) -> Dict[unicode, unicode]
+ # type: (str) -> Dict[str, str]
"""Search themes from specified directory."""
- themes = {} # type: Dict[unicode, unicode]
+ themes = {} # type: Dict[str, str]
if not path.isdir(theme_path):
return themes
@@ -250,7 +249,7 @@ class HTMLThemeFactory:
return themes
def create(self, name):
- # type: (unicode) -> Theme
+ # type: (str) -> Theme
"""Create an instance of theme."""
if name not in self.themes:
self.load_extra_theme(name)
diff --git a/sphinx/transforms/__init__.py b/sphinx/transforms/__init__.py
index 3052ac38a..6e189b35f 100644
--- a/sphinx/transforms/__init__.py
+++ b/sphinx/transforms/__init__.py
@@ -33,7 +33,6 @@ if False:
from sphinx.config import Config # NOQA
from sphinx.domain.std import StandardDomain # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -385,7 +384,7 @@ class SphinxSmartQuotes(SmartQuotes, SphinxTransform):
return False
def get_tokens(self, txtnodes):
- # type: (List[nodes.Text]) -> Generator[Tuple[unicode, unicode], None, None]
+ # type: (List[nodes.Text]) -> Generator[Tuple[str, str], None, None]
# A generator that yields ``(texttype, nodetext)`` tuples for a list
# of "Text" nodes (interface to ``smartquotes.educate_tokens()``).
diff --git a/sphinx/transforms/i18n.py b/sphinx/transforms/i18n.py
index 8e6605824..d3acee115 100644
--- a/sphinx/transforms/i18n.py
+++ b/sphinx/transforms/i18n.py
@@ -33,7 +33,6 @@ if False:
from typing import Dict, List, Tuple, Type # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -41,12 +40,12 @@ N = TypeVar('N', bound=nodes.Node)
def publish_msgstr(app, source, source_path, source_line, config, settings):
- # type: (Sphinx, unicode, unicode, int, Config, Any) -> nodes.Element
+ # type: (Sphinx, str, str, int, Config, Any) -> nodes.Element
"""Publish msgstr (single line) into docutils document
:param sphinx.application.Sphinx app: sphinx application
- :param unicode source: source text
- :param unicode source_path: source path for warning indication
+ :param str source: source text
+ :param str source_path: source path for warning indication
:param source_line: source line for warning indication
:param sphinx.config.Config config: sphinx config
:param docutils.frontend.Values settings: docutils settings
@@ -296,7 +295,7 @@ class Locale(SphinxTransform):
' original: {0}, translated: {1}')
.format(old_foot_ref_rawsources, new_foot_ref_rawsources),
location=node)
- old_foot_namerefs = {} # type: Dict[unicode, List[nodes.footnote_reference]]
+ old_foot_namerefs = {} # type: Dict[str, List[nodes.footnote_reference]]
for r in old_foot_refs:
old_foot_namerefs.setdefault(r.get('refname'), []).append(r)
for newf in new_foot_refs:
@@ -358,7 +357,7 @@ class Locale(SphinxTransform):
is_refnamed_footnote_ref = NodeMatcher(nodes.footnote_reference, refname=Any)
old_foot_refs = node.traverse(is_refnamed_footnote_ref)
new_foot_refs = patch.traverse(is_refnamed_footnote_ref)
- refname_ids_map = {} # type: Dict[unicode, List[unicode]]
+ refname_ids_map = {} # type: Dict[str, List[str]]
if len(old_foot_refs) != len(new_foot_refs):
old_foot_ref_rawsources = [ref.rawsource for ref in old_foot_refs]
new_foot_ref_rawsources = [ref.rawsource for ref in new_foot_refs]
@@ -407,7 +406,7 @@ class Locale(SphinxTransform):
location=node)
def get_ref_key(node):
- # type: (addnodes.pending_xref) -> Tuple[unicode, unicode, unicode]
+ # type: (addnodes.pending_xref) -> Tuple[str, str, str]
case = node["refdomain"], node["reftype"]
if case == ('std', 'term'):
return None
@@ -449,7 +448,7 @@ class Locale(SphinxTransform):
if 'index' in self.config.gettext_additional_targets:
# Extract and translate messages for index entries.
for node, entries in traverse_translatable_index(self.document):
- new_entries = [] # type: List[Tuple[unicode, unicode, unicode, unicode, unicode]] # NOQA
+ new_entries = [] # type: List[Tuple[str, str, str, str, str]]
for type, msg, tid, main, key_ in entries:
msg_parts = split_index_msg(type, msg)
msgstr_parts = []
diff --git a/sphinx/transforms/post_transforms/__init__.py b/sphinx/transforms/post_transforms/__init__.py
index edc5c293f..e6b254b5b 100644
--- a/sphinx/transforms/post_transforms/__init__.py
+++ b/sphinx/transforms/post_transforms/__init__.py
@@ -25,7 +25,6 @@ if False:
from typing import Any, Dict, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.domains import Domain # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -74,11 +73,11 @@ class ReferencesResolver(SphinxTransform):
node.replace_self(newnode or contnode)
def resolve_anyref(self, refdoc, node, contnode):
- # type: (unicode, addnodes.pending_xref, nodes.TextElement) -> nodes.Element
+ # type: (str, addnodes.pending_xref, nodes.TextElement) -> nodes.Element
"""Resolve reference generated by the "any" role."""
stddomain = self.env.get_domain('std')
target = node['reftarget']
- results = [] # type: List[Tuple[unicode, nodes.Element]]
+ results = [] # type: List[Tuple[str, nodes.Element]]
# first, try resolving as :doc:
doc_ref = stddomain.resolve_xref(self.env, refdoc, self.app.builder,
'doc', target, node, contnode)
@@ -124,7 +123,7 @@ class ReferencesResolver(SphinxTransform):
return newnode
def warn_missing_reference(self, refdoc, typ, target, node, domain):
- # type: (unicode, unicode, unicode, addnodes.pending_xref, Domain) -> None
+ # type: (str, str, str, addnodes.pending_xref, Domain) -> None
warn = node.get('refwarn')
if self.config.nitpicky:
warn = True
@@ -162,7 +161,7 @@ class OnlyNodeTransform(SphinxTransform):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_post_transform(ReferencesResolver)
app.add_post_transform(OnlyNodeTransform)
diff --git a/sphinx/transforms/post_transforms/code.py b/sphinx/transforms/post_transforms/code.py
index 8b785d7d2..8536fab6e 100644
--- a/sphinx/transforms/post_transforms/code.py
+++ b/sphinx/transforms/post_transforms/code.py
@@ -24,7 +24,6 @@ if False:
# For type annotation
from typing import Any, Dict, List # NOQA
from sphinx.application import Sphinx # NOQA
- from sphinx.util.typing import unicode # NOQA
HighlightSetting = NamedTuple('HighlightSetting', [('language', text_type),
@@ -53,7 +52,7 @@ class HighlightLanguageTransform(SphinxTransform):
class HighlightLanguageVisitor(nodes.NodeVisitor):
def __init__(self, document, default_language):
- # type: (nodes.document, unicode) -> None
+ # type: (nodes.document, str) -> None
self.default_setting = HighlightSetting(default_language, sys.maxsize)
self.settings = [] # type: List[HighlightSetting]
super(HighlightLanguageVisitor, self).__init__(document)
@@ -142,7 +141,7 @@ class TrimDoctestFlagsTransform(SphinxTransform):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_post_transform(HighlightLanguageTransform)
app.add_post_transform(TrimDoctestFlagsTransform)
diff --git a/sphinx/transforms/post_transforms/compat.py b/sphinx/transforms/post_transforms/compat.py
index cc3f0ab73..31f277620 100644
--- a/sphinx/transforms/post_transforms/compat.py
+++ b/sphinx/transforms/post_transforms/compat.py
@@ -27,7 +27,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -86,7 +85,7 @@ class MathNodeMigrator(SphinxTransform):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_post_transform(MathNodeMigrator)
return {
diff --git a/sphinx/transforms/post_transforms/images.py b/sphinx/transforms/post_transforms/images.py
index efa3335e3..ff8c9f9cc 100644
--- a/sphinx/transforms/post_transforms/images.py
+++ b/sphinx/transforms/post_transforms/images.py
@@ -27,7 +27,6 @@ if False:
# For type annotation
from typing import Any, Dict, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -52,7 +51,7 @@ class BaseImageConverter(SphinxTransform):
@property
def imagedir(self):
- # type: () -> unicode
+ # type: () -> str
return os.path.join(self.app.doctreedir, 'images')
@@ -160,7 +159,7 @@ class DataURIExtractor(BaseImageConverter):
def get_filename_for(filename, mimetype):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
basename = os.path.basename(filename)
return os.path.splitext(basename)[0] + get_image_extension(mimetype)
@@ -197,7 +196,7 @@ class ImageConverter(BaseImageConverter):
#: ('image/gif', 'image/png'),
#: ('application/pdf', 'image/png'),
#: ]
- conversion_rules = [] # type: List[Tuple[unicode, unicode]]
+ conversion_rules = [] # type: List[Tuple[str, str]]
def __init__(self, *args, **kwargs):
# type: (Any, Any) -> None
@@ -224,7 +223,7 @@ class ImageConverter(BaseImageConverter):
return False
def get_conversion_rule(self, node):
- # type: (nodes.image) -> Tuple[unicode, unicode]
+ # type: (nodes.image) -> Tuple[str, str]
for candidate in self.guess_mimetypes(node):
for supported in self.app.builder.supported_image_types:
rule = (candidate, supported)
@@ -239,7 +238,7 @@ class ImageConverter(BaseImageConverter):
raise NotImplementedError()
def guess_mimetypes(self, node):
- # type: (nodes.image) -> List[unicode]
+ # type: (nodes.image) -> List[str]
if '?' in node['candidates']:
return []
elif '*' in node['candidates']:
@@ -273,7 +272,7 @@ class ImageConverter(BaseImageConverter):
self.env.images.add_file(self.env.docname, destpath)
def convert(self, _from, _to):
- # type: (unicode, unicode) -> bool
+ # type: (str, str) -> bool
"""Convert a image file to expected format.
*_from* is a path for source image file, and *_to* is a path for
@@ -283,7 +282,7 @@ class ImageConverter(BaseImageConverter):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_post_transform(ImageDownloader)
app.add_post_transform(DataURIExtractor)
diff --git a/sphinx/util/__init__.py b/sphinx/util/__init__.py
index b228ca182..e1d39718c 100644
--- a/sphinx/util/__init__.py
+++ b/sphinx/util/__init__.py
@@ -52,7 +52,6 @@ from sphinx.util.matching import patfilter # noqa
if False:
# For type annotation
from typing import Any, Callable, Dict, IO, Iterable, Iterator, List, Pattern, Sequence, Set, Tuple, Union # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -65,13 +64,13 @@ url_re = re.compile(r'(?P<schema>.+)://.*') # type: Pattern
# High-level utility functions.
def docname_join(basedocname, docname):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
return posixpath.normpath(
posixpath.join('/' + basedocname, '..', docname))[1:]
def path_stabilize(filepath):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"normalize path separater and unicode string"
newpath = filepath.replace(os.path.sep, SEP)
if isinstance(newpath, text_type):
@@ -80,7 +79,7 @@ def path_stabilize(filepath):
def get_matching_files(dirname, exclude_matchers=()):
- # type: (unicode, Tuple[Callable[[unicode], bool], ...]) -> Iterable[unicode]
+ # type: (str, Tuple[Callable[[str], bool], ...]) -> Iterable[str]
"""Get all file names in a directory, recursively.
Exclude files and dirs matching some matcher in *exclude_matchers*.
@@ -93,9 +92,9 @@ def get_matching_files(dirname, exclude_matchers=()):
relativeroot = root[dirlen:]
qdirs = enumerate(path_stabilize(path.join(relativeroot, dn))
- for dn in dirs) # type: Iterable[Tuple[int, unicode]]
+ for dn in dirs) # type: Iterable[Tuple[int, str]]
qfiles = enumerate(path_stabilize(path.join(relativeroot, fn))
- for fn in files) # type: Iterable[Tuple[int, unicode]]
+ for fn in files) # type: Iterable[Tuple[int, str]]
for matcher in exclude_matchers:
qdirs = [entry for entry in qdirs if not matcher(entry[1])]
qfiles = [entry for entry in qfiles if not matcher(entry[1])]
@@ -107,7 +106,7 @@ def get_matching_files(dirname, exclude_matchers=()):
def get_matching_docs(dirname, suffixes, exclude_matchers=()):
- # type: (unicode, List[unicode], Tuple[Callable[[unicode], bool], ...]) -> Iterable[unicode] # NOQA
+ # type: (str, List[str], Tuple[Callable[[str], bool], ...]) -> Iterable[str] # NOQA
"""Get all file names (without suffixes) matching a suffix in a directory,
recursively.
@@ -131,10 +130,10 @@ class FilenameUniqDict(dict):
"""
def __init__(self):
# type: () -> None
- self._existing = set() # type: Set[unicode]
+ self._existing = set() # type: Set[str]
def add_file(self, docname, newfile):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
if newfile in self:
self[newfile][0].add(docname)
return self[newfile][1]
@@ -149,7 +148,7 @@ class FilenameUniqDict(dict):
return uniquename
def purge_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
for filename, (docs, unique) in list(self.items()):
docs.discard(docname)
if not docs:
@@ -157,17 +156,17 @@ class FilenameUniqDict(dict):
self._existing.discard(unique)
def merge_other(self, docnames, other):
- # type: (Set[unicode], Dict[unicode, Tuple[Set[unicode], Any]]) -> None
+ # type: (Set[str], Dict[str, Tuple[Set[str], Any]]) -> None
for filename, (docs, unique) in other.items():
for doc in docs & set(docnames):
self.add_file(doc, filename)
def __getstate__(self):
- # type: () -> Set[unicode]
+ # type: () -> Set[str]
return self._existing
def __setstate__(self, state):
- # type: (Set[unicode]) -> None
+ # type: (Set[str]) -> None
self._existing = state
@@ -179,7 +178,7 @@ class DownloadFiles(dict):
"""
def add_file(self, docname, filename):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
if filename not in self:
digest = md5(filename.encode('utf-8')).hexdigest()
dest = '%s/%s' % (digest, os.path.basename(filename))
@@ -189,14 +188,14 @@ class DownloadFiles(dict):
return self[filename][1]
def purge_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
for filename, (docs, dest) in list(self.items()):
docs.discard(docname)
if not docs:
del self[filename]
def merge_other(self, docnames, other):
- # type: (Set[unicode], Dict[unicode, Tuple[Set[unicode], Any]]) -> None
+ # type: (Set[str], Dict[str, Tuple[Set[str], Any]]) -> None
for filename, (docs, dest) in other.items():
for docname in docs & set(docnames):
self.add_file(docname, filename)
@@ -204,7 +203,7 @@ class DownloadFiles(dict):
def copy_static_entry(source, targetdir, builder, context={},
exclude_matchers=(), level=0):
- # type: (unicode, unicode, Any, Dict, Tuple[Callable, ...], int) -> None
+ # type: (str, str, Any, Dict, Tuple[Callable, ...], int) -> None
"""[DEPRECATED] Copy a HTML builder static_path entry from source to targetdir.
Handles all possible cases of files, directories and subdirectories.
@@ -244,7 +243,7 @@ _DEBUG_HEADER = '''\
def save_traceback(app):
- # type: (Any) -> unicode
+ # type: (Any) -> str
"""Save the current exception's traceback in a temporary file."""
import sphinx
import jinja2
@@ -282,7 +281,7 @@ def save_traceback(app):
def get_module_source(modname):
- # type: (str) -> Tuple[unicode, unicode]
+ # type: (str) -> Tuple[str, str]
"""Try to find the source code for a module.
Can return ('file', 'filename') in which case the source is in the given
@@ -330,7 +329,7 @@ def get_module_source(modname):
def get_full_modname(modname, attribute):
- # type: (str, unicode) -> unicode
+ # type: (str, str) -> str
if modname is None:
# Prevents a TypeError: if the last getattr() call will return None
# then it's better to return it directly
@@ -353,7 +352,7 @@ _coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
def detect_encoding(readline):
- # type: (Callable[[], bytes]) -> unicode
+ # type: (Callable[[], bytes]) -> str
"""Like tokenize.detect_encoding() from Py3k, but a bit simplified."""
def read_or_stop():
@@ -376,7 +375,7 @@ def detect_encoding(readline):
return orig_enc
def find_cookie(line):
- # type: (bytes) -> unicode
+ # type: (bytes) -> str
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
@@ -410,11 +409,11 @@ class UnicodeDecodeErrorHandler:
"""Custom error handler for open() that warns and replaces."""
def __init__(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
self.docname = docname
def __call__(self, error):
- # type: (UnicodeDecodeError) -> Tuple[Union[unicode, str], int]
+ # type: (UnicodeDecodeError) -> Tuple[Union[str, str], int]
linestart = error.object.rfind(b'\n', 0, error.start)
lineend = error.object.find(b'\n', error.start)
if lineend == -1:
@@ -440,7 +439,7 @@ class Tee:
self.stream2 = stream2
def write(self, text):
- # type: (unicode) -> None
+ # type: (str) -> None
self.stream1.write(text)
self.stream2.write(text)
@@ -453,7 +452,7 @@ class Tee:
def parselinenos(spec, total):
- # type: (unicode, int) -> List[int]
+ # type: (str, int) -> List[int]
"""Parse a line number spec (such as "1,2,4-6") and return a list of
wanted line numbers.
"""
@@ -481,7 +480,7 @@ def parselinenos(spec, total):
def force_decode(string, encoding):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
"""Forcibly get a unicode string out of a bytestring."""
warnings.warn('force_decode() is deprecated.',
RemovedInSphinx40Warning, stacklevel=2)
@@ -505,20 +504,20 @@ class attrdict(dict):
RemovedInSphinx40Warning, stacklevel=2)
def __getattr__(self, key):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self[key]
def __setattr__(self, key, val):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
self[key] = val
def __delattr__(self, key):
- # type: (unicode) -> None
+ # type: (str) -> None
del self[key]
def rpartition(s, t):
- # type: (unicode, unicode) -> Tuple[unicode, unicode]
+ # type: (str, str) -> Tuple[str, str]
"""Similar to str.rpartition from 2.5, but doesn't return the separator."""
i = s.rfind(t)
if i != -1:
@@ -527,7 +526,7 @@ def rpartition(s, t):
def split_into(n, type, value):
- # type: (int, unicode, unicode) -> List[unicode]
+ # type: (int, str, str) -> List[str]
"""Split an index entry into a given number of parts at semicolons."""
parts = [x.strip() for x in value.split(';', n - 1)]
if sum(1 for part in parts if part) < n:
@@ -536,7 +535,7 @@ def split_into(n, type, value):
def split_index_msg(type, value):
- # type: (unicode, unicode) -> List[unicode]
+ # type: (str, str) -> List[str]
# new entry types must be listed in directives/other.py!
if type == 'single':
try:
@@ -558,11 +557,11 @@ def split_index_msg(type, value):
def format_exception_cut_frames(x=1):
- # type: (int) -> unicode
+ # type: (int) -> str
"""Format an exception with traceback, but only the last x frames."""
typ, val, tb = sys.exc_info()
# res = ['Traceback (most recent call last):\n']
- res = [] # type: List[unicode]
+ res = [] # type: List[str]
tbres = traceback.format_tb(tb)
res += tbres[-x:]
res += traceback.format_exception_only(typ, val)
@@ -610,7 +609,7 @@ class PeekableIterator:
def import_object(objname, source=None):
- # type: (str, unicode) -> Any
+ # type: (str, str) -> Any
try:
module, name = objname.rsplit('.', 1)
except ValueError as err:
@@ -630,8 +629,8 @@ def import_object(objname, source=None):
def encode_uri(uri):
- # type: (unicode) -> unicode
- split = list(urlsplit(uri)) # type: List[unicode]
+ # type: (str) -> str
+ split = list(urlsplit(uri))
split[1] = split[1].encode('idna').decode('ascii')
split[2] = quote_plus(split[2].encode('utf-8'), '/')
query = list((q, v.encode('utf-8')) for (q, v) in parse_qsl(split[3]))
@@ -640,7 +639,7 @@ def encode_uri(uri):
def display_chunk(chunk):
- # type: (Any) -> unicode
+ # type: (Any) -> str
if isinstance(chunk, (list, tuple)):
if len(chunk) == 1:
return text_type(chunk[0])
@@ -649,7 +648,7 @@ def display_chunk(chunk):
def old_status_iterator(iterable, summary, color="darkgreen", stringify_func=display_chunk):
- # type: (Iterable, unicode, str, Callable[[Any], unicode]) -> Iterator
+ # type: (Iterable, str, str, Callable[[Any], str]) -> Iterator
l = 0
for item in iterable:
if l == 0:
@@ -665,10 +664,9 @@ def old_status_iterator(iterable, summary, color="darkgreen", stringify_func=dis
# new version with progress info
def status_iterator(iterable, summary, color="darkgreen", length=0, verbosity=0,
stringify_func=display_chunk):
- # type: (Iterable, unicode, str, int, int, Callable[[Any], unicode]) -> Iterable # NOQA
+ # type: (Iterable, str, str, int, int, Callable[[Any], str]) -> Iterable
if length == 0:
- for item in old_status_iterator(iterable, summary, color, stringify_func):
- yield item
+ yield from old_status_iterator(iterable, summary, color, stringify_func)
return
l = 0
summary = bold(summary)
@@ -686,7 +684,7 @@ def status_iterator(iterable, summary, color="darkgreen", length=0, verbosity=0,
def epoch_to_rfc1123(epoch):
- # type: (float) -> unicode
+ # type: (float) -> str
"""Convert datetime format epoch to RFC1123."""
from babel.dates import format_datetime
@@ -719,7 +717,7 @@ def xmlname_checker():
]
def convert(entries, splitter=u'|'):
- # type: (Any, unicode) -> unicode
+ # type: (Any, str) -> str
results = []
for entry in entries:
if isinstance(entry, list):
diff --git a/sphinx/util/compat.py b/sphinx/util/compat.py
index 548a57ef5..e1af4612a 100644
--- a/sphinx/util/compat.py
+++ b/sphinx/util/compat.py
@@ -26,7 +26,6 @@ if False:
from typing import Any, Dict # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
- from sphinx.util.typing import unicode # NOQA
def deprecate_source_parsers(app, config):
@@ -71,7 +70,7 @@ class IndexEntriesMigrator(SphinxTransform):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_transform(IndexEntriesMigrator)
app.connect('config-inited', deprecate_source_parsers)
app.connect('builder-inited', register_application_for_autosummary)
diff --git a/sphinx/util/console.py b/sphinx/util/console.py
index 2ce2bda76..a194441f1 100644
--- a/sphinx/util/console.py
+++ b/sphinx/util/console.py
@@ -22,7 +22,6 @@ except ImportError:
if False:
# For type annotation
from typing import Dict # NOQA
- from sphinx.util.typing import unicode # NOQA
_ansi_re = re.compile('\x1b\\[(\\d\\d;){0,2}\\d\\dm')
@@ -89,7 +88,7 @@ def coloron():
def colorize(name, text, input_mode=False):
- # type: (str, unicode, bool) -> unicode
+ # type: (str, str, bool) -> str
def escseq(name):
# Wrap escape sequence with ``\1`` and ``\2`` to let readline know
# it is non-printable characters
@@ -113,7 +112,7 @@ def strip_colors(s):
def create_color_func(name):
# type: (str) -> None
def inner(text):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return colorize(name, text)
globals()[name] = inner
diff --git a/sphinx/util/docfields.py b/sphinx/util/docfields.py
index 277e497f3..34b5e8b9d 100644
--- a/sphinx/util/docfields.py
+++ b/sphinx/util/docfields.py
@@ -22,7 +22,7 @@ if False:
from typing import Any, Dict, Type, Union # NOQA
from sphinx.domains import Domain # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import TextlikeNode, unicode # NOQA
+ from sphinx.util.typing import TextlikeNode # NOQA
def _is_single_paragraph(node):
@@ -57,7 +57,7 @@ class Field:
def __init__(self, name, names=(), label=None, has_arg=True, rolename=None,
bodyrolename=None):
- # type: (unicode, Tuple[unicode, ...], unicode, bool, unicode, unicode) -> None
+ # type: (str, Tuple[str, ...], str, bool, str, str) -> None
self.name = name
self.names = names
self.label = label
@@ -66,9 +66,9 @@ class Field:
self.bodyrolename = bodyrolename
def make_xref(self,
- rolename, # type: unicode
- domain, # type: unicode
- target, # type: unicode
+ rolename, # type: str
+ domain, # type: str
+ target, # type: str
innernode=addnodes.literal_emphasis, # type: Type[TextlikeNode]
contnode=None, # type: nodes.Node
env=None, # type: BuildEnvironment
@@ -84,9 +84,9 @@ class Field:
return refnode
def make_xrefs(self,
- rolename, # type: unicode
- domain, # type: unicode
- target, # type: unicode
+ rolename, # type: str
+ domain, # type: str
+ target, # type: str
innernode=addnodes.literal_emphasis, # type: Type[TextlikeNode]
contnode=None, # type: nodes.Node
env=None, # type: BuildEnvironment
@@ -95,12 +95,12 @@ class Field:
return [self.make_xref(rolename, domain, target, innernode, contnode, env)]
def make_entry(self, fieldarg, content):
- # type: (unicode, List[nodes.Node]) -> Tuple[unicode, List[nodes.Node]]
+ # type: (str, List[nodes.Node]) -> Tuple[str, List[nodes.Node]]
return (fieldarg, content)
def make_field(self,
- types, # type: Dict[unicode, List[nodes.Node]]
- domain, # type: unicode
+ types, # type: Dict[str, List[nodes.Node]]
+ domain, # type: str
item, # type: Tuple
env=None, # type: BuildEnvironment
):
@@ -140,13 +140,13 @@ class GroupedField(Field):
def __init__(self, name, names=(), label=None, rolename=None,
can_collapse=False):
- # type: (unicode, Tuple[unicode, ...], unicode, unicode, bool) -> None
+ # type: (str, Tuple[str, ...], str, str, bool) -> None
super(GroupedField, self).__init__(name, names, label, True, rolename)
self.can_collapse = can_collapse
def make_field(self,
- types, # type: Dict[unicode, List[nodes.Node]]
- domain, # type: unicode
+ types, # type: Dict[str, List[nodes.Node]]
+ domain, # type: str
items, # type: Tuple
env=None, # type: BuildEnvironment
):
@@ -193,20 +193,20 @@ class TypedField(GroupedField):
def __init__(self, name, names=(), typenames=(), label=None,
rolename=None, typerolename=None, can_collapse=False):
- # type: (unicode, Tuple[unicode, ...], Tuple[unicode, ...], unicode, unicode, unicode, bool) -> None # NOQA
+ # type: (str, Tuple[str, ...], Tuple[str, ...], str, str, str, bool) -> None
super(TypedField, self).__init__(name, names, label, rolename, can_collapse)
self.typenames = typenames
self.typerolename = typerolename
def make_field(self,
- types, # type: Dict[unicode, List[nodes.Node]]
- domain, # type: unicode
+ types, # type: Dict[str, List[nodes.Node]]
+ domain, # type: str
items, # type: Tuple
env=None, # type: BuildEnvironment
):
# type: (...) -> nodes.field
def handle_item(fieldarg, content):
- # type: (unicode, unicode) -> nodes.paragraph
+ # type: (str, str) -> nodes.paragraph
par = nodes.paragraph()
par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
addnodes.literal_strong, env=env))
@@ -244,7 +244,7 @@ class DocFieldTransformer:
Transforms field lists in "doc field" syntax into better-looking
equivalents, using the field type definitions given on a domain.
"""
- typemap = None # type: Dict[unicode, Tuple[Field, bool]]
+ typemap = None # type: Dict[str, Tuple[Field, bool]]
def __init__(self, directive):
# type: (Any) -> None
@@ -255,7 +255,7 @@ class DocFieldTransformer:
self.typemap = directive._doc_field_type_map
def preprocess_fieldtypes(self, types):
- # type: (List[Field]) -> Dict[unicode, Tuple[Field, bool]]
+ # type: (List[Field]) -> Dict[str, Tuple[Field, bool]]
typemap = {}
for fieldtype in types:
for name in fieldtype.names:
@@ -280,8 +280,8 @@ class DocFieldTransformer:
typemap = self.typemap
entries = [] # type: List[Union[nodes.field, Tuple[Field, Any]]]
- groupindices = {} # type: Dict[unicode, int]
- types = {} # type: Dict[unicode, Dict]
+ groupindices = {} # type: Dict[str, int]
+ types = {} # type: Dict[str, Dict]
# step 1: traverse all fields and collect field types and content
for field in cast(List[nodes.field], node):
diff --git a/sphinx/util/docstrings.py b/sphinx/util/docstrings.py
index 5cd12a177..8cd4e5fc5 100644
--- a/sphinx/util/docstrings.py
+++ b/sphinx/util/docstrings.py
@@ -14,11 +14,10 @@ import sys
if False:
# For type annotation
from typing import List # NOQA
- from sphinx.util.typing import unicode # NOQA
def prepare_docstring(s, ignore=1):
- # type: (unicode, int) -> List[unicode]
+ # type: (str, int) -> List[str]
"""Convert a docstring into lines of parseable reST. Remove common leading
indentation, where the indentation of a given number of lines (usually just
one) is ignored.
@@ -52,7 +51,7 @@ def prepare_docstring(s, ignore=1):
def prepare_commentdoc(s):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
"""Extract documentation comment lines (starting with #:) and return them
as a list of lines. Returns an empty list if there is no documentation.
"""
diff --git a/sphinx/util/docutils.py b/sphinx/util/docutils.py
index 1ec01ddc4..3e36565f7 100644
--- a/sphinx/util/docutils.py
+++ b/sphinx/util/docutils.py
@@ -44,7 +44,7 @@ if False:
from sphinx.config import Config # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.io import SphinxFileInput # NOQA
- from sphinx.util.typing import RoleFunction, unicode # NOQA
+ from sphinx.util.typing import RoleFunction # NOQA
__version_info__ = tuple(LooseVersion(docutils.__version__).version)
@@ -70,13 +70,13 @@ def docutils_namespace():
def is_directive_registered(name):
- # type: (unicode) -> bool
+ # type: (str) -> bool
"""Check the *name* directive is already registered."""
return name in directives._directives # type: ignore
def register_directive(name, directive):
- # type: (unicode, Type[Directive]) -> None
+ # type: (str, Type[Directive]) -> None
"""Register a directive to docutils.
This modifies global state of docutils. So it is better to use this
@@ -86,13 +86,13 @@ def register_directive(name, directive):
def is_role_registered(name):
- # type: (unicode) -> bool
+ # type: (str) -> bool
"""Check the *name* role is already registered."""
return name in roles._roles # type: ignore
def register_role(name, role):
- # type: (unicode, RoleFunction) -> None
+ # type: (str, RoleFunction) -> None
"""Register a role to docutils.
This modifies global state of docutils. So it is better to use this
@@ -102,7 +102,7 @@ def register_role(name, role):
def unregister_role(name):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Unregister a role from docutils."""
roles._roles.pop(name, None) # type: ignore
@@ -149,7 +149,7 @@ def patched_get_language():
from docutils.languages import get_language
def patched_get_language(language_code, reporter=None):
- # type: (unicode, Reporter) -> Any
+ # type: (str, Reporter) -> Any
return get_language(language_code)
try:
@@ -162,7 +162,7 @@ def patched_get_language():
@contextmanager
def using_user_docutils_conf(confdir):
- # type: (unicode) -> Generator[None, None, None]
+ # type: (str) -> Generator[None, None, None]
"""Let docutils know the location of ``docutils.conf`` for Sphinx."""
try:
docutilsconfig = os.environ.get('DOCUTILSCONFIG', None)
@@ -179,7 +179,7 @@ def using_user_docutils_conf(confdir):
@contextmanager
def patch_docutils(confdir=None):
- # type: (unicode) -> Generator[None, None, None]
+ # type: (str) -> Generator[None, None, None]
"""Patch to docutils temporarily."""
with patched_get_language(), using_user_docutils_conf(confdir):
yield
@@ -204,7 +204,7 @@ class sphinx_domains:
self.enable()
def __exit__(self, type, value, traceback):
- # type: (unicode, unicode, unicode) -> None
+ # type: (str, str, str) -> None
self.disable()
def enable(self):
@@ -221,7 +221,7 @@ class sphinx_domains:
roles.role = self.role_func
def lookup_domain_element(self, type, name):
- # type: (unicode, unicode) -> Any
+ # type: (str, str) -> Any
"""Lookup a markup element (directive or role), given its name which can
be a full name (with domain).
"""
@@ -250,14 +250,14 @@ class sphinx_domains:
raise ElementLookupError
def lookup_directive(self, name, lang_module, document):
- # type: (unicode, ModuleType, nodes.document) -> Tuple[Type[Directive], List[nodes.system_message]] # NOQA
+ # type: (str, ModuleType, nodes.document) -> Tuple[Type[Directive], List[nodes.system_message]] # NOQA
try:
return self.lookup_domain_element('directive', name)
except ElementLookupError:
return self.directive_func(name, lang_module, document)
def lookup_role(self, name, lang_module, lineno, reporter):
- # type: (unicode, ModuleType, int, Reporter) -> Tuple[RoleFunction, List[nodes.system_message]] # NOQA
+ # type: (str, ModuleType, int, Reporter) -> Tuple[RoleFunction, List[nodes.system_message]] # NOQA
try:
return self.lookup_domain_element('role', name)
except ElementLookupError:
@@ -266,7 +266,7 @@ class sphinx_domains:
class WarningStream:
def write(self, text):
- # type: (unicode) -> None
+ # type: (str) -> None
matched = report_re.search(text)
if not matched:
logger.warning(text.rstrip("\r\n"))
@@ -287,7 +287,7 @@ class LoggingReporter(Reporter):
def __init__(self, source, report_level=Reporter.WARNING_LEVEL,
halt_level=Reporter.SEVERE_LEVEL, debug=False,
error_handler='backslashreplace'):
- # type: (unicode, int, int, bool, unicode) -> None
+ # type: (str, int, int, bool, str) -> None
stream = cast(IO, WarningStream())
super(LoggingReporter, self).__init__(source, report_level, halt_level,
stream, debug, error_handler=error_handler)
@@ -352,7 +352,7 @@ class SphinxFileOutput(FileOutput):
super(SphinxFileOutput, self).__init__(**kwargs)
def write(self, data):
- # type: (unicode) -> unicode
+ # type: (str) -> str
if (self.destination_path and self.autoclose and 'b' not in self.mode and
self.overwrite_if_changed and os.path.exists(self.destination_path)):
with open(self.destination_path, encoding=self.encoding) as f:
@@ -408,7 +408,7 @@ __document_cache__ = None # type: nodes.document
def new_document(source_path, settings=None):
- # type: (unicode, Any) -> nodes.document
+ # type: (str, Any) -> nodes.document
"""Return a new empty document object. This is an alternative of docutils'.
This is a simple wrapper for ``docutils.utils.new_document()``. It
diff --git a/sphinx/util/fileutil.py b/sphinx/util/fileutil.py
index df6bd051b..ecce46c76 100644
--- a/sphinx/util/fileutil.py
+++ b/sphinx/util/fileutil.py
@@ -22,11 +22,10 @@ if False:
from typing import Callable, Dict, Union # NOQA
from sphinx.util.matching import Matcher # NOQA
from sphinx.util.template import BaseRenderer # NOQA
- from sphinx.util.typing import unicode # NOQA
def copy_asset_file(source, destination, context=None, renderer=None):
- # type: (unicode, unicode, Dict, BaseRenderer) -> None
+ # type: (str, str, Dict, BaseRenderer) -> None
"""Copy an asset file to destination.
On copying, it expands the template variables if context argument is given and
@@ -59,7 +58,7 @@ def copy_asset_file(source, destination, context=None, renderer=None):
def copy_asset(source, destination, excluded=lambda path: False, context=None, renderer=None):
- # type: (unicode, unicode, Union[Callable[[unicode], bool], Matcher], Dict, BaseRenderer) -> None # NOQA
+ # type: (str, str, Union[Callable[[str], bool], Matcher], Dict, BaseRenderer) -> None
"""Copy asset files to destination recursively.
On copying, it expands the template variables if context argument is given and
diff --git a/sphinx/util/i18n.py b/sphinx/util/i18n.py
index fe8b66a2c..56591add8 100644
--- a/sphinx/util/i18n.py
+++ b/sphinx/util/i18n.py
@@ -34,7 +34,6 @@ if False:
# For type annotation
from typing import Callable, List, Set # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
LocaleFileInfoBase = namedtuple('CatalogInfo', 'base_dir,domain,charset')
@@ -43,22 +42,22 @@ class CatalogInfo(LocaleFileInfoBase):
@property
def po_file(self):
- # type: () -> unicode
+ # type: () -> str
return self.domain + '.po'
@property
def mo_file(self):
- # type: () -> unicode
+ # type: () -> str
return self.domain + '.mo'
@property
def po_path(self):
- # type: () -> unicode
+ # type: () -> str
return path.join(self.base_dir, self.po_file)
@property
def mo_path(self):
- # type: () -> unicode
+ # type: () -> str
return path.join(self.base_dir, self.mo_file)
def is_outdated(self):
@@ -68,7 +67,7 @@ class CatalogInfo(LocaleFileInfoBase):
path.getmtime(self.mo_path) < path.getmtime(self.po_path))
def write_mo(self, locale):
- # type: (unicode) -> None
+ # type: (str) -> None
with open(self.po_path, encoding=self.charset) as file_po:
try:
po = read_po(file_po, locale)
@@ -84,7 +83,7 @@ class CatalogInfo(LocaleFileInfoBase):
def find_catalog(docname, compaction):
- # type: (unicode, bool) -> unicode
+ # type: (str, bool) -> str
if compaction:
ret = docname.split(SEP, 1)[0]
else:
@@ -94,7 +93,7 @@ def find_catalog(docname, compaction):
def find_catalog_files(docname, srcdir, locale_dirs, lang, compaction):
- # type: (unicode, unicode, List[unicode], unicode, bool) -> List[unicode]
+ # type: (str, str, List[str], str, bool) -> List[str]
if not(lang and locale_dirs):
return []
@@ -108,7 +107,7 @@ def find_catalog_files(docname, srcdir, locale_dirs, lang, compaction):
def find_catalog_source_files(locale_dirs, locale, domains=None, gettext_compact=None,
charset='utf-8', force_all=False,
excluded=Matcher([])):
- # type: (List[unicode], unicode, List[unicode], bool, unicode, bool, Matcher) -> Set[CatalogInfo] # NOQA
+ # type: (List[str], str, List[str], bool, str, bool, Matcher) -> Set[CatalogInfo]
"""
:param list locale_dirs:
list of path as `['locale_dir1', 'locale_dir2', ...]` to find
@@ -199,7 +198,7 @@ date_format_re = re.compile('(%s)' % '|'.join(date_format_mappings))
def babel_format_date(date, format, locale, formatter=babel.dates.format_date):
- # type: (datetime, unicode, unicode, Callable) -> unicode
+ # type: (datetime, str, str, Callable) -> str
if locale is None:
locale = 'en'
@@ -220,7 +219,7 @@ def babel_format_date(date, format, locale, formatter=babel.dates.format_date):
def format_date(format, date=None, language=None):
- # type: (str, datetime, unicode) -> unicode
+ # type: (str, datetime, str) -> str
if date is None:
# If time is not specified, try to use $SOURCE_DATE_EPOCH variable
# See https://wiki.debian.org/ReproducibleBuilds/TimestampsProposal
@@ -255,7 +254,7 @@ def format_date(format, date=None, language=None):
def get_image_filename_for_language(filename, env):
- # type: (unicode, BuildEnvironment) -> unicode
+ # type: (str, BuildEnvironment) -> str
if not env.config.language:
return filename
@@ -275,7 +274,7 @@ def get_image_filename_for_language(filename, env):
def search_image_for_language(filename, env):
- # type: (unicode, BuildEnvironment) -> unicode
+ # type: (str, BuildEnvironment) -> str
if not env.config.language:
return filename
diff --git a/sphinx/util/images.py b/sphinx/util/images.py
index fe6f15046..5b3a27d77 100644
--- a/sphinx/util/images.py
+++ b/sphinx/util/images.py
@@ -34,7 +34,6 @@ except ImportError:
if False:
# For type annotation
from typing import Dict, IO, List, Tuple # NOQA
- from sphinx.util.typing import unicode # NOQA
mime_suffixes = OrderedDict([
('.gif', 'image/gif'),
@@ -43,7 +42,7 @@ mime_suffixes = OrderedDict([
('.pdf', 'application/pdf'),
('.svg', 'image/svg+xml'),
('.svgz', 'image/svg+xml'),
-]) # type: Dict[unicode, unicode]
+])
DataURI = NamedTuple('DataURI', [('mimetype', text_type),
('charset', text_type),
@@ -51,7 +50,7 @@ DataURI = NamedTuple('DataURI', [('mimetype', text_type),
def get_image_size(filename):
- # type: (unicode) -> Tuple[int, int]
+ # type: (str) -> Tuple[int, int]
try:
size = imagesize.get(filename)
if size[0] == -1:
@@ -71,7 +70,7 @@ def get_image_size(filename):
def guess_mimetype_for_stream(stream, default=None):
- # type: (IO, unicode) -> unicode
+ # type: (IO, str) -> str
imgtype = imghdr.what(stream) # type: ignore
if imgtype:
return 'image/' + imgtype
@@ -80,7 +79,7 @@ def guess_mimetype_for_stream(stream, default=None):
def guess_mimetype(filename='', content=None, default=None):
- # type: (unicode, bytes, unicode) -> unicode
+ # type: (str, bytes, str) -> str
_, ext = path.splitext(filename.lower())
if ext in mime_suffixes:
return mime_suffixes[ext]
@@ -96,7 +95,7 @@ def guess_mimetype(filename='', content=None, default=None):
def get_image_extension(mimetype):
- # type: (unicode) -> unicode
+ # type: (str) -> str
for ext, _mimetype in mime_suffixes.items():
if mimetype == _mimetype:
return ext
@@ -105,7 +104,7 @@ def get_image_extension(mimetype):
def parse_data_uri(uri):
- # type: (unicode) -> DataURI
+ # type: (str) -> DataURI
if not uri.startswith('data:'):
return None
@@ -127,7 +126,7 @@ def parse_data_uri(uri):
def test_svg(h, f):
- # type: (bytes, IO) -> unicode
+ # type: (bytes, IO) -> str
"""An additional imghdr library helper; test the header is SVG's or not."""
try:
if '<svg' in h.decode('utf-8').lower():
diff --git a/sphinx/util/inspect.py b/sphinx/util/inspect.py
index eeedf77ab..8a1181f1a 100644
--- a/sphinx/util/inspect.py
+++ b/sphinx/util/inspect.py
@@ -16,17 +16,17 @@ import inspect
import re
import sys
import typing
+import warnings
from functools import partial
+from io import StringIO
-from six import StringIO
-
+from sphinx.deprecation import RemovedInSphinx30Warning
from sphinx.util import logging
from sphinx.util.pycompat import NoneType
if False:
# For type annotation
from typing import Any, Callable, Mapping, List, Tuple, Type # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -129,17 +129,14 @@ def isclassmethod(obj):
"""Check if the object is classmethod."""
if isinstance(obj, classmethod):
return True
- elif inspect.ismethod(obj):
- if getattr(obj, 'im_self', None): # py2
- return True
- elif getattr(obj, '__self__', None): # py3
- return True
+ elif inspect.ismethod(obj) and obj.__self__ is not None:
+ return True
return False
def isstaticmethod(obj, cls=None, name=None):
- # type: (Any, Any, unicode) -> bool
+ # type: (Any, Any, str) -> bool
"""Check if the object is staticmethod."""
if isinstance(obj, staticmethod):
return True
@@ -180,7 +177,7 @@ def isbuiltin(obj):
def safe_getattr(obj, name, *defargs):
- # type: (Any, unicode, unicode) -> object
+ # type: (Any, str, str) -> object
"""A getattr() that turns all exceptions into AttributeErrors."""
try:
return getattr(obj, name, *defargs)
@@ -203,9 +200,9 @@ def safe_getattr(obj, name, *defargs):
def safe_getmembers(object, predicate=None, attr_getter=safe_getattr):
- # type: (Any, Callable[[unicode], bool], Callable) -> List[Tuple[unicode, Any]]
+ # type: (Any, Callable[[str], bool], Callable) -> List[Tuple[str, Any]]
"""A version of inspect.getmembers() that uses safe_getattr()."""
- results = [] # type: List[Tuple[unicode, Any]]
+ results = [] # type: List[Tuple[str, Any]]
for key in dir(object):
try:
value = attr_getter(object, key, None)
@@ -218,7 +215,7 @@ def safe_getmembers(object, predicate=None, attr_getter=safe_getattr):
def object_description(object):
- # type: (Any) -> unicode
+ # type: (Any) -> str
"""A repr() implementation that returns text safe to use in reST context."""
if isinstance(object, dict):
try:
@@ -256,7 +253,7 @@ def object_description(object):
def is_builtin_class_method(obj, attr_name):
- # type: (Any, unicode) -> bool
+ # type: (Any, str) -> bool
"""If attr_name is implemented at builtin class, return True.
>>> is_builtin_class_method(int, '__init__')
@@ -289,6 +286,9 @@ class Parameter:
self.default = default
self.annotation = self.empty
+ warnings.warn('sphinx.util.inspect.Parameter is deprecated.',
+ RemovedInSphinx30Warning, stacklevel=2)
+
class Signature:
"""The Signature object represents the call signature of a callable object and
@@ -360,7 +360,7 @@ class Signature:
return None
def format_args(self):
- # type: () -> unicode
+ # type: () -> str
args = []
last_kind = None
for i, param in enumerate(self.parameters.values()):
@@ -577,7 +577,7 @@ class Signature:
def getdoc(obj, attrgetter=safe_getattr, allow_inherited=False):
- # type: (Any, Callable, bool) -> unicode
+ # type: (Any, Callable, bool) -> str
"""Get the docstring for the object.
This tries to obtain the docstring for some kind of objects additionally:
diff --git a/sphinx/util/inventory.py b/sphinx/util/inventory.py
index cd0555d9d..31daa3f37 100644
--- a/sphinx/util/inventory.py
+++ b/sphinx/util/inventory.py
@@ -21,7 +21,6 @@ if False:
from typing import Callable, Dict, IO, Iterator, Tuple # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import unicode # NOQA
Inventory = Dict[text_type, Dict[text_type, Tuple[text_type, text_type, text_type, text_type]]] # NOQA
@@ -50,7 +49,7 @@ class InventoryFileReader:
self.buffer += chunk
def readline(self):
- # type: () -> unicode
+ # type: () -> str
pos = self.buffer.find(b'\n')
if pos != -1:
line = self.buffer[:pos].decode('utf-8')
@@ -65,7 +64,7 @@ class InventoryFileReader:
return line
def readlines(self):
- # type: () -> Iterator[unicode]
+ # type: () -> Iterator[str]
while not self.eof:
line = self.readline()
if line:
@@ -81,7 +80,7 @@ class InventoryFileReader:
yield decompressor.flush()
def read_compressed_lines(self):
- # type: () -> Iterator[unicode]
+ # type: () -> Iterator[str]
buf = b''
for chunk in self.read_compressed_chunks():
buf += chunk
@@ -95,7 +94,7 @@ class InventoryFileReader:
class InventoryFile:
@classmethod
def load(cls, stream, uri, joinfunc):
- # type: (IO, unicode, Callable) -> Inventory
+ # type: (IO, str, Callable) -> Inventory
reader = InventoryFileReader(stream)
line = reader.readline().rstrip()
if line == '# Sphinx inventory version 1':
@@ -107,7 +106,7 @@ class InventoryFile:
@classmethod
def load_v1(cls, stream, uri, join):
- # type: (InventoryFileReader, unicode, Callable) -> Inventory
+ # type: (InventoryFileReader, str, Callable) -> Inventory
invdata = {} # type: Inventory
projname = stream.readline().rstrip()[11:]
version = stream.readline().rstrip()[11:]
@@ -126,7 +125,7 @@ class InventoryFile:
@classmethod
def load_v2(cls, stream, uri, join):
- # type: (InventoryFileReader, unicode, Callable) -> Inventory
+ # type: (InventoryFileReader, str, Callable) -> Inventory
invdata = {} # type: Inventory
projname = stream.readline().rstrip()[11:]
version = stream.readline().rstrip()[11:]
@@ -156,9 +155,9 @@ class InventoryFile:
@classmethod
def dump(cls, filename, env, builder):
- # type: (unicode, BuildEnvironment, Builder) -> None
+ # type: (str, BuildEnvironment, Builder) -> None
def escape(string):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return re.sub("\\s+", " ", string)
with open(os.path.join(filename), 'wb') as f:
diff --git a/sphinx/util/jsdump.py b/sphinx/util/jsdump.py
index 39ed3ea6b..52522f255 100644
--- a/sphinx/util/jsdump.py
+++ b/sphinx/util/jsdump.py
@@ -12,12 +12,9 @@
import re
-from six import integer_types
-
if False:
# For type annotation
from typing import Any, Dict, IO, List, Match, Union # NOQA
- from sphinx.util.typing import unicode # NOQA
_str_re = re.compile(r'"(\\\\|\\"|[^"])*"')
_int_re = re.compile(r'\d+')
@@ -42,7 +39,7 @@ ESCAPED = re.compile(r'\\u.{4}|\\.')
def encode_string(s):
# type: (str) -> str
def replace(match):
- # type: (Match) -> unicode
+ # type: (Match) -> str
s = match.group(0)
try:
return ESCAPE_DICT[s]
@@ -95,7 +92,7 @@ def dumps(obj, key=False):
return 'null'
elif obj is True or obj is False:
return obj and 'true' or 'false'
- elif isinstance(obj, integer_types + (float,)): # type: ignore
+ elif isinstance(obj, (int, float)):
return str(obj)
elif isinstance(obj, dict):
return '{%s}' % ','.join(sorted('%s:%s' % (
diff --git a/sphinx/util/jsonimpl.py b/sphinx/util/jsonimpl.py
index 557bc122f..ba47cf29f 100644
--- a/sphinx/util/jsonimpl.py
+++ b/sphinx/util/jsonimpl.py
@@ -10,20 +10,19 @@
"""
import json
+from collections import UserString
from six import text_type
-from six.moves import UserString
if False:
# For type annotation
from typing import Any, IO # NOQA
- from sphinx.util.typing import unicode # NOQA
class SphinxJSONEncoder(json.JSONEncoder):
"""JSONEncoder subclass that forces translation proxies."""
def default(self, obj):
- # type: (Any) -> unicode
+ # type: (Any) -> str
if isinstance(obj, UserString):
return text_type(obj)
return super(SphinxJSONEncoder, self).default(obj)
@@ -36,7 +35,7 @@ def dump(obj, fp, *args, **kwds):
def dumps(obj, *args, **kwds):
- # type: (Any, Any, Any) -> unicode
+ # type: (Any, Any, Any) -> str
kwds['cls'] = SphinxJSONEncoder
return json.dumps(obj, *args, **kwds)
diff --git a/sphinx/util/logging.py b/sphinx/util/logging.py
index 4bde6aa9b..ad4978022 100644
--- a/sphinx/util/logging.py
+++ b/sphinx/util/logging.py
@@ -26,7 +26,6 @@ if False:
from typing import Any, Dict, Generator, IO, List, Tuple, Type, Union # NOQA
from docutils import nodes # NOQA
from sphinx.application import Sphinx # NOQA
- from sphinx.util.typing import unicode # NOQA
NAMESPACE = 'sphinx'
@@ -50,12 +49,12 @@ VERBOSITY_MAP.update({
2: logging.DEBUG,
})
-COLOR_MAP = defaultdict(lambda: 'blue') # type: Dict[int, unicode]
-COLOR_MAP.update({
- logging.ERROR: 'darkred',
- logging.WARNING: 'red',
- logging.DEBUG: 'darkgray',
-})
+COLOR_MAP = defaultdict(lambda: 'blue',
+ {
+ logging.ERROR: 'darkred',
+ logging.WARNING: 'red',
+ logging.DEBUG: 'darkgray'
+ })
def getLogger(name):
@@ -125,7 +124,7 @@ class SphinxLoggerAdapter(logging.LoggerAdapter):
"""LoggerAdapter allowing ``type`` and ``subtype`` keywords."""
def log(self, level, msg, *args, **kwargs): # type: ignore
- # type: (Union[int, str], unicode, Any, Any) -> None
+ # type: (Union[int, str], str, Any, Any) -> None
if isinstance(level, int):
super(SphinxLoggerAdapter, self).log(level, msg, *args, **kwargs)
else:
@@ -133,11 +132,11 @@ class SphinxLoggerAdapter(logging.LoggerAdapter):
super(SphinxLoggerAdapter, self).log(levelno, msg, *args, **kwargs)
def verbose(self, msg, *args, **kwargs):
- # type: (unicode, Any, Any) -> None
+ # type: (str, Any, Any) -> None
self.log(VERBOSE, msg, *args, **kwargs)
def process(self, msg, kwargs): # type: ignore
- # type: (unicode, Dict) -> Tuple[unicode, Dict]
+ # type: (str, Dict) -> Tuple[str, Dict]
extra = kwargs.setdefault('extra', {})
if 'type' in kwargs:
extra['type'] = kwargs.pop('type')
@@ -290,7 +289,7 @@ def skip_warningiserror(skip=True):
@contextmanager
def prefixed_warnings(prefix):
- # type: (unicode) -> Generator
+ # type: (str) -> Generator
"""Prepend prefix to all records for a while.
For example::
@@ -361,7 +360,7 @@ class InfoFilter(logging.Filter):
def is_suppressed_warning(type, subtype, suppress_warnings):
- # type: (unicode, unicode, List[unicode]) -> bool
+ # type: (str, str, List[str]) -> bool
"""Check the warning is suppressed or not."""
if type is None:
return False
@@ -447,7 +446,7 @@ class MessagePrefixFilter(logging.Filter):
"""Prepend prefix to all records."""
def __init__(self, prefix):
- # type: (unicode) -> None
+ # type: (str) -> None
self.prefix = prefix
super(MessagePrefixFilter, self).__init__()
@@ -539,7 +538,7 @@ class SafeEncodingWriter:
self.encoding = getattr(stream, 'encoding', 'ascii') or 'ascii'
def write(self, data):
- # type: (unicode) -> None
+ # type: (str) -> None
try:
self.stream.write(data)
except UnicodeEncodeError:
@@ -560,7 +559,7 @@ class LastMessagesWriter:
self.app = app
def write(self, data):
- # type: (unicode) -> None
+ # type: (str) -> None
self.app.messagelog.append(data)
diff --git a/sphinx/util/matching.py b/sphinx/util/matching.py
index 7dd8100b4..3762fa89e 100644
--- a/sphinx/util/matching.py
+++ b/sphinx/util/matching.py
@@ -14,18 +14,17 @@ import re
if False:
# For type annotation
from typing import Callable, Dict, List, Match, Pattern # NOQA
- from sphinx.util.typing import unicode # NOQA
def _translate_pattern(pat):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Translate a shell-style glob pattern to a regular expression.
Adapted from the fnmatch module, but enhanced so that single stars don't
match slashes.
"""
i, n = 0, len(pat)
- res = '' # type: unicode
+ res = '' # type: str
while i < n:
c = pat[i]
i += 1
@@ -65,7 +64,7 @@ def _translate_pattern(pat):
def compile_matchers(patterns):
- # type: (List[unicode]) -> List[Callable[[unicode], Match[unicode]]]
+ # type: (List[str]) -> List[Callable[[str], Match[str]]]
return [re.compile(_translate_pattern(pat)).match for pat in patterns]
@@ -77,27 +76,27 @@ class Matcher:
"""
def __init__(self, patterns):
- # type: (List[unicode]) -> None
+ # type: (List[str]) -> None
expanded = [pat[3:] for pat in patterns if pat.startswith('**/')]
self.patterns = compile_matchers(patterns + expanded)
def __call__(self, string):
- # type: (unicode) -> bool
+ # type: (str) -> bool
return self.match(string)
def match(self, string):
- # type: (unicode) -> bool
+ # type: (str) -> bool
return any(pat(string) for pat in self.patterns)
DOTFILES = Matcher(['**/.*'])
-_pat_cache = {} # type: Dict[unicode, Pattern]
+_pat_cache = {} # type: Dict[str, Pattern]
def patmatch(name, pat):
- # type: (unicode, unicode) -> Match[unicode]
+ # type: (str, str) -> Match[str]
"""Return if name matches pat. Adapted from fnmatch module."""
if pat not in _pat_cache:
_pat_cache[pat] = re.compile(_translate_pattern(pat))
@@ -105,7 +104,7 @@ def patmatch(name, pat):
def patfilter(names, pat):
- # type: (List[unicode], unicode) -> List[unicode]
+ # type: (List[str], str) -> List[str]
"""Return the subset of the list NAMES that match PAT.
Adapted from fnmatch module.
diff --git a/sphinx/util/math.py b/sphinx/util/math.py
index 7057757a5..d442781e7 100644
--- a/sphinx/util/math.py
+++ b/sphinx/util/math.py
@@ -14,11 +14,10 @@ if False:
# For type annotation
from docutils import nodes # NOQA
from sphinx.builders.html import HTMLTranslator # NOQA
- from sphinx.util.typing import unicode # NOQA
def get_node_equation_number(writer, node):
- # type: (HTMLTranslator, nodes.math_block) -> unicode
+ # type: (HTMLTranslator, nodes.math_block) -> str
if writer.builder.config.math_numfig and writer.builder.config.numfig:
figtype = 'displaymath'
if writer.builder.name == 'singlehtml':
@@ -34,9 +33,9 @@ def get_node_equation_number(writer, node):
def wrap_displaymath(text, label, numbering):
- # type: (unicode, unicode, bool) -> unicode
+ # type: (str, str, bool) -> str
def is_equation(part):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return part.strip()
if label is None:
diff --git a/sphinx/util/nodes.py b/sphinx/util/nodes.py
index 39b7cbbe0..e01c1490f 100644
--- a/sphinx/util/nodes.py
+++ b/sphinx/util/nodes.py
@@ -27,7 +27,6 @@ if False:
from docutils.statemachine import StringList # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.utils.tags import Tags # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -104,7 +103,7 @@ def get_full_module_name(node):
def repr_domxml(node, length=80):
- # type: (nodes.Node, Optional[int]) -> unicode
+ # type: (nodes.Node, Optional[int]) -> str
"""
return DOM XML representation of the specified node like:
'<paragraph translatable="False"><inline classes="versionmodified">New in version...'
@@ -254,7 +253,7 @@ META_TYPE_NODES = (
def extract_messages(doctree):
- # type: (nodes.Element) -> Iterable[Tuple[nodes.Element, unicode]]
+ # type: (nodes.Element) -> Iterable[Tuple[nodes.Element, str]]
"""Extract translatable messages from a document tree."""
for node in doctree.traverse(is_translatable): # type: nodes.Element
if isinstance(node, addnodes.translatable):
@@ -282,7 +281,7 @@ def extract_messages(doctree):
def find_source_node(node):
- # type: (nodes.Element) -> unicode
+ # type: (nodes.Element) -> str
for pnode in traverse_parent(node):
if pnode.source:
return pnode.source
@@ -298,7 +297,7 @@ def traverse_parent(node, cls=None):
def traverse_translatable_index(doctree):
- # type: (nodes.Element) -> Iterable[Tuple[nodes.Element, List[unicode]]]
+ # type: (nodes.Element) -> Iterable[Tuple[nodes.Element, List[str]]]
"""Traverse translatable index node from a document tree."""
for node in doctree.traverse(NodeMatcher(addnodes.index, inline=False)): # type: addnodes.index # NOQA
if 'raw_entries' in node:
@@ -309,7 +308,7 @@ def traverse_translatable_index(doctree):
def nested_parse_with_titles(state, content, node):
- # type: (Any, StringList, nodes.Node) -> unicode
+ # type: (Any, StringList, nodes.Node) -> str
"""Version of state.nested_parse() that allows titles and does not require
titles to have the same decoration as the calling document.
@@ -329,7 +328,7 @@ def nested_parse_with_titles(state, content, node):
def clean_astext(node):
- # type: (nodes.Element) -> unicode
+ # type: (nodes.Element) -> str
"""Like node.astext(), but ignore images."""
node = node.deepcopy()
for img in node.traverse(nodes.image):
@@ -340,7 +339,7 @@ def clean_astext(node):
def split_explicit_title(text):
- # type: (unicode) -> Tuple[bool, unicode, unicode]
+ # type: (str) -> Tuple[bool, str, str]
"""Split role content into title and target, if given."""
match = explicit_title_re.match(text)
if match:
@@ -354,10 +353,10 @@ indextypes = [
def process_index_entry(entry, targetid):
- # type: (unicode, unicode) -> List[Tuple[unicode, unicode, unicode, unicode, unicode]]
+ # type: (str, str) -> List[Tuple[str, str, str, str, str]]
from sphinx.domains.python import pairindextypes
- indexentries = [] # type: List[Tuple[unicode, unicode, unicode, unicode, unicode]]
+ indexentries = [] # type: List[Tuple[str, str, str, str, str]]
entry = entry.strip()
oentry = entry
main = ''
@@ -393,7 +392,7 @@ def process_index_entry(entry, targetid):
def inline_all_toctrees(builder, docnameset, docname, tree, colorfunc, traversed):
- # type: (Builder, Set[unicode], unicode, nodes.document, Callable, List[unicode]) -> nodes.document # NOQA
+ # type: (Builder, Set[str], str, nodes.document, Callable, List[str]) -> nodes.document
"""Inline all toctrees in the *tree*.
Record all docnames in *docnameset*, and output docnames with *colorfunc*.
@@ -426,7 +425,7 @@ def inline_all_toctrees(builder, docnameset, docname, tree, colorfunc, traversed
def make_refnode(builder, fromdocname, todocname, targetid, child, title=None):
- # type: (Builder, unicode, unicode, unicode, nodes.Node, unicode) -> nodes.reference
+ # type: (Builder, str, str, str, nodes.Node, str) -> nodes.reference
"""Shortcut to create a reference node."""
node = nodes.reference('', '', internal=True)
if fromdocname == todocname and targetid:
diff --git a/sphinx/util/osutil.py b/sphinx/util/osutil.py
index 7044cead3..13d4a877d 100644
--- a/sphinx/util/osutil.py
+++ b/sphinx/util/osutil.py
@@ -8,7 +8,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import contextlib
import errno
@@ -29,7 +28,6 @@ from sphinx.deprecation import RemovedInSphinx30Warning, RemovedInSphinx40Warnin
if False:
# For type annotation
from typing import Any, Iterator, List, Tuple, Union # NOQA
- from sphinx.util.typing import unicode # NOQA
# Errnos that we need.
EEXIST = getattr(errno, 'EEXIST', 0)
@@ -46,18 +44,18 @@ SEP = "/"
def os_path(canonicalpath):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return canonicalpath.replace(SEP, path.sep)
def canon_path(nativepath):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Return path in OS-independent form"""
return nativepath.replace(path.sep, SEP)
def relative_uri(base, to):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
"""Return a relative URL from ``base`` to ``to``."""
if to.startswith(SEP):
return to
@@ -81,13 +79,13 @@ def relative_uri(base, to):
def ensuredir(path):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Ensure that a path exists."""
os.makedirs(path, exist_ok=True)
def walk(top, topdown=True, followlinks=False):
- # type: (unicode, bool, bool) -> Iterator[Tuple[unicode, List[unicode], List[unicode]]]
+ # type: (str, bool, bool) -> Iterator[Tuple[str, List[str], List[str]]]
warnings.warn('sphinx.util.osutil.walk() is deprecated for removal. '
'Please use os.walk() instead.',
RemovedInSphinx40Warning)
@@ -95,7 +93,7 @@ def walk(top, topdown=True, followlinks=False):
def mtimes_of_files(dirnames, suffix):
- # type: (List[unicode], unicode) -> Iterator[float]
+ # type: (List[str], str) -> Iterator[float]
for dirname in dirnames:
for root, dirs, files in os.walk(dirname):
for sfile in files:
@@ -107,7 +105,7 @@ def mtimes_of_files(dirnames, suffix):
def movefile(source, dest):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Move a file, removing the destination if it exists."""
if os.path.exists(dest):
try:
@@ -118,7 +116,7 @@ def movefile(source, dest):
def copytimes(source, dest):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Copy a file's modification times."""
st = os.stat(source)
if hasattr(os, 'utime'):
@@ -126,7 +124,7 @@ def copytimes(source, dest):
def copyfile(source, dest):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Copy a file and its modification times, if possible.
Note: ``copyfile`` skips copying if the file has not been changed"""
@@ -144,17 +142,17 @@ project_suffix_re = re.compile(' Documentation$')
def make_filename(string):
- # type: (str) -> unicode
+ # type: (str) -> str
return no_fn_re.sub('', string) or 'sphinx'
def make_filename_from_project(project):
- # type: (str) -> unicode
+ # type: (str) -> str
return make_filename(project_suffix_re.sub('', project)).lower()
def ustrftime(format, *args):
- # type: (unicode, Any) -> unicode
+ # type: (str, Any) -> str
"""[DEPRECATED] strftime for unicode strings."""
warnings.warn('sphinx.util.osutil.ustrtime is deprecated for removal',
RemovedInSphinx30Warning, stacklevel=2)
@@ -176,7 +174,7 @@ def ustrftime(format, *args):
def relpath(path, start=os.curdir):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
"""Return a relative filepath to *path* either from the current directory or
from an optional *start* directory.
@@ -190,11 +188,11 @@ def relpath(path, start=os.curdir):
safe_relpath = relpath # for compatibility
-fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding() # type: unicode
+fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
def abspath(pathdir):
- # type: (unicode) -> unicode
+ # type: (str) -> str
pathdir = path.abspath(pathdir)
if isinstance(pathdir, bytes):
try:
@@ -207,7 +205,7 @@ def abspath(pathdir):
def getcwd():
- # type: () -> unicode
+ # type: () -> str
warnings.warn('sphinx.util.osutil.getcwd() is deprecated. '
'Please use os.getcwd() instead.',
RemovedInSphinx40Warning)
@@ -216,7 +214,7 @@ def getcwd():
@contextlib.contextmanager
def cd(target_dir):
- # type: (unicode) -> Iterator[None]
+ # type: (str) -> Iterator[None]
cwd = os.getcwd()
try:
os.chdir(target_dir)
@@ -238,12 +236,12 @@ class FileAvoidWrite:
Objects can be used as context managers.
"""
def __init__(self, path):
- # type: (unicode) -> None
+ # type: (str) -> None
self._path = path
self._io = None # type: Union[StringIO, BytesIO]
def write(self, data):
- # type: (Union[str, unicode]) -> None
+ # type: (Union[str, str]) -> None
if not self._io:
if isinstance(data, text_type):
self._io = StringIO()
@@ -285,7 +283,7 @@ class FileAvoidWrite:
return self
def __exit__(self, type, value, traceback):
- # type: (unicode, unicode, unicode) -> None
+ # type: (str, str, str) -> None
self.close()
def __getattr__(self, name):
@@ -299,7 +297,7 @@ class FileAvoidWrite:
def rmtree(path):
- # type: (unicode) -> None
+ # type: (str) -> None
if os.path.isdir(path):
shutil.rmtree(path)
else:
diff --git a/sphinx/util/parallel.py b/sphinx/util/parallel.py
index 2c670b13c..d847fa163 100644
--- a/sphinx/util/parallel.py
+++ b/sphinx/util/parallel.py
@@ -26,7 +26,6 @@ from sphinx.util import logging
if False:
# For type annotation
from typing import Any, Callable, Dict, List, Sequence # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -136,7 +135,7 @@ class ParallelTasks:
def make_chunks(arguments, nproc, maxbatch=10):
- # type: (Sequence[unicode], int, int) -> List[Any]
+ # type: (Sequence[str], int, int) -> List[Any]
# determine how many documents to read in one go
nargs = len(arguments)
chunksize = nargs // nproc
diff --git a/sphinx/util/png.py b/sphinx/util/png.py
index 6d1d5ea21..374d876a0 100644
--- a/sphinx/util/png.py
+++ b/sphinx/util/png.py
@@ -12,10 +12,6 @@
import binascii
import struct
-if False:
- # For type annotation
- from sphinx.util.typing import unicode # NOQA
-
LEN_IEND = 12
LEN_DEPTH = 22
@@ -26,7 +22,7 @@ IEND_CHUNK = b'\x00\x00\x00\x00IEND\xAE\x42\x60\x82'
def read_png_depth(filename):
- # type: (unicode) -> int
+ # type: (str) -> int
"""Read the special tEXt chunk indicating the depth from a PNG file."""
with open(filename, 'rb') as f:
f.seek(- (LEN_IEND + LEN_DEPTH), 2)
@@ -39,7 +35,7 @@ def read_png_depth(filename):
def write_png_depth(filename, depth):
- # type: (unicode, int) -> None
+ # type: (str, int) -> None
"""Write the special tEXt chunk indicating the depth to a PNG file.
The chunk is placed immediately before the special IEND chunk.
diff --git a/sphinx/util/pycompat.py b/sphinx/util/pycompat.py
index c31044c4e..4190f44ed 100644
--- a/sphinx/util/pycompat.py
+++ b/sphinx/util/pycompat.py
@@ -22,7 +22,6 @@ from sphinx.util import logging
if False:
# For type annotation
from typing import Any, Callable, Generator # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -44,14 +43,14 @@ sys_encoding = sys.getdefaultencoding()
# terminal_safe(): safely encode a string for printing to the terminal
def terminal_safe(s):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return s.encode('ascii', 'backslashreplace').decode('ascii')
# convert_with_2to3():
# support for running 2to3 over config files
def convert_with_2to3(filepath):
- # type: (unicode) -> unicode
+ # type: (str) -> str
from lib2to3.refactor import RefactoringTool, get_fixers_from_package
from lib2to3.pgen2.parse import ParseError
fixers = get_fixers_from_package('lib2to3.fixes')
@@ -76,7 +75,7 @@ class UnicodeMixin:
def execfile_(filepath, _globals, open=open):
- # type: (unicode, Any, Callable) -> None
+ # type: (str, Any, Callable) -> None
from sphinx.util.osutil import fs_encoding
with open(filepath, 'rb') as f:
source = f.read()
diff --git a/sphinx/util/requests.py b/sphinx/util/requests.py
index c19e05cec..3a3c2b6cb 100644
--- a/sphinx/util/requests.py
+++ b/sphinx/util/requests.py
@@ -47,7 +47,7 @@ except ImportError:
# try to load requests[security] (but only if SSL is available)
try:
- import ssl
+ import ssl # NOQA
except ImportError:
pass
else:
@@ -55,17 +55,7 @@ else:
pkg_resources.require(['requests[security]'])
except (pkg_resources.DistributionNotFound,
pkg_resources.VersionConflict):
- if not getattr(ssl, 'HAS_SNI', False):
- # don't complain on each url processed about the SSL issue
- if InsecurePlatformWarning:
- requests.packages.urllib3.disable_warnings(InsecurePlatformWarning)
- warnings.warn(
- 'Some links may return broken results due to being unable to '
- 'check the Server Name Indication (SNI) in the returned SSL cert '
- 'against the hostname in the url requested. Recommended to '
- 'install "requests[security]" as a dependency or upgrade to '
- 'a python version with SNI support (Python 3 and Python 2.7.9+).'
- )
+ pass # ignored
except pkg_resources.UnknownExtra:
warnings.warn(
'Some links may return broken results due to being unable to '
@@ -78,7 +68,6 @@ if False:
# For type annotation
from typing import Any, Generator, Union # NOQA
from sphinx.config import Config # NOQA
- from sphinx.util.typing import unicode # NOQA
useragent_header = [('User-Agent',
'Mozilla/5.0 (X11; Linux x86_64; rv:25.0) Gecko/20100101 Firefox/25.0')]
@@ -108,7 +97,7 @@ def ignore_insecure_warning(**kwargs):
def _get_tls_cacert(url, config):
- # type: (unicode, Config) -> Union[str, bool]
+ # type: (str, Config) -> Union[str, bool]
"""Get additional CA cert for a specific URL.
This also returns ``False`` if verification is disabled.
@@ -131,7 +120,7 @@ def _get_tls_cacert(url, config):
def get(url, **kwargs):
- # type: (unicode, Any) -> requests.Response
+ # type: (str, Any) -> requests.Response
"""Sends a GET request like requests.get().
This sets up User-Agent header and TLS verification automatically."""
@@ -145,7 +134,7 @@ def get(url, **kwargs):
def head(url, **kwargs):
- # type: (unicode, Any) -> requests.Response
+ # type: (str, Any) -> requests.Response
"""Sends a HEAD request like requests.head().
This sets up User-Agent header and TLS verification automatically."""
diff --git a/sphinx/util/rst.py b/sphinx/util/rst.py
index 1ad7bc468..097e93c74 100644
--- a/sphinx/util/rst.py
+++ b/sphinx/util/rst.py
@@ -25,7 +25,6 @@ if False:
# For type annotation
from typing import Generator # NOQA
from docutils.statemachine import StringList # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -34,7 +33,7 @@ symbols_re = re.compile(r'([!-\-/:-@\[-`{-~])') # symbols without dot(0x2e)
def escape(text):
- # type: (unicode) -> unicode
+ # type: (str) -> str
text = symbols_re.sub(r'\\\1', text)
text = re.sub(r'^\.', r'\.', text) # escape a dot at top
return text
@@ -42,7 +41,7 @@ def escape(text):
@contextmanager
def default_role(docname, name):
- # type: (unicode, unicode) -> Generator
+ # type: (str, str) -> Generator
if name:
dummy_reporter = Reporter('', 4, 4)
role_fn, _ = roles.role(name, english, 0, dummy_reporter)
@@ -57,7 +56,7 @@ def default_role(docname, name):
def prepend_prolog(content, prolog):
- # type: (StringList, unicode) -> None
+ # type: (StringList, str) -> None
"""Prepend a string to content body as prolog."""
if prolog:
pos = 0
@@ -80,7 +79,7 @@ def prepend_prolog(content, prolog):
def append_epilog(content, epilog):
- # type: (StringList, unicode) -> None
+ # type: (StringList, str) -> None
"""Append a string to content body as epilog."""
if epilog:
content.append('', '<generated>', 0)
diff --git a/sphinx/util/smartypants.py b/sphinx/util/smartypants.py
index 6bb5b2464..f62b468d6 100644
--- a/sphinx/util/smartypants.py
+++ b/sphinx/util/smartypants.py
@@ -35,7 +35,6 @@ from sphinx.util.docutils import __version_info__ as docutils_version
if False: # For type annotation
from typing import Generator, Iterable, Tuple # NOQA
- from sphinx.util.typing import unicode # NOQA
langquotes = {'af': u'“”‘’',
@@ -129,7 +128,7 @@ langquotes = {'af': u'“”‘’',
def educateQuotes(text, language='en'):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
"""
Parameter: - text string (unicode or bytes).
- language (`BCP 47` language tag.)
@@ -244,7 +243,7 @@ def educateQuotes(text, language='en'):
def educate_tokens(text_tokens, attr=smartquotes.default_smartypants_attr, language='en'):
- # type: (Iterable[Tuple[str, unicode]], unicode, unicode) -> Generator[unicode, None, None]
+ # type: (Iterable[Tuple[str, str]], str, str) -> Generator[str, None, None]
"""Return iterator that "educates" the items of `text_tokens`.
This is modified to intercept the ``attr='2'`` as it was used by the
diff --git a/sphinx/util/stemmer/__init__.py b/sphinx/util/stemmer/__init__.py
index 3f85c7726..04b37709c 100644
--- a/sphinx/util/stemmer/__init__.py
+++ b/sphinx/util/stemmer/__init__.py
@@ -11,10 +11,6 @@
from sphinx.util.stemmer.porter import PorterStemmer
-if False:
- # For type annotation
- from sphinx.util.typing import unicode # NOQA
-
try:
from Stemmer import Stemmer as _PyStemmer
PYSTEMMER = True
@@ -24,7 +20,7 @@ except ImportError:
class BaseStemmer:
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
raise NotImplementedError()
@@ -34,7 +30,7 @@ class PyStemmer(BaseStemmer):
self.stemmer = _PyStemmer('porter')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word)
@@ -43,7 +39,7 @@ class StandardStemmer(PorterStemmer, BaseStemmer): # type: ignore
make at least the stem method nicer.
"""
def stem(self, word): # type: ignore
- # type: (unicode) -> unicode
+ # type: (str) -> str
return super(StandardStemmer, self).stem(word, 0, len(word) - 1)
diff --git a/sphinx/util/stemmer/porter.py b/sphinx/util/stemmer/porter.py
index 964b09a12..0c8718799 100644
--- a/sphinx/util/stemmer/porter.py
+++ b/sphinx/util/stemmer/porter.py
@@ -28,10 +28,6 @@
:license: Public Domain ("can be used free of charge for any purpose").
"""
-if False:
- # For type annotation
- from sphinx.util.typing import unicode # NOQA
-
class PorterStemmer:
@@ -47,8 +43,7 @@ class PorterStemmer:
should be done before stem(...) is called.
"""
- self.b = "" # type: unicode
- # buffer for word to be stemmed
+ self.b = "" # buffer for word to be stemmed
self.k = 0
self.k0 = 0
self.j = 0 # j is a general offset into the string
@@ -140,7 +135,7 @@ class PorterStemmer:
return 1
def ends(self, s):
- # type: (unicode) -> int
+ # type: (str) -> int
"""ends(s) is TRUE <=> k0,...k ends with the string s."""
length = len(s)
if s[length - 1] != self.b[self.k]: # tiny speed-up
@@ -153,7 +148,7 @@ class PorterStemmer:
return 1
def setto(self, s):
- # type: (unicode) -> None
+ # type: (str) -> None
"""setto(s) sets (j+1),...k to the characters in the string s,
readjusting k."""
length = len(s)
@@ -161,7 +156,7 @@ class PorterStemmer:
self.k = self.j + length
def r(self, s):
- # type: (unicode) -> None
+ # type: (str) -> None
"""r(s) is used further down."""
if self.m() > 0:
self.setto(s)
@@ -402,7 +397,7 @@ class PorterStemmer:
self.k = self.k - 1
def stem(self, p, i, j):
- # type: (unicode, int, int) -> unicode
+ # type: (str, int, int) -> str
"""In stem(p,i,j), p is a char pointer, and the string to be stemmed
is from p[i] to p[j] inclusive. Typically i is zero and j is the
offset to the last character of a string, (p[j+1] == '\0'). The
diff --git a/sphinx/util/tags.py b/sphinx/util/tags.py
index 47295090d..c85dad8c0 100644
--- a/sphinx/util/tags.py
+++ b/sphinx/util/tags.py
@@ -17,7 +17,6 @@ env = Environment()
if False:
# For type annotation
from typing import Iterator, List # NOQA
- from sphinx.util.typing import unicode # NOQA
class BooleanParser(Parser):
@@ -49,29 +48,29 @@ class BooleanParser(Parser):
class Tags:
def __init__(self, tags=None):
- # type: (List[unicode]) -> None
+ # type: (List[str]) -> None
self.tags = dict.fromkeys(tags or [], True)
def has(self, tag):
- # type: (unicode) -> bool
+ # type: (str) -> bool
return tag in self.tags
__contains__ = has
def __iter__(self):
- # type: () -> Iterator[unicode]
+ # type: () -> Iterator[str]
return iter(self.tags)
def add(self, tag):
- # type: (unicode) -> None
+ # type: (str) -> None
self.tags[tag] = True
def remove(self, tag):
- # type: (unicode) -> None
+ # type: (str) -> None
self.tags.pop(tag, None)
def eval_condition(self, condition):
- # type: (unicode) -> bool
+ # type: (str) -> bool
# exceptions are handled by the caller
parser = BooleanParser(env, condition, state='variable')
expr = parser.parse_expression()
diff --git a/sphinx/util/template.py b/sphinx/util/template.py
index 4e078f3c4..49afc2df2 100644
--- a/sphinx/util/template.py
+++ b/sphinx/util/template.py
@@ -22,7 +22,6 @@ if False:
# For type annotation
from typing import Dict # NOQA
from jinja2.loaders import BaseLoader # NOQA
- from sphinx.util.typing import unicode # NOQA
class BaseRenderer:
@@ -33,23 +32,23 @@ class BaseRenderer:
self.env.install_gettext_translations(get_translator()) # type: ignore
def render(self, template_name, context):
- # type: (unicode, Dict) -> unicode
+ # type: (str, Dict) -> str
return self.env.get_template(template_name).render(context)
def render_string(self, source, context):
- # type: (unicode, Dict) -> unicode
+ # type: (str, Dict) -> str
return self.env.from_string(source).render(context)
class FileRenderer(BaseRenderer):
def __init__(self, search_path):
- # type: (unicode) -> None
+ # type: (str) -> None
loader = SphinxFileSystemLoader(search_path)
super(FileRenderer, self).__init__(loader)
@classmethod
def render_from_file(cls, filename, context):
- # type: (unicode, Dict) -> unicode
+ # type: (str, Dict) -> str
dirname = os.path.dirname(filename)
basename = os.path.basename(filename)
return cls(dirname).render(basename, context)
@@ -57,14 +56,14 @@ class FileRenderer(BaseRenderer):
class SphinxRenderer(FileRenderer):
def __init__(self, template_path=None):
- # type: (unicode) -> None
+ # type: (str) -> None
if template_path is None:
template_path = os.path.join(package_dir, 'templates')
super(SphinxRenderer, self).__init__(template_path)
@classmethod
def render_from_file(cls, filename, context):
- # type: (unicode, Dict) -> unicode
+ # type: (str, Dict) -> str
return FileRenderer.render_from_file(filename, context)
diff --git a/sphinx/util/texescape.py b/sphinx/util/texescape.py
index 66b66a7f0..4d5b9b136 100644
--- a/sphinx/util/texescape.py
+++ b/sphinx/util/texescape.py
@@ -14,7 +14,6 @@ from __future__ import unicode_literals
if False:
# For type annotation
from typing import Dict # NOQA
- from sphinx.util.typing import unicode # NOQA
tex_replacements = [
# map TeX special chars
@@ -74,13 +73,13 @@ tex_replacements = [
# OHM SIGN U+2126 is handled by LaTeX textcomp package
]
-tex_escape_map = {} # type: Dict[int, unicode]
+tex_escape_map = {} # type: Dict[int, str]
tex_replace_map = {}
tex_hl_escape_map_new = {}
def escape(s):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Escape text for LaTeX output."""
return s.translate(tex_escape_map)
diff --git a/sphinx/util/typing.py b/sphinx/util/typing.py
index 72ef77742..12b7a2642 100644
--- a/sphinx/util/typing.py
+++ b/sphinx/util/typing.py
@@ -13,16 +13,9 @@ from typing import Any, Callable, Dict, List, Tuple, Union
from docutils import nodes
from docutils.parsers.rst.states import Inliner
-from six import PY2, text_type
+from six import text_type
-# a typedef for unicode to make migration to mypy-py3 mode easy
-# Note: It will be removed after migrated (soon).
-if PY2:
- unicode = text_type
-else:
- unicode = str
-
# An entry of Directive.option_spec
DirectiveOption = Callable[[str], Any]
diff --git a/sphinx/versioning.py b/sphinx/versioning.py
index 409dc5e3a..2c6569932 100644
--- a/sphinx/versioning.py
+++ b/sphinx/versioning.py
@@ -11,13 +11,11 @@
"""
import pickle
import warnings
-from itertools import product
+from itertools import product, zip_longest
from operator import itemgetter
from os import path
from uuid import uuid4
-from six.moves import zip_longest
-
from sphinx.deprecation import RemovedInSphinx30Warning
from sphinx.transforms import SphinxTransform
@@ -25,7 +23,6 @@ if False:
# For type annotation
from typing import Any, Iterator # NOQA
from docutils import nodes # NOQA
- from sphinx.util.typing import unicode # NOQA
try:
import Levenshtein
@@ -122,7 +119,7 @@ def merge_doctrees(old, new, condition):
def get_ratio(old, new):
- # type: (unicode, unicode) -> float
+ # type: (str, str) -> float
"""Return a "similiarity ratio" (in percent) representing the similarity
between the two strings where 0 is equal and anything above less than equal.
"""
@@ -136,7 +133,7 @@ def get_ratio(old, new):
def levenshtein_distance(a, b):
- # type: (unicode, unicode) -> int
+ # type: (str, str) -> int
"""Return the Levenshtein edit distance between two strings *a* and *b*."""
if a == b:
return 0
diff --git a/sphinx/writers/html.py b/sphinx/writers/html.py
index e2e0797c6..80a06cc26 100644
--- a/sphinx/writers/html.py
+++ b/sphinx/writers/html.py
@@ -30,7 +30,6 @@ if False:
# For type annotation
from typing import Any # NOQA
from sphinx.builders.html import StandaloneHTMLBuilder # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -287,7 +286,7 @@ class HTMLTranslator(SphinxTranslator, BaseTranslator):
# overwritten
def visit_admonition(self, node, name=''):
- # type: (nodes.Element, unicode) -> None
+ # type: (nodes.Element, str) -> None
self.body.append(self.starttag(
node, 'div', CLASS=('admonition ' + name)))
if name:
@@ -325,7 +324,7 @@ class HTMLTranslator(SphinxTranslator, BaseTranslator):
def add_fignumber(self, node):
# type: (nodes.Element) -> None
def append_fignumber(figtype, figure_id):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
if self.builder.name == 'singlehtml':
key = u"%s/%s" % (self.docnames[-1], figtype)
else:
@@ -351,7 +350,7 @@ class HTMLTranslator(SphinxTranslator, BaseTranslator):
append_fignumber(figtype, node['ids'][0])
def add_permalink_ref(self, node, title):
- # type: (nodes.Element, unicode) -> None
+ # type: (nodes.Element, str) -> None
if node['ids'] and self.permalink_text and self.builder.add_permalinks:
format = u'<a class="headerlink" href="#%s" title="%s">%s</a>'
self.body.append(format % (node['ids'][0], title, self.permalink_text))
@@ -870,26 +869,26 @@ class HTMLTranslator(SphinxTranslator, BaseTranslator):
self.context[-1] = self.context[-1].replace('&nbsp;', '&#160;')
def visit_math(self, node, math_env=''):
- # type: (nodes.Element, unicode) -> None
+ # type: (nodes.Element, str) -> None
name = self.builder.math_renderer_name
visit, _ = self.builder.app.registry.html_inline_math_renderers[name]
visit(self, node)
def depart_math(self, node, math_env=''):
- # type: (nodes.Element, unicode) -> None
+ # type: (nodes.Element, str) -> None
name = self.builder.math_renderer_name
_, depart = self.builder.app.registry.html_inline_math_renderers[name]
if depart:
depart(self, node)
def visit_math_block(self, node, math_env=''):
- # type: (nodes.Element, unicode) -> None
+ # type: (nodes.Element, str) -> None
name = self.builder.math_renderer_name
visit, _ = self.builder.app.registry.html_block_math_renderers[name]
visit(self, node)
def depart_math_block(self, node, math_env=''):
- # type: (nodes.Element, unicode) -> None
+ # type: (nodes.Element, str) -> None
name = self.builder.math_renderer_name
_, depart = self.builder.app.registry.html_block_math_renderers[name]
if depart:
@@ -903,21 +902,21 @@ class HTMLTranslator(SphinxTranslator, BaseTranslator):
@property
def highlightlang(self):
- # type: () -> unicode
+ # type: () -> str
warnings.warn('HTMLTranslator.highlightlang is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return self.builder.config.highlight_language
@property
def highlightlang_base(self):
- # type: () -> unicode
+ # type: () -> str
warnings.warn('HTMLTranslator.highlightlang_base is deprecated.',
RemovedInSphinx30Warning)
return self.builder.config.highlight_language
@property
def highlightopts(self):
- # type: () -> unicode
+ # type: () -> str
warnings.warn('HTMLTranslator.highlightopts is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return self.builder.config.highlight_options
diff --git a/sphinx/writers/html5.py b/sphinx/writers/html5.py
index 3c1f54965..7385c14d6 100644
--- a/sphinx/writers/html5.py
+++ b/sphinx/writers/html5.py
@@ -29,7 +29,6 @@ if False:
# For type annotation
from typing import Any # NOQA
from sphinx.builders.html import StandaloneHTMLBuilder # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -256,7 +255,7 @@ class HTML5Translator(SphinxTranslator, BaseTranslator):
# overwritten
def visit_admonition(self, node, name=''):
- # type: (nodes.Element, unicode) -> None
+ # type: (nodes.Element, str) -> None
self.body.append(self.starttag(
node, 'div', CLASS=('admonition ' + name)))
if name:
@@ -293,7 +292,7 @@ class HTML5Translator(SphinxTranslator, BaseTranslator):
def add_fignumber(self, node):
# type: (nodes.Element) -> None
def append_fignumber(figtype, figure_id):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
if self.builder.name == 'singlehtml':
key = u"%s/%s" % (self.docnames[-1], figtype)
else:
@@ -319,7 +318,7 @@ class HTML5Translator(SphinxTranslator, BaseTranslator):
append_fignumber(figtype, node['ids'][0])
def add_permalink_ref(self, node, title):
- # type: (nodes.Element, unicode) -> None
+ # type: (nodes.Element, str) -> None
if node['ids'] and self.permalink_text and self.builder.add_permalinks:
format = u'<a class="headerlink" href="#%s" title="%s">%s</a>'
self.body.append(format % (node['ids'][0], title, self.permalink_text))
@@ -807,26 +806,26 @@ class HTML5Translator(SphinxTranslator, BaseTranslator):
node['classes'].append('field-odd')
def visit_math(self, node, math_env=''):
- # type: (nodes.Element, unicode) -> None
+ # type: (nodes.Element, str) -> None
name = self.builder.math_renderer_name
visit, _ = self.builder.app.registry.html_inline_math_renderers[name]
visit(self, node)
def depart_math(self, node, math_env=''):
- # type: (nodes.Element, unicode) -> None
+ # type: (nodes.Element, str) -> None
name = self.builder.math_renderer_name
_, depart = self.builder.app.registry.html_inline_math_renderers[name]
if depart:
depart(self, node)
def visit_math_block(self, node, math_env=''):
- # type: (nodes.Element, unicode) -> None
+ # type: (nodes.Element, str) -> None
name = self.builder.math_renderer_name
visit, _ = self.builder.app.registry.html_block_math_renderers[name]
visit(self, node)
def depart_math_block(self, node, math_env=''):
- # type: (nodes.Element, unicode) -> None
+ # type: (nodes.Element, str) -> None
name = self.builder.math_renderer_name
_, depart = self.builder.app.registry.html_block_math_renderers[name]
if depart:
@@ -840,21 +839,21 @@ class HTML5Translator(SphinxTranslator, BaseTranslator):
@property
def highlightlang(self):
- # type: () -> unicode
+ # type: () -> str
warnings.warn('HTMLTranslator.highlightlang is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return self.builder.config.highlight_language
@property
def highlightlang_base(self):
- # type: () -> unicode
+ # type: () -> str
warnings.warn('HTMLTranslator.highlightlang_base is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return self.builder.config.highlight_language
@property
def highlightopts(self):
- # type: () -> unicode
+ # type: () -> str
warnings.warn('HTMLTranslator.highlightopts is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return self.builder.config.highlight_options
diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py
index a4cd94fe9..065e0bf5a 100644
--- a/sphinx/writers/latex.py
+++ b/sphinx/writers/latex.py
@@ -50,7 +50,6 @@ if False:
captioned_literal_block, footnotemark, footnotetext, math_reference, thebibliography
)
from sphinx.domains import IndexEntry # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -71,7 +70,7 @@ ENUMERATE_LIST_STYLE = defaultdict(lambda: r'\arabic',
'upperalpha': r'\Alph',
'lowerroman': r'\roman',
'upperroman': r'\Roman',
- }) # type: Dict[unicode, unicode]
+ })
PDFLATEX_DEFAULT_FONTPKG = r'''
\usepackage{times}
\expandafter\ifx\csname T@LGR\endcsname\relax
@@ -173,7 +172,7 @@ DEFAULT_SETTINGS = {
'figure_align': 'htbp',
'tocdepth': '',
'secnumdepth': '',
-} # type: Dict[unicode, unicode]
+}
ADDITIONAL_SETTINGS = {
'pdflatex': {
@@ -222,7 +221,7 @@ ADDITIONAL_SETTINGS = {
'fncychap': '',
'geometry': '\\usepackage[dvipdfm]{geometry}',
},
-} # type: Dict[unicode, Dict[unicode, unicode]]
+}
EXTRA_RE = re.compile(r'^(.*\S)\s+\(([^()]*)\)\s*$')
@@ -266,14 +265,14 @@ class ExtBabel(Babel):
cyrillic_languages = ('bulgarian', 'kazakh', 'mongolian', 'russian', 'ukrainian')
def __init__(self, language_code, use_polyglossia=False):
- # type: (unicode, bool) -> None
+ # type: (str, bool) -> None
self.language_code = language_code
self.use_polyglossia = use_polyglossia
self.supported = True
super(ExtBabel, self).__init__(language_code or '')
def get_shorthandoff(self):
- # type: () -> unicode
+ # type: () -> str
warnings.warn('ExtBabel.get_shorthandoff() is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return SHORTHANDOFF
@@ -287,7 +286,7 @@ class ExtBabel(Babel):
return self.supported
def language_name(self, language_code):
- # type: (unicode) -> unicode
+ # type: (str) -> str
language = super(ExtBabel, self).language_name(language_code)
if language == 'ngerman' and self.use_polyglossia:
# polyglossia calls new orthography (Neue Rechtschreibung) as
@@ -300,7 +299,7 @@ class ExtBabel(Babel):
return language
def get_mainlanguage_options(self):
- # type: () -> unicode
+ # type: () -> str
"""Return options for polyglossia's ``\\setmainlanguage``."""
if self.use_polyglossia is False:
return None
@@ -319,16 +318,16 @@ class Table:
def __init__(self, node):
# type: (nodes.Element) -> None
- self.header = [] # type: List[unicode]
- self.body = [] # type: List[unicode]
+ self.header = [] # type: List[str]
+ self.body = [] # type: List[str]
self.align = node.get('align')
self.colcount = 0
- self.colspec = None # type: unicode
+ self.colspec = None # type: str
self.colwidths = [] # type: List[int]
self.has_problematic = False
self.has_oldproblematic = False
self.has_verbatim = False
- self.caption = None # type: List[unicode]
+ self.caption = None # type: List[str]
self.stubs = [] # type: List[int]
# current position
@@ -336,7 +335,7 @@ class Table:
self.row = 0
# for internal use
- self.classes = node.get('classes', []) # type: List[unicode]
+ self.classes = node.get('classes', []) # type: List[str]
self.cells = defaultdict(int) # type: Dict[Tuple[int, int], int]
# it maps table location to cell_id
# (cell = rectangular area)
@@ -344,14 +343,14 @@ class Table:
@property
def caption_footnotetexts(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
warnings.warn('table.caption_footnotetexts is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return []
@property
def header_footnotetexts(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
warnings.warn('table.header_footnotetexts is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return []
@@ -362,7 +361,7 @@ class Table:
return self.row > 30 or 'longtable' in self.classes
def get_table_type(self):
- # type: () -> unicode
+ # type: () -> str
"""Returns the LaTeX environment name for the table.
The class currently supports:
@@ -383,7 +382,7 @@ class Table:
return 'tabulary'
def get_colspec(self):
- # type: () -> unicode
+ # type: () -> str
"""Returns a column spec of table.
This is what LaTeX calls the 'preamble argument' of the used table environment.
@@ -474,13 +473,13 @@ class TableCell:
def escape_abbr(text):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Adjust spacing after abbreviations."""
return re.sub(r'\.(?=\s|$)', r'.\@', text)
def rstdim_to_latexdim(width_str):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Convert `width_str` with rst length to LaTeX length."""
match = re.match(r'^(\d*\.?\d*)\s*(\S*)$', width_str)
if not match:
@@ -510,7 +509,7 @@ class LaTeXTranslator(SphinxTranslator):
def __init__(self, document, builder):
# type: (nodes.document, LaTeXBuilder) -> None
super(LaTeXTranslator, self).__init__(builder, document)
- self.body = [] # type: List[unicode]
+ self.body = [] # type: List[str]
# flags
self.in_title = 0
@@ -592,9 +591,9 @@ class LaTeXTranslator(SphinxTranslator):
self.elements['logo'] = '\\sphinxincludegraphics{%s}\\par' % \
path.basename(self.config.latex_logo)
- if (self.config.language and self.config.language != 'ja' and
+ if (self.config.language not in {None, 'en', 'ja'} and
'fncychap' not in self.config.latex_elements):
- # use Sonny style if any language specified
+ # use Sonny style if any language specified (except English)
self.elements['fncychap'] = ('\\usepackage[Sonny]{fncychap}\n'
'\\ChNameVar{\\Large\\normalfont'
'\\sffamily}\n\\ChTitleVar{\\Large'
@@ -648,7 +647,7 @@ class LaTeXTranslator(SphinxTranslator):
if getattr(self.builder, 'usepackages', None):
def declare_package(packagename, options=None):
- # type:(unicode, unicode) -> unicode
+ # type:(str, str) -> str
if options:
return '\\usepackage[%s]{%s}' % (options, packagename)
else:
@@ -702,22 +701,22 @@ class LaTeXTranslator(SphinxTranslator):
self.highlighter = highlighting.PygmentsBridge('latex', self.config.pygments_style)
self.context = [] # type: List[Any]
- self.descstack = [] # type: List[unicode]
+ self.descstack = [] # type: List[str]
self.table = None # type: Table
- self.next_table_colspec = None # type: unicode
- self.bodystack = [] # type: List[List[unicode]]
+ self.next_table_colspec = None # type: str
+ self.bodystack = [] # type: List[List[str]]
self.footnote_restricted = None # type: nodes.Element
self.pending_footnotes = [] # type: List[nodes.footnote_reference]
- self.curfilestack = [] # type: List[unicode]
- self.handled_abbrs = set() # type: Set[unicode]
+ self.curfilestack = [] # type: List[str]
+ self.handled_abbrs = set() # type: Set[str]
def pushbody(self, newbody):
- # type: (List[unicode]) -> None
+ # type: (List[str]) -> None
self.bodystack.append(self.body)
self.body = newbody
def popbody(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
body = self.body
self.body = self.bodystack.pop()
return body
@@ -744,7 +743,7 @@ class LaTeXTranslator(SphinxTranslator):
self.pending_footnotes = []
def format_docclass(self, docclass):
- # type: (unicode) -> unicode
+ # type: (str) -> str
""" prepends prefix to sphinx document classes
"""
if docclass in self.docclasses:
@@ -752,7 +751,7 @@ class LaTeXTranslator(SphinxTranslator):
return docclass
def astext(self):
- # type: () -> unicode
+ # type: () -> str
self.elements.update({
'body': u''.join(self.body),
'indices': self.generate_indices()
@@ -760,14 +759,14 @@ class LaTeXTranslator(SphinxTranslator):
return self.render('latex.tex_t', self.elements)
def hypertarget(self, id, withdoc=True, anchor=True):
- # type: (unicode, bool, bool) -> unicode
+ # type: (str, bool, bool) -> str
if withdoc:
id = self.curfilestack[-1] + ':' + id
return (anchor and '\\phantomsection' or '') + \
'\\label{%s}' % self.idescape(id)
def hypertarget_to(self, node, anchor=False):
- # type: (nodes.Element, bool) -> unicode
+ # type: (nodes.Element, bool) -> str
labels = ''.join(self.hypertarget(node_id, anchor=False) for node_id in node['ids'])
if anchor:
return r'\phantomsection' + labels
@@ -775,21 +774,21 @@ class LaTeXTranslator(SphinxTranslator):
return labels
def hyperlink(self, id):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return '{\\hyperref[%s]{' % self.idescape(id)
def hyperpageref(self, id):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return '\\autopageref*{%s}' % self.idescape(id)
def idescape(self, id):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return '\\detokenize{%s}' % text_type(id).translate(tex_replace_map).\
encode('ascii', 'backslashreplace').decode('ascii').\
replace('\\', '_')
def babel_renewcommand(self, command, definition):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
if self.elements['multilingual']:
prefix = '\\addto\\captions%s{' % self.babel.get_language()
suffix = '}'
@@ -800,8 +799,8 @@ class LaTeXTranslator(SphinxTranslator):
return ('%s\\renewcommand{%s}{%s}%s\n' % (prefix, command, definition, suffix))
def generate_numfig_format(self, builder):
- # type: (LaTeXBuilder) -> unicode
- ret = [] # type: List[unicode]
+ # type: (LaTeXBuilder) -> str
+ ret = [] # type: List[str]
figure = self.builder.config.numfig_format['figure'].split('%s', 1)
if len(figure) == 1:
ret.append('\\def\\fnum@figure{%s}\n' %
@@ -840,9 +839,9 @@ class LaTeXTranslator(SphinxTranslator):
return ''.join(ret)
def generate_indices(self):
- # type: () -> unicode
+ # type: () -> str
def generate(content, collapsed):
- # type: (List[Tuple[unicode, List[IndexEntry]]], bool) -> None
+ # type: (List[Tuple[str, List[IndexEntry]]], bool) -> None
ret.append('\\begin{sphinxtheindex}\n')
ret.append('\\let\\bigletter\\sphinxstyleindexlettergroup\n')
for i, (letter, entries) in enumerate(content):
@@ -883,7 +882,7 @@ class LaTeXTranslator(SphinxTranslator):
return ''.join(ret)
def render(self, template_name, variables):
- # type: (unicode, Dict) -> unicode
+ # type: (str, Dict) -> str
for template_dir in self.builder.config.templates_path:
template = path.join(self.builder.confdir, template_dir,
template_name)
@@ -916,7 +915,7 @@ class LaTeXTranslator(SphinxTranslator):
self.curfilestack.append(node['docname'])
def collect_footnotes(self, node):
- # type: (nodes.Element) -> Dict[unicode, List[Union[collected_footnote, bool]]]
+ # type: (nodes.Element) -> Dict[str, List[Union[collected_footnote, bool]]]
def footnotes_under(n):
# type: (nodes.Element) -> Iterator[nodes.footnote]
if isinstance(n, nodes.footnote):
@@ -926,10 +925,9 @@ class LaTeXTranslator(SphinxTranslator):
if isinstance(c, addnodes.start_of_file):
continue
elif isinstance(c, nodes.Element):
- for k in footnotes_under(c):
- yield k
+ yield from footnotes_under(c)
- fnotes = {} # type: Dict[unicode, List[Union[collected_footnote, bool]]]
+ fnotes = {} # type: Dict[str, List[Union[collected_footnote, bool]]]
for fn in footnotes_under(node):
label = cast(nodes.label, fn[0])
num = label.astext().strip()
@@ -1469,7 +1467,7 @@ class LaTeXTranslator(SphinxTranslator):
def visit_enumerated_list(self, node):
# type: (nodes.Element) -> None
def get_enumtype(node):
- # type: (nodes.Element) -> unicode
+ # type: (nodes.Element) -> str
enumtype = node.get('enumtype', 'arabic')
if 'alpha' in enumtype and 26 < node.get('start', 0) + len(node):
# fallback to arabic if alphabet counter overflows
@@ -1538,7 +1536,7 @@ class LaTeXTranslator(SphinxTranslator):
def visit_term(self, node):
# type: (nodes.Element) -> None
self.in_term += 1
- ctx = '' # type: unicode
+ ctx = ''
if node.get('ids'):
ctx = '\\phantomsection'
for node_id in node['ids']:
@@ -1645,7 +1643,7 @@ class LaTeXTranslator(SphinxTranslator):
pass
def latex_image_length(self, width_str):
- # type: (unicode) -> unicode
+ # type: (str) -> str
try:
return rstdim_to_latexdim(width_str)
except ValueError:
@@ -1660,9 +1658,9 @@ class LaTeXTranslator(SphinxTranslator):
def visit_image(self, node):
# type: (nodes.Element) -> None
attrs = node.attributes
- pre = [] # type: List[unicode]
+ pre = [] # type: List[str]
# in reverse order
- post = [] # type: List[unicode]
+ post = [] # type: List[str]
include_graphics_options = []
is_inline = self.is_inline(node)
if 'width' in attrs:
@@ -1851,7 +1849,7 @@ class LaTeXTranslator(SphinxTranslator):
def visit_target(self, node):
# type: (nodes.Element) -> None
def add_target(id):
- # type: (unicode) -> None
+ # type: (str) -> None
# indexing uses standard LaTeX index markup, so the targets
# will be generated differently
if id.startswith('index-'):
@@ -2494,7 +2492,7 @@ class LaTeXTranslator(SphinxTranslator):
# text handling
def encode(self, text):
- # type: (unicode) -> unicode
+ # type: (str) -> str
text = text_type(text).translate(tex_escape_map)
if self.literal_whitespace:
# Insert a blank before the newline, to avoid
@@ -2506,7 +2504,7 @@ class LaTeXTranslator(SphinxTranslator):
return text
def encode_uri(self, text):
- # type: (unicode) -> unicode
+ # type: (str) -> str
# in \href, the tilde is allowed and must be represented literally
return self.encode(text).replace('\\textasciitilde{}', '~')
@@ -2588,14 +2586,14 @@ class LaTeXTranslator(SphinxTranslator):
@property
def footnotestack(self):
- # type: () -> List[Dict[unicode, List[Union[collected_footnote, bool]]]]
+ # type: () -> List[Dict[str, List[Union[collected_footnote, bool]]]]
warnings.warn('LaTeXWriter.footnotestack is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return []
@property
def bibitems(self):
- # type: () -> List[List[unicode]]
+ # type: () -> List[List[str]]
warnings.warn('LaTeXTranslator.bibitems() is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return []
@@ -2609,7 +2607,7 @@ class LaTeXTranslator(SphinxTranslator):
@property
def next_section_ids(self):
- # type: () -> Set[unicode]
+ # type: () -> Set[str]
warnings.warn('LaTeXTranslator.next_section_ids is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return set()
@@ -2622,20 +2620,20 @@ class LaTeXTranslator(SphinxTranslator):
return {}
def push_hyperlink_ids(self, figtype, ids):
- # type: (unicode, Set[unicode]) -> None
+ # type: (str, Set[str]) -> None
warnings.warn('LaTeXTranslator.push_hyperlink_ids() is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
pass
def pop_hyperlink_ids(self, figtype):
- # type: (unicode) -> Set[unicode]
+ # type: (str) -> Set[str]
warnings.warn('LaTeXTranslator.pop_hyperlink_ids() is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return set()
@property
def hlsettingstack(self):
- # type: () -> List[List[Union[unicode, int]]]
+ # type: () -> List[List[Union[str, int]]]
warnings.warn('LaTeXTranslator.hlsettingstack is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return [[self.builder.config.highlight_language, sys.maxsize]]
@@ -2651,7 +2649,7 @@ class LaTeXTranslator(SphinxTranslator):
logger.warning(msg % key)
def babel_defmacro(self, name, definition):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
warnings.warn('babel_defmacro() is deprecated.',
RemovedInSphinx40Warning)
@@ -2665,7 +2663,7 @@ class LaTeXTranslator(SphinxTranslator):
return ('%s\\def%s{%s}%s\n' % (prefix, name, definition, suffix))
def _make_visit_admonition(name): # type: ignore
- # type: (unicode) -> Callable[[LaTeXTranslator, nodes.Element], None]
+ # type: (str) -> Callable[[LaTeXTranslator, nodes.Element], None]
warnings.warn('LaTeXTranslator._make_visit_admonition() is deprecated.',
RemovedInSphinx30Warning)
diff --git a/sphinx/writers/manpage.py b/sphinx/writers/manpage.py
index 2ac2cab7b..1b7e85928 100644
--- a/sphinx/writers/manpage.py
+++ b/sphinx/writers/manpage.py
@@ -28,7 +28,6 @@ if False:
# For type annotation
from typing import Any, Dict # NOQA
from sphinx.builders import Builder # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -84,7 +83,7 @@ class ManualPageTranslator(SphinxTranslator, BaseTranslator):
Custom translator.
"""
- _docinfo = {} # type: Dict[unicode, Any]
+ _docinfo = {} # type: Dict[str, Any]
def __init__(self, builder, document):
# type: (Builder, nodes.document) -> None
@@ -120,7 +119,7 @@ class ManualPageTranslator(SphinxTranslator, BaseTranslator):
# overwritten -- added quotes around all .TH arguments
def header(self):
- # type: () -> unicode
+ # type: () -> str
tmpl = (".TH \"%(title_upper)s\" \"%(manual_section)s\""
" \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n"
".SH NAME\n"
diff --git a/sphinx/writers/texinfo.py b/sphinx/writers/texinfo.py
index 3c5d8bbcc..b9b7cabfe 100644
--- a/sphinx/writers/texinfo.py
+++ b/sphinx/writers/texinfo.py
@@ -31,7 +31,6 @@ if False:
from typing import Any, Callable, Dict, Iterator, List, Pattern, Set, Tuple, Union # NOQA
from sphinx.builders.texinfo import TexinfoBuilder # NOQA
from sphinx.domains import IndexEntry # NOQA
- from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@@ -105,7 +104,7 @@ def find_subsections(section):
def smart_capwords(s, sep=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
"""Like string.capwords() but does not capitalize words that already
contain a capital letter."""
words = s.split(sep)
@@ -125,11 +124,11 @@ class TexinfoWriter(writers.Writer):
('Dir entry', ['--texinfo-dir-entry'], {'default': ''}),
('Description', ['--texinfo-dir-description'], {'default': ''}),
('Category', ['--texinfo-dir-category'], {'default':
- 'Miscellaneous'}))) # type: Tuple[unicode, Any, Tuple[Tuple[unicode, List[unicode], Dict[unicode, unicode]], ...]] # NOQA
+ 'Miscellaneous'}))) # type: Tuple[str, Any, Tuple[Tuple[str, List[str], Dict[str, str]], ...]] # NOQA
settings_defaults = {} # type: Dict
- output = None # type: unicode
+ output = None # type: str
visitor_attributes = ('output', 'fragment')
@@ -173,19 +172,19 @@ class TexinfoTranslator(SphinxTranslator):
super(TexinfoTranslator, self).__init__(builder, document)
self.init_settings()
- self.written_ids = set() # type: Set[unicode]
+ self.written_ids = set() # type: Set[str]
# node names and anchors in output
# node names and anchors that should be in output
- self.referenced_ids = set() # type: Set[unicode]
- self.indices = [] # type: List[Tuple[unicode, unicode]]
+ self.referenced_ids = set() # type: Set[str]
+ self.indices = [] # type: List[Tuple[str, str]]
# (node name, content)
- self.short_ids = {} # type: Dict[unicode, unicode]
+ self.short_ids = {} # type: Dict[str, str]
# anchors --> short ids
- self.node_names = {} # type: Dict[unicode, unicode]
+ self.node_names = {} # type: Dict[str, str]
# node name --> node's name to display
- self.node_menus = {} # type: Dict[unicode, List[unicode]]
+ self.node_menus = {} # type: Dict[str, List[str]]
# node name --> node's menu entries
- self.rellinks = {} # type: Dict[unicode, List[unicode]]
+ self.rellinks = {} # type: Dict[str, List[str]]
# node name --> (next, previous, up)
self.collect_indices()
@@ -193,18 +192,18 @@ class TexinfoTranslator(SphinxTranslator):
self.collect_node_menus()
self.collect_rellinks()
- self.body = [] # type: List[unicode]
- self.context = [] # type: List[unicode]
+ self.body = [] # type: List[str]
+ self.context = [] # type: List[str]
self.previous_section = None # type: nodes.section
self.section_level = 0
self.seen_title = False
- self.next_section_ids = set() # type: Set[unicode]
+ self.next_section_ids = set() # type: Set[str]
self.escape_newlines = 0
self.escape_hyphens = 0
- self.curfilestack = [] # type: List[unicode]
- self.footnotestack = [] # type: List[Dict[unicode, List[Union[collected_footnote, bool]]]] # NOQA
+ self.curfilestack = [] # type: List[str]
+ self.footnotestack = [] # type: List[Dict[str, List[Union[collected_footnote, bool]]]] # NOQA
self.in_footnote = 0
- self.handled_abbrs = set() # type: Set[unicode]
+ self.handled_abbrs = set() # type: Set[str]
self.colwidths = None # type: List[int]
def finish(self):
@@ -246,7 +245,7 @@ class TexinfoTranslator(SphinxTranslator):
language=self.builder.config.language))
})
# title
- title = self.settings.title # type: unicode
+ title = self.settings.title # type: str
if not title:
title_node = self.document.next_node(nodes.title)
title = (title and title_node.astext()) or '<untitled>'
@@ -279,7 +278,7 @@ class TexinfoTranslator(SphinxTranslator):
Assigns the attribute ``node_name`` to each section."""
def add_node_name(name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
node_id = self.escape_id(name)
nth, suffix = 1, ''
while node_id + suffix in self.written_ids or \
@@ -365,7 +364,7 @@ class TexinfoTranslator(SphinxTranslator):
# characters.
def escape(self, s):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Return a string with Texinfo command characters escaped."""
s = s.replace('@', '@@')
s = s.replace('{', '@{')
@@ -376,7 +375,7 @@ class TexinfoTranslator(SphinxTranslator):
return s
def escape_arg(self, s):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Return an escaped string suitable for use as an argument
to a Texinfo command."""
s = self.escape(s)
@@ -387,7 +386,7 @@ class TexinfoTranslator(SphinxTranslator):
return s
def escape_id(self, s):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Return an escaped string suitable for node names and anchors."""
bad_chars = ',:.()'
for bc in bad_chars:
@@ -396,7 +395,7 @@ class TexinfoTranslator(SphinxTranslator):
return self.escape(s)
def escape_menu(self, s):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Return an escaped string suitable for menu entries."""
s = self.escape_arg(s)
s = s.replace(':', ';')
@@ -410,7 +409,7 @@ class TexinfoTranslator(SphinxTranslator):
self.body.append('\n')
def format_menu_entry(self, name, node_name, desc):
- # type: (unicode, unicode, unicode) -> unicode
+ # type: (str, str, str) -> str
if name == node_name:
s = '* %s:: ' % (name,)
else:
@@ -421,7 +420,7 @@ class TexinfoTranslator(SphinxTranslator):
return s + wdesc.strip() + '\n'
def add_menu_entries(self, entries, reg=re.compile(r'\s+---?\s+')):
- # type: (List[unicode], Pattern) -> None
+ # type: (List[str], Pattern) -> None
for entry in entries:
name = self.node_names[entry]
# special formatting for entries that are divided by an em-dash
@@ -439,7 +438,7 @@ class TexinfoTranslator(SphinxTranslator):
self.body.append(self.format_menu_entry(name, entry, desc))
def add_menu(self, node_name):
- # type: (unicode) -> None
+ # type: (str) -> None
entries = self.node_menus[node_name]
if not entries:
return
@@ -452,7 +451,7 @@ class TexinfoTranslator(SphinxTranslator):
return
def _add_detailed_menu(name):
- # type: (unicode) -> None
+ # type: (str) -> None
entries = self.node_menus[name]
if not entries:
return
@@ -469,7 +468,7 @@ class TexinfoTranslator(SphinxTranslator):
'@end menu\n')
def tex_image_length(self, width_str):
- # type: (unicode) -> unicode
+ # type: (str) -> str
match = re.match(r'(\d*\.?\d*)\s*(\S*)', width_str)
if not match:
# fallback
@@ -487,8 +486,8 @@ class TexinfoTranslator(SphinxTranslator):
def collect_indices(self):
# type: () -> None
def generate(content, collapsed):
- # type: (List[Tuple[unicode, List[IndexEntry]]], bool) -> unicode
- ret = ['\n@menu\n'] # type: List[unicode]
+ # type: (List[Tuple[str, List[IndexEntry]]], bool) -> str
+ ret = ['\n@menu\n']
for letter, entries in content:
for entry in entries:
if not entry[3]:
@@ -525,7 +524,7 @@ class TexinfoTranslator(SphinxTranslator):
# TODO: move this to sphinx.util
def collect_footnotes(self, node):
- # type: (nodes.Element) -> Dict[unicode, List[Union[collected_footnote, bool]]]
+ # type: (nodes.Element) -> Dict[str, List[Union[collected_footnote, bool]]]
def footnotes_under(n):
# type: (nodes.Element) -> Iterator[nodes.footnote]
if isinstance(n, nodes.footnote):
@@ -535,9 +534,8 @@ class TexinfoTranslator(SphinxTranslator):
if isinstance(c, addnodes.start_of_file):
continue
elif isinstance(c, nodes.Element):
- for k in footnotes_under(c):
- yield k
- fnotes = {} # type: Dict[unicode, List[Union[collected_footnote, bool]]]
+ yield from footnotes_under(c)
+ fnotes = {} # type: Dict[str, List[Union[collected_footnote, bool]]]
for fn in footnotes_under(node):
label = cast(nodes.label, fn[0])
num = label.astext().strip()
@@ -547,7 +545,7 @@ class TexinfoTranslator(SphinxTranslator):
# -- xref handling
def get_short_id(self, id):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Return a shorter 'id' associated with ``id``."""
# Shorter ids improve paragraph filling in places
# that the id is hidden by Emacs.
@@ -559,7 +557,7 @@ class TexinfoTranslator(SphinxTranslator):
return sid
def add_anchor(self, id, node):
- # type: (unicode, nodes.Node) -> None
+ # type: (str, nodes.Node) -> None
if id.startswith('index-'):
return
id = self.curfilestack[-1] + ':' + id
@@ -571,7 +569,7 @@ class TexinfoTranslator(SphinxTranslator):
self.written_ids.add(id)
def add_xref(self, id, name, node):
- # type: (unicode, unicode, nodes.Node) -> None
+ # type: (str, str, nodes.Node) -> None
name = self.escape_menu(name)
sid = self.get_short_id(id)
self.body.append('@ref{%s,,%s}' % (sid, name))
@@ -636,13 +634,13 @@ class TexinfoTranslator(SphinxTranslator):
'@section',
'@subsection',
'@subsubsection',
- ) # type: Tuple[unicode, ...]
+ )
rubrics = (
'@heading',
'@subheading',
'@subsubheading',
- ) # type: Tuple[unicode, ...]
+ )
def visit_title(self, node):
# type: (nodes.Element) -> None
@@ -1204,7 +1202,7 @@ class TexinfoTranslator(SphinxTranslator):
# -- Admonitions
def visit_admonition(self, node, name=''):
- # type: (nodes.Element, unicode) -> None
+ # type: (nodes.Element, str) -> None
if not name:
title = cast(nodes.title, node[0])
name = self.escape(title.astext())
@@ -1746,7 +1744,7 @@ class TexinfoTranslator(SphinxTranslator):
raise nodes.SkipNode
def _make_visit_admonition(name): # type: ignore
- # type: (unicode) -> Callable[[TexinfoTranslator, nodes.Element], None]
+ # type: (str) -> Callable[[TexinfoTranslator, nodes.Element], None]
warnings.warn('TexinfoTranslator._make_visit_admonition() is deprecated.',
RemovedInSphinx30Warning)
diff --git a/sphinx/writers/text.py b/sphinx/writers/text.py
index bf424e015..1815e18fb 100644
--- a/sphinx/writers/text.py
+++ b/sphinx/writers/text.py
@@ -28,7 +28,6 @@ if False:
# For type annotation
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union # NOQA
from sphinx.builders.text import TextBuilder # NOQA
- from sphinx.util.typing import unicode # NOQA
class Cell:
@@ -37,7 +36,7 @@ class Cell:
"""
def __init__(self, text="", rowspan=1, colspan=1):
self.text = text
- self.wrapped = [] # type: List[unicode]
+ self.wrapped = [] # type: List[str]
self.rowspan = rowspan
self.colspan = colspan
self.col = None
@@ -206,11 +205,11 @@ class Table:
self.rewrap()
def writesep(char="-", lineno=None):
- # type: (unicode, Optional[int]) -> unicode
+ # type: (str, Optional[int]) -> str
"""Called on the line *before* lineno.
Called with no *lineno* for the last sep.
"""
- out = [] # type: List[unicode]
+ out = [] # type: List[str]
for colno, width in enumerate(self.measured_widths):
if (
lineno is not None and
@@ -267,14 +266,13 @@ class TextWrapper(textwrap.TextWrapper):
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
def _wrap_chunks(self, chunks):
- # type: (List[unicode]) -> List[unicode]
+ # type: (List[str]) -> List[str]
"""_wrap_chunks(chunks : [string]) -> [string]
The original _wrap_chunks uses len() to calculate width.
This method respects wide/fullwidth characters for width adjustment.
"""
- drop_whitespace = getattr(self, 'drop_whitespace', True) # py25 compat
- lines = [] # type: List[unicode]
+ lines = [] # type: List[str]
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
@@ -291,7 +289,7 @@ class TextWrapper(textwrap.TextWrapper):
width = self.width - column_width(indent)
- if drop_whitespace and chunks[-1].strip() == '' and lines:
+ if self.drop_whitespace and chunks[-1].strip() == '' and lines:
del chunks[-1]
while chunks:
@@ -307,7 +305,7 @@ class TextWrapper(textwrap.TextWrapper):
if chunks and column_width(chunks[-1]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
- if drop_whitespace and cur_line and cur_line[-1].strip() == '':
+ if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
del cur_line[-1]
if cur_line:
@@ -316,7 +314,7 @@ class TextWrapper(textwrap.TextWrapper):
return lines
def _break_word(self, word, space_left):
- # type: (unicode, int) -> Tuple[unicode, unicode]
+ # type: (str, int) -> Tuple[str, str]
"""_break_word(word : string, space_left : int) -> (string, string)
Break line by unicode width instead of len(word).
@@ -329,16 +327,16 @@ class TextWrapper(textwrap.TextWrapper):
return word, ''
def _split(self, text):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
"""_split(text : string) -> [string]
Override original method that only split by 'wordsep_re'.
This '_split' split wide-characters into chunk by one character.
"""
def split(t):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
return super(TextWrapper, self)._split(t)
- chunks = [] # type: List[unicode]
+ chunks = [] # type: List[str]
for chunk in split(text):
for w, g in groupby(chunk, column_width):
if w == 1:
@@ -348,7 +346,7 @@ class TextWrapper(textwrap.TextWrapper):
return chunks
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
- # type: (List[unicode], List[unicode], int, int) -> None
+ # type: (List[str], List[str], int, int) -> None
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
@@ -370,7 +368,7 @@ STDINDENT = 3
def my_wrap(text, width=MAXWIDTH, **kwargs):
- # type: (unicode, int, Any) -> List[unicode]
+ # type: (str, int, Any) -> List[str]
w = TextWrapper(width=width, **kwargs)
return w.wrap(text)
@@ -411,7 +409,7 @@ class TextTranslator(SphinxTranslator):
self.sectionchars = self.config.text_sectionchars
self.add_secnumbers = self.config.text_add_secnumbers
self.secnumber_suffix = self.config.text_secnumber_suffix
- self.states = [[]] # type: List[List[Tuple[int, Union[unicode, List[unicode]]]]]
+ self.states = [[]] # type: List[List[Tuple[int, Union[str, List[str]]]]]
self.stateindent = [0]
self.list_counter = [] # type: List[int]
self.sectionlevel = 0
@@ -419,7 +417,7 @@ class TextTranslator(SphinxTranslator):
self.table = None # type: Table
def add_text(self, text):
- # type: (unicode) -> None
+ # type: (str) -> None
self.states[-1].append((-1, text))
def new_state(self, indent=STDINDENT):
@@ -428,12 +426,12 @@ class TextTranslator(SphinxTranslator):
self.stateindent.append(indent)
def end_state(self, wrap=True, end=[''], first=None):
- # type: (bool, List[unicode], unicode) -> None
+ # type: (bool, List[str], str) -> None
content = self.states.pop()
maxindent = sum(self.stateindent)
indent = self.stateindent.pop()
- result = [] # type: List[Tuple[int, List[unicode]]]
- toformat = [] # type: List[unicode]
+ result = [] # type: List[Tuple[int, List[str]]]
+ toformat = [] # type: List[str]
def do_format():
# type: () -> None
@@ -532,7 +530,7 @@ class TextTranslator(SphinxTranslator):
self.new_state(0)
def get_section_number_string(self, node):
- # type: (nodes.Element) -> unicode
+ # type: (nodes.Element) -> str
if isinstance(node.parent, nodes.section):
anchorname = '#' + node.parent['ids'][0]
numbers = self.builder.secnumbers.get(anchorname)
@@ -548,12 +546,12 @@ class TextTranslator(SphinxTranslator):
char = self._title_char
else:
char = '^'
- text = None # type: unicode
+ text = ''
text = ''.join(x[1] for x in self.states.pop() if x[0] == -1) # type: ignore
if self.add_secnumbers:
text = self.get_section_number_string(node) + text
self.stateindent.pop()
- title = ['', text, '%s' % (char * column_width(text)), ''] # type: List[unicode]
+ title = ['', text, '%s' % (char * column_width(text)), '']
if len(self.states) == 2 and len(self.states[-1]) == 0:
# remove an empty line before title if it is first section title in the document
title.pop(0)
@@ -1373,7 +1371,7 @@ class TextTranslator(SphinxTranslator):
raise NotImplementedError('Unknown node: ' + node.__class__.__name__)
def _make_depart_admonition(name): # type: ignore
- # type: (unicode) -> Callable[[TextTranslator, nodes.Element], None]
+ # type: (str) -> Callable[[TextTranslator, nodes.Element], None]
warnings.warn('TextTranslator._make_depart_admonition() is deprecated.',
RemovedInSphinx30Warning)
diff --git a/sphinx/writers/xml.py b/sphinx/writers/xml.py
index 28510c587..85ca083a7 100644
--- a/sphinx/writers/xml.py
+++ b/sphinx/writers/xml.py
@@ -15,7 +15,6 @@ if False:
# For type annotation
from typing import Any, Tuple # NOQA
from sphinx.builders import Builder # NOQA
- from sphinx.util.typing import unicode # NOQA
class XMLWriter(BaseXMLWriter):
@@ -42,7 +41,7 @@ class PseudoXMLWriter(BaseXMLWriter):
"""Formats this writer supports."""
config_section = 'pseudoxml writer'
- config_section_dependencies = ('writers',) # type: Tuple[unicode]
+ config_section_dependencies = ('writers',)
output = None
"""Final translated form of `document`."""
@@ -57,6 +56,6 @@ class PseudoXMLWriter(BaseXMLWriter):
self.output = self.document.pformat()
def supports(self, format):
- # type: (unicode) -> bool
+ # type: (str) -> bool
"""This writer supports all format-specific elements."""
return True
diff --git a/tests/roots/test-ext-autodoc/target/__init__.py b/tests/roots/test-ext-autodoc/target/__init__.py
index c97269d35..908411c22 100644
--- a/tests/roots/test-ext-autodoc/target/__init__.py
+++ b/tests/roots/test-ext-autodoc/target/__init__.py
@@ -1,8 +1,7 @@
# -*- coding: utf-8 -*-
import enum
-
-from six import StringIO, add_metaclass
+from io import StringIO
from sphinx.util import save_traceback # NOQA
@@ -45,9 +44,9 @@ class CustomDataDescriptorMeta(type):
"""Descriptor metaclass docstring."""
-@add_metaclass(CustomDataDescriptorMeta)
class CustomDataDescriptor2(CustomDataDescriptor):
"""Descriptor class with custom metaclass docstring."""
+ __metaclass__ = CustomDataDescriptorMeta
def _funky_classmethod(name, b, c, d, docstring=None):
diff --git a/tests/roots/test-root/autodoc_target.py b/tests/roots/test-root/autodoc_target.py
index 4f14afc03..6aaf9ee16 100644
--- a/tests/roots/test-root/autodoc_target.py
+++ b/tests/roots/test-root/autodoc_target.py
@@ -1,8 +1,7 @@
# -*- coding: utf-8 -*-
import enum
-
-from six import StringIO, add_metaclass
+from io import StringIO
__all__ = ['Class']
@@ -43,9 +42,9 @@ class CustomDataDescriptorMeta(type):
"""Descriptor metaclass docstring."""
-@add_metaclass(CustomDataDescriptorMeta)
class CustomDataDescriptor2(CustomDataDescriptor):
"""Descriptor class with custom metaclass docstring."""
+ __metaclass__ = CustomDataDescriptorMeta
def _funky_classmethod(name, b, c, d, docstring=None):
diff --git a/tests/test_autodoc.py b/tests/test_autodoc.py
index feaab19f4..4fd353dc1 100644
--- a/tests/test_autodoc.py
+++ b/tests/test_autodoc.py
@@ -1028,16 +1028,13 @@ def test_autodoc_member_order(app):
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autodoc_module_scope(app):
- def convert(s):
- return re.sub('<.*>', '<FILTERED>', s) # for py2/py3
-
app.env.temp_data['autodoc:module'] = 'target'
actual = do_autodoc(app, 'attribute', 'Class.mdocattr')
- assert list(map(convert, actual)) == [
+ assert list(actual) == [
u'',
u'.. py:attribute:: Class.mdocattr',
u' :module: target',
- u' :annotation: = <FILTERED>',
+ u' :annotation: = <_io.StringIO object>',
u'',
u' should be documented as well - süß',
u' '
@@ -1046,17 +1043,14 @@ def test_autodoc_module_scope(app):
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autodoc_class_scope(app):
- def convert(s):
- return re.sub('<.*>', '<FILTERED>', s) # for py2/py3
-
app.env.temp_data['autodoc:module'] = 'target'
app.env.temp_data['autodoc:class'] = 'Class'
actual = do_autodoc(app, 'attribute', 'mdocattr')
- assert list(map(convert, actual)) == [
+ assert list(actual) == [
u'',
u'.. py:attribute:: Class.mdocattr',
u' :module: target',
- u' :annotation: = <FILTERED>',
+ u' :annotation: = <_io.StringIO object>',
u'',
u' should be documented as well - süß',
u' '
diff --git a/tests/test_build_gettext.py b/tests/test_build_gettext.py
index ec09eaf21..345c3047d 100644
--- a/tests/test_build_gettext.py
+++ b/tests/test_build_gettext.py
@@ -8,7 +8,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import gettext
import os
diff --git a/tests/test_build_latex.py b/tests/test_build_latex.py
index c1506e4db..c8edf02ce 100644
--- a/tests/test_build_latex.py
+++ b/tests/test_build_latex.py
@@ -8,7 +8,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import os
import re
diff --git a/tests/test_build_linkcheck.py b/tests/test_build_linkcheck.py
index 1716a0c42..12ded8ecc 100644
--- a/tests/test_build_linkcheck.py
+++ b/tests/test_build_linkcheck.py
@@ -8,7 +8,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import pytest
diff --git a/tests/test_build_manpage.py b/tests/test_build_manpage.py
index 8a3777a30..1c8ebec70 100644
--- a/tests/test_build_manpage.py
+++ b/tests/test_build_manpage.py
@@ -8,7 +8,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import pytest
diff --git a/tests/test_build_texinfo.py b/tests/test_build_texinfo.py
index 63b9a69ba..90a744d4c 100644
--- a/tests/test_build_texinfo.py
+++ b/tests/test_build_texinfo.py
@@ -8,7 +8,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import os
import re
diff --git a/tests/test_ext_apidoc.py b/tests/test_ext_apidoc.py
index 3d1517929..dd39c8fd3 100644
--- a/tests/test_ext_apidoc.py
+++ b/tests/test_ext_apidoc.py
@@ -9,8 +9,6 @@
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
-
from collections import namedtuple
import pytest
diff --git a/tests/test_ext_autosummary.py b/tests/test_ext_autosummary.py
index e75fb71fd..069fdb722 100644
--- a/tests/test_ext_autosummary.py
+++ b/tests/test_ext_autosummary.py
@@ -9,8 +9,9 @@
:license: BSD, see LICENSE for details.
"""
+from io import StringIO
+
import pytest
-from six import StringIO
from sphinx.ext.autosummary import mangle_signature, import_by_name, extract_summary
from sphinx.testing.util import etree_parse
diff --git a/tests/test_intl.py b/tests/test_intl.py
index 942248cfc..2d82ac1ff 100644
--- a/tests/test_intl.py
+++ b/tests/test_intl.py
@@ -9,7 +9,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import os
import pickle
diff --git a/tests/test_io.py b/tests/test_io.py
index 1c8fee86b..2e81035dc 100644
--- a/tests/test_io.py
+++ b/tests/test_io.py
@@ -9,8 +9,9 @@
:license: BSD, see LICENSE for details.
"""
+from io import StringIO
+
import pytest
-from six import StringIO
from sphinx.io import SphinxRSTFileInput
diff --git a/tests/test_quickstart.py b/tests/test_quickstart.py
index 31533cff0..248b91d64 100644
--- a/tests/test_quickstart.py
+++ b/tests/test_quickstart.py
@@ -11,9 +11,10 @@
import sys
import time
+from io import StringIO
import pytest
-from six import text_type, StringIO
+from six import text_type
from sphinx import application
from sphinx.cmd import quickstart as qs
diff --git a/tests/test_util.py b/tests/test_util.py
index b07ee1229..d5305c0a6 100644
--- a/tests/test_util.py
+++ b/tests/test_util.py
@@ -15,8 +15,6 @@ import tempfile
import pytest
from mock import patch
-from six import PY2
-
import sphinx
from sphinx.errors import PycodeError
from sphinx.testing.util import strip_escseq
@@ -65,10 +63,7 @@ def test_display_chunk():
def test_get_module_source():
- if PY2:
- assert get_module_source('sphinx') == ('file', sphinx.__file__.replace('.pyc', '.py'))
- else:
- assert get_module_source('sphinx') == ('file', sphinx.__file__)
+ assert get_module_source('sphinx') == ('file', sphinx.__file__)
# failed to obtain source information from builtin modules
with pytest.raises(PycodeError):
diff --git a/tests/test_util_i18n.py b/tests/test_util_i18n.py
index 63496bccb..bcc6a2cd9 100644
--- a/tests/test_util_i18n.py
+++ b/tests/test_util_i18n.py
@@ -8,7 +8,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import datetime
import os
diff --git a/tests/test_util_images.py b/tests/test_util_images.py
index cc5562404..a7ecff371 100644
--- a/tests/test_util_images.py
+++ b/tests/test_util_images.py
@@ -8,7 +8,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import pytest
diff --git a/tests/test_util_logging.py b/tests/test_util_logging.py
index fa7921cd1..09d25b779 100644
--- a/tests/test_util_logging.py
+++ b/tests/test_util_logging.py
@@ -8,7 +8,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import codecs
import os
diff --git a/tests/test_writer_latex.py b/tests/test_writer_latex.py
index b0a84e944..e8e8cb191 100644
--- a/tests/test_writer_latex.py
+++ b/tests/test_writer_latex.py
@@ -8,7 +8,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import pytest